Commit a196d02e authored by Paul B Mahol

avfilter/af_aresample: switch to activate

parent 0e7fa8b3
@@ -32,6 +32,7 @@
 #include "libswresample/swresample.h"
 #include "avfilter.h"
 #include "audio.h"
+#include "filters.h"
 #include "internal.h"
 
 typedef struct AResampleContext {
@@ -41,6 +42,7 @@ typedef struct AResampleContext {
     struct SwrContext *swr;
     int64_t next_pts;
     int more_data;
+    int eof;
 } AResampleContext;
 
 static av_cold int preinit(AVFilterContext *ctx)
@@ -169,7 +171,8 @@ static int config_output(AVFilterLink *outlink)
 
 static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
 {
-    AResampleContext *aresample = inlink->dst->priv;
+    AVFilterContext *ctx = inlink->dst;
+    AResampleContext *aresample = ctx->priv;
     const int n_in  = insamplesref->nb_samples;
     int64_t delay;
     int n_out       = n_in * aresample->ratio + 32;
@@ -214,6 +217,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
     if (n_out <= 0) {
         av_frame_free(&outsamplesref);
         av_frame_free(&insamplesref);
+        ff_inlink_request_frame(inlink);
         return 0;
     }
 
@@ -260,8 +264,10 @@ static int flush_frame(AVFilterLink *outlink, int final, AVFrame **outsamplesref
 static int request_frame(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
+    AVFilterLink *inlink = ctx->inputs[0];
     AResampleContext *aresample = ctx->priv;
-    int ret;
+    int ret = 0, status;
+    int64_t pts;
 
     // First try to get data from the internal buffers
     if (aresample->more_data) {
@@ -273,19 +279,52 @@ static int request_frame(AVFilterLink *outlink)
     }
     aresample->more_data = 0;
 
+    if (!aresample->eof && ff_inlink_acknowledge_status(inlink, &status, &pts))
+        aresample->eof = 1;
+
     // Second request more data from the input
-    ret = ff_request_frame(ctx->inputs[0]);
+    if (!aresample->eof)
+        FF_FILTER_FORWARD_WANTED(outlink, inlink);
 
     // Third if we hit the end flush
-    if (ret == AVERROR_EOF) {
+    if (aresample->eof) {
         AVFrame *outsamplesref;
 
-        if ((ret = flush_frame(outlink, 1, &outsamplesref)) < 0)
+        if ((ret = flush_frame(outlink, 1, &outsamplesref)) < 0) {
+            if (ret == AVERROR_EOF) {
+                ff_outlink_set_status(outlink, AVERROR_EOF, aresample->next_pts);
+                return 0;
+            }
             return ret;
+        }
 
         return ff_filter_frame(outlink, outsamplesref);
     }
 
-    return ret;
+    ff_filter_set_ready(ctx, 100);
+    return 0;
+}
+
+static int activate(AVFilterContext *ctx)
+{
+    AResampleContext *aresample = ctx->priv;
+    AVFilterLink *inlink = ctx->inputs[0];
+    AVFilterLink *outlink = ctx->outputs[0];
+
+    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+    if (!aresample->eof && ff_inlink_queued_frames(inlink)) {
+        AVFrame *frame = NULL;
+        int ret;
+
+        ret = ff_inlink_consume_frame(inlink, &frame);
+        if (ret < 0)
+            return ret;
+        if (ret > 0)
+            return filter_frame(inlink, frame);
+    }
+
+    return request_frame(outlink);
 }
 
 static const AVClass *resample_child_class_iterate(void **iter)
@@ -322,7 +361,6 @@ static const AVFilterPad aresample_inputs[] = {
     {
         .name         = "default",
         .type         = AVMEDIA_TYPE_AUDIO,
-        .filter_frame = filter_frame,
     },
 };
@@ -330,7 +368,6 @@ static const AVFilterPad aresample_outputs[] = {
     {
         .name          = "default",
         .config_props  = config_output,
-        .request_frame = request_frame,
         .type          = AVMEDIA_TYPE_AUDIO,
     },
 };
@@ -339,6 +376,7 @@ const AVFilter ff_af_aresample = {
     .name          = "aresample",
     .description   = NULL_IF_CONFIG_SMALL("Resample audio data."),
     .preinit       = preinit,
+    .activate      = activate,
     .uninit        = uninit,
     .priv_size     = sizeof(AResampleContext),
     .priv_class    = &aresample_class,
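
Note: below is a minimal sketch of the generic activate() callback pattern that this commit moves aresample to. It is an illustration, not the commit's code: it assumes it is built inside the FFmpeg source tree (libavfilter's "filters.h" and "internal.h" are internal headers), it uses only helpers that appear in the diff above or in filters.h (ff_inlink_consume_frame, FF_FILTER_FORWARD_STATUS_BACK, FF_FILTER_FORWARD_STATUS, FF_FILTER_FORWARD_WANTED, FFERROR_NOT_READY), and handle_frame() is a hypothetical stand-in for the filter's own frame handler (filter_frame() in this commit). Unlike this sketch, the commit flushes the resampler's buffered output before signalling EOF downstream.

#include "avfilter.h"
#include "filters.h"
#include "internal.h"

/* Hypothetical per-frame handler; aresample uses its existing filter_frame(). */
static int handle_frame(AVFilterLink *inlink, AVFrame *frame);

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *frame = NULL;
    int ret;

    /* Propagate an EOF/error status posted on the output back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Consume one frame queued on the input, if any, and process it. */
    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return handle_frame(inlink, frame);

    /* No queued frame: forward input EOF to the output (the commit instead
       flushes swr first), otherwise forward the output's frame request
       upstream. Both macros return from activate() when they act. */
    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}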