Commit 2c77d915 authored by Paul B Mahol

avfilter/af_crossfeed: always return same number of samples with block processing

parent 52a14b85
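The block-processing path touched below buffers one block of samples, so when a block size is configured the filter gains one block of latency: the first frame it would produce is dropped, and every later output frame reuses the pts and nb_samples of the previous input frame. A minimal standalone sketch of that bookkeeping (plain C, not FFmpeg code; push_block, ctx_pts and ctx_nb are illustrative names, not part of the filter):

    /* Sketch of the one-block delay: each output reuses the previous
     * input's pts and sample count; the very first block is dropped. */
    #include <stdio.h>
    #include <stdint.h>

    #define NOPTS INT64_MIN           /* stand-in for AV_NOPTS_VALUE */

    static int64_t ctx_pts = NOPTS;   /* pts of the buffered block        */
    static int     ctx_nb  = 0;       /* sample count of the buffered block */

    /* Returns 1 if a frame should be emitted, 0 if it must be dropped. */
    static int push_block(int64_t in_pts, int in_nb,
                          int64_t *out_pts, int *out_nb)
    {
        int drop = (ctx_pts == NOPTS); /* first block: nothing buffered yet */

        *out_pts = ctx_pts;            /* output carries the previous block */
        *out_nb  = ctx_nb;
        ctx_pts  = in_pts;             /* stash current block for next call */
        ctx_nb   = in_nb;

        return !drop;
    }

    int main(void)
    {
        const int64_t pts_in[] = { 0, 1024, 2048, 3072 };

        for (int i = 0; i < 4; i++) {
            int64_t opts;
            int onb;

            if (push_block(pts_in[i], 1024, &opts, &onb))
                printf("emit frame: pts=%lld nb_samples=%d\n", (long long)opts, onb);
            else
                printf("drop first block (one block of latency)\n");
        }
        return 0;
    }

Running it prints one "drop" line followed by frames whose pts lag the input by exactly one block, which is the behaviour the commit title describes.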
--- a/libavfilter/af_crossfeed.c
+++ b/libavfilter/af_crossfeed.c
@@ -40,6 +40,9 @@ typedef struct CrossfeedContext {
     double w1, w2;
 
+    int64_t pts;
+    int nb_samples;
+
     double *mid;
     double *side[3];
 } CrossfeedContext;
@@ -126,7 +129,7 @@ static void filter_samples(double *dst, const double *src,
     *sw2 = w2;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in, int eof)
 {
     AVFilterContext *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
@@ -140,12 +143,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     const double a1 = -s->a1;
     const double a2 = -s->a2;
     AVFrame *out;
+    int drop = 0;
     double *dst;
 
-    if (av_frame_is_writable(in)) {
+    if (av_frame_is_writable(in) && s->block_samples == 0) {
         out = in;
     } else {
-        out = ff_get_audio_buffer(outlink, in->nb_samples);
+        out = ff_get_audio_buffer(outlink, s->block_samples > 0 ? s->block_samples : in->nb_samples);
         if (!out) {
             av_frame_free(&in);
             return AVERROR(ENOMEM);
@@ -154,6 +158,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     }
 
     dst = (double *)out->data[0];
+
+    if (s->block_samples > 0 && s->pts == AV_NOPTS_VALUE)
+        drop = 1;
     if (s->block_samples == 0) {
         double w1 = s->w1;
         double w2 = s->w2;
@@ -177,6 +184,20 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 
         s->w1 = w1;
         s->w2 = w2;
+    } else if (eof) {
+        const double *src = (const double *)in->data[0];
+        double *ssrc = s->side[1] + s->block_samples;
+        double *msrc = s->mid;
+
+        for (int n = 0; n < out->nb_samples; n++, src += 2, dst += 2) {
+            if (ctx->is_disabled) {
+                dst[0] = src[0];
+                dst[1] = src[1];
+            } else {
+                dst[0] = (msrc[n] + ssrc[n]) * level_out;
+                dst[1] = (msrc[n] - ssrc[n]) * level_out;
+            }
+        }
     } else {
         double *mdst = s->mid + s->block_samples;
         double *sdst = s->side[0] + s->block_samples;
@@ -209,7 +230,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
                        b0, b1, b2, a1, a2,
                        &w1, &w2);
-        reverse_samples(s->side[1], s->side[2] + s->block_samples, s->block_samples);
+        reverse_samples(s->side[1], s->side[2], s->block_samples * 2);
 
         src = (const double *)in->data[0];
         ssrc = s->side[1];
@@ -229,9 +250,25 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
                s->block_samples * sizeof(*s->side[0]));
     }
 
+    if (s->block_samples > 0) {
+        int nb_samples = in->nb_samples;
+        int64_t pts = in->pts;
+
+        out->pts = s->pts;
+        out->nb_samples = s->nb_samples;
+        s->pts = pts;
+        s->nb_samples = nb_samples;
+    }
+
     if (out != in)
         av_frame_free(&in);
-    return ff_filter_frame(outlink, out);
+    if (!drop) {
+        return ff_filter_frame(outlink, out);
+    } else {
+        av_frame_free(&out);
+        ff_filter_set_ready(ctx, 10);
+        return 0;
+    }
 }
 
 static int activate(AVFilterContext *ctx)
@@ -240,6 +277,8 @@ static int activate(AVFilterContext *ctx)
     AVFilterLink *outlink = ctx->outputs[0];
     CrossfeedContext *s = ctx->priv;
     AVFrame *in = NULL;
+    int64_t pts;
+    int status;
     int ret;
 
     FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
@@ -252,14 +291,27 @@ static int activate(AVFilterContext *ctx)
     if (ret < 0)
         return ret;
     if (ret > 0)
-        return filter_frame(inlink, in);
+        return filter_frame(inlink, in, 0);
 
     if (s->block_samples > 0 && ff_inlink_queued_samples(inlink) >= s->block_samples) {
         ff_filter_set_ready(ctx, 10);
        return 0;
     }
 
-    FF_FILTER_FORWARD_STATUS(inlink, outlink);
+    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+        if (s->block_samples > 0) {
+            AVFrame *in = ff_get_audio_buffer(outlink, s->block_samples);
+            if (!in)
+                return AVERROR(ENOMEM);
+
+            ret = filter_frame(inlink, in, 1);
+        }
+
+        ff_outlink_set_status(outlink, status, pts);
+
+        return ret;
+    }
+
     FF_FILTER_FORWARD_WANTED(outlink, inlink);
 
     return FFERROR_NOT_READY;
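The block path guarded by block_samples is the one exercised when the filter is given a block size; assuming that field is driven by the crossfeed filter's block_size option (an assumption here, not stated in this diff), the changed behaviour can be observed with something like:

    ffmpeg -i input.wav -af "crossfeed=strength=0.6:block_size=8192" output.wav

After this change every output frame mirrors the sample count and timestamp of an input frame, delayed by one block, and the buffered remainder is flushed in the eof branch when the input status is acknowledged in activate().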