Commit 550a36be authored by Stefan Westerfeld

avfilter/asubprocess: split filter frame function into read/write part

Signed-off-by: Stefan Westerfeld <stefan@space.twc.de>
parent 284da7ab
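
What the split looks like in broad strokes (an illustrative, self-contained sketch, not the filter code itself: the Pipe struct, the sample counts in main() and the helper bodies below are invented for this example; only the NB_BUFFER_SAMPLES block size and the full-block-or-EOF-remainder condition are taken from the diff): input frames are handed to the subprocess in a write step, and a separate read step emits an output frame only once a full block is buffered, or whatever remains after EOF.

/* Toy model of the write/read split. "Pipe" stands in for the subprocess
 * connection; write_frame() mirrors the sp_write() side, try_read_frame()
 * mirrors the full-block-or-EOF-tail condition of the real try_read_frame(). */
#include <stdio.h>

#define NB_BUFFER_SAMPLES 4096

typedef struct Pipe {
    int pending; /* samples buffered on the read side */
    int eof;     /* set once no more input will arrive */
} Pipe;

/* write part: feed one input frame to the subprocess */
static void write_frame(Pipe *p, int nb_samples)
{
    p->pending += nb_samples;
}

/* read part: emit a frame only when a full block is ready, or flush the
 * remainder at EOF; returns the number of samples emitted (0 = not yet) */
static int try_read_frame(Pipe *p)
{
    int avail = p->pending;
    if (avail > NB_BUFFER_SAMPLES)
        avail = NB_BUFFER_SAMPLES;
    if (avail == NB_BUFFER_SAMPLES || (p->eof && avail > 0)) {
        p->pending -= avail;
        return avail;
    }
    return 0;
}

int main(void)
{
    Pipe p = { 0, 0 };
    const int frames[] = { 3000, 3000, 3000 };

    for (int i = 0; i < 3; i++) {
        try_read_frame(&p);                 /* drain before consuming more input */
        write_frame(&p, frames[i]);
        printf("emitted %d samples\n", try_read_frame(&p));
    }
    p.eof = 1;
    printf("flushed %d samples at EOF\n", try_read_frame(&p));
    return 0;
}

Built with a plain C compiler this prints 0, 4096, 4096 and finally an EOF flush of 808 samples, which is roughly the cadence the new write_frame()/try_read_frame() pair is meant to produce inside activate().
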
@@ -212,7 +212,6 @@ typedef struct AndioWMarkContext {
     int out_bit_depth;
     SP *sp;
     void *sample_buffer;
-    int nb_samples;
     int eof;
 } ASubProcessContext;
@@ -252,7 +251,7 @@ static av_cold void uninit(AVFilterContext *ctx)
 #endif
 }
 
-#define NB_BUFFER_SAMPLES 1024
+#define NB_BUFFER_SAMPLES 4096
 
 static void write_wav_header (ASubProcessContext *s, int nb_channels, int sample_rate)
 {
@@ -304,7 +303,6 @@ static int config_input(AVFilterLink *inlink)
     s->nb_samples = rubberband_get_samples_required(s->rbs);
     s->first_pts = AV_NOPTS_VALUE;
 #endif
-    s->nb_samples = 1024;
     s->state = STATE_EXPECT_RIFF;
     s->sample_buffer = av_malloc (NB_BUFFER_SAMPLES * inlink->ch_layout.nb_channels * 4 /* max 32 bit */); // TODO: leak
     return 0;
@@ -389,14 +387,9 @@ static void write_samples(ASubProcessContext *s, int32_t *data, int count)
     sp_write(s->sp, s->sample_buffer, sample_size * count);
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+static int write_frame(ASubProcessContext *s, AVFilterLink *inlink, AVFrame *in)
 {
-    AVFilterContext *ctx = inlink->dst;
-    ASubProcessContext *s = ctx->priv;
-    AVFilterLink *outlink = ctx->outputs[0];
-    AVFrame *out;
-    int ret = 0, nb_samples;
-    bool gen_output;
+    int ret = 0;
 
     write_samples(s, (int32_t *)in->data[0], in->nb_samples * inlink->ch_layout.nb_channels);
     if (s->eof)
@@ -405,6 +398,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
            ;
        printf ("eof\n");
    }
+    av_frame_free(&in);
+    return ret;
+}
+
+static int try_read_frame(ASubProcessContext *s, AVFilterLink *outlink)
+{
+    AVFrame *out;
+    int ret;
+
     if (s->state == STATE_EXPECT_RIFF && sp_can_read (s->sp) >= 12)
     {
         char buffer[12];
@@ -442,56 +444,27 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         }
         s->state = STATE_EXPECT_CHUNK;
     }
-    gen_output = s->state == STATE_IN_DATA_CHUNK;
-    while (gen_output)
+    if (s->state == STATE_IN_DATA_CHUNK)
     {
         int sample_size = s->out_bit_depth / 8;
-        int avail = sp_can_read (s->sp) / sample_size / inlink->ch_layout.nb_channels;
+        int avail = sp_can_read (s->sp) / sample_size / outlink->ch_layout.nb_channels;
         if (avail > NB_BUFFER_SAMPLES)
             avail = NB_BUFFER_SAMPLES;
-        if (avail > 0)
+        if (avail == NB_BUFFER_SAMPLES || (s->eof && avail > 0))
         {
             out = ff_get_audio_buffer(outlink, avail);
             if (!out) {
-                av_frame_free(&in);
                 return AVERROR(ENOMEM);
             }
-            read_samples(s, (int32_t *)out->data[0], avail * inlink->ch_layout.nb_channels);
+            read_samples(s, (int32_t *)out->data[0], avail * outlink->ch_layout.nb_channels);
             ret = ff_filter_frame(outlink, out);
             if (ret < 0)
                 return ret;
+            return avail;
         }
-        else {
-            gen_output = false;
-        }
     }
-
-#if 0
-    if (s->first_pts == AV_NOPTS_VALUE)
-        s->first_pts = in->pts;
-
-    rubberband_process(s->rbs, (const float *const *)in->extended_data, in->nb_samples, s->eof);
-    s->nb_samples_in += in->nb_samples;
-
-    nb_samples = rubberband_available(s->rbs);
-    if (nb_samples > 0) {
-        out = ff_get_audio_buffer(outlink, nb_samples);
-        if (!out) {
-            av_frame_free(&in);
-            return AVERROR(ENOMEM);
-        }
-        out->pts = s->first_pts + av_rescale_q(s->nb_samples_out,
-                                               (AVRational){ 1, outlink->sample_rate },
-                                               outlink->time_base);
-        s->last_pts = out->pts;
-        nb_samples = rubberband_retrieve(s->rbs, (float *const *)out->extended_data, nb_samples);
-        out->nb_samples = nb_samples;
-        ret = ff_filter_frame(outlink, out);
-        s->nb_samples_out += nb_samples;
-    }
-#endif
-
-    av_frame_free(&in);
-    if (ff_inlink_queued_samples(inlink) >= 0)
-        ff_filter_set_ready(ctx, 100);
-
-    return ret < 0 ? ret : nb_samples;
+    return 0;
 }
 
 static int activate(AVFilterContext *ctx)
@@ -505,7 +478,11 @@ static int activate(AVFilterContext *ctx)
     FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
 
-    ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
+    ret = try_read_frame(s, outlink);
+    if (ret != 0)
+        return ret;
+
+    ret = ff_inlink_consume_samples(inlink, NB_BUFFER_SAMPLES, NB_BUFFER_SAMPLES, &in);
     //ret = ff_inlink_consume_frame(inlink, &in);
     if (ff_inlink_acknowledge_status(inlink, &status, &pts))
         s->eof |= status == AVERROR_EOF;
@@ -513,10 +490,17 @@ static int activate(AVFilterContext *ctx)
     if (ret < 0)
         return ret;
 
     if (ret > 0) {
-        ret = filter_frame(inlink, in);
+        ret = write_frame(s, inlink, in);
+        if (ff_inlink_queued_samples(inlink) >= 0)
+            ff_filter_set_ready(ctx, 100);
+        if (ret != 0)
+            return ret;
     }
+    ret = try_read_frame(s, outlink);
+    if (ret != 0)
+        return ret;
 
     if (s->eof) {
         // s->last_pts
@@ -529,24 +513,6 @@ static int activate(AVFilterContext *ctx)
     return FFERROR_NOT_READY;
 }
 
-static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
-                           char *res, int res_len, int flags)
-{
-    printf ("TODO: process\n");
-#if 0
-    RubberBandContext *s = ctx->priv;
-    int ret;
-
-    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
-    if (ret < 0)
-        return ret;
-
-    rubberband_set_time_ratio(s->rbs, 1. / s->tempo);
-    rubberband_set_pitch_scale(s->rbs, s->pitch);
-    s->nb_samples = rubberband_get_samples_required(s->rbs);
-#endif
-    return 0;
-}
-
 static const AVFilterPad asubprocess_inputs[] = {
     {
         .name = "default",
@@ -566,5 +532,4 @@ const AVFilter ff_af_asubprocess = {
     FILTER_INPUTS(asubprocess_inputs),
     FILTER_OUTPUTS(ff_audio_default_filterpad),
     FILTER_SINGLE_SAMPLEFMT(AV_SAMPLE_FMT_S32),
-    .process_command = process_command,
 };