Commit 7bd3b737 authored by Andreas Rheinhardt

avcodec/vp9: Switch to ProgressFrames

This already fixes a race in the vp9-encparams test. In this test,
side data is added to the current frame after it has been decoded
(and therefore after ff_thread_finish_setup() has been called).
Yet the update_thread_context callback called ff_thread_ref_frame()
and therefore av_frame_ref() with this frame as the source frame,
so the ensuing read was unsynchronised with the addition of the
side data, i.e. there was a data race.

By switching to the ProgressFrame API the implicit av_frame_ref()
is removed and the race is fixed, except when such a frame is later
reused via show-existing-frame, which uses an explicit av_frame_ref().
The vp9-encparams test does not exercise that path, so this commit
already fixes all the races in this test.
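
To make the difference concrete, here is a minimal sketch (not code from
this commit) of what taking such a reference amounts to under the two
APIs. ff_thread_ref_frame() and av_frame_ref() are the calls named above;
ff_progress_frame_replace() and its dst/src signature are an assumption
based on the ProgressFrame API, of which only ff_progress_frame_await()
appears verbatim in the diff below.

#include "threadframe.h"   /* ThreadFrame, ff_thread_ref_frame() */
#include "progressframe.h" /* ProgressFrame; see the assumption noted above */

/* Old model: ff_thread_ref_frame() ends up in av_frame_ref(), which reads
 * and copies the source frame's metadata, including its side-data list.
 * If side data is attached after ff_thread_finish_setup(), that read is
 * unsynchronised -- the race described above. */
static int ref_frame_threadframe(ThreadFrame *dst, const ThreadFrame *src)
{
    return ff_thread_ref_frame(dst, src);
}

/* New model: the AVFrame itself sits behind a reference-counted handle,
 * so taking another reference is only a refcount operation and never
 * touches the frame's side-data list.
 * (ff_progress_frame_replace() is assumed here, not taken from this diff.) */
static void ref_frame_progressframe(ProgressFrame *dst, const ProgressFrame *src)
{
    ff_progress_frame_replace(dst, src);
}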

This decoder kept multiple references to the same ThreadFrames
in the same context and therefore performed lots of implicit
av_frame_ref() calls even when decoding single-threaded. This
incurred lots of small allocations: when decoding an ordinary 10s
video in single-threaded mode, the number of allocations reported
by Valgrind went down from 57,814 to 20,908; with 10 threads it
went down from 84,223 to 21,901.
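
As a rough illustration of where those small allocations came from, below
is a hypothetical sketch of a post-decode update of the eight-entry
reference map (the decoder's actual frame-management code is in the
collapsed part of this diff). The next_refs[], refs[] and frames[CUR_FRAME]
fields and their ProgressFrame types are taken from the headers shown
below; refreshrefmask and ff_progress_frame_replace() are assumptions not
visible in this excerpt.

#include "vp9dec.h"

/* Sketch only: every slot update used to go through ff_thread_ref_frame(),
 * i.e. one av_frame_ref() -- with its per-call small allocations -- per
 * slot and per frame, even when single-threaded.  With ProgressFrames the
 * same update is a refcount operation on a shared frame. */
static void update_ref_map_sketch(VP9Context *s)
{
    for (int i = 0; i < 8; i++) {
        if (s->s.h.refreshrefmask & (1 << i)) {
            /* slot is refreshed from the frame just decoded */
            ff_progress_frame_replace(&s->next_refs[i],
                                      &s->s.frames[CUR_FRAME].tf);
        } else {
            /* slot keeps the previously referenced frame */
            ff_progress_frame_replace(&s->next_refs[i], &s->s.refs[i]);
        }
    }
}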
Reviewed-by: Anton Khirnov <anton@khirnov.net>
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
parent 444bd353
@@ -79,7 +79,7 @@ int ff_dxva2_vp9_fill_picture_parameters(const AVCodecContext *avctx, AVDXVACont
 pp->Reserved8Bits = 0;
 for (i = 0; i < 8; i++) {
-if (h->refs[i].f->buf[0]) {
+if (h->refs[i].f) {
 fill_picture_entry(&pp->ref_frame_map[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[i].f, 0), 0);
 pp->ref_frame_coded_width[i] = h->refs[i].f->width;
 pp->ref_frame_coded_height[i] = h->refs[i].f->height;
@@ -89,7 +89,7 @@ int ff_dxva2_vp9_fill_picture_parameters(const AVCodecContext *avctx, AVDXVACont
 for (i = 0; i < 3; i++) {
 uint8_t refidx = h->h.refidx[i];
-if (h->refs[refidx].f->buf[0])
+if (h->refs[refidx].f)
 fill_picture_entry(&pp->frame_refs[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[refidx].f, 0), 0);
 else
 pp->frame_refs[i].bPicEntry = 0xFF;
@@ -100,7 +100,7 @@ static int vaapi_vp9_start_frame(AVCodecContext *avctx,
 }
 for (i = 0; i < 8; i++) {
-if (h->refs[i].f->buf[0])
+if (h->refs[i].f)
 pic_param.reference_frames[i] = ff_vaapi_get_surface_id(h->refs[i].f);
 else
 pic_param.reference_frames[i] = VA_INVALID_ID;
This diff is collapsed.
@@ -36,7 +36,7 @@ static void FN(inter_pred)(VP9TileData *td)
 const VP9Context *s = td->s;
 VP9Block *b = td->b;
 int row = td->row, col = td->col;
-const ThreadFrame *tref1 = &s->s.refs[s->s.h.refidx[b->ref[0]]], *tref2;
+const ProgressFrame *tref1 = &s->s.refs[s->s.h.refidx[b->ref[0]]], *tref2;
 const AVFrame *ref1 = tref1->f, *ref2;
 int w1 = ref1->width, h1 = ref1->height, w2, h2;
 ptrdiff_t ls_y = td->y_stride, ls_uv = td->uv_stride;
@@ -22,8 +22,9 @@
 */
 #include "libavutil/avassert.h"
 #include "libavutil/frame.h"
-#include "threadframe.h"
+#include "progressframe.h"
 #include "vp89_rac.h"
 #include "vp9.h"
 #include "vp9data.h"
@@ -113,7 +114,7 @@ static void decode_mode(VP9TileData *td)
 uint8_t *refsegmap = s->s.frames[REF_FRAME_SEGMAP].segmentation_map;
 if (!s->s.frames[REF_FRAME_SEGMAP].uses_2pass)
-ff_thread_await_progress(&s->s.frames[REF_FRAME_SEGMAP].tf, row >> 3, 0);
+ff_progress_frame_await(&s->s.frames[REF_FRAME_SEGMAP].tf, row >> 3);
 for (y = 0; y < h4; y++) {
 int idx_base = (y + row) * 8 * s->sb_cols + col;
 for (x = 0; x < w4; x++)
@@ -29,8 +29,8 @@
#include <stdatomic.h>
#include "libavutil/mem_internal.h"
#include "libavutil/pixfmt.h"
#include "libavutil/thread.h"
#include "libavutil/internal.h"
#include "get_bits.h"
#include "videodsp.h"
@@ -120,7 +120,7 @@ typedef struct VP9Context {
 int w, h;
 enum AVPixelFormat pix_fmt, last_fmt, gf_fmt;
 unsigned sb_cols, sb_rows, rows, cols;
-ThreadFrame next_refs[8];
+ProgressFrame next_refs[8];
 struct {
 uint8_t lim_lut[64];
@@ -245,7 +245,7 @@ void ff_vp9_decode_block(VP9TileData *td, int row, int col,
 VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
 enum BlockLevel bl, enum BlockPartition bp);
-void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl,
+void ff_vp9_loopfilter_sb(struct AVCodecContext *avctx, VP9Filter *lflvl,
 int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff);
 void ff_vp9_intra_recon_8bpp(VP9TileData *td,
@@ -21,6 +21,7 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
+#include "avcodec.h"
 #include "vp9dec.h"
 static av_always_inline void filter_plane_cols(VP9Context *s, int col, int ss_h, int ss_v,
@@ -21,7 +21,7 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
-#include "threadframe.h"
+#include "progressframe.h"
 #include "vp89_rac.h"
 #include "vp9data.h"
 #include "vp9dec.h"
@@ -175,7 +175,7 @@ static void find_ref_mvs(VP9TileData *td,
 VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];
 if (!s->s.frames[REF_FRAME_MVPAIR].uses_2pass)
-ff_thread_await_progress(&s->s.frames[REF_FRAME_MVPAIR].tf, row >> 3, 0);
+ff_progress_frame_await(&s->s.frames[REF_FRAME_MVPAIR].tf, row >> 3);
 if (mv->ref[0] == ref)
 RETURN_MV(mv->mv[0]);
 else if (mv->ref[1] == ref)
@@ -22,9 +22,10 @@
 */
 #include "libavutil/avassert.h"
 #include "libavutil/frame.h"
 #include "libavutil/mem_internal.h"
-#include "threadframe.h"
+#include "progressframe.h"
 #include "videodsp.h"
 #include "vp9data.h"
 #include "vp9dec.h"
@@ -298,7 +299,7 @@ void ff_vp9_intra_recon_16bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off
 static av_always_inline void mc_luma_unscaled(VP9TileData *td, const vp9_mc_func (*mc)[2],
 uint8_t *dst, ptrdiff_t dst_stride,
 const uint8_t *ref, ptrdiff_t ref_stride,
-const ThreadFrame *ref_frame,
+const ProgressFrame *ref_frame,
 ptrdiff_t y, ptrdiff_t x, const VP9mv *mv,
 int bw, int bh, int w, int h, int bytesperpixel)
 {
@@ -314,7 +315,7 @@ static av_always_inline void mc_luma_unscaled(VP9TileData *td, const vp9_mc_func
 // we use +7 because the last 7 pixels of each sbrow can be changed in
 // the longest loopfilter of the next sbrow
 th = (y + bh + 4 * !!my + 7) >> 6;
-ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
+ff_progress_frame_await(ref_frame, FFMAX(th, 0));
 // The arm/aarch64 _hv filters read one more row than what actually is
 // needed, so switch to emulated edge one pixel sooner vertically
 // (!!my * 5) than horizontally (!!mx * 4).
@@ -336,7 +337,7 @@ static av_always_inline void mc_chroma_unscaled(VP9TileData *td, const vp9_mc_fu
 ptrdiff_t dst_stride,
 const uint8_t *ref_u, ptrdiff_t src_stride_u,
 const uint8_t *ref_v, ptrdiff_t src_stride_v,
-const ThreadFrame *ref_frame,
+const ProgressFrame *ref_frame,
 ptrdiff_t y, ptrdiff_t x, const VP9mv *mv,
 int bw, int bh, int w, int h, int bytesperpixel)
 {
@@ -353,7 +354,7 @@ static av_always_inline void mc_chroma_unscaled(VP9TileData *td, const vp9_mc_fu
 // we use +7 because the last 7 pixels of each sbrow can be changed in
 // the longest loopfilter of the next sbrow
 th = (y + bh + 4 * !!my + 7) >> (6 - s->ss_v);
-ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
+ff_progress_frame_await(ref_frame, FFMAX(th, 0));
 // The arm/aarch64 _hv filters read one more row than what actually is
 // needed, so switch to emulated edge one pixel sooner vertically
 // (!!my * 5) than horizontally (!!mx * 4).
@@ -407,7 +408,7 @@ static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func
 const vp9_mc_func (*mc)[2],
 uint8_t *dst, ptrdiff_t dst_stride,
 const uint8_t *ref, ptrdiff_t ref_stride,
-const ThreadFrame *ref_frame,
+const ProgressFrame *ref_frame,
 ptrdiff_t y, ptrdiff_t x, const VP9mv *in_mv,
 int px, int py, int pw, int ph,
 int bw, int bh, int w, int h, int bytesperpixel,
@@ -444,7 +445,7 @@ static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func
 // we use +7 because the last 7 pixels of each sbrow can be changed in
 // the longest loopfilter of the next sbrow
 th = (y + refbh_m1 + 4 + 7) >> 6;
-ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
+ff_progress_frame_await(ref_frame, FFMAX(th, 0));
 // The arm/aarch64 _hv filters read one more row than what actually is
 // needed, so switch to emulated edge one pixel sooner vertically
 // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
@@ -467,7 +468,7 @@ static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_fun
 ptrdiff_t dst_stride,
 const uint8_t *ref_u, ptrdiff_t src_stride_u,
 const uint8_t *ref_v, ptrdiff_t src_stride_v,
-const ThreadFrame *ref_frame,
+const ProgressFrame *ref_frame,
 ptrdiff_t y, ptrdiff_t x, const VP9mv *in_mv,
 int px, int py, int pw, int ph,
 int bw, int bh, int w, int h, int bytesperpixel,
@@ -514,7 +515,7 @@ static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_fun
 // we use +7 because the last 7 pixels of each sbrow can be changed in
 // the longest loopfilter of the next sbrow
 th = (y + refbh_m1 + 4 + 7) >> (6 - s->ss_v);
-ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
+ff_progress_frame_await(ref_frame, FFMAX(th, 0));
 // The arm/aarch64 _hv filters read one more row than what actually is
 // needed, so switch to emulated edge one pixel sooner vertically
 // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
@@ -29,8 +29,8 @@
 #include "libavutil/mem_internal.h"
+#include "progressframe.h"
 #include "vp9.h"
-#include "threadframe.h"
 enum BlockPartition {
 PARTITION_NONE, // [ ] <-.
@@ -63,7 +63,7 @@ typedef struct VP9mvrefPair {
 } VP9mvrefPair;
 typedef struct VP9Frame {
-ThreadFrame tf;
+ProgressFrame tf;
 void *extradata; ///< RefStruct reference
 uint8_t *segmentation_map;
 VP9mvrefPair *mv;
@@ -164,7 +164,7 @@ typedef struct VP9BitstreamHeader {
 typedef struct VP9SharedContext {
 VP9BitstreamHeader h;
-ThreadFrame refs[8];
+ProgressFrame refs[8];
 #define CUR_FRAME 0
 #define REF_FRAME_MVPAIR 1
 #define REF_FRAME_SEGMAP 2