Commit b429c86d authored by Shiyou Yin, committed by Michael Niedermayer

avcodec/mips: [loongson] optimize put_hevc_qpel_h_8 with mmi.

Optimize put_hevc_qpel_h_8 with mmi for width = 4/8/12/16/24/32/48/64.
This optimization improved HEVC decoding performance by 2% (2.39x to 2.44x, tested on Loongson 3A3000).
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
parent dceefb2b
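
For context (not part of the diff): the routine being accelerated is the horizontal quarter-pel interpolation, an 8-tap FIR over src[x-3]..src[x+4] with taps taken from ff_hevc_qpel_filters[mx - 1]. A minimal scalar sketch of the 8-bit case, assuming the MAX_PB_SIZE dst row pitch used by hevcdsp_template.c (the function name is illustrative, not FFmpeg's):

    /* Scalar sketch of what put_hevc_qpel_h computes at bit depth 8
     * (reference only, not the FFmpeg implementation): each output is an
     * 8-tap FIR over src[x-3]..src[x+4], results kept at 16-bit precision,
     * dst rows spaced MAX_PB_SIZE elements apart. */
    static void put_hevc_qpel_h_8_sketch(int16_t *dst, const uint8_t *src,
                                         ptrdiff_t srcstride, int height,
                                         intptr_t mx, int width)
    {
        const int8_t *filter = ff_hevc_qpel_filters[mx - 1];
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                int sum = 0;
                for (int k = 0; k < 8; k++)
                    sum += filter[k] * src[x + k - 3];
                dst[x] = sum;          /* >> (BIT_DEPTH - 8) is 0 at 8-bit */
            }
            src += srcstride;
            dst += MAX_PB_SIZE;        /* dst row pitch */
        }
    }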
@@ -25,6 +25,15 @@ static av_cold void hevc_dsp_init_mmi(HEVCDSPContext *c,
const int bit_depth)
{
if (8 == bit_depth) {
c->put_hevc_qpel[1][0][1] = ff_hevc_put_hevc_qpel_h4_8_mmi;
c->put_hevc_qpel[3][0][1] = ff_hevc_put_hevc_qpel_h8_8_mmi;
c->put_hevc_qpel[4][0][1] = ff_hevc_put_hevc_qpel_h12_8_mmi;
c->put_hevc_qpel[5][0][1] = ff_hevc_put_hevc_qpel_h16_8_mmi;
c->put_hevc_qpel[6][0][1] = ff_hevc_put_hevc_qpel_h24_8_mmi;
c->put_hevc_qpel[7][0][1] = ff_hevc_put_hevc_qpel_h32_8_mmi;
c->put_hevc_qpel[8][0][1] = ff_hevc_put_hevc_qpel_h48_8_mmi;
c->put_hevc_qpel[9][0][1] = ff_hevc_put_hevc_qpel_h64_8_mmi;
c->put_hevc_qpel[1][1][1] = ff_hevc_put_hevc_qpel_hv4_8_mmi;
c->put_hevc_qpel[3][1][1] = ff_hevc_put_hevc_qpel_hv8_8_mmi;
c->put_hevc_qpel[4][1][1] = ff_hevc_put_hevc_qpel_hv12_8_mmi;
......
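
A note on the indices used above (my reading of hevcdec.c, not stated in the patch): the first index is the width class from ff_hevc_pel_weight (4->1, 8->3, 12->4, 16->5, 24->6, 32->7, 48->8, 64->9), the second is the vertical and the third the horizontal fractional-sample flag, so [.][0][1] is exactly the horizontal-only case added here. Roughly, the decoder dispatches like this (paraphrased, variable names illustrative):

    /* Paraphrased dispatch from hevcdec.c (sketch): idx comes from the block
     * width; !!my / !!mx select the vertical / horizontal filter variants. */
    int idx = ff_hevc_pel_weight[block_w];      /* 4 -> 1, 8 -> 3, ... 64 -> 9 */
    s->hevcdsp.put_hevc_qpel[idx][!!my][!!mx](tmp, src, srcstride,
                                              block_h, mx, my, block_w);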
@@ -488,6 +488,15 @@ void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_##TYPE(int16_t *dst, \
intptr_t mx, \
intptr_t my, \
int width)
L_MC(qpel, h, 4, mmi);
L_MC(qpel, h, 8, mmi);
L_MC(qpel, h, 12, mmi);
L_MC(qpel, h, 16, mmi);
L_MC(qpel, h, 24, mmi);
L_MC(qpel, h, 32, mmi);
L_MC(qpel, h, 48, mmi);
L_MC(qpel, h, 64, mmi);
L_MC(qpel, hv, 4, mmi);
L_MC(qpel, hv, 8, mmi);
L_MC(qpel, hv, 12, mmi);
......
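
For reference, each L_MC(qpel, h, W, mmi) line above expands to a prototype of the new MMI function, matching the definitions added in hevcdsp_mmi.c below, e.g. for W = 4:

    void ff_hevc_put_hevc_qpel_h4_8_mmi(int16_t *dst, uint8_t *_src,
                                        ptrdiff_t _srcstride, int height,
                                        intptr_t mx, intptr_t my, int width);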
@@ -23,6 +23,103 @@
#include "libavcodec/mips/hevcdsp_mips.h"
#include "libavutil/mips/mmiutils.h"
#define PUT_HEVC_QPEL_H(w, x_step, src_step, dst_step) \
void ff_hevc_put_hevc_qpel_h##w##_8_mmi(int16_t *dst, uint8_t *_src, \
ptrdiff_t _srcstride, \
int height, intptr_t mx, \
intptr_t my, int width) \
{ \
int x, y; \
pixel *src = (pixel*)_src - 3; \
ptrdiff_t srcstride = _srcstride / sizeof(pixel); \
uint64_t ftmp[15]; \
uint64_t rtmp[1]; \
const int8_t *filter = ff_hevc_qpel_filters[mx - 1]; \
\
x = x_step; \
y = height; \
__asm__ volatile( \
MMI_LDC1(%[ftmp1], %[filter], 0x00) \
"li %[rtmp0], 0x08 \n\t" \
"dmtc1 %[rtmp0], %[ftmp0] \n\t" \
"punpckhbh %[ftmp2], %[ftmp0], %[ftmp1] \n\t" \
"punpcklbh %[ftmp1], %[ftmp0], %[ftmp1] \n\t" \
"psrah %[ftmp1], %[ftmp1], %[ftmp0] \n\t" \
"psrah %[ftmp2], %[ftmp2], %[ftmp0] \n\t" \
"xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t" \
\
"1: \n\t" \
"2: \n\t" \
"gsldlc1 %[ftmp3], 0x07(%[src]) \n\t" \
"gsldrc1 %[ftmp3], 0x00(%[src]) \n\t" \
"gsldlc1 %[ftmp4], 0x08(%[src]) \n\t" \
"gsldrc1 %[ftmp4], 0x01(%[src]) \n\t" \
"gsldlc1 %[ftmp5], 0x09(%[src]) \n\t" \
"gsldrc1 %[ftmp5], 0x02(%[src]) \n\t" \
"gsldlc1 %[ftmp6], 0x0a(%[src]) \n\t" \
"gsldrc1 %[ftmp6], 0x03(%[src]) \n\t" \
"punpcklbh %[ftmp7], %[ftmp3], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp3], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
"pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
"paddh %[ftmp3], %[ftmp7], %[ftmp8] \n\t" \
"punpcklbh %[ftmp7], %[ftmp4], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp4], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
"pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
"paddh %[ftmp4], %[ftmp7], %[ftmp8] \n\t" \
"punpcklbh %[ftmp7], %[ftmp5], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp5], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
"pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
"paddh %[ftmp5], %[ftmp7], %[ftmp8] \n\t" \
"punpcklbh %[ftmp7], %[ftmp6], %[ftmp0] \n\t" \
"punpckhbh %[ftmp8], %[ftmp6], %[ftmp0] \n\t" \
"pmullh %[ftmp7], %[ftmp7], %[ftmp1] \n\t" \
"pmullh %[ftmp8], %[ftmp8], %[ftmp2] \n\t" \
"paddh %[ftmp6], %[ftmp7], %[ftmp8] \n\t" \
TRANSPOSE_4H(%[ftmp3], %[ftmp4], %[ftmp5], %[ftmp6], \
%[ftmp7], %[ftmp8], %[ftmp9], %[ftmp10]) \
"paddh %[ftmp3], %[ftmp3], %[ftmp4] \n\t" \
"paddh %[ftmp5], %[ftmp5], %[ftmp6] \n\t" \
"paddh %[ftmp3], %[ftmp3], %[ftmp5] \n\t" \
"gssdlc1 %[ftmp3], 0x07(%[dst]) \n\t" \
"gssdrc1 %[ftmp3], 0x00(%[dst]) \n\t" \
\
"daddi %[x], %[x], -0x01 \n\t" \
PTR_ADDIU "%[src], %[src], 0x04 \n\t" \
PTR_ADDIU "%[dst], %[dst], 0x08 \n\t" \
"bnez %[x], 2b \n\t" \
\
"daddi %[y], %[y], -0x01 \n\t" \
"li %[x], " #x_step " \n\t" \
PTR_ADDIU "%[src], %[src], " #src_step " \n\t" \
PTR_ADDIU "%[dst], %[dst], " #dst_step " \n\t" \
PTR_ADDU "%[src], %[src], %[stride] \n\t" \
PTR_ADDIU "%[dst], %[dst], 0x80 \n\t" \
"bnez %[y], 1b \n\t" \
: [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]), \
[ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]), \
[ftmp4]"=&f"(ftmp[4]), [ftmp5]"=&f"(ftmp[5]), \
[ftmp6]"=&f"(ftmp[6]), [ftmp7]"=&f"(ftmp[7]), \
[ftmp8]"=&f"(ftmp[8]), [ftmp9]"=&f"(ftmp[9]), \
[ftmp10]"=&f"(ftmp[10]), [rtmp0]"=&r"(rtmp[0]), \
[src]"+&r"(src), [dst]"+&r"(dst), [y]"+&r"(y), \
[x]"+&r"(x) \
: [filter]"r"(filter), [stride]"r"(srcstride) \
: "memory" \
); \
}
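
In scalar terms, one pass of the inner "2:" loop above works as follows (a sketch of my reading of the asm, not an authoritative restatement): four byte windows src[0..7] through src[3..10] are loaded, widened to 16 bits, multiplied lane-wise by the taps (low half in ftmp1, high half in ftmp2), and the TRANSPOSE_4H plus two paddh steps reduce each window to its 8-tap sum, i.e. four 16-bit outputs per pass; src then advances 4 pixels (0x04) and dst 4 outputs (0x08 bytes).

    /* Scalar model of one "2:" pass (sketch, illustration only): writes
     * dst[0..3] from src[0..10], src already pointing 3 pixels before the
     * block and filter holding the 8 taps from ff_hevc_qpel_filters[mx-1]. */
    static void qpel_h_4pix_model(int16_t *dst, const uint8_t *src,
                                  const int8_t filter[8])
    {
        for (int i = 0; i < 4; i++) {          /* the four windows ftmp3..6 */
            int sum = 0;
            for (int k = 0; k < 8; k++)        /* pmullh low/high halves,   */
                sum += filter[k] * src[i + k]; /* TRANSPOSE_4H + paddh sum  */
            dst[i] = (int16_t)sum;
        }
    }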
PUT_HEVC_QPEL_H(4, 1, -4, -8);
PUT_HEVC_QPEL_H(8, 2, -8, -16);
PUT_HEVC_QPEL_H(12, 3, -12, -24);
PUT_HEVC_QPEL_H(16, 4, -16, -32);
PUT_HEVC_QPEL_H(24, 6, -24, -48);
PUT_HEVC_QPEL_H(32, 8, -32, -64);
PUT_HEVC_QPEL_H(48, 12, -48, -96);
PUT_HEVC_QPEL_H(64, 16, -64, -128);
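
The instantiation parameters follow from that 4-outputs-per-pass loop: x_step = w/4 passes per row; after a row, src has advanced by w pixels and dst by 2*w bytes, so src_step = -w and dst_step = -2*w rewind them before adding srcstride and 0x80 bytes (MAX_PB_SIZE * sizeof(int16_t), the dst row pitch). A model of the pointer arithmetic for w = 16 (sketch, illustration only):

    /* Row-loop model for w == 16 (x_step 4, src_step -16, dst_step -32),
     * sketching only the pointer arithmetic of the macro. */
    static void rows_model_w16(int16_t *dst, const uint8_t *src,
                               ptrdiff_t srcstride, int height)
    {
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < 4; x++) {   /* x_step passes of 4 outputs  */
                /* ... 4-output pass as in the sketch above ... */
                src += 4;                   /* PTR_ADDIU %[src], 0x04      */
                dst += 4;                   /* 0x08 bytes == 4 int16       */
            }
            src += -16 + srcstride;         /* src_step, then + stride     */
            dst += -16 + 64;                /* dst_step (-32 B) + 0x80 B   */
        }
    }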
#define PUT_HEVC_QPEL_HV(w, x_step, src_step, dst_step) \
void ff_hevc_put_hevc_qpel_hv##w##_8_mmi(int16_t *dst, uint8_t *_src, \
ptrdiff_t _srcstride, \
......