Diffstat (limited to 'media/ffvpx')
-rw-r--r--  media/ffvpx/README_MCP | 2
-rw-r--r--  media/ffvpx/compat/atomics/win32/stdatomic.h | 2
-rw-r--r--  media/ffvpx/compat/w32pthreads.h | 26
-rw-r--r--  media/ffvpx/libavcodec/allcodecs.c | 1652
-rw-r--r--  media/ffvpx/libavcodec/avcodec.c | 716
-rw-r--r--  media/ffvpx/libavcodec/avcodec.h | 3670
-rw-r--r--  media/ffvpx/libavcodec/avpacket.c | 450
-rw-r--r--  media/ffvpx/libavcodec/bitstream.c | 304
-rw-r--r--  media/ffvpx/libavcodec/bitstream_filters.c | 111
-rw-r--r--  media/ffvpx/libavcodec/blockdsp.h | 7
-rw-r--r--  media/ffvpx/libavcodec/bsf.c | 294
-rw-r--r--  media/ffvpx/libavcodec/bsf.h | 314
-rw-r--r--  media/ffvpx/libavcodec/bsf_internal.h | 60
-rw-r--r--  media/ffvpx/libavcodec/bsf_list.c | 4
-rw-r--r--  media/ffvpx/libavcodec/bytestream.h | 10
-rw-r--r--  media/ffvpx/libavcodec/codec.h | 375
-rw-r--r--  media/ffvpx/libavcodec/codec_desc.c | 763
-rw-r--r--  media/ffvpx/libavcodec/codec_desc.h | 128
-rw-r--r--  media/ffvpx/libavcodec/codec_id.h | 661
-rw-r--r--  media/ffvpx/libavcodec/codec_internal.h | 330
-rw-r--r--  media/ffvpx/libavcodec/codec_list.c | 2
-rw-r--r--  media/ffvpx/libavcodec/codec_par.c | 263
-rw-r--r--  media/ffvpx/libavcodec/codec_par.h | 247
-rw-r--r--  media/ffvpx/libavcodec/decode.c | 1296
-rw-r--r--  media/ffvpx/libavcodec/decode.h | 76
-rw-r--r--  media/ffvpx/libavcodec/defs.h | 192
-rw-r--r--  media/ffvpx/libavcodec/dummy_funcs.c | 30
-rw-r--r--  media/ffvpx/libavcodec/encode.c | 774
-rw-r--r--  media/ffvpx/libavcodec/encode.h | 99
-rw-r--r--  media/ffvpx/libavcodec/error_resilience.h | 10
-rw-r--r--  media/ffvpx/libavcodec/fft-internal.h | 40
-rw-r--r--  media/ffvpx/libavcodec/fft.h | 39
-rw-r--r--  media/ffvpx/libavcodec/fft_float.c | 1
-rw-r--r--  media/ffvpx/libavcodec/fft_template.c | 61
-rw-r--r--  media/ffvpx/libavcodec/flac.c | 75
-rw-r--r--  media/ffvpx/libavcodec/flac.h | 92
-rw-r--r--  media/ffvpx/libavcodec/flac_parse.h | 89
-rw-r--r--  media/ffvpx/libavcodec/flac_parser.c | 341
-rw-r--r--  media/ffvpx/libavcodec/flacdata.c | 2
-rw-r--r--  media/ffvpx/libavcodec/flacdata.h | 2
-rw-r--r--  media/ffvpx/libavcodec/flacdec.c | 359
-rw-r--r--  media/ffvpx/libavcodec/flacdsp.c | 17
-rw-r--r--  media/ffvpx/libavcodec/flacdsp.h | 8
-rw-r--r--  media/ffvpx/libavcodec/flacdsp_lpc_template.c | 2
-rw-r--r--  media/ffvpx/libavcodec/flacdsp_template.c | 2
-rw-r--r--  media/ffvpx/libavcodec/frame_thread_encoder.h | 5
-rw-r--r--  media/ffvpx/libavcodec/get_bits.h | 347
-rw-r--r--  media/ffvpx/libavcodec/get_buffer.c | 304
-rw-r--r--  media/ffvpx/libavcodec/golomb.c | 2
-rw-r--r--  media/ffvpx/libavcodec/golomb.h | 147
-rw-r--r--  media/ffvpx/libavcodec/h264chroma.h | 3
-rw-r--r--  media/ffvpx/libavcodec/h264dsp.h | 8
-rw-r--r--  media/ffvpx/libavcodec/h264pred.c | 184
-rw-r--r--  media/ffvpx/libavcodec/h264pred.h | 4
-rw-r--r--  media/ffvpx/libavcodec/h264pred_template.c | 34
-rw-r--r--  media/ffvpx/libavcodec/hpeldsp.h | 1
-rw-r--r--  media/ffvpx/libavcodec/hwaccels.h | 11
-rw-r--r--  media/ffvpx/libavcodec/hwconfig.h (renamed from media/ffvpx/libavcodec/hwaccel.h) | 26
-rw-r--r--  media/ffvpx/libavcodec/idctdsp.h | 17
-rw-r--r--  media/ffvpx/libavcodec/imgconvert.c | 188
-rw-r--r--  media/ffvpx/libavcodec/internal.h | 312
-rw-r--r--  media/ffvpx/libavcodec/mathops.h | 24
-rw-r--r--  media/ffvpx/libavcodec/mathtables.c | 9
-rw-r--r--  media/ffvpx/libavcodec/me_cmp.h | 13
-rw-r--r--  media/ffvpx/libavcodec/motion_est.h | 19
-rw-r--r--  media/ffvpx/libavcodec/moz.build | 8
-rw-r--r--  media/ffvpx/libavcodec/mpeg12data.h | 4
-rw-r--r--  media/ffvpx/libavcodec/mpegpicture.h | 23
-rw-r--r--  media/ffvpx/libavcodec/mpegutils.h | 18
-rw-r--r--  media/ffvpx/libavcodec/mpegvideo.h | 288
-rw-r--r--  media/ffvpx/libavcodec/mpegvideodata.h | 8
-rw-r--r--  media/ffvpx/libavcodec/mpegvideoencdsp.h | 10
-rw-r--r--  media/ffvpx/libavcodec/null_bsf.c | 14
-rw-r--r--  media/ffvpx/libavcodec/options.c | 190
-rw-r--r--  media/ffvpx/libavcodec/options_table.h | 207
-rw-r--r--  media/ffvpx/libavcodec/packet.h | 731
-rw-r--r--  media/ffvpx/libavcodec/packet_internal.h | 73
-rw-r--r--  media/ffvpx/libavcodec/parser.c | 73
-rw-r--r--  media/ffvpx/libavcodec/parser.h | 2
-rw-r--r--  media/ffvpx/libavcodec/parser_list.c | 2
-rw-r--r--  media/ffvpx/libavcodec/parsers.c | 133
-rw-r--r--  media/ffvpx/libavcodec/pixblockdsp.h | 7
-rw-r--r--  media/ffvpx/libavcodec/profiles.c | 7
-rw-r--r--  media/ffvpx/libavcodec/profiles.h | 36
-rw-r--r--  media/ffvpx/libavcodec/pthread.c | 43
-rw-r--r--  media/ffvpx/libavcodec/pthread_frame.c | 630
-rw-r--r--  media/ffvpx/libavcodec/pthread_internal.h | 32
-rw-r--r--  media/ffvpx/libavcodec/pthread_slice.c | 104
-rw-r--r--  media/ffvpx/libavcodec/put_bits.h | 201
-rw-r--r--  media/ffvpx/libavcodec/ratecontrol.h | 3
-rw-r--r--  media/ffvpx/libavcodec/raw.c | 44
-rw-r--r--  media/ffvpx/libavcodec/raw.h | 16
-rw-r--r--  media/ffvpx/libavcodec/rdft.c | 5
-rw-r--r--  media/ffvpx/libavcodec/rl.h | 49
-rw-r--r--  media/ffvpx/libavcodec/startcode.h | 36
-rw-r--r--  media/ffvpx/libavcodec/thread.h | 58
-rw-r--r--  media/ffvpx/libavcodec/threadframe.h | 89
-rw-r--r--  media/ffvpx/libavcodec/utils.c | 1465
-rw-r--r--  media/ffvpx/libavcodec/version.c | 50
-rw-r--r--  media/ffvpx/libavcodec/version.h | 101
-rw-r--r--  media/ffvpx/libavcodec/version_major.h | 52
-rw-r--r--  media/ffvpx/libavcodec/videodsp.c | 29
-rw-r--r--  media/ffvpx/libavcodec/videodsp.h | 4
-rw-r--r--  media/ffvpx/libavcodec/videodsp_template.c | 8
-rw-r--r--  media/ffvpx/libavcodec/vlc.c | 378
-rw-r--r--  media/ffvpx/libavcodec/vlc.h | 89
-rw-r--r--  media/ffvpx/libavcodec/vorbis_parser.c | 4
-rw-r--r--  media/ffvpx/libavcodec/vp3dsp.h | 2
-rw-r--r--  media/ffvpx/libavcodec/vp56.h | 196
-rw-r--r--  media/ffvpx/libavcodec/vp8.c | 714
-rw-r--r--  media/ffvpx/libavcodec/vp8.h | 52
-rw-r--r--  media/ffvpx/libavcodec/vp89_rac.h | 66
-rw-r--r--  media/ffvpx/libavcodec/vp8_parser.c | 2
-rw-r--r--  media/ffvpx/libavcodec/vp8data.h | 6
-rw-r--r--  media/ffvpx/libavcodec/vp8dsp.c | 54
-rw-r--r--  media/ffvpx/libavcodec/vp8dsp.h | 10
-rw-r--r--  media/ffvpx/libavcodec/vp9.c | 452
-rw-r--r--  media/ffvpx/libavcodec/vp9_mc_template.c | 12
-rw-r--r--  media/ffvpx/libavcodec/vp9_parser.c | 2
-rw-r--r--  media/ffvpx/libavcodec/vp9_superframe_split_bsf.c | 14
-rw-r--r--  media/ffvpx/libavcodec/vp9block.c | 244
-rw-r--r--  media/ffvpx/libavcodec/vp9dec.h | 39
-rw-r--r--  media/ffvpx/libavcodec/vp9dsp.c | 26
-rw-r--r--  media/ffvpx/libavcodec/vp9dsp.h | 4
-rw-r--r--  media/ffvpx/libavcodec/vp9mvs.c | 52
-rw-r--r--  media/ffvpx/libavcodec/vp9prob.c | 2
-rw-r--r--  media/ffvpx/libavcodec/vp9recon.c | 56
-rw-r--r--  media/ffvpx/libavcodec/vp9shared.h | 12
-rw-r--r--  media/ffvpx/libavcodec/vpx_rac.c (renamed from media/ffvpx/libavcodec/vp56rac.c) | 10
-rw-r--r--  media/ffvpx/libavcodec/vpx_rac.h | 135
-rw-r--r--  media/ffvpx/libavcodec/x86/constants.c | 3
-rw-r--r--  media/ffvpx/libavcodec/x86/fft.asm | 263
-rw-r--r--  media/ffvpx/libavcodec/x86/fft.h | 6
-rw-r--r--  media/ffvpx/libavcodec/x86/fft_init.c | 14
-rw-r--r--  media/ffvpx/libavcodec/x86/flacdsp.asm | 31
-rw-r--r--  media/ffvpx/libavcodec/x86/flacdsp_init.c | 56
-rw-r--r--  media/ffvpx/libavcodec/x86/h264_intrapred.asm | 743
-rw-r--r--  media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm | 110
-rw-r--r--  media/ffvpx/libavcodec/x86/h264_intrapred_init.c | 82
-rw-r--r--  media/ffvpx/libavcodec/x86/videodsp.asm | 40
-rw-r--r--  media/ffvpx/libavcodec/x86/videodsp_init.c | 74
-rw-r--r--  media/ffvpx/libavcodec/x86/vp8dsp.asm | 149
-rw-r--r--  media/ffvpx/libavcodec/x86/vp8dsp_init.c | 148
-rw-r--r--  media/ffvpx/libavcodec/x86/vp8dsp_loopfilter.asm | 354
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init.c | 1
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init.h | 3
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp.c | 5
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp_template.c | 1
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9intrapred_16bpp.asm | 105
-rw-r--r--  media/ffvpx/libavcodec/x86/vp9mc.asm | 5
-rw-r--r--  media/ffvpx/libavcodec/x86/vpx_arith.h (renamed from media/ffvpx/libavcodec/x86/vp56_arith.h) | 16
-rw-r--r--  media/ffvpx/libavcodec/xiph.c | 6
-rw-r--r--  media/ffvpx/libavcodec/xiph.h | 2
-rw-r--r--  media/ffvpx/libavutil/adler32.c | 5
-rw-r--r--  media/ffvpx/libavutil/adler32.h | 7
-rw-r--r--  media/ffvpx/libavutil/attributes.h | 8
-rw-r--r--  media/ffvpx/libavutil/attributes_internal.h | 34
-rw-r--r--  media/ffvpx/libavutil/avassert.h | 2
-rw-r--r--  media/ffvpx/libavutil/avsscanf.c | 970
-rw-r--r--  media/ffvpx/libavutil/avstring.c | 42
-rw-r--r--  media/ffvpx/libavutil/avstring.h | 34
-rw-r--r--  media/ffvpx/libavutil/avutil.h | 6
-rw-r--r--  media/ffvpx/libavutil/base64.c | 15
-rw-r--r--  media/ffvpx/libavutil/bprint.c | 39
-rw-r--r--  media/ffvpx/libavutil/bprint.h | 78
-rw-r--r--  media/ffvpx/libavutil/bswap.h | 2
-rw-r--r--  media/ffvpx/libavutil/buffer.c | 137
-rw-r--r--  media/ffvpx/libavutil/buffer.h | 53
-rw-r--r--  media/ffvpx/libavutil/buffer_internal.h | 30
-rw-r--r--  media/ffvpx/libavutil/channel_layout.c | 853
-rw-r--r--  media/ffvpx/libavutil/channel_layout.h | 611
-rw-r--r--  media/ffvpx/libavutil/common.h | 238
-rw-r--r--  media/ffvpx/libavutil/cpu.c | 227
-rw-r--r--  media/ffvpx/libavutil/cpu.h | 46
-rw-r--r--  media/ffvpx/libavutil/cpu_internal.h | 12
-rw-r--r--  media/ffvpx/libavutil/crc.c | 2
-rw-r--r--  media/ffvpx/libavutil/crc.h | 4
-rw-r--r--  media/ffvpx/libavutil/dict.c | 102
-rw-r--r--  media/ffvpx/libavutil/dict.h | 127
-rw-r--r--  media/ffvpx/libavutil/dict_internal.h | 37
-rw-r--r--  media/ffvpx/libavutil/error.c | 7
-rw-r--r--  media/ffvpx/libavutil/error.h | 2
-rw-r--r--  media/ffvpx/libavutil/eval.c | 40
-rw-r--r--  media/ffvpx/libavutil/eval.h | 31
-rw-r--r--  media/ffvpx/libavutil/fftime.h | 1
-rw-r--r--  media/ffvpx/libavutil/fifo.c | 368
-rw-r--r--  media/ffvpx/libavutil/fifo.h | 275
-rw-r--r--  media/ffvpx/libavutil/fixed_dsp.c | 10
-rw-r--r--  media/ffvpx/libavutil/fixed_dsp.h | 3
-rw-r--r--  media/ffvpx/libavutil/float_dsp.c | 24
-rw-r--r--  media/ffvpx/libavutil/float_dsp.h | 1
-rw-r--r--  media/ffvpx/libavutil/frame.c | 393
-rw-r--r--  media/ffvpx/libavutil/frame.h | 255
-rw-r--r--  media/ffvpx/libavutil/hwcontext.c | 112
-rw-r--r--  media/ffvpx/libavutil/hwcontext.h | 28
-rw-r--r--  media/ffvpx/libavutil/hwcontext_internal.h | 4
-rw-r--r--  media/ffvpx/libavutil/imgutils.c | 123
-rw-r--r--  media/ffvpx/libavutil/imgutils.h | 56
-rw-r--r--  media/ffvpx/libavutil/integer.c | 16
-rw-r--r--  media/ffvpx/libavutil/integer.h | 2
-rw-r--r--  media/ffvpx/libavutil/internal.h | 214
-rw-r--r--  media/ffvpx/libavutil/intmath.h | 5
-rw-r--r--  media/ffvpx/libavutil/lls.c | 8
-rw-r--r--  media/ffvpx/libavutil/lls.h | 3
-rw-r--r--  media/ffvpx/libavutil/log.c | 131
-rw-r--r--  media/ffvpx/libavutil/log.h | 57
-rw-r--r--  media/ffvpx/libavutil/macros.h | 30
-rw-r--r--  media/ffvpx/libavutil/mathematics.c | 12
-rw-r--r--  media/ffvpx/libavutil/mathematics.h | 6
-rw-r--r--  media/ffvpx/libavutil/mem.c | 120
-rw-r--r--  media/ffvpx/libavutil/mem.h | 123
-rw-r--r--  media/ffvpx/libavutil/mem_internal.h | 131
-rw-r--r--  media/ffvpx/libavutil/moz.build | 2
-rw-r--r--  media/ffvpx/libavutil/opt.c | 314
-rw-r--r--  media/ffvpx/libavutil/opt.h | 54
-rw-r--r--  media/ffvpx/libavutil/parseutils.c | 2
-rw-r--r--  media/ffvpx/libavutil/parseutils.h | 24
-rw-r--r--  media/ffvpx/libavutil/pixdesc.c | 1640
-rw-r--r--  media/ffvpx/libavutil/pixdesc.h | 67
-rw-r--r--  media/ffvpx/libavutil/pixelutils.c | 12
-rw-r--r--  media/ffvpx/libavutil/pixelutils.h | 1
-rw-r--r--  media/ffvpx/libavutil/pixfmt.h | 219
-rw-r--r--  media/ffvpx/libavutil/qsort.h | 2
-rw-r--r--  media/ffvpx/libavutil/rational.c | 9
-rw-r--r--  media/ffvpx/libavutil/rational.h | 9
-rw-r--r--  media/ffvpx/libavutil/samplefmt.c | 19
-rw-r--r--  media/ffvpx/libavutil/samplefmt.h | 9
-rw-r--r--  media/ffvpx/libavutil/slicethread.c | 8
-rw-r--r--  media/ffvpx/libavutil/thread.h | 47
-rw-r--r--  media/ffvpx/libavutil/threadmessage.c | 40
-rw-r--r--  media/ffvpx/libavutil/time.c | 4
-rw-r--r--  media/ffvpx/libavutil/timecode.c | 121
-rw-r--r--  media/ffvpx/libavutil/timecode.h | 67
-rw-r--r--  media/ffvpx/libavutil/timer.h | 19
-rw-r--r--  media/ffvpx/libavutil/utils.c | 48
-rw-r--r--  media/ffvpx/libavutil/version.h | 37
-rw-r--r--  media/ffvpx/libavutil/video_enc_params.c | 80
-rw-r--r--  media/ffvpx/libavutil/video_enc_params.h | 171
-rw-r--r--  media/ffvpx/libavutil/x86/bswap.h | 1
-rw-r--r--  media/ffvpx/libavutil/x86/cpu.c | 16
-rw-r--r--  media/ffvpx/libavutil/x86/cpu.h | 1
-rw-r--r--  media/ffvpx/libavutil/x86/cpuid.asm | 2
-rw-r--r--  media/ffvpx/libavutil/x86/emms.asm | 2
-rw-r--r--  media/ffvpx/libavutil/x86/emms.h | 5
-rw-r--r--  media/ffvpx/libavutil/x86/fixed_dsp.asm | 2
-rw-r--r--  media/ffvpx/libavutil/x86/fixed_dsp_init.c | 2
-rw-r--r--  media/ffvpx/libavutil/x86/float_dsp.asm | 172
-rw-r--r--  media/ffvpx/libavutil/x86/float_dsp_init.c | 7
-rw-r--r--  media/ffvpx/libavutil/x86/imgutils_init.c | 3
-rw-r--r--  media/ffvpx/libavutil/x86/intmath.h | 38
-rw-r--r--  media/ffvpx/libavutil/x86/intreadwrite.h | 2
-rw-r--r--  media/ffvpx/libavutil/x86/lls.asm | 6
-rw-r--r--  media/ffvpx/libavutil/x86/lls_init.c | 1
-rw-r--r--  media/ffvpx/libavutil/x86/pixelutils.asm | 60
-rw-r--r--  media/ffvpx/libavutil/x86/pixelutils_init.c | 9
-rw-r--r--  media/ffvpx/libavutil/x86/x86inc.asm | 99
256 files changed, 19495 insertions, 15629 deletions
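
Most of the libavcodec churn below is mechanical fallout from FFmpeg's n5.x split of the public AVCodec struct from an internal FFCodec wrapper in codec_internal.h, which is why nearly every "extern AVCodec ff_*" declaration in allcodecs.c becomes "extern const FFCodec ff_*". A rough sketch of the pattern follows; the internal fields are abbreviated rather than copied from the imported header.

/* Abridged sketch only; see media/ffvpx/libavcodec/codec_internal.h in the
 * new tree for the real definition. */
#include "codec.h"                      /* public AVCodec */

typedef struct FFCodec {
    AVCodec p;                          /* public part comes first, so the
                                           embedded AVCodec can be handed out
                                           as &codec->p */
    /* internal-only members follow: callback type, decode/encode callbacks,
     * init/close hooks, hardware configs, ... (omitted in this sketch) */
} FFCodec;

/* allcodecs.c keeps a table of FFCodec pointers while the public iteration
 * API (av_codec_iterate()) continues to expose plain AVCodec pointers via
 * the embedded p member. */
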
diff --git a/media/ffvpx/README_MCP b/media/ffvpx/README_MCP
index 6b09b24e04..be1e680b65 100644
--- a/media/ffvpx/README_MCP
+++ b/media/ffvpx/README_MCP
@@ -1,6 +1,6 @@
This directory contains files used in Goanna builds from FFmpeg
(http://ffmpeg.org). The current files are from FFmpeg as of
-Release n4.2.7
+Release n6.0
All source files match their path from the library's source archive.
The FFmpeg project recommends to use the branch tip, however we
diff --git a/media/ffvpx/compat/atomics/win32/stdatomic.h b/media/ffvpx/compat/atomics/win32/stdatomic.h
index bb8e6e7e15..28a627bfd3 100644
--- a/media/ffvpx/compat/atomics/win32/stdatomic.h
+++ b/media/ffvpx/compat/atomics/win32/stdatomic.h
@@ -96,7 +96,7 @@ do { \
atomic_load(object)
#define atomic_exchange(object, desired) \
- InterlockedExchangePointer(object, desired);
+ InterlockedExchangePointer((PVOID volatile *)object, (PVOID)desired)
#define atomic_exchange_explicit(object, desired, order) \
atomic_exchange(object, desired)
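
The rewritten atomic_exchange macro above drops the stray trailing semicolon and adds the casts that InterlockedExchangePointer() requires, so the shim accepts any pointer-sized atomic object and can be used inside an expression. A minimal sketch of why that matters, with the variable and function names invented for illustration:

/* Sketch only, mirroring the patched compat/atomics/win32/stdatomic.h macro. */
#include <windows.h>
#include <stdint.h>

#define atomic_exchange(object, desired) \
    InterlockedExchangePointer((PVOID volatile *)object, (PVOID)desired)

static intptr_t state;                  /* win32 shim atomics are pointer-sized */

static intptr_t swap_state(intptr_t next)
{
    /* Without the casts, &state (an intptr_t *) does not convert to the
     * PVOID volatile * parameter; with the old trailing ';', the macro could
     * not appear inside a return/assignment expression like this one. */
    return (intptr_t)atomic_exchange(&state, next);
}
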
diff --git a/media/ffvpx/compat/w32pthreads.h b/media/ffvpx/compat/w32pthreads.h
index 21acfd2ba1..b7a65b233b 100644
--- a/media/ffvpx/compat/w32pthreads.h
+++ b/media/ffvpx/compat/w32pthreads.h
@@ -38,11 +38,13 @@
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <process.h>
+#include <time.h>
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
+#include "libavutil/fftime.h"
typedef struct pthread_t {
void *handle;
@@ -61,6 +63,9 @@ typedef CONDITION_VARIABLE pthread_cond_t;
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
+#define PTHREAD_CANCEL_ENABLE 1
+#define PTHREAD_CANCEL_DISABLE 0
+
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
{
pthread_t *h = (pthread_t*)arg;
@@ -156,10 +161,31 @@ static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex
return 0;
}
+static inline int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime)
+{
+ int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
+ DWORD t = av_clip64(abs_milli - av_gettime() / 1000, 0, UINT32_MAX);
+
+ if (!SleepConditionVariableSRW(cond, mutex, t, 0)) {
+ DWORD err = GetLastError();
+ if (err == ERROR_TIMEOUT)
+ return ETIMEDOUT;
+ else
+ return EINVAL;
+ }
+ return 0;
+}
+
static inline int pthread_cond_signal(pthread_cond_t *cond)
{
WakeConditionVariable(cond);
return 0;
}
+static inline int pthread_setcancelstate(int state, int *oldstate)
+{
+ return 0;
+}
+
#endif /* COMPAT_W32PTHREADS_H */
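
The pthread_cond_timedwait() emulation added above converts the POSIX absolute deadline into the relative millisecond timeout that SleepConditionVariableSRW() expects, clamps it with av_clip64(), and maps ERROR_TIMEOUT back to ETIMEDOUT. A usage sketch, assuming av_gettime() from libavutil/fftime.h (microseconds since the Unix epoch, the same clock the shim uses internally) and a helper name invented for the example; the caller is expected to already hold the mutex:

/* Sketch only; not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <time.h>
#include "libavutil/fftime.h"           /* av_gettime() */

static int cond_wait_ms(pthread_cond_t *cond, pthread_mutex_t *mutex,
                        int64_t timeout_ms)
{
    struct timespec ts;
    int64_t deadline_us = av_gettime() + timeout_ms * 1000;

    /* Build the absolute wall-clock deadline that the shim converts back
     * into a relative SleepConditionVariableSRW() timeout. */
    ts.tv_sec  = deadline_us / 1000000;
    ts.tv_nsec = (deadline_us % 1000000) * 1000;

    /* Returns 0 when signalled, ETIMEDOUT when the deadline passes. */
    return pthread_cond_timedwait(cond, mutex, &ts);
}
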
diff --git a/media/ffvpx/libavcodec/allcodecs.c b/media/ffvpx/libavcodec/allcodecs.c
index d2f9a39ce5..e593ad19af 100644
--- a/media/ffvpx/libavcodec/allcodecs.c
+++ b/media/ffvpx/libavcodec/allcodecs.c
@@ -24,766 +24,878 @@
* Provide registration of all codecs, parsers and bitstream filters for libavcodec.
*/
+#include <stdint.h>
+#include <string.h>
+
#include "config.h"
+#include "config_components.h"
#include "libavutil/thread.h"
-#include "avcodec.h"
-#include "version.h"
-
-extern AVCodec ff_a64multi_encoder;
-extern AVCodec ff_a64multi5_encoder;
-extern AVCodec ff_aasc_decoder;
-extern AVCodec ff_aic_decoder;
-extern AVCodec ff_alias_pix_encoder;
-extern AVCodec ff_alias_pix_decoder;
-extern AVCodec ff_agm_decoder;
-extern AVCodec ff_amv_encoder;
-extern AVCodec ff_amv_decoder;
-extern AVCodec ff_anm_decoder;
-extern AVCodec ff_ansi_decoder;
-extern AVCodec ff_apng_encoder;
-extern AVCodec ff_apng_decoder;
-extern AVCodec ff_arbc_decoder;
-extern AVCodec ff_asv1_encoder;
-extern AVCodec ff_asv1_decoder;
-extern AVCodec ff_asv2_encoder;
-extern AVCodec ff_asv2_decoder;
-extern AVCodec ff_aura_decoder;
-extern AVCodec ff_aura2_decoder;
-extern AVCodec ff_avrp_encoder;
-extern AVCodec ff_avrp_decoder;
-extern AVCodec ff_avrn_decoder;
-extern AVCodec ff_avs_decoder;
-extern AVCodec ff_avui_encoder;
-extern AVCodec ff_avui_decoder;
-extern AVCodec ff_ayuv_encoder;
-extern AVCodec ff_ayuv_decoder;
-extern AVCodec ff_bethsoftvid_decoder;
-extern AVCodec ff_bfi_decoder;
-extern AVCodec ff_bink_decoder;
-extern AVCodec ff_bitpacked_decoder;
-extern AVCodec ff_bmp_encoder;
-extern AVCodec ff_bmp_decoder;
-extern AVCodec ff_bmv_video_decoder;
-extern AVCodec ff_brender_pix_decoder;
-extern AVCodec ff_c93_decoder;
-extern AVCodec ff_cavs_decoder;
-extern AVCodec ff_cdgraphics_decoder;
-extern AVCodec ff_cdxl_decoder;
-extern AVCodec ff_cfhd_decoder;
-extern AVCodec ff_cinepak_encoder;
-extern AVCodec ff_cinepak_decoder;
-extern AVCodec ff_clearvideo_decoder;
-extern AVCodec ff_cljr_encoder;
-extern AVCodec ff_cljr_decoder;
-extern AVCodec ff_cllc_decoder;
-extern AVCodec ff_comfortnoise_encoder;
-extern AVCodec ff_comfortnoise_decoder;
-extern AVCodec ff_cpia_decoder;
-extern AVCodec ff_cscd_decoder;
-extern AVCodec ff_cyuv_decoder;
-extern AVCodec ff_dds_decoder;
-extern AVCodec ff_dfa_decoder;
-extern AVCodec ff_dirac_decoder;
-extern AVCodec ff_dnxhd_encoder;
-extern AVCodec ff_dnxhd_decoder;
-extern AVCodec ff_dpx_encoder;
-extern AVCodec ff_dpx_decoder;
-extern AVCodec ff_dsicinvideo_decoder;
-extern AVCodec ff_dvaudio_decoder;
-extern AVCodec ff_dvvideo_encoder;
-extern AVCodec ff_dvvideo_decoder;
-extern AVCodec ff_dxa_decoder;
-extern AVCodec ff_dxtory_decoder;
-extern AVCodec ff_dxv_decoder;
-extern AVCodec ff_eacmv_decoder;
-extern AVCodec ff_eamad_decoder;
-extern AVCodec ff_eatgq_decoder;
-extern AVCodec ff_eatgv_decoder;
-extern AVCodec ff_eatqi_decoder;
-extern AVCodec ff_eightbps_decoder;
-extern AVCodec ff_eightsvx_exp_decoder;
-extern AVCodec ff_eightsvx_fib_decoder;
-extern AVCodec ff_escape124_decoder;
-extern AVCodec ff_escape130_decoder;
-extern AVCodec ff_exr_decoder;
-extern AVCodec ff_ffv1_encoder;
-extern AVCodec ff_ffv1_decoder;
-extern AVCodec ff_ffvhuff_encoder;
-extern AVCodec ff_ffvhuff_decoder;
-extern AVCodec ff_fic_decoder;
-extern AVCodec ff_fits_encoder;
-extern AVCodec ff_fits_decoder;
-extern AVCodec ff_flashsv_encoder;
-extern AVCodec ff_flashsv_decoder;
-extern AVCodec ff_flashsv2_encoder;
-extern AVCodec ff_flashsv2_decoder;
-extern AVCodec ff_flic_decoder;
-extern AVCodec ff_flv_encoder;
-extern AVCodec ff_flv_decoder;
-extern AVCodec ff_fmvc_decoder;
-extern AVCodec ff_fourxm_decoder;
-extern AVCodec ff_fraps_decoder;
-extern AVCodec ff_frwu_decoder;
-extern AVCodec ff_g2m_decoder;
-extern AVCodec ff_gdv_decoder;
-extern AVCodec ff_gif_encoder;
-extern AVCodec ff_gif_decoder;
-extern AVCodec ff_h261_encoder;
-extern AVCodec ff_h261_decoder;
-extern AVCodec ff_h263_encoder;
-extern AVCodec ff_h263_decoder;
-extern AVCodec ff_h263i_decoder;
-extern AVCodec ff_h263p_encoder;
-extern AVCodec ff_h263p_decoder;
-extern AVCodec ff_h263_v4l2m2m_decoder;
-extern AVCodec ff_h264_decoder;
-extern AVCodec ff_h264_crystalhd_decoder;
-extern AVCodec ff_h264_v4l2m2m_decoder;
-extern AVCodec ff_h264_mediacodec_decoder;
-extern AVCodec ff_h264_mmal_decoder;
-extern AVCodec ff_h264_qsv_decoder;
-extern AVCodec ff_h264_rkmpp_decoder;
-extern AVCodec ff_hap_encoder;
-extern AVCodec ff_hap_decoder;
-extern AVCodec ff_hevc_decoder;
-extern AVCodec ff_hevc_qsv_decoder;
-extern AVCodec ff_hevc_rkmpp_decoder;
-extern AVCodec ff_hevc_v4l2m2m_decoder;
-extern AVCodec ff_hnm4_video_decoder;
-extern AVCodec ff_hq_hqa_decoder;
-extern AVCodec ff_hqx_decoder;
-extern AVCodec ff_huffyuv_encoder;
-extern AVCodec ff_huffyuv_decoder;
-extern AVCodec ff_hymt_decoder;
-extern AVCodec ff_idcin_decoder;
-extern AVCodec ff_iff_ilbm_decoder;
-extern AVCodec ff_imm4_decoder;
-extern AVCodec ff_indeo2_decoder;
-extern AVCodec ff_indeo3_decoder;
-extern AVCodec ff_indeo4_decoder;
-extern AVCodec ff_indeo5_decoder;
-extern AVCodec ff_interplay_video_decoder;
-extern AVCodec ff_jpeg2000_encoder;
-extern AVCodec ff_jpeg2000_decoder;
-extern AVCodec ff_jpegls_encoder;
-extern AVCodec ff_jpegls_decoder;
-extern AVCodec ff_jv_decoder;
-extern AVCodec ff_kgv1_decoder;
-extern AVCodec ff_kmvc_decoder;
-extern AVCodec ff_lagarith_decoder;
-extern AVCodec ff_ljpeg_encoder;
-extern AVCodec ff_loco_decoder;
-extern AVCodec ff_lscr_decoder;
-extern AVCodec ff_m101_decoder;
-extern AVCodec ff_magicyuv_encoder;
-extern AVCodec ff_magicyuv_decoder;
-extern AVCodec ff_mdec_decoder;
-extern AVCodec ff_mimic_decoder;
-extern AVCodec ff_mjpeg_encoder;
-extern AVCodec ff_mjpeg_decoder;
-extern AVCodec ff_mjpegb_decoder;
-extern AVCodec ff_mmvideo_decoder;
-extern AVCodec ff_motionpixels_decoder;
-extern AVCodec ff_mpeg1video_encoder;
-extern AVCodec ff_mpeg1video_decoder;
-extern AVCodec ff_mpeg2video_encoder;
-extern AVCodec ff_mpeg2video_decoder;
-extern AVCodec ff_mpeg4_encoder;
-extern AVCodec ff_mpeg4_decoder;
-extern AVCodec ff_mpeg4_crystalhd_decoder;
-extern AVCodec ff_mpeg4_v4l2m2m_decoder;
-extern AVCodec ff_mpeg4_mmal_decoder;
-extern AVCodec ff_mpegvideo_decoder;
-extern AVCodec ff_mpeg1_v4l2m2m_decoder;
-extern AVCodec ff_mpeg2_mmal_decoder;
-extern AVCodec ff_mpeg2_crystalhd_decoder;
-extern AVCodec ff_mpeg2_v4l2m2m_decoder;
-extern AVCodec ff_mpeg2_qsv_decoder;
-extern AVCodec ff_mpeg2_mediacodec_decoder;
-extern AVCodec ff_msa1_decoder;
-extern AVCodec ff_mscc_decoder;
-extern AVCodec ff_msmpeg4v1_decoder;
-extern AVCodec ff_msmpeg4v2_encoder;
-extern AVCodec ff_msmpeg4v2_decoder;
-extern AVCodec ff_msmpeg4v3_encoder;
-extern AVCodec ff_msmpeg4v3_decoder;
-extern AVCodec ff_msmpeg4_crystalhd_decoder;
-extern AVCodec ff_msrle_decoder;
-extern AVCodec ff_mss1_decoder;
-extern AVCodec ff_mss2_decoder;
-extern AVCodec ff_msvideo1_encoder;
-extern AVCodec ff_msvideo1_decoder;
-extern AVCodec ff_mszh_decoder;
-extern AVCodec ff_mts2_decoder;
-extern AVCodec ff_mvc1_decoder;
-extern AVCodec ff_mvc2_decoder;
-extern AVCodec ff_mwsc_decoder;
-extern AVCodec ff_mxpeg_decoder;
-extern AVCodec ff_nuv_decoder;
-extern AVCodec ff_paf_video_decoder;
-extern AVCodec ff_pam_encoder;
-extern AVCodec ff_pam_decoder;
-extern AVCodec ff_pbm_encoder;
-extern AVCodec ff_pbm_decoder;
-extern AVCodec ff_pcx_encoder;
-extern AVCodec ff_pcx_decoder;
-extern AVCodec ff_pgm_encoder;
-extern AVCodec ff_pgm_decoder;
-extern AVCodec ff_pgmyuv_encoder;
-extern AVCodec ff_pgmyuv_decoder;
-extern AVCodec ff_pictor_decoder;
-extern AVCodec ff_pixlet_decoder;
-extern AVCodec ff_png_encoder;
-extern AVCodec ff_png_decoder;
-extern AVCodec ff_ppm_encoder;
-extern AVCodec ff_ppm_decoder;
-extern AVCodec ff_prores_encoder;
-extern AVCodec ff_prores_decoder;
-extern AVCodec ff_prores_aw_encoder;
-extern AVCodec ff_prores_ks_encoder;
-extern AVCodec ff_prosumer_decoder;
-extern AVCodec ff_psd_decoder;
-extern AVCodec ff_ptx_decoder;
-extern AVCodec ff_qdraw_decoder;
-extern AVCodec ff_qpeg_decoder;
-extern AVCodec ff_qtrle_encoder;
-extern AVCodec ff_qtrle_decoder;
-extern AVCodec ff_r10k_encoder;
-extern AVCodec ff_r10k_decoder;
-extern AVCodec ff_r210_encoder;
-extern AVCodec ff_r210_decoder;
-extern AVCodec ff_rasc_decoder;
-extern AVCodec ff_rawvideo_encoder;
-extern AVCodec ff_rawvideo_decoder;
-extern AVCodec ff_rl2_decoder;
-extern AVCodec ff_roq_encoder;
-extern AVCodec ff_roq_decoder;
-extern AVCodec ff_rpza_decoder;
-extern AVCodec ff_rscc_decoder;
-extern AVCodec ff_rv10_encoder;
-extern AVCodec ff_rv10_decoder;
-extern AVCodec ff_rv20_encoder;
-extern AVCodec ff_rv20_decoder;
-extern AVCodec ff_rv30_decoder;
-extern AVCodec ff_rv40_decoder;
-extern AVCodec ff_s302m_encoder;
-extern AVCodec ff_s302m_decoder;
-extern AVCodec ff_sanm_decoder;
-extern AVCodec ff_scpr_decoder;
-extern AVCodec ff_screenpresso_decoder;
-extern AVCodec ff_sdx2_dpcm_decoder;
-extern AVCodec ff_sgi_encoder;
-extern AVCodec ff_sgi_decoder;
-extern AVCodec ff_sgirle_decoder;
-extern AVCodec ff_sheervideo_decoder;
-extern AVCodec ff_smacker_decoder;
-extern AVCodec ff_smc_decoder;
-extern AVCodec ff_smvjpeg_decoder;
-extern AVCodec ff_snow_encoder;
-extern AVCodec ff_snow_decoder;
-extern AVCodec ff_sp5x_decoder;
-extern AVCodec ff_speedhq_decoder;
-extern AVCodec ff_srgc_decoder;
-extern AVCodec ff_sunrast_encoder;
-extern AVCodec ff_sunrast_decoder;
-extern AVCodec ff_svq1_encoder;
-extern AVCodec ff_svq1_decoder;
-extern AVCodec ff_svq3_decoder;
-extern AVCodec ff_targa_encoder;
-extern AVCodec ff_targa_decoder;
-extern AVCodec ff_targa_y216_decoder;
-extern AVCodec ff_tdsc_decoder;
-extern AVCodec ff_theora_decoder;
-extern AVCodec ff_thp_decoder;
-extern AVCodec ff_tiertexseqvideo_decoder;
-extern AVCodec ff_tiff_encoder;
-extern AVCodec ff_tiff_decoder;
-extern AVCodec ff_tmv_decoder;
-extern AVCodec ff_truemotion1_decoder;
-extern AVCodec ff_truemotion2_decoder;
-extern AVCodec ff_truemotion2rt_decoder;
-extern AVCodec ff_tscc_decoder;
-extern AVCodec ff_tscc2_decoder;
-extern AVCodec ff_txd_decoder;
-extern AVCodec ff_ulti_decoder;
-extern AVCodec ff_utvideo_encoder;
-extern AVCodec ff_utvideo_decoder;
-extern AVCodec ff_v210_encoder;
-extern AVCodec ff_v210_decoder;
-extern AVCodec ff_v210x_decoder;
-extern AVCodec ff_v308_encoder;
-extern AVCodec ff_v308_decoder;
-extern AVCodec ff_v408_encoder;
-extern AVCodec ff_v408_decoder;
-extern AVCodec ff_v410_encoder;
-extern AVCodec ff_v410_decoder;
-extern AVCodec ff_vb_decoder;
-extern AVCodec ff_vble_decoder;
-extern AVCodec ff_vc1_decoder;
-extern AVCodec ff_vc1_crystalhd_decoder;
-extern AVCodec ff_vc1image_decoder;
-extern AVCodec ff_vc1_mmal_decoder;
-extern AVCodec ff_vc1_qsv_decoder;
-extern AVCodec ff_vc1_v4l2m2m_decoder;
-extern AVCodec ff_vc2_encoder;
-extern AVCodec ff_vcr1_decoder;
-extern AVCodec ff_vmdvideo_decoder;
-extern AVCodec ff_vmnc_decoder;
-extern AVCodec ff_vp3_decoder;
-extern AVCodec ff_vp4_decoder;
-extern AVCodec ff_vp5_decoder;
-extern AVCodec ff_vp6_decoder;
-extern AVCodec ff_vp6a_decoder;
-extern AVCodec ff_vp6f_decoder;
-extern AVCodec ff_vp7_decoder;
-extern AVCodec ff_vp8_decoder;
-extern AVCodec ff_vp8_rkmpp_decoder;
-extern AVCodec ff_vp8_v4l2m2m_decoder;
-extern AVCodec ff_vp9_decoder;
-extern AVCodec ff_vp9_rkmpp_decoder;
-extern AVCodec ff_vp9_v4l2m2m_decoder;
-extern AVCodec ff_vqa_decoder;
-extern AVCodec ff_webp_decoder;
-extern AVCodec ff_wcmv_decoder;
-extern AVCodec ff_wrapped_avframe_encoder;
-extern AVCodec ff_wrapped_avframe_decoder;
-extern AVCodec ff_wmv1_encoder;
-extern AVCodec ff_wmv1_decoder;
-extern AVCodec ff_wmv2_encoder;
-extern AVCodec ff_wmv2_decoder;
-extern AVCodec ff_wmv3_decoder;
-extern AVCodec ff_wmv3_crystalhd_decoder;
-extern AVCodec ff_wmv3image_decoder;
-extern AVCodec ff_wnv1_decoder;
-extern AVCodec ff_xan_wc3_decoder;
-extern AVCodec ff_xan_wc4_decoder;
-extern AVCodec ff_xbm_encoder;
-extern AVCodec ff_xbm_decoder;
-extern AVCodec ff_xface_encoder;
-extern AVCodec ff_xface_decoder;
-extern AVCodec ff_xl_decoder;
-extern AVCodec ff_xpm_decoder;
-extern AVCodec ff_xwd_encoder;
-extern AVCodec ff_xwd_decoder;
-extern AVCodec ff_y41p_encoder;
-extern AVCodec ff_y41p_decoder;
-extern AVCodec ff_ylc_decoder;
-extern AVCodec ff_yop_decoder;
-extern AVCodec ff_yuv4_encoder;
-extern AVCodec ff_yuv4_decoder;
-extern AVCodec ff_zero12v_decoder;
-extern AVCodec ff_zerocodec_decoder;
-extern AVCodec ff_zlib_encoder;
-extern AVCodec ff_zlib_decoder;
-extern AVCodec ff_zmbv_encoder;
-extern AVCodec ff_zmbv_decoder;
+#include "codec.h"
+#include "codec_id.h"
+#include "codec_internal.h"
+
+extern const FFCodec ff_a64multi_encoder;
+extern const FFCodec ff_a64multi5_encoder;
+extern const FFCodec ff_aasc_decoder;
+extern const FFCodec ff_aic_decoder;
+extern const FFCodec ff_alias_pix_encoder;
+extern const FFCodec ff_alias_pix_decoder;
+extern const FFCodec ff_agm_decoder;
+extern const FFCodec ff_amv_encoder;
+extern const FFCodec ff_amv_decoder;
+extern const FFCodec ff_anm_decoder;
+extern const FFCodec ff_ansi_decoder;
+extern const FFCodec ff_apng_encoder;
+extern const FFCodec ff_apng_decoder;
+extern const FFCodec ff_arbc_decoder;
+extern const FFCodec ff_argo_decoder;
+extern const FFCodec ff_asv1_encoder;
+extern const FFCodec ff_asv1_decoder;
+extern const FFCodec ff_asv2_encoder;
+extern const FFCodec ff_asv2_decoder;
+extern const FFCodec ff_aura_decoder;
+extern const FFCodec ff_aura2_decoder;
+extern const FFCodec ff_avrp_encoder;
+extern const FFCodec ff_avrp_decoder;
+extern const FFCodec ff_avrn_decoder;
+extern const FFCodec ff_avs_decoder;
+extern const FFCodec ff_avui_encoder;
+extern const FFCodec ff_avui_decoder;
+#if FF_API_AYUV_CODECID
+extern const FFCodec ff_ayuv_encoder;
+extern const FFCodec ff_ayuv_decoder;
+#endif
+extern const FFCodec ff_bethsoftvid_decoder;
+extern const FFCodec ff_bfi_decoder;
+extern const FFCodec ff_bink_decoder;
+extern const FFCodec ff_bitpacked_decoder;
+extern const FFCodec ff_bitpacked_encoder;
+extern const FFCodec ff_bmp_encoder;
+extern const FFCodec ff_bmp_decoder;
+extern const FFCodec ff_bmv_video_decoder;
+extern const FFCodec ff_brender_pix_decoder;
+extern const FFCodec ff_c93_decoder;
+extern const FFCodec ff_cavs_decoder;
+extern const FFCodec ff_cdgraphics_decoder;
+extern const FFCodec ff_cdtoons_decoder;
+extern const FFCodec ff_cdxl_decoder;
+extern const FFCodec ff_cfhd_encoder;
+extern const FFCodec ff_cfhd_decoder;
+extern const FFCodec ff_cinepak_encoder;
+extern const FFCodec ff_cinepak_decoder;
+extern const FFCodec ff_clearvideo_decoder;
+extern const FFCodec ff_cljr_encoder;
+extern const FFCodec ff_cljr_decoder;
+extern const FFCodec ff_cllc_decoder;
+extern const FFCodec ff_comfortnoise_encoder;
+extern const FFCodec ff_comfortnoise_decoder;
+extern const FFCodec ff_cpia_decoder;
+extern const FFCodec ff_cri_decoder;
+extern const FFCodec ff_cscd_decoder;
+extern const FFCodec ff_cyuv_decoder;
+extern const FFCodec ff_dds_decoder;
+extern const FFCodec ff_dfa_decoder;
+extern const FFCodec ff_dirac_decoder;
+extern const FFCodec ff_dnxhd_encoder;
+extern const FFCodec ff_dnxhd_decoder;
+extern const FFCodec ff_dpx_encoder;
+extern const FFCodec ff_dpx_decoder;
+extern const FFCodec ff_dsicinvideo_decoder;
+extern const FFCodec ff_dvaudio_decoder;
+extern const FFCodec ff_dvvideo_encoder;
+extern const FFCodec ff_dvvideo_decoder;
+extern const FFCodec ff_dxa_decoder;
+extern const FFCodec ff_dxtory_decoder;
+extern const FFCodec ff_dxv_decoder;
+extern const FFCodec ff_eacmv_decoder;
+extern const FFCodec ff_eamad_decoder;
+extern const FFCodec ff_eatgq_decoder;
+extern const FFCodec ff_eatgv_decoder;
+extern const FFCodec ff_eatqi_decoder;
+extern const FFCodec ff_eightbps_decoder;
+extern const FFCodec ff_eightsvx_exp_decoder;
+extern const FFCodec ff_eightsvx_fib_decoder;
+extern const FFCodec ff_escape124_decoder;
+extern const FFCodec ff_escape130_decoder;
+extern const FFCodec ff_exr_encoder;
+extern const FFCodec ff_exr_decoder;
+extern const FFCodec ff_ffv1_encoder;
+extern const FFCodec ff_ffv1_decoder;
+extern const FFCodec ff_ffvhuff_encoder;
+extern const FFCodec ff_ffvhuff_decoder;
+extern const FFCodec ff_fic_decoder;
+extern const FFCodec ff_fits_encoder;
+extern const FFCodec ff_fits_decoder;
+extern const FFCodec ff_flashsv_encoder;
+extern const FFCodec ff_flashsv_decoder;
+extern const FFCodec ff_flashsv2_encoder;
+extern const FFCodec ff_flashsv2_decoder;
+extern const FFCodec ff_flic_decoder;
+extern const FFCodec ff_flv_encoder;
+extern const FFCodec ff_flv_decoder;
+extern const FFCodec ff_fmvc_decoder;
+extern const FFCodec ff_fourxm_decoder;
+extern const FFCodec ff_fraps_decoder;
+extern const FFCodec ff_frwu_decoder;
+extern const FFCodec ff_g2m_decoder;
+extern const FFCodec ff_gdv_decoder;
+extern const FFCodec ff_gem_decoder;
+extern const FFCodec ff_gif_encoder;
+extern const FFCodec ff_gif_decoder;
+extern const FFCodec ff_h261_encoder;
+extern const FFCodec ff_h261_decoder;
+extern const FFCodec ff_h263_encoder;
+extern const FFCodec ff_h263_decoder;
+extern const FFCodec ff_h263i_decoder;
+extern const FFCodec ff_h263p_encoder;
+extern const FFCodec ff_h263p_decoder;
+extern const FFCodec ff_h263_v4l2m2m_decoder;
+extern const FFCodec ff_h264_decoder;
+extern const FFCodec ff_h264_crystalhd_decoder;
+extern const FFCodec ff_h264_v4l2m2m_decoder;
+extern const FFCodec ff_h264_mediacodec_decoder;
+extern const FFCodec ff_h264_mediacodec_encoder;
+extern const FFCodec ff_h264_mmal_decoder;
+extern const FFCodec ff_h264_qsv_decoder;
+extern const FFCodec ff_h264_rkmpp_decoder;
+extern const FFCodec ff_hap_encoder;
+extern const FFCodec ff_hap_decoder;
+extern const FFCodec ff_hevc_decoder;
+extern const FFCodec ff_hevc_qsv_decoder;
+extern const FFCodec ff_hevc_rkmpp_decoder;
+extern const FFCodec ff_hevc_v4l2m2m_decoder;
+extern const FFCodec ff_hnm4_video_decoder;
+extern const FFCodec ff_hq_hqa_decoder;
+extern const FFCodec ff_hqx_decoder;
+extern const FFCodec ff_huffyuv_encoder;
+extern const FFCodec ff_huffyuv_decoder;
+extern const FFCodec ff_hymt_decoder;
+extern const FFCodec ff_idcin_decoder;
+extern const FFCodec ff_iff_ilbm_decoder;
+extern const FFCodec ff_imm4_decoder;
+extern const FFCodec ff_imm5_decoder;
+extern const FFCodec ff_indeo2_decoder;
+extern const FFCodec ff_indeo3_decoder;
+extern const FFCodec ff_indeo4_decoder;
+extern const FFCodec ff_indeo5_decoder;
+extern const FFCodec ff_interplay_video_decoder;
+extern const FFCodec ff_ipu_decoder;
+extern const FFCodec ff_jpeg2000_encoder;
+extern const FFCodec ff_jpeg2000_decoder;
+extern const FFCodec ff_jpegls_encoder;
+extern const FFCodec ff_jpegls_decoder;
+extern const FFCodec ff_jv_decoder;
+extern const FFCodec ff_kgv1_decoder;
+extern const FFCodec ff_kmvc_decoder;
+extern const FFCodec ff_lagarith_decoder;
+extern const FFCodec ff_ljpeg_encoder;
+extern const FFCodec ff_loco_decoder;
+extern const FFCodec ff_lscr_decoder;
+extern const FFCodec ff_m101_decoder;
+extern const FFCodec ff_magicyuv_encoder;
+extern const FFCodec ff_magicyuv_decoder;
+extern const FFCodec ff_mdec_decoder;
+extern const FFCodec ff_media100_decoder;
+extern const FFCodec ff_mimic_decoder;
+extern const FFCodec ff_mjpeg_encoder;
+extern const FFCodec ff_mjpeg_decoder;
+extern const FFCodec ff_mjpegb_decoder;
+extern const FFCodec ff_mmvideo_decoder;
+extern const FFCodec ff_mobiclip_decoder;
+extern const FFCodec ff_motionpixels_decoder;
+extern const FFCodec ff_mpeg1video_encoder;
+extern const FFCodec ff_mpeg1video_decoder;
+extern const FFCodec ff_mpeg2video_encoder;
+extern const FFCodec ff_mpeg2video_decoder;
+extern const FFCodec ff_mpeg4_encoder;
+extern const FFCodec ff_mpeg4_decoder;
+extern const FFCodec ff_mpeg4_crystalhd_decoder;
+extern const FFCodec ff_mpeg4_v4l2m2m_decoder;
+extern const FFCodec ff_mpeg4_mmal_decoder;
+extern const FFCodec ff_mpegvideo_decoder;
+extern const FFCodec ff_mpeg1_v4l2m2m_decoder;
+extern const FFCodec ff_mpeg2_mmal_decoder;
+extern const FFCodec ff_mpeg2_crystalhd_decoder;
+extern const FFCodec ff_mpeg2_v4l2m2m_decoder;
+extern const FFCodec ff_mpeg2_qsv_decoder;
+extern const FFCodec ff_mpeg2_mediacodec_decoder;
+extern const FFCodec ff_msa1_decoder;
+extern const FFCodec ff_mscc_decoder;
+extern const FFCodec ff_msmpeg4v1_decoder;
+extern const FFCodec ff_msmpeg4v2_encoder;
+extern const FFCodec ff_msmpeg4v2_decoder;
+extern const FFCodec ff_msmpeg4v3_encoder;
+extern const FFCodec ff_msmpeg4v3_decoder;
+extern const FFCodec ff_msmpeg4_crystalhd_decoder;
+extern const FFCodec ff_msp2_decoder;
+extern const FFCodec ff_msrle_decoder;
+extern const FFCodec ff_mss1_decoder;
+extern const FFCodec ff_mss2_decoder;
+extern const FFCodec ff_msvideo1_encoder;
+extern const FFCodec ff_msvideo1_decoder;
+extern const FFCodec ff_mszh_decoder;
+extern const FFCodec ff_mts2_decoder;
+extern const FFCodec ff_mv30_decoder;
+extern const FFCodec ff_mvc1_decoder;
+extern const FFCodec ff_mvc2_decoder;
+extern const FFCodec ff_mvdv_decoder;
+extern const FFCodec ff_mvha_decoder;
+extern const FFCodec ff_mwsc_decoder;
+extern const FFCodec ff_mxpeg_decoder;
+extern const FFCodec ff_notchlc_decoder;
+extern const FFCodec ff_nuv_decoder;
+extern const FFCodec ff_paf_video_decoder;
+extern const FFCodec ff_pam_encoder;
+extern const FFCodec ff_pam_decoder;
+extern const FFCodec ff_pbm_encoder;
+extern const FFCodec ff_pbm_decoder;
+extern const FFCodec ff_pcx_encoder;
+extern const FFCodec ff_pcx_decoder;
+extern const FFCodec ff_pfm_encoder;
+extern const FFCodec ff_pfm_decoder;
+extern const FFCodec ff_pgm_encoder;
+extern const FFCodec ff_pgm_decoder;
+extern const FFCodec ff_pgmyuv_encoder;
+extern const FFCodec ff_pgmyuv_decoder;
+extern const FFCodec ff_pgx_decoder;
+extern const FFCodec ff_phm_encoder;
+extern const FFCodec ff_phm_decoder;
+extern const FFCodec ff_photocd_decoder;
+extern const FFCodec ff_pictor_decoder;
+extern const FFCodec ff_pixlet_decoder;
+extern const FFCodec ff_png_encoder;
+extern const FFCodec ff_png_decoder;
+extern const FFCodec ff_ppm_encoder;
+extern const FFCodec ff_ppm_decoder;
+extern const FFCodec ff_prores_encoder;
+extern const FFCodec ff_prores_decoder;
+extern const FFCodec ff_prores_aw_encoder;
+extern const FFCodec ff_prores_ks_encoder;
+extern const FFCodec ff_prosumer_decoder;
+extern const FFCodec ff_psd_decoder;
+extern const FFCodec ff_ptx_decoder;
+extern const FFCodec ff_qdraw_decoder;
+extern const FFCodec ff_qoi_encoder;
+extern const FFCodec ff_qoi_decoder;
+extern const FFCodec ff_qpeg_decoder;
+extern const FFCodec ff_qtrle_encoder;
+extern const FFCodec ff_qtrle_decoder;
+extern const FFCodec ff_r10k_encoder;
+extern const FFCodec ff_r10k_decoder;
+extern const FFCodec ff_r210_encoder;
+extern const FFCodec ff_r210_decoder;
+extern const FFCodec ff_rasc_decoder;
+extern const FFCodec ff_rawvideo_encoder;
+extern const FFCodec ff_rawvideo_decoder;
+extern const FFCodec ff_rka_decoder;
+extern const FFCodec ff_rl2_decoder;
+extern const FFCodec ff_roq_encoder;
+extern const FFCodec ff_roq_decoder;
+extern const FFCodec ff_rpza_encoder;
+extern const FFCodec ff_rpza_decoder;
+extern const FFCodec ff_rscc_decoder;
+extern const FFCodec ff_rv10_encoder;
+extern const FFCodec ff_rv10_decoder;
+extern const FFCodec ff_rv20_encoder;
+extern const FFCodec ff_rv20_decoder;
+extern const FFCodec ff_rv30_decoder;
+extern const FFCodec ff_rv40_decoder;
+extern const FFCodec ff_s302m_encoder;
+extern const FFCodec ff_s302m_decoder;
+extern const FFCodec ff_sanm_decoder;
+extern const FFCodec ff_scpr_decoder;
+extern const FFCodec ff_screenpresso_decoder;
+extern const FFCodec ff_sga_decoder;
+extern const FFCodec ff_sgi_encoder;
+extern const FFCodec ff_sgi_decoder;
+extern const FFCodec ff_sgirle_decoder;
+extern const FFCodec ff_sheervideo_decoder;
+extern const FFCodec ff_simbiosis_imx_decoder;
+extern const FFCodec ff_smacker_decoder;
+extern const FFCodec ff_smc_encoder;
+extern const FFCodec ff_smc_decoder;
+extern const FFCodec ff_smvjpeg_decoder;
+extern const FFCodec ff_snow_encoder;
+extern const FFCodec ff_snow_decoder;
+extern const FFCodec ff_sp5x_decoder;
+extern const FFCodec ff_speedhq_decoder;
+extern const FFCodec ff_speedhq_encoder;
+extern const FFCodec ff_speex_decoder;
+extern const FFCodec ff_srgc_decoder;
+extern const FFCodec ff_sunrast_encoder;
+extern const FFCodec ff_sunrast_decoder;
+extern const FFCodec ff_svq1_encoder;
+extern const FFCodec ff_svq1_decoder;
+extern const FFCodec ff_svq3_decoder;
+extern const FFCodec ff_targa_encoder;
+extern const FFCodec ff_targa_decoder;
+extern const FFCodec ff_targa_y216_decoder;
+extern const FFCodec ff_tdsc_decoder;
+extern const FFCodec ff_theora_decoder;
+extern const FFCodec ff_thp_decoder;
+extern const FFCodec ff_tiertexseqvideo_decoder;
+extern const FFCodec ff_tiff_encoder;
+extern const FFCodec ff_tiff_decoder;
+extern const FFCodec ff_tmv_decoder;
+extern const FFCodec ff_truemotion1_decoder;
+extern const FFCodec ff_truemotion2_decoder;
+extern const FFCodec ff_truemotion2rt_decoder;
+extern const FFCodec ff_tscc_decoder;
+extern const FFCodec ff_tscc2_decoder;
+extern const FFCodec ff_txd_decoder;
+extern const FFCodec ff_ulti_decoder;
+extern const FFCodec ff_utvideo_encoder;
+extern const FFCodec ff_utvideo_decoder;
+extern const FFCodec ff_v210_encoder;
+extern const FFCodec ff_v210_decoder;
+extern const FFCodec ff_v210x_decoder;
+extern const FFCodec ff_v308_encoder;
+extern const FFCodec ff_v308_decoder;
+extern const FFCodec ff_v408_encoder;
+extern const FFCodec ff_v408_decoder;
+extern const FFCodec ff_v410_encoder;
+extern const FFCodec ff_v410_decoder;
+extern const FFCodec ff_vb_decoder;
+extern const FFCodec ff_vbn_encoder;
+extern const FFCodec ff_vbn_decoder;
+extern const FFCodec ff_vble_decoder;
+extern const FFCodec ff_vc1_decoder;
+extern const FFCodec ff_vc1_crystalhd_decoder;
+extern const FFCodec ff_vc1image_decoder;
+extern const FFCodec ff_vc1_mmal_decoder;
+extern const FFCodec ff_vc1_qsv_decoder;
+extern const FFCodec ff_vc1_v4l2m2m_decoder;
+extern const FFCodec ff_vc2_encoder;
+extern const FFCodec ff_vcr1_decoder;
+extern const FFCodec ff_vmdvideo_decoder;
+extern const FFCodec ff_vmnc_decoder;
+extern const FFCodec ff_vp3_decoder;
+extern const FFCodec ff_vp4_decoder;
+extern const FFCodec ff_vp5_decoder;
+extern const FFCodec ff_vp6_decoder;
+extern const FFCodec ff_vp6a_decoder;
+extern const FFCodec ff_vp6f_decoder;
+extern const FFCodec ff_vp7_decoder;
+extern const FFCodec ff_vp8_decoder;
+extern const FFCodec ff_vp8_rkmpp_decoder;
+extern const FFCodec ff_vp8_v4l2m2m_decoder;
+extern const FFCodec ff_vp9_decoder;
+extern const FFCodec ff_vp9_rkmpp_decoder;
+extern const FFCodec ff_vp9_v4l2m2m_decoder;
+extern const FFCodec ff_vqa_decoder;
+extern const FFCodec ff_vqc_decoder;
+extern const FFCodec ff_wbmp_decoder;
+extern const FFCodec ff_wbmp_encoder;
+extern const FFCodec ff_webp_decoder;
+extern const FFCodec ff_wcmv_decoder;
+extern const FFCodec ff_wrapped_avframe_encoder;
+extern const FFCodec ff_wrapped_avframe_decoder;
+extern const FFCodec ff_wmv1_encoder;
+extern const FFCodec ff_wmv1_decoder;
+extern const FFCodec ff_wmv2_encoder;
+extern const FFCodec ff_wmv2_decoder;
+extern const FFCodec ff_wmv3_decoder;
+extern const FFCodec ff_wmv3_crystalhd_decoder;
+extern const FFCodec ff_wmv3image_decoder;
+extern const FFCodec ff_wnv1_decoder;
+extern const FFCodec ff_xan_wc3_decoder;
+extern const FFCodec ff_xan_wc4_decoder;
+extern const FFCodec ff_xbm_encoder;
+extern const FFCodec ff_xbm_decoder;
+extern const FFCodec ff_xface_encoder;
+extern const FFCodec ff_xface_decoder;
+extern const FFCodec ff_xl_decoder;
+extern const FFCodec ff_xpm_decoder;
+extern const FFCodec ff_xwd_encoder;
+extern const FFCodec ff_xwd_decoder;
+extern const FFCodec ff_y41p_encoder;
+extern const FFCodec ff_y41p_decoder;
+extern const FFCodec ff_ylc_decoder;
+extern const FFCodec ff_yop_decoder;
+extern const FFCodec ff_yuv4_encoder;
+extern const FFCodec ff_yuv4_decoder;
+extern const FFCodec ff_zero12v_decoder;
+extern const FFCodec ff_zerocodec_decoder;
+extern const FFCodec ff_zlib_encoder;
+extern const FFCodec ff_zlib_decoder;
+extern const FFCodec ff_zmbv_encoder;
+extern const FFCodec ff_zmbv_decoder;
/* audio codecs */
-extern AVCodec ff_aac_encoder;
-extern AVCodec ff_aac_decoder;
-extern AVCodec ff_aac_fixed_decoder;
-extern AVCodec ff_aac_latm_decoder;
-extern AVCodec ff_ac3_encoder;
-extern AVCodec ff_ac3_decoder;
-extern AVCodec ff_ac3_fixed_encoder;
-extern AVCodec ff_ac3_fixed_decoder;
-extern AVCodec ff_alac_encoder;
-extern AVCodec ff_alac_decoder;
-extern AVCodec ff_als_decoder;
-extern AVCodec ff_amrnb_decoder;
-extern AVCodec ff_amrwb_decoder;
-extern AVCodec ff_ape_decoder;
-extern AVCodec ff_aptx_encoder;
-extern AVCodec ff_aptx_decoder;
-extern AVCodec ff_aptx_hd_encoder;
-extern AVCodec ff_aptx_hd_decoder;
-extern AVCodec ff_atrac1_decoder;
-extern AVCodec ff_atrac3_decoder;
-extern AVCodec ff_atrac3al_decoder;
-extern AVCodec ff_atrac3p_decoder;
-extern AVCodec ff_atrac3pal_decoder;
-extern AVCodec ff_atrac9_decoder;
-extern AVCodec ff_binkaudio_dct_decoder;
-extern AVCodec ff_binkaudio_rdft_decoder;
-extern AVCodec ff_bmv_audio_decoder;
-extern AVCodec ff_cook_decoder;
-extern AVCodec ff_dca_encoder;
-extern AVCodec ff_dca_decoder;
-extern AVCodec ff_dolby_e_decoder;
-extern AVCodec ff_dsd_lsbf_decoder;
-extern AVCodec ff_dsd_msbf_decoder;
-extern AVCodec ff_dsd_lsbf_planar_decoder;
-extern AVCodec ff_dsd_msbf_planar_decoder;
-extern AVCodec ff_dsicinaudio_decoder;
-extern AVCodec ff_dss_sp_decoder;
-extern AVCodec ff_dst_decoder;
-extern AVCodec ff_eac3_encoder;
-extern AVCodec ff_eac3_decoder;
-extern AVCodec ff_evrc_decoder;
-extern AVCodec ff_ffwavesynth_decoder;
-extern AVCodec ff_flac_encoder;
-extern AVCodec ff_flac_decoder;
-extern AVCodec ff_g723_1_encoder;
-extern AVCodec ff_g723_1_decoder;
-extern AVCodec ff_g729_decoder;
-extern AVCodec ff_gsm_decoder;
-extern AVCodec ff_gsm_ms_decoder;
-extern AVCodec ff_hcom_decoder;
-extern AVCodec ff_iac_decoder;
-extern AVCodec ff_ilbc_decoder;
-extern AVCodec ff_imc_decoder;
-extern AVCodec ff_interplay_acm_decoder;
-extern AVCodec ff_mace3_decoder;
-extern AVCodec ff_mace6_decoder;
-extern AVCodec ff_metasound_decoder;
-extern AVCodec ff_mlp_encoder;
-extern AVCodec ff_mlp_decoder;
-extern AVCodec ff_mp1_decoder;
-extern AVCodec ff_mp1float_decoder;
-extern AVCodec ff_mp2_encoder;
-extern AVCodec ff_mp2_decoder;
-extern AVCodec ff_mp2float_decoder;
-extern AVCodec ff_mp2fixed_encoder;
-extern AVCodec ff_mp3float_decoder;
-extern AVCodec ff_mp3_decoder;
-extern AVCodec ff_mp3adufloat_decoder;
-extern AVCodec ff_mp3adu_decoder;
-extern AVCodec ff_mp3on4float_decoder;
-extern AVCodec ff_mp3on4_decoder;
-extern AVCodec ff_mpc7_decoder;
-extern AVCodec ff_mpc8_decoder;
-extern AVCodec ff_nellymoser_encoder;
-extern AVCodec ff_nellymoser_decoder;
-extern AVCodec ff_on2avc_decoder;
-extern AVCodec ff_opus_encoder;
-extern AVCodec ff_opus_decoder;
-extern AVCodec ff_paf_audio_decoder;
-extern AVCodec ff_qcelp_decoder;
-extern AVCodec ff_qdm2_decoder;
-extern AVCodec ff_qdmc_decoder;
-extern AVCodec ff_ra_144_encoder;
-extern AVCodec ff_ra_144_decoder;
-extern AVCodec ff_ra_288_decoder;
-extern AVCodec ff_ralf_decoder;
-extern AVCodec ff_sbc_encoder;
-extern AVCodec ff_sbc_decoder;
-extern AVCodec ff_shorten_decoder;
-extern AVCodec ff_sipr_decoder;
-extern AVCodec ff_smackaud_decoder;
-extern AVCodec ff_sonic_encoder;
-extern AVCodec ff_sonic_decoder;
-extern AVCodec ff_sonic_ls_encoder;
-extern AVCodec ff_tak_decoder;
-extern AVCodec ff_truehd_encoder;
-extern AVCodec ff_truehd_decoder;
-extern AVCodec ff_truespeech_decoder;
-extern AVCodec ff_tta_encoder;
-extern AVCodec ff_tta_decoder;
-extern AVCodec ff_twinvq_decoder;
-extern AVCodec ff_vmdaudio_decoder;
-extern AVCodec ff_vorbis_encoder;
-extern AVCodec ff_vorbis_decoder;
-extern AVCodec ff_wavpack_encoder;
-extern AVCodec ff_wavpack_decoder;
-extern AVCodec ff_wmalossless_decoder;
-extern AVCodec ff_wmapro_decoder;
-extern AVCodec ff_wmav1_encoder;
-extern AVCodec ff_wmav1_decoder;
-extern AVCodec ff_wmav2_encoder;
-extern AVCodec ff_wmav2_decoder;
-extern AVCodec ff_wmavoice_decoder;
-extern AVCodec ff_ws_snd1_decoder;
-extern AVCodec ff_xma1_decoder;
-extern AVCodec ff_xma2_decoder;
+extern const FFCodec ff_aac_encoder;
+extern const FFCodec ff_aac_decoder;
+extern const FFCodec ff_aac_fixed_decoder;
+extern const FFCodec ff_aac_latm_decoder;
+extern const FFCodec ff_ac3_encoder;
+extern const FFCodec ff_ac3_decoder;
+extern const FFCodec ff_ac3_fixed_encoder;
+extern const FFCodec ff_ac3_fixed_decoder;
+extern const FFCodec ff_acelp_kelvin_decoder;
+extern const FFCodec ff_alac_encoder;
+extern const FFCodec ff_alac_decoder;
+extern const FFCodec ff_als_decoder;
+extern const FFCodec ff_amrnb_decoder;
+extern const FFCodec ff_amrwb_decoder;
+extern const FFCodec ff_apac_decoder;
+extern const FFCodec ff_ape_decoder;
+extern const FFCodec ff_aptx_encoder;
+extern const FFCodec ff_aptx_decoder;
+extern const FFCodec ff_aptx_hd_encoder;
+extern const FFCodec ff_aptx_hd_decoder;
+extern const FFCodec ff_atrac1_decoder;
+extern const FFCodec ff_atrac3_decoder;
+extern const FFCodec ff_atrac3al_decoder;
+extern const FFCodec ff_atrac3p_decoder;
+extern const FFCodec ff_atrac3pal_decoder;
+extern const FFCodec ff_atrac9_decoder;
+extern const FFCodec ff_binkaudio_dct_decoder;
+extern const FFCodec ff_binkaudio_rdft_decoder;
+extern const FFCodec ff_bmv_audio_decoder;
+extern const FFCodec ff_bonk_decoder;
+extern const FFCodec ff_cook_decoder;
+extern const FFCodec ff_dca_encoder;
+extern const FFCodec ff_dca_decoder;
+extern const FFCodec ff_dfpwm_encoder;
+extern const FFCodec ff_dfpwm_decoder;
+extern const FFCodec ff_dolby_e_decoder;
+extern const FFCodec ff_dsd_lsbf_decoder;
+extern const FFCodec ff_dsd_msbf_decoder;
+extern const FFCodec ff_dsd_lsbf_planar_decoder;
+extern const FFCodec ff_dsd_msbf_planar_decoder;
+extern const FFCodec ff_dsicinaudio_decoder;
+extern const FFCodec ff_dss_sp_decoder;
+extern const FFCodec ff_dst_decoder;
+extern const FFCodec ff_eac3_encoder;
+extern const FFCodec ff_eac3_decoder;
+extern const FFCodec ff_evrc_decoder;
+extern const FFCodec ff_fastaudio_decoder;
+extern const FFCodec ff_ffwavesynth_decoder;
+extern const FFCodec ff_flac_encoder;
+extern const FFCodec ff_flac_decoder;
+extern const FFCodec ff_ftr_decoder;
+extern const FFCodec ff_g723_1_encoder;
+extern const FFCodec ff_g723_1_decoder;
+extern const FFCodec ff_g729_decoder;
+extern const FFCodec ff_gsm_decoder;
+extern const FFCodec ff_gsm_ms_decoder;
+extern const FFCodec ff_hca_decoder;
+extern const FFCodec ff_hcom_decoder;
+extern const FFCodec ff_hdr_encoder;
+extern const FFCodec ff_hdr_decoder;
+extern const FFCodec ff_iac_decoder;
+extern const FFCodec ff_ilbc_decoder;
+extern const FFCodec ff_imc_decoder;
+extern const FFCodec ff_interplay_acm_decoder;
+extern const FFCodec ff_mace3_decoder;
+extern const FFCodec ff_mace6_decoder;
+extern const FFCodec ff_metasound_decoder;
+extern const FFCodec ff_misc4_decoder;
+extern const FFCodec ff_mlp_encoder;
+extern const FFCodec ff_mlp_decoder;
+extern const FFCodec ff_mp1_decoder;
+extern const FFCodec ff_mp1float_decoder;
+extern const FFCodec ff_mp2_encoder;
+extern const FFCodec ff_mp2_decoder;
+extern const FFCodec ff_mp2float_decoder;
+extern const FFCodec ff_mp2fixed_encoder;
+extern const FFCodec ff_mp3float_decoder;
+extern const FFCodec ff_mp3_decoder;
+extern const FFCodec ff_mp3adufloat_decoder;
+extern const FFCodec ff_mp3adu_decoder;
+extern const FFCodec ff_mp3on4float_decoder;
+extern const FFCodec ff_mp3on4_decoder;
+extern const FFCodec ff_mpc7_decoder;
+extern const FFCodec ff_mpc8_decoder;
+extern const FFCodec ff_msnsiren_decoder;
+extern const FFCodec ff_nellymoser_encoder;
+extern const FFCodec ff_nellymoser_decoder;
+extern const FFCodec ff_on2avc_decoder;
+extern const FFCodec ff_opus_encoder;
+extern const FFCodec ff_opus_decoder;
+extern const FFCodec ff_paf_audio_decoder;
+extern const FFCodec ff_qcelp_decoder;
+extern const FFCodec ff_qdm2_decoder;
+extern const FFCodec ff_qdmc_decoder;
+extern const FFCodec ff_ra_144_encoder;
+extern const FFCodec ff_ra_144_decoder;
+extern const FFCodec ff_ra_288_decoder;
+extern const FFCodec ff_ralf_decoder;
+extern const FFCodec ff_sbc_encoder;
+extern const FFCodec ff_sbc_decoder;
+extern const FFCodec ff_shorten_decoder;
+extern const FFCodec ff_sipr_decoder;
+extern const FFCodec ff_siren_decoder;
+extern const FFCodec ff_smackaud_decoder;
+extern const FFCodec ff_sonic_encoder;
+extern const FFCodec ff_sonic_decoder;
+extern const FFCodec ff_sonic_ls_encoder;
+extern const FFCodec ff_tak_decoder;
+extern const FFCodec ff_truehd_encoder;
+extern const FFCodec ff_truehd_decoder;
+extern const FFCodec ff_truespeech_decoder;
+extern const FFCodec ff_tta_encoder;
+extern const FFCodec ff_tta_decoder;
+extern const FFCodec ff_twinvq_decoder;
+extern const FFCodec ff_vmdaudio_decoder;
+extern const FFCodec ff_vorbis_encoder;
+extern const FFCodec ff_vorbis_decoder;
+extern const FFCodec ff_wavarc_decoder;
+extern const FFCodec ff_wavpack_encoder;
+extern const FFCodec ff_wavpack_decoder;
+extern const FFCodec ff_wmalossless_decoder;
+extern const FFCodec ff_wmapro_decoder;
+extern const FFCodec ff_wmav1_encoder;
+extern const FFCodec ff_wmav1_decoder;
+extern const FFCodec ff_wmav2_encoder;
+extern const FFCodec ff_wmav2_decoder;
+extern const FFCodec ff_wmavoice_decoder;
+extern const FFCodec ff_ws_snd1_decoder;
+extern const FFCodec ff_xma1_decoder;
+extern const FFCodec ff_xma2_decoder;
/* PCM codecs */
-extern AVCodec ff_pcm_alaw_encoder;
-extern AVCodec ff_pcm_alaw_decoder;
-extern AVCodec ff_pcm_bluray_decoder;
-extern AVCodec ff_pcm_dvd_encoder;
-extern AVCodec ff_pcm_dvd_decoder;
-extern AVCodec ff_pcm_f16le_decoder;
-extern AVCodec ff_pcm_f24le_decoder;
-extern AVCodec ff_pcm_f32be_encoder;
-extern AVCodec ff_pcm_f32be_decoder;
-extern AVCodec ff_pcm_f32le_encoder;
-extern AVCodec ff_pcm_f32le_decoder;
-extern AVCodec ff_pcm_f64be_encoder;
-extern AVCodec ff_pcm_f64be_decoder;
-extern AVCodec ff_pcm_f64le_encoder;
-extern AVCodec ff_pcm_f64le_decoder;
-extern AVCodec ff_pcm_lxf_decoder;
-extern AVCodec ff_pcm_mulaw_encoder;
-extern AVCodec ff_pcm_mulaw_decoder;
-extern AVCodec ff_pcm_s8_encoder;
-extern AVCodec ff_pcm_s8_decoder;
-extern AVCodec ff_pcm_s8_planar_encoder;
-extern AVCodec ff_pcm_s8_planar_decoder;
-extern AVCodec ff_pcm_s16be_encoder;
-extern AVCodec ff_pcm_s16be_decoder;
-extern AVCodec ff_pcm_s16be_planar_encoder;
-extern AVCodec ff_pcm_s16be_planar_decoder;
-extern AVCodec ff_pcm_s16le_encoder;
-extern AVCodec ff_pcm_s16le_decoder;
-extern AVCodec ff_pcm_s16le_planar_encoder;
-extern AVCodec ff_pcm_s16le_planar_decoder;
-extern AVCodec ff_pcm_s24be_encoder;
-extern AVCodec ff_pcm_s24be_decoder;
-extern AVCodec ff_pcm_s24daud_encoder;
-extern AVCodec ff_pcm_s24daud_decoder;
-extern AVCodec ff_pcm_s24le_encoder;
-extern AVCodec ff_pcm_s24le_decoder;
-extern AVCodec ff_pcm_s24le_planar_encoder;
-extern AVCodec ff_pcm_s24le_planar_decoder;
-extern AVCodec ff_pcm_s32be_encoder;
-extern AVCodec ff_pcm_s32be_decoder;
-extern AVCodec ff_pcm_s32le_encoder;
-extern AVCodec ff_pcm_s32le_decoder;
-extern AVCodec ff_pcm_s32le_planar_encoder;
-extern AVCodec ff_pcm_s32le_planar_decoder;
-extern AVCodec ff_pcm_s64be_encoder;
-extern AVCodec ff_pcm_s64be_decoder;
-extern AVCodec ff_pcm_s64le_encoder;
-extern AVCodec ff_pcm_s64le_decoder;
-extern AVCodec ff_pcm_u8_encoder;
-extern AVCodec ff_pcm_u8_decoder;
-extern AVCodec ff_pcm_u16be_encoder;
-extern AVCodec ff_pcm_u16be_decoder;
-extern AVCodec ff_pcm_u16le_encoder;
-extern AVCodec ff_pcm_u16le_decoder;
-extern AVCodec ff_pcm_u24be_encoder;
-extern AVCodec ff_pcm_u24be_decoder;
-extern AVCodec ff_pcm_u24le_encoder;
-extern AVCodec ff_pcm_u24le_decoder;
-extern AVCodec ff_pcm_u32be_encoder;
-extern AVCodec ff_pcm_u32be_decoder;
-extern AVCodec ff_pcm_u32le_encoder;
-extern AVCodec ff_pcm_u32le_decoder;
-extern AVCodec ff_pcm_vidc_encoder;
-extern AVCodec ff_pcm_vidc_decoder;
-extern AVCodec ff_pcm_zork_decoder;
+extern const FFCodec ff_pcm_alaw_encoder;
+extern const FFCodec ff_pcm_alaw_decoder;
+extern const FFCodec ff_pcm_bluray_encoder;
+extern const FFCodec ff_pcm_bluray_decoder;
+extern const FFCodec ff_pcm_dvd_encoder;
+extern const FFCodec ff_pcm_dvd_decoder;
+extern const FFCodec ff_pcm_f16le_decoder;
+extern const FFCodec ff_pcm_f24le_decoder;
+extern const FFCodec ff_pcm_f32be_encoder;
+extern const FFCodec ff_pcm_f32be_decoder;
+extern const FFCodec ff_pcm_f32le_encoder;
+extern const FFCodec ff_pcm_f32le_decoder;
+extern const FFCodec ff_pcm_f64be_encoder;
+extern const FFCodec ff_pcm_f64be_decoder;
+extern const FFCodec ff_pcm_f64le_encoder;
+extern const FFCodec ff_pcm_f64le_decoder;
+extern const FFCodec ff_pcm_lxf_decoder;
+extern const FFCodec ff_pcm_mulaw_encoder;
+extern const FFCodec ff_pcm_mulaw_decoder;
+extern const FFCodec ff_pcm_s8_encoder;
+extern const FFCodec ff_pcm_s8_decoder;
+extern const FFCodec ff_pcm_s8_planar_encoder;
+extern const FFCodec ff_pcm_s8_planar_decoder;
+extern const FFCodec ff_pcm_s16be_encoder;
+extern const FFCodec ff_pcm_s16be_decoder;
+extern const FFCodec ff_pcm_s16be_planar_encoder;
+extern const FFCodec ff_pcm_s16be_planar_decoder;
+extern const FFCodec ff_pcm_s16le_encoder;
+extern const FFCodec ff_pcm_s16le_decoder;
+extern const FFCodec ff_pcm_s16le_planar_encoder;
+extern const FFCodec ff_pcm_s16le_planar_decoder;
+extern const FFCodec ff_pcm_s24be_encoder;
+extern const FFCodec ff_pcm_s24be_decoder;
+extern const FFCodec ff_pcm_s24daud_encoder;
+extern const FFCodec ff_pcm_s24daud_decoder;
+extern const FFCodec ff_pcm_s24le_encoder;
+extern const FFCodec ff_pcm_s24le_decoder;
+extern const FFCodec ff_pcm_s24le_planar_encoder;
+extern const FFCodec ff_pcm_s24le_planar_decoder;
+extern const FFCodec ff_pcm_s32be_encoder;
+extern const FFCodec ff_pcm_s32be_decoder;
+extern const FFCodec ff_pcm_s32le_encoder;
+extern const FFCodec ff_pcm_s32le_decoder;
+extern const FFCodec ff_pcm_s32le_planar_encoder;
+extern const FFCodec ff_pcm_s32le_planar_decoder;
+extern const FFCodec ff_pcm_s64be_encoder;
+extern const FFCodec ff_pcm_s64be_decoder;
+extern const FFCodec ff_pcm_s64le_encoder;
+extern const FFCodec ff_pcm_s64le_decoder;
+extern const FFCodec ff_pcm_sga_decoder;
+extern const FFCodec ff_pcm_u8_encoder;
+extern const FFCodec ff_pcm_u8_decoder;
+extern const FFCodec ff_pcm_u16be_encoder;
+extern const FFCodec ff_pcm_u16be_decoder;
+extern const FFCodec ff_pcm_u16le_encoder;
+extern const FFCodec ff_pcm_u16le_decoder;
+extern const FFCodec ff_pcm_u24be_encoder;
+extern const FFCodec ff_pcm_u24be_decoder;
+extern const FFCodec ff_pcm_u24le_encoder;
+extern const FFCodec ff_pcm_u24le_decoder;
+extern const FFCodec ff_pcm_u32be_encoder;
+extern const FFCodec ff_pcm_u32be_decoder;
+extern const FFCodec ff_pcm_u32le_encoder;
+extern const FFCodec ff_pcm_u32le_decoder;
+extern const FFCodec ff_pcm_vidc_encoder;
+extern const FFCodec ff_pcm_vidc_decoder;
/* DPCM codecs */
-extern AVCodec ff_gremlin_dpcm_decoder;
-extern AVCodec ff_interplay_dpcm_decoder;
-extern AVCodec ff_roq_dpcm_encoder;
-extern AVCodec ff_roq_dpcm_decoder;
-extern AVCodec ff_sol_dpcm_decoder;
-extern AVCodec ff_xan_dpcm_decoder;
+extern const FFCodec ff_cbd2_dpcm_decoder;
+extern const FFCodec ff_derf_dpcm_decoder;
+extern const FFCodec ff_gremlin_dpcm_decoder;
+extern const FFCodec ff_interplay_dpcm_decoder;
+extern const FFCodec ff_roq_dpcm_encoder;
+extern const FFCodec ff_roq_dpcm_decoder;
+extern const FFCodec ff_sdx2_dpcm_decoder;
+extern const FFCodec ff_sol_dpcm_decoder;
+extern const FFCodec ff_xan_dpcm_decoder;
+extern const FFCodec ff_wady_dpcm_decoder;
/* ADPCM codecs */
-extern AVCodec ff_adpcm_4xm_decoder;
-extern AVCodec ff_adpcm_adx_encoder;
-extern AVCodec ff_adpcm_adx_decoder;
-extern AVCodec ff_adpcm_afc_decoder;
-extern AVCodec ff_adpcm_agm_decoder;
-extern AVCodec ff_adpcm_aica_decoder;
-extern AVCodec ff_adpcm_ct_decoder;
-extern AVCodec ff_adpcm_dtk_decoder;
-extern AVCodec ff_adpcm_ea_decoder;
-extern AVCodec ff_adpcm_ea_maxis_xa_decoder;
-extern AVCodec ff_adpcm_ea_r1_decoder;
-extern AVCodec ff_adpcm_ea_r2_decoder;
-extern AVCodec ff_adpcm_ea_r3_decoder;
-extern AVCodec ff_adpcm_ea_xas_decoder;
-extern AVCodec ff_adpcm_g722_encoder;
-extern AVCodec ff_adpcm_g722_decoder;
-extern AVCodec ff_adpcm_g726_encoder;
-extern AVCodec ff_adpcm_g726_decoder;
-extern AVCodec ff_adpcm_g726le_encoder;
-extern AVCodec ff_adpcm_g726le_decoder;
-extern AVCodec ff_adpcm_ima_amv_decoder;
-extern AVCodec ff_adpcm_ima_apc_decoder;
-extern AVCodec ff_adpcm_ima_dat4_decoder;
-extern AVCodec ff_adpcm_ima_dk3_decoder;
-extern AVCodec ff_adpcm_ima_dk4_decoder;
-extern AVCodec ff_adpcm_ima_ea_eacs_decoder;
-extern AVCodec ff_adpcm_ima_ea_sead_decoder;
-extern AVCodec ff_adpcm_ima_iss_decoder;
-extern AVCodec ff_adpcm_ima_oki_decoder;
-extern AVCodec ff_adpcm_ima_qt_encoder;
-extern AVCodec ff_adpcm_ima_qt_decoder;
-extern AVCodec ff_adpcm_ima_rad_decoder;
-extern AVCodec ff_adpcm_ima_smjpeg_decoder;
-extern AVCodec ff_adpcm_ima_wav_encoder;
-extern AVCodec ff_adpcm_ima_wav_decoder;
-extern AVCodec ff_adpcm_ima_ws_decoder;
-extern AVCodec ff_adpcm_ms_encoder;
-extern AVCodec ff_adpcm_ms_decoder;
-extern AVCodec ff_adpcm_mtaf_decoder;
-extern AVCodec ff_adpcm_psx_decoder;
-extern AVCodec ff_adpcm_sbpro_2_decoder;
-extern AVCodec ff_adpcm_sbpro_3_decoder;
-extern AVCodec ff_adpcm_sbpro_4_decoder;
-extern AVCodec ff_adpcm_swf_encoder;
-extern AVCodec ff_adpcm_swf_decoder;
-extern AVCodec ff_adpcm_thp_decoder;
-extern AVCodec ff_adpcm_thp_le_decoder;
-extern AVCodec ff_adpcm_vima_decoder;
-extern AVCodec ff_adpcm_xa_decoder;
-extern AVCodec ff_adpcm_yamaha_encoder;
-extern AVCodec ff_adpcm_yamaha_decoder;
+extern const FFCodec ff_adpcm_4xm_decoder;
+extern const FFCodec ff_adpcm_adx_encoder;
+extern const FFCodec ff_adpcm_adx_decoder;
+extern const FFCodec ff_adpcm_afc_decoder;
+extern const FFCodec ff_adpcm_agm_decoder;
+extern const FFCodec ff_adpcm_aica_decoder;
+extern const FFCodec ff_adpcm_argo_decoder;
+extern const FFCodec ff_adpcm_argo_encoder;
+extern const FFCodec ff_adpcm_ct_decoder;
+extern const FFCodec ff_adpcm_dtk_decoder;
+extern const FFCodec ff_adpcm_ea_decoder;
+extern const FFCodec ff_adpcm_ea_maxis_xa_decoder;
+extern const FFCodec ff_adpcm_ea_r1_decoder;
+extern const FFCodec ff_adpcm_ea_r2_decoder;
+extern const FFCodec ff_adpcm_ea_r3_decoder;
+extern const FFCodec ff_adpcm_ea_xas_decoder;
+extern const FFCodec ff_adpcm_g722_encoder;
+extern const FFCodec ff_adpcm_g722_decoder;
+extern const FFCodec ff_adpcm_g726_encoder;
+extern const FFCodec ff_adpcm_g726_decoder;
+extern const FFCodec ff_adpcm_g726le_encoder;
+extern const FFCodec ff_adpcm_g726le_decoder;
+extern const FFCodec ff_adpcm_ima_acorn_decoder;
+extern const FFCodec ff_adpcm_ima_amv_decoder;
+extern const FFCodec ff_adpcm_ima_amv_encoder;
+extern const FFCodec ff_adpcm_ima_alp_decoder;
+extern const FFCodec ff_adpcm_ima_alp_encoder;
+extern const FFCodec ff_adpcm_ima_apc_decoder;
+extern const FFCodec ff_adpcm_ima_apm_decoder;
+extern const FFCodec ff_adpcm_ima_apm_encoder;
+extern const FFCodec ff_adpcm_ima_cunning_decoder;
+extern const FFCodec ff_adpcm_ima_dat4_decoder;
+extern const FFCodec ff_adpcm_ima_dk3_decoder;
+extern const FFCodec ff_adpcm_ima_dk4_decoder;
+extern const FFCodec ff_adpcm_ima_ea_eacs_decoder;
+extern const FFCodec ff_adpcm_ima_ea_sead_decoder;
+extern const FFCodec ff_adpcm_ima_iss_decoder;
+extern const FFCodec ff_adpcm_ima_moflex_decoder;
+extern const FFCodec ff_adpcm_ima_mtf_decoder;
+extern const FFCodec ff_adpcm_ima_oki_decoder;
+extern const FFCodec ff_adpcm_ima_qt_encoder;
+extern const FFCodec ff_adpcm_ima_qt_decoder;
+extern const FFCodec ff_adpcm_ima_rad_decoder;
+extern const FFCodec ff_adpcm_ima_ssi_decoder;
+extern const FFCodec ff_adpcm_ima_ssi_encoder;
+extern const FFCodec ff_adpcm_ima_smjpeg_decoder;
+extern const FFCodec ff_adpcm_ima_wav_encoder;
+extern const FFCodec ff_adpcm_ima_wav_decoder;
+extern const FFCodec ff_adpcm_ima_ws_encoder;
+extern const FFCodec ff_adpcm_ima_ws_decoder;
+extern const FFCodec ff_adpcm_ms_encoder;
+extern const FFCodec ff_adpcm_ms_decoder;
+extern const FFCodec ff_adpcm_mtaf_decoder;
+extern const FFCodec ff_adpcm_psx_decoder;
+extern const FFCodec ff_adpcm_sbpro_2_decoder;
+extern const FFCodec ff_adpcm_sbpro_3_decoder;
+extern const FFCodec ff_adpcm_sbpro_4_decoder;
+extern const FFCodec ff_adpcm_swf_encoder;
+extern const FFCodec ff_adpcm_swf_decoder;
+extern const FFCodec ff_adpcm_thp_decoder;
+extern const FFCodec ff_adpcm_thp_le_decoder;
+extern const FFCodec ff_adpcm_vima_decoder;
+extern const FFCodec ff_adpcm_xa_decoder;
+extern const FFCodec ff_adpcm_xmd_decoder;
+extern const FFCodec ff_adpcm_yamaha_encoder;
+extern const FFCodec ff_adpcm_yamaha_decoder;
+extern const FFCodec ff_adpcm_zork_decoder;
/* subtitles */
-extern AVCodec ff_ssa_encoder;
-extern AVCodec ff_ssa_decoder;
-extern AVCodec ff_ass_encoder;
-extern AVCodec ff_ass_decoder;
-extern AVCodec ff_ccaption_decoder;
-extern AVCodec ff_dvbsub_encoder;
-extern AVCodec ff_dvbsub_decoder;
-extern AVCodec ff_dvdsub_encoder;
-extern AVCodec ff_dvdsub_decoder;
-extern AVCodec ff_jacosub_decoder;
-extern AVCodec ff_microdvd_decoder;
-extern AVCodec ff_movtext_encoder;
-extern AVCodec ff_movtext_decoder;
-extern AVCodec ff_mpl2_decoder;
-extern AVCodec ff_pgssub_decoder;
-extern AVCodec ff_pjs_decoder;
-extern AVCodec ff_realtext_decoder;
-extern AVCodec ff_sami_decoder;
-extern AVCodec ff_srt_encoder;
-extern AVCodec ff_srt_decoder;
-extern AVCodec ff_stl_decoder;
-extern AVCodec ff_subrip_encoder;
-extern AVCodec ff_subrip_decoder;
-extern AVCodec ff_subviewer_decoder;
-extern AVCodec ff_subviewer1_decoder;
-extern AVCodec ff_text_encoder;
-extern AVCodec ff_text_decoder;
-extern AVCodec ff_vplayer_decoder;
-extern AVCodec ff_webvtt_encoder;
-extern AVCodec ff_webvtt_decoder;
-extern AVCodec ff_xsub_encoder;
-extern AVCodec ff_xsub_decoder;
+extern const FFCodec ff_ssa_encoder;
+extern const FFCodec ff_ssa_decoder;
+extern const FFCodec ff_ass_encoder;
+extern const FFCodec ff_ass_decoder;
+extern const FFCodec ff_ccaption_decoder;
+extern const FFCodec ff_dvbsub_encoder;
+extern const FFCodec ff_dvbsub_decoder;
+extern const FFCodec ff_dvdsub_encoder;
+extern const FFCodec ff_dvdsub_decoder;
+extern const FFCodec ff_jacosub_decoder;
+extern const FFCodec ff_microdvd_decoder;
+extern const FFCodec ff_movtext_encoder;
+extern const FFCodec ff_movtext_decoder;
+extern const FFCodec ff_mpl2_decoder;
+extern const FFCodec ff_pgssub_decoder;
+extern const FFCodec ff_pjs_decoder;
+extern const FFCodec ff_realtext_decoder;
+extern const FFCodec ff_sami_decoder;
+extern const FFCodec ff_srt_encoder;
+extern const FFCodec ff_srt_decoder;
+extern const FFCodec ff_stl_decoder;
+extern const FFCodec ff_subrip_encoder;
+extern const FFCodec ff_subrip_decoder;
+extern const FFCodec ff_subviewer_decoder;
+extern const FFCodec ff_subviewer1_decoder;
+extern const FFCodec ff_text_encoder;
+extern const FFCodec ff_text_decoder;
+extern const FFCodec ff_ttml_encoder;
+extern const FFCodec ff_vplayer_decoder;
+extern const FFCodec ff_webvtt_encoder;
+extern const FFCodec ff_webvtt_decoder;
+extern const FFCodec ff_xsub_encoder;
+extern const FFCodec ff_xsub_decoder;
/* external libraries */
-extern AVCodec ff_aac_at_encoder;
-extern AVCodec ff_aac_at_decoder;
-extern AVCodec ff_ac3_at_decoder;
-extern AVCodec ff_adpcm_ima_qt_at_decoder;
-extern AVCodec ff_alac_at_encoder;
-extern AVCodec ff_alac_at_decoder;
-extern AVCodec ff_amr_nb_at_decoder;
-extern AVCodec ff_eac3_at_decoder;
-extern AVCodec ff_gsm_ms_at_decoder;
-extern AVCodec ff_ilbc_at_encoder;
-extern AVCodec ff_ilbc_at_decoder;
-extern AVCodec ff_mp1_at_decoder;
-extern AVCodec ff_mp2_at_decoder;
-extern AVCodec ff_mp3_at_decoder;
-extern AVCodec ff_pcm_alaw_at_encoder;
-extern AVCodec ff_pcm_alaw_at_decoder;
-extern AVCodec ff_pcm_mulaw_at_encoder;
-extern AVCodec ff_pcm_mulaw_at_decoder;
-extern AVCodec ff_qdmc_at_decoder;
-extern AVCodec ff_qdm2_at_decoder;
-extern AVCodec ff_libaom_av1_decoder;
-extern AVCodec ff_libaom_av1_encoder;
-extern AVCodec ff_libaribb24_decoder;
-extern AVCodec ff_libcelt_decoder;
-extern AVCodec ff_libcodec2_encoder;
-extern AVCodec ff_libcodec2_decoder;
-extern AVCodec ff_libdav1d_decoder;
-extern AVCodec ff_libdavs2_decoder;
-extern AVCodec ff_libfdk_aac_encoder;
-extern AVCodec ff_libfdk_aac_decoder;
-extern AVCodec ff_libgsm_encoder;
-extern AVCodec ff_libgsm_decoder;
-extern AVCodec ff_libgsm_ms_encoder;
-extern AVCodec ff_libgsm_ms_decoder;
-extern AVCodec ff_libilbc_encoder;
-extern AVCodec ff_libilbc_decoder;
-extern AVCodec ff_libmp3lame_encoder;
-extern AVCodec ff_libopencore_amrnb_encoder;
-extern AVCodec ff_libopencore_amrnb_decoder;
-extern AVCodec ff_libopencore_amrwb_decoder;
-extern AVCodec ff_libopenjpeg_encoder;
-extern AVCodec ff_libopenjpeg_decoder;
-extern AVCodec ff_libopus_encoder;
-extern AVCodec ff_libopus_decoder;
-extern AVCodec ff_librsvg_decoder;
-extern AVCodec ff_libshine_encoder;
-extern AVCodec ff_libspeex_encoder;
-extern AVCodec ff_libspeex_decoder;
-extern AVCodec ff_libtheora_encoder;
-extern AVCodec ff_libtwolame_encoder;
-extern AVCodec ff_libvo_amrwbenc_encoder;
-extern AVCodec ff_libvorbis_encoder;
-extern AVCodec ff_libvorbis_decoder;
-extern AVCodec ff_libvpx_vp8_encoder;
-extern AVCodec ff_libvpx_vp8_decoder;
-extern AVCodec ff_libvpx_vp9_encoder;
-extern AVCodec ff_libvpx_vp9_decoder;
-extern AVCodec ff_libwavpack_encoder;
+extern const FFCodec ff_aac_at_encoder;
+extern const FFCodec ff_aac_at_decoder;
+extern const FFCodec ff_ac3_at_decoder;
+extern const FFCodec ff_adpcm_ima_qt_at_decoder;
+extern const FFCodec ff_alac_at_encoder;
+extern const FFCodec ff_alac_at_decoder;
+extern const FFCodec ff_amr_nb_at_decoder;
+extern const FFCodec ff_eac3_at_decoder;
+extern const FFCodec ff_gsm_ms_at_decoder;
+extern const FFCodec ff_ilbc_at_encoder;
+extern const FFCodec ff_ilbc_at_decoder;
+extern const FFCodec ff_mp1_at_decoder;
+extern const FFCodec ff_mp2_at_decoder;
+extern const FFCodec ff_mp3_at_decoder;
+extern const FFCodec ff_pcm_alaw_at_encoder;
+extern const FFCodec ff_pcm_alaw_at_decoder;
+extern const FFCodec ff_pcm_mulaw_at_encoder;
+extern const FFCodec ff_pcm_mulaw_at_decoder;
+extern const FFCodec ff_qdmc_at_decoder;
+extern const FFCodec ff_qdm2_at_decoder;
+extern FFCodec ff_libaom_av1_encoder;
+extern const FFCodec ff_libaribb24_decoder;
+extern const FFCodec ff_libcelt_decoder;
+extern const FFCodec ff_libcodec2_encoder;
+extern const FFCodec ff_libcodec2_decoder;
+extern const FFCodec ff_libdav1d_decoder;
+extern const FFCodec ff_libdavs2_decoder;
+extern const FFCodec ff_libfdk_aac_encoder;
+extern const FFCodec ff_libfdk_aac_decoder;
+extern const FFCodec ff_libgsm_encoder;
+extern const FFCodec ff_libgsm_decoder;
+extern const FFCodec ff_libgsm_ms_encoder;
+extern const FFCodec ff_libgsm_ms_decoder;
+extern const FFCodec ff_libilbc_encoder;
+extern const FFCodec ff_libilbc_decoder;
+extern const FFCodec ff_libjxl_decoder;
+extern const FFCodec ff_libjxl_encoder;
+extern const FFCodec ff_libmp3lame_encoder;
+extern const FFCodec ff_libopencore_amrnb_encoder;
+extern const FFCodec ff_libopencore_amrnb_decoder;
+extern const FFCodec ff_libopencore_amrwb_decoder;
+extern const FFCodec ff_libopenjpeg_encoder;
+extern const FFCodec ff_libopenjpeg_decoder;
+extern const FFCodec ff_libopus_encoder;
+extern const FFCodec ff_libopus_decoder;
+extern const FFCodec ff_librav1e_encoder;
+extern const FFCodec ff_librsvg_decoder;
+extern const FFCodec ff_libshine_encoder;
+extern const FFCodec ff_libspeex_encoder;
+extern const FFCodec ff_libspeex_decoder;
+extern const FFCodec ff_libsvtav1_encoder;
+extern const FFCodec ff_libtheora_encoder;
+extern const FFCodec ff_libtwolame_encoder;
+extern const FFCodec ff_libuavs3d_decoder;
+extern const FFCodec ff_libvo_amrwbenc_encoder;
+extern const FFCodec ff_libvorbis_encoder;
+extern const FFCodec ff_libvorbis_decoder;
+extern const FFCodec ff_libvpx_vp8_encoder;
+extern const FFCodec ff_libvpx_vp8_decoder;
+extern FFCodec ff_libvpx_vp9_encoder;
+extern FFCodec ff_libvpx_vp9_decoder;
/* preferred over libwebp */
-extern AVCodec ff_libwebp_anim_encoder;
-extern AVCodec ff_libwebp_encoder;
-extern AVCodec ff_libx262_encoder;
-extern AVCodec ff_libx264_encoder;
-extern AVCodec ff_libx264rgb_encoder;
-extern AVCodec ff_libx265_encoder;
-extern AVCodec ff_libxavs_encoder;
-extern AVCodec ff_libxavs2_encoder;
-extern AVCodec ff_libxvid_encoder;
-extern AVCodec ff_libzvbi_teletext_decoder;
+extern const FFCodec ff_libwebp_anim_encoder;
+extern const FFCodec ff_libwebp_encoder;
+extern const FFCodec ff_libx262_encoder;
+#if CONFIG_LIBX264_ENCODER
+#include <x264.h>
+#if X264_BUILD < 153
+#define LIBX264_CONST
+#else
+#define LIBX264_CONST const
+#endif
+extern LIBX264_CONST FFCodec ff_libx264_encoder;
+#endif
+extern const FFCodec ff_libx264rgb_encoder;
+extern FFCodec ff_libx265_encoder;
+extern const FFCodec ff_libxavs_encoder;
+extern const FFCodec ff_libxavs2_encoder;
+extern const FFCodec ff_libxvid_encoder;
+extern const FFCodec ff_libzvbi_teletext_decoder;
/* text */
-extern AVCodec ff_bintext_decoder;
-extern AVCodec ff_xbin_decoder;
-extern AVCodec ff_idf_decoder;
+extern const FFCodec ff_bintext_decoder;
+extern const FFCodec ff_xbin_decoder;
+extern const FFCodec ff_idf_decoder;
/* external libraries, that shouldn't be used by default if one of the
* above is available */
-extern AVCodec ff_h263_v4l2m2m_encoder;
-extern AVCodec ff_libopenh264_encoder;
-extern AVCodec ff_libopenh264_decoder;
-extern AVCodec ff_h264_amf_encoder;
-extern AVCodec ff_h264_cuvid_decoder;
-extern AVCodec ff_h264_nvenc_encoder;
-extern AVCodec ff_h264_omx_encoder;
-extern AVCodec ff_h264_qsv_encoder;
-extern AVCodec ff_h264_v4l2m2m_encoder;
-extern AVCodec ff_h264_vaapi_encoder;
-extern AVCodec ff_h264_videotoolbox_encoder;
-#if FF_API_NVENC_OLD_NAME
-extern AVCodec ff_nvenc_encoder;
-extern AVCodec ff_nvenc_h264_encoder;
-extern AVCodec ff_nvenc_hevc_encoder;
-#endif
-extern AVCodec ff_hevc_amf_encoder;
-extern AVCodec ff_hevc_cuvid_decoder;
-extern AVCodec ff_hevc_mediacodec_decoder;
-extern AVCodec ff_hevc_nvenc_encoder;
-extern AVCodec ff_hevc_qsv_encoder;
-extern AVCodec ff_hevc_v4l2m2m_encoder;
-extern AVCodec ff_hevc_vaapi_encoder;
-extern AVCodec ff_hevc_videotoolbox_encoder;
-extern AVCodec ff_libkvazaar_encoder;
-extern AVCodec ff_mjpeg_cuvid_decoder;
-extern AVCodec ff_mjpeg_qsv_encoder;
-extern AVCodec ff_mjpeg_vaapi_encoder;
-extern AVCodec ff_mpeg1_cuvid_decoder;
-extern AVCodec ff_mpeg2_cuvid_decoder;
-extern AVCodec ff_mpeg2_qsv_encoder;
-extern AVCodec ff_mpeg2_vaapi_encoder;
-extern AVCodec ff_mpeg4_cuvid_decoder;
-extern AVCodec ff_mpeg4_mediacodec_decoder;
-extern AVCodec ff_mpeg4_v4l2m2m_encoder;
-extern AVCodec ff_vc1_cuvid_decoder;
-extern AVCodec ff_vp8_cuvid_decoder;
-extern AVCodec ff_vp8_mediacodec_decoder;
-extern AVCodec ff_vp8_qsv_decoder;
-extern AVCodec ff_vp8_v4l2m2m_encoder;
-extern AVCodec ff_vp8_vaapi_encoder;
-extern AVCodec ff_vp9_cuvid_decoder;
-extern AVCodec ff_vp9_mediacodec_decoder;
-extern AVCodec ff_vp9_vaapi_encoder;
+extern const FFCodec ff_aac_mf_encoder;
+extern const FFCodec ff_ac3_mf_encoder;
+extern const FFCodec ff_h263_v4l2m2m_encoder;
+extern const FFCodec ff_libaom_av1_decoder;
+/* hwaccel hooks only, so prefer external decoders */
+extern const FFCodec ff_av1_decoder;
+extern const FFCodec ff_av1_cuvid_decoder;
+extern const FFCodec ff_av1_mediacodec_decoder;
+extern const FFCodec ff_av1_nvenc_encoder;
+extern const FFCodec ff_av1_qsv_decoder;
+extern const FFCodec ff_av1_qsv_encoder;
+extern const FFCodec ff_av1_amf_encoder;
+extern const FFCodec ff_libopenh264_encoder;
+extern const FFCodec ff_libopenh264_decoder;
+extern const FFCodec ff_h264_amf_encoder;
+extern const FFCodec ff_h264_cuvid_decoder;
+extern const FFCodec ff_h264_mf_encoder;
+extern const FFCodec ff_h264_nvenc_encoder;
+extern const FFCodec ff_h264_omx_encoder;
+extern const FFCodec ff_h264_qsv_encoder;
+extern const FFCodec ff_h264_v4l2m2m_encoder;
+extern const FFCodec ff_h264_vaapi_encoder;
+extern const FFCodec ff_h264_videotoolbox_encoder;
+extern const FFCodec ff_hevc_amf_encoder;
+extern const FFCodec ff_hevc_cuvid_decoder;
+extern const FFCodec ff_hevc_mediacodec_decoder;
+extern const FFCodec ff_hevc_mediacodec_encoder;
+extern const FFCodec ff_hevc_mf_encoder;
+extern const FFCodec ff_hevc_nvenc_encoder;
+extern const FFCodec ff_hevc_qsv_encoder;
+extern const FFCodec ff_hevc_v4l2m2m_encoder;
+extern const FFCodec ff_hevc_vaapi_encoder;
+extern const FFCodec ff_hevc_videotoolbox_encoder;
+extern const FFCodec ff_libkvazaar_encoder;
+extern const FFCodec ff_mjpeg_cuvid_decoder;
+extern const FFCodec ff_mjpeg_qsv_encoder;
+extern const FFCodec ff_mjpeg_qsv_decoder;
+extern const FFCodec ff_mjpeg_vaapi_encoder;
+extern const FFCodec ff_mp3_mf_encoder;
+extern const FFCodec ff_mpeg1_cuvid_decoder;
+extern const FFCodec ff_mpeg2_cuvid_decoder;
+extern const FFCodec ff_mpeg2_qsv_encoder;
+extern const FFCodec ff_mpeg2_vaapi_encoder;
+extern const FFCodec ff_mpeg4_cuvid_decoder;
+extern const FFCodec ff_mpeg4_mediacodec_decoder;
+extern const FFCodec ff_mpeg4_omx_encoder;
+extern const FFCodec ff_mpeg4_v4l2m2m_encoder;
+extern const FFCodec ff_prores_videotoolbox_encoder;
+extern const FFCodec ff_vc1_cuvid_decoder;
+extern const FFCodec ff_vp8_cuvid_decoder;
+extern const FFCodec ff_vp8_mediacodec_decoder;
+extern const FFCodec ff_vp8_qsv_decoder;
+extern const FFCodec ff_vp8_v4l2m2m_encoder;
+extern const FFCodec ff_vp8_vaapi_encoder;
+extern const FFCodec ff_vp9_cuvid_decoder;
+extern const FFCodec ff_vp9_mediacodec_decoder;
+extern const FFCodec ff_vp9_qsv_decoder;
+extern const FFCodec ff_vp9_vaapi_encoder;
+extern const FFCodec ff_vp9_qsv_encoder;
+
+// null codecs
+extern const FFCodec ff_vnull_decoder;
+extern const FFCodec ff_vnull_encoder;
+extern const FFCodec ff_anull_decoder;
+extern const FFCodec ff_anull_encoder;
// The iterate API is not usable with ossfuzz due to the excessive size of binaries created
#if CONFIG_OSSFUZZ
-AVCodec * codec_list[] = {
+const FFCodec * codec_list[] = {
+ NULL,
NULL,
NULL
};
@@ -796,62 +908,24 @@ static void av_codec_init_static(void)
{
for (int i = 0; codec_list[i]; i++) {
if (codec_list[i]->init_static_data)
- codec_list[i]->init_static_data((AVCodec*)codec_list[i]);
+ codec_list[i]->init_static_data((FFCodec*)codec_list[i]);
}
}
const AVCodec *av_codec_iterate(void **opaque)
{
uintptr_t i = (uintptr_t)*opaque;
- const AVCodec *c = codec_list[i];
+ const FFCodec *c = codec_list[i];
ff_thread_once(&av_codec_static_init, av_codec_init_static);
- if (c)
+ if (c) {
*opaque = (void*)(i + 1);
-
- return c;
-}
-
-#if FF_API_NEXT
-FF_DISABLE_DEPRECATION_WARNINGS
-static AVOnce av_codec_next_init = AV_ONCE_INIT;
-
-static void av_codec_init_next(void)
-{
- AVCodec *prev = NULL, *p;
- void *i = 0;
- while ((p = (AVCodec*)av_codec_iterate(&i))) {
- if (prev)
- prev->next = p;
- prev = p;
+ return &c->p;
}
+ return NULL;
}
-
-
-av_cold void avcodec_register(AVCodec *codec)
-{
- ff_thread_once(&av_codec_next_init, av_codec_init_next);
-}
-
-AVCodec *av_codec_next(const AVCodec *c)
-{
- ff_thread_once(&av_codec_next_init, av_codec_init_next);
-
- if (c)
- return c->next;
- else
- return (AVCodec*)codec_list[0];
-}
-
-void avcodec_register_all(void)
-{
- ff_thread_once(&av_codec_next_init, av_codec_init_next);
-}
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
static enum AVCodecID remap_deprecated_codec_id(enum AVCodecID id)
{
switch(id){
@@ -861,7 +935,7 @@ static enum AVCodecID remap_deprecated_codec_id(enum AVCodecID id)
}
}
-static AVCodec *find_codec(enum AVCodecID id, int (*x)(const AVCodec *))
+static const AVCodec *find_codec(enum AVCodecID id, int (*x)(const AVCodec *))
{
const AVCodec *p, *experimental = NULL;
void *i = 0;
@@ -875,24 +949,24 @@ static AVCodec *find_codec(enum AVCodecID id, int (*x)(const AVCodec *))
if (p->capabilities & AV_CODEC_CAP_EXPERIMENTAL && !experimental) {
experimental = p;
} else
- return (AVCodec*)p;
+ return p;
}
}
- return (AVCodec*)experimental;
+ return experimental;
}
-AVCodec *avcodec_find_encoder(enum AVCodecID id)
+const AVCodec *avcodec_find_encoder(enum AVCodecID id)
{
return find_codec(id, av_codec_is_encoder);
}
-AVCodec *avcodec_find_decoder(enum AVCodecID id)
+const AVCodec *avcodec_find_decoder(enum AVCodecID id)
{
return find_codec(id, av_codec_is_decoder);
}
-static AVCodec *find_codec_by_name(const char *name, int (*x)(const AVCodec *))
+static const AVCodec *find_codec_by_name(const char *name, int (*x)(const AVCodec *))
{
void *i = 0;
const AVCodec *p;
@@ -904,18 +978,18 @@ static AVCodec *find_codec_by_name(const char *name, int (*x)(const AVCodec *))
if (!x(p))
continue;
if (strcmp(name, p->name) == 0)
- return (AVCodec*)p;
+ return p;
}
return NULL;
}
-AVCodec *avcodec_find_encoder_by_name(const char *name)
+const AVCodec *avcodec_find_encoder_by_name(const char *name)
{
return find_codec_by_name(name, av_codec_is_encoder);
}
-AVCodec *avcodec_find_decoder_by_name(const char *name)
+const AVCodec *avcodec_find_decoder_by_name(const char *name)
{
return find_codec_by_name(name, av_codec_is_decoder);
}
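
A minimal sketch of how callers use the lookup API above now that it returns const pointers (assuming only the public libavcodec headers):

#include <libavcodec/avcodec.h>
#include <stdio.h>

static void list_vp9_decoders(void)
{
    /* Both the iterator and the by-name lookup hand back const AVCodec
     * pointers after this change; callers must not cast the const away. */
    const AVCodec *named = avcodec_find_decoder_by_name("libvpx-vp9");
    const AVCodec *c     = NULL;
    void *iter           = NULL;

    if (named)
        printf("by name: %s\n", named->name);

    while ((c = av_codec_iterate(&iter)))
        if (c->id == AV_CODEC_ID_VP9 && av_codec_is_decoder(c))
            printf("VP9 decoder: %s\n", c->name);
}
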
diff --git a/media/ffvpx/libavcodec/avcodec.c b/media/ffvpx/libavcodec/avcodec.c
new file mode 100644
index 0000000000..fb1362290f
--- /dev/null
+++ b/media/ffvpx/libavcodec/avcodec.c
@@ -0,0 +1,716 @@
+/*
+ * AVCodecContext functions for libavcodec
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AVCodecContext functions for libavcodec
+ */
+
+#include "config.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/fifo.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/thread.h"
+#include "avcodec.h"
+#include "bsf.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "encode.h"
+#include "frame_thread_encoder.h"
+#include "internal.h"
+#include "thread.h"
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ int r = func(c, (char *)arg + i * size);
+ if (ret)
+ ret[i] = r;
+ }
+ emms_c();
+ return 0;
+}
+
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ int r = func(c, arg, i, 0);
+ if (ret)
+ ret[i] = r;
+ }
+ emms_c();
+ return 0;
+}
+
+static AVMutex codec_mutex = AV_MUTEX_INITIALIZER;
+
+static void lock_avcodec(const FFCodec *codec)
+{
+ if (codec->caps_internal & FF_CODEC_CAP_NOT_INIT_THREADSAFE && codec->init)
+ ff_mutex_lock(&codec_mutex);
+}
+
+static void unlock_avcodec(const FFCodec *codec)
+{
+ if (codec->caps_internal & FF_CODEC_CAP_NOT_INIT_THREADSAFE && codec->init)
+ ff_mutex_unlock(&codec_mutex);
+}
+
+static int64_t get_bit_rate(AVCodecContext *ctx)
+{
+ int64_t bit_rate;
+ int bits_per_sample;
+
+ switch (ctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ case AVMEDIA_TYPE_DATA:
+ case AVMEDIA_TYPE_SUBTITLE:
+ case AVMEDIA_TYPE_ATTACHMENT:
+ bit_rate = ctx->bit_rate;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ bits_per_sample = av_get_bits_per_sample(ctx->codec_id);
+ if (bits_per_sample) {
+ bit_rate = ctx->sample_rate * (int64_t)ctx->ch_layout.nb_channels;
+ if (bit_rate > INT64_MAX / bits_per_sample) {
+ bit_rate = 0;
+ } else
+ bit_rate *= bits_per_sample;
+ } else
+ bit_rate = ctx->bit_rate;
+ break;
+ default:
+ bit_rate = 0;
+ break;
+ }
+ return bit_rate;
+}
+
+int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
+{
+ int ret = 0;
+ AVCodecInternal *avci;
+ const FFCodec *codec2;
+
+ if (avcodec_is_open(avctx))
+ return 0;
+
+ if (!codec && !avctx->codec) {
+ av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n");
+ return AVERROR(EINVAL);
+ }
+ if (codec && avctx->codec && codec != avctx->codec) {
+ av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
+ "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name);
+ return AVERROR(EINVAL);
+ }
+ if (!codec)
+ codec = avctx->codec;
+ codec2 = ffcodec(codec);
+
+ if ((avctx->codec_type != AVMEDIA_TYPE_UNKNOWN && avctx->codec_type != codec->type) ||
+ (avctx->codec_id != AV_CODEC_ID_NONE && avctx->codec_id != codec->id)) {
+ av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n");
+ return AVERROR(EINVAL);
+ }
+
+ avctx->codec_type = codec->type;
+ avctx->codec_id = codec->id;
+ avctx->codec = codec;
+
+ if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
+ return AVERROR(EINVAL);
+
+ avci = av_mallocz(sizeof(*avci));
+ if (!avci) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+ avctx->internal = avci;
+
+ avci->buffer_frame = av_frame_alloc();
+ avci->buffer_pkt = av_packet_alloc();
+ if (!avci->buffer_frame || !avci->buffer_pkt) {
+ ret = AVERROR(ENOMEM);
+ goto free_and_end;
+ }
+
+ if (codec2->priv_data_size > 0) {
+ if (!avctx->priv_data) {
+ avctx->priv_data = av_mallocz(codec2->priv_data_size);
+ if (!avctx->priv_data) {
+ ret = AVERROR(ENOMEM);
+ goto free_and_end;
+ }
+ if (codec->priv_class) {
+ *(const AVClass **)avctx->priv_data = codec->priv_class;
+ av_opt_set_defaults(avctx->priv_data);
+ }
+ }
+ if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, options)) < 0)
+ goto free_and_end;
+ } else {
+ avctx->priv_data = NULL;
+ }
+ if ((ret = av_opt_set_dict(avctx, options)) < 0)
+ goto free_and_end;
+
+ if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist \'%s\'\n", codec->name, avctx->codec_whitelist);
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+
+    // only call ff_set_dimensions() for non-H.264/VP6F/DXV codecs so as not to overwrite previously set dimensions
+ if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height &&
+ (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) {
+ if (avctx->coded_width && avctx->coded_height)
+ ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
+ else if (avctx->width && avctx->height)
+ ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
+ if (ret < 0)
+ goto free_and_end;
+ }
+
+ if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
+ && ( av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0
+ || av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)) {
+ av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n");
+ ff_set_dimensions(avctx, 0, 0);
+ }
+
+ if (avctx->width > 0 && avctx->height > 0) {
+ if (av_image_check_sar(avctx->width, avctx->height,
+ avctx->sample_aspect_ratio) < 0) {
+ av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
+ avctx->sample_aspect_ratio.num,
+ avctx->sample_aspect_ratio.den);
+ avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
+ }
+ }
+
+ if (avctx->sample_rate < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid sample rate: %d\n", avctx->sample_rate);
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ if (avctx->block_align < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid block align: %d\n", avctx->block_align);
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ /* compat wrapper for old-style callers */
+ if (avctx->channel_layout && !avctx->channels)
+ avctx->channels = av_popcount64(avctx->channel_layout);
+
+ if ((avctx->channels && avctx->ch_layout.nb_channels != avctx->channels) ||
+ (avctx->channel_layout && (avctx->ch_layout.order != AV_CHANNEL_ORDER_NATIVE ||
+ avctx->ch_layout.u.mask != avctx->channel_layout))) {
+ av_channel_layout_uninit(&avctx->ch_layout);
+ if (avctx->channel_layout) {
+ av_channel_layout_from_mask(&avctx->ch_layout, avctx->channel_layout);
+ } else {
+ avctx->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ }
+ avctx->ch_layout.nb_channels = avctx->channels;
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ /* AV_CODEC_CAP_CHANNEL_CONF is a decoder-only flag; so the code below
+ * in particular checks that nb_channels is set for all audio encoders. */
+ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && !avctx->ch_layout.nb_channels
+ && !(codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)) {
+ av_log(avctx, AV_LOG_ERROR, "%s requires channel layout to be set\n",
+ av_codec_is_decoder(codec) ? "Decoder" : "Encoder");
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ if (avctx->ch_layout.nb_channels && !av_channel_layout_check(&avctx->ch_layout)) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid channel layout\n");
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ if (avctx->ch_layout.nb_channels > FF_SANE_NB_CHANNELS) {
+ av_log(avctx, AV_LOG_ERROR, "Too many channels: %d\n", avctx->ch_layout.nb_channels);
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+
+ avctx->frame_num = 0;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);
+
+ if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) &&
+ avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
+ const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder";
+ const AVCodec *codec2;
+ av_log(avctx, AV_LOG_ERROR,
+ "The %s '%s' is experimental but experimental codecs are not enabled, "
+ "add '-strict %d' if you want to use it.\n",
+ codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL);
+ codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id);
+ if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
+ av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n",
+ codec_string, codec2->name);
+ ret = AVERROR_EXPERIMENTAL;
+ goto free_and_end;
+ }
+
+ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
+ (!avctx->time_base.num || !avctx->time_base.den)) {
+ avctx->time_base.num = 1;
+ avctx->time_base.den = avctx->sample_rate;
+ }
+
+ if (av_codec_is_encoder(avctx->codec))
+ ret = ff_encode_preinit(avctx);
+ else
+ ret = ff_decode_preinit(avctx);
+ if (ret < 0)
+ goto free_and_end;
+
+ if (HAVE_THREADS && !avci->frame_thread_encoder) {
+ /* Frame-threaded decoders call FFCodec.init for their child contexts. */
+ lock_avcodec(codec2);
+ ret = ff_thread_init(avctx);
+ unlock_avcodec(codec2);
+ if (ret < 0) {
+ goto free_and_end;
+ }
+ }
+ if (!HAVE_THREADS && !(codec2->caps_internal & FF_CODEC_CAP_AUTO_THREADS))
+ avctx->thread_count = 1;
+
+ if (!(avctx->active_thread_type & FF_THREAD_FRAME) ||
+ avci->frame_thread_encoder) {
+ if (codec2->init) {
+ lock_avcodec(codec2);
+ ret = codec2->init(avctx);
+ unlock_avcodec(codec2);
+ if (ret < 0) {
+ avci->needs_close = codec2->caps_internal & FF_CODEC_CAP_INIT_CLEANUP;
+ goto free_and_end;
+ }
+ }
+ avci->needs_close = 1;
+ }
+
+    ret = 0;
+
+ if (av_codec_is_decoder(avctx->codec)) {
+ if (!avctx->bit_rate)
+ avctx->bit_rate = get_bit_rate(avctx);
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ /* update the deprecated fields for old-style callers */
+ avctx->channels = avctx->ch_layout.nb_channels;
+ avctx->channel_layout = avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ avctx->ch_layout.u.mask : 0;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ /* validate channel layout from the decoder */
+ if ((avctx->ch_layout.nb_channels && !av_channel_layout_check(&avctx->ch_layout)) ||
+ avctx->ch_layout.nb_channels > FF_SANE_NB_CHANNELS) {
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ if (avctx->bits_per_coded_sample < 0) {
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+ }
+ if (codec->priv_class)
+ av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class);
+
+end:
+
+ return ret;
+free_and_end:
+ avcodec_close(avctx);
+ goto end;
+}
+
+void avcodec_flush_buffers(AVCodecContext *avctx)
+{
+ AVCodecInternal *avci = avctx->internal;
+
+ if (av_codec_is_encoder(avctx->codec)) {
+ int caps = avctx->codec->capabilities;
+
+ if (!(caps & AV_CODEC_CAP_ENCODER_FLUSH)) {
+ // Only encoders that explicitly declare support for it can be
+ // flushed. Otherwise, this is a no-op.
+ av_log(avctx, AV_LOG_WARNING, "Ignoring attempt to flush encoder "
+ "that doesn't support it\n");
+ return;
+ }
+ if (avci->in_frame)
+ av_frame_unref(avci->in_frame);
+ if (avci->recon_frame)
+ av_frame_unref(avci->recon_frame);
+ } else {
+ av_packet_unref(avci->last_pkt_props);
+ av_packet_unref(avci->in_pkt);
+
+ avctx->pts_correction_last_pts =
+ avctx->pts_correction_last_dts = INT64_MIN;
+
+ av_bsf_flush(avci->bsf);
+ }
+
+ avci->draining = 0;
+ avci->draining_done = 0;
+ avci->nb_draining_errors = 0;
+ av_frame_unref(avci->buffer_frame);
+ av_packet_unref(avci->buffer_pkt);
+
+ if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
+ ff_thread_flush(avctx);
+ else if (ffcodec(avctx->codec)->flush)
+ ffcodec(avctx->codec)->flush(avctx);
+}
+
+void avsubtitle_free(AVSubtitle *sub)
+{
+ int i;
+
+ for (i = 0; i < sub->num_rects; i++) {
+ AVSubtitleRect *const rect = sub->rects[i];
+
+ av_freep(&rect->data[0]);
+ av_freep(&rect->data[1]);
+ av_freep(&rect->data[2]);
+ av_freep(&rect->data[3]);
+ av_freep(&rect->text);
+ av_freep(&rect->ass);
+
+ av_freep(&sub->rects[i]);
+ }
+
+ av_freep(&sub->rects);
+
+ memset(sub, 0, sizeof(*sub));
+}
+
+av_cold int avcodec_close(AVCodecContext *avctx)
+{
+ int i;
+
+ if (!avctx)
+ return 0;
+
+ if (avcodec_is_open(avctx)) {
+ AVCodecInternal *avci = avctx->internal;
+
+ if (CONFIG_FRAME_THREAD_ENCODER &&
+ avci->frame_thread_encoder && avctx->thread_count > 1) {
+ ff_frame_thread_encoder_free(avctx);
+ }
+ if (HAVE_THREADS && avci->thread_ctx)
+ ff_thread_free(avctx);
+ if (avci->needs_close && ffcodec(avctx->codec)->close)
+ ffcodec(avctx->codec)->close(avctx);
+ avci->byte_buffer_size = 0;
+ av_freep(&avci->byte_buffer);
+ av_frame_free(&avci->buffer_frame);
+ av_packet_free(&avci->buffer_pkt);
+ av_packet_free(&avci->last_pkt_props);
+
+ av_packet_free(&avci->in_pkt);
+ av_frame_free(&avci->in_frame);
+ av_frame_free(&avci->recon_frame);
+
+ av_buffer_unref(&avci->pool);
+
+ if (avctx->hwaccel && avctx->hwaccel->uninit)
+ avctx->hwaccel->uninit(avctx);
+ av_freep(&avci->hwaccel_priv_data);
+
+ av_bsf_free(&avci->bsf);
+
+ av_channel_layout_uninit(&avci->initial_ch_layout);
+
+#if CONFIG_LCMS2
+ ff_icc_context_uninit(&avci->icc);
+#endif
+
+ av_freep(&avctx->internal);
+ }
+
+ for (i = 0; i < avctx->nb_coded_side_data; i++)
+ av_freep(&avctx->coded_side_data[i].data);
+ av_freep(&avctx->coded_side_data);
+ avctx->nb_coded_side_data = 0;
+
+ av_buffer_unref(&avctx->hw_frames_ctx);
+ av_buffer_unref(&avctx->hw_device_ctx);
+
+ if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
+ av_opt_free(avctx->priv_data);
+ av_opt_free(avctx);
+ av_freep(&avctx->priv_data);
+ if (av_codec_is_encoder(avctx->codec)) {
+ av_freep(&avctx->extradata);
+ avctx->extradata_size = 0;
+ } else if (av_codec_is_decoder(avctx->codec))
+ av_freep(&avctx->subtitle_header);
+
+ avctx->codec = NULL;
+ avctx->active_thread_type = 0;
+
+ return 0;
+}
+
+static const char *unknown_if_null(const char *str)
+{
+ return str ? str : "unknown";
+}
+
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
+{
+ const char *codec_type;
+ const char *codec_name;
+ const char *profile = NULL;
+ AVBPrint bprint;
+ int64_t bitrate;
+ int new_line = 0;
+ AVRational display_aspect_ratio;
+ const char *separator = enc->dump_separator ? (const char *)enc->dump_separator : ", ";
+ const char *str;
+
+ if (!buf || buf_size <= 0)
+ return;
+ av_bprint_init_for_buffer(&bprint, buf, buf_size);
+ codec_type = av_get_media_type_string(enc->codec_type);
+ codec_name = avcodec_get_name(enc->codec_id);
+ profile = avcodec_profile_name(enc->codec_id, enc->profile);
+
+ av_bprintf(&bprint, "%s: %s", codec_type ? codec_type : "unknown",
+ codec_name);
+ buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */
+
+ if (enc->codec && strcmp(enc->codec->name, codec_name))
+ av_bprintf(&bprint, " (%s)", enc->codec->name);
+
+ if (profile)
+ av_bprintf(&bprint, " (%s)", profile);
+ if ( enc->codec_type == AVMEDIA_TYPE_VIDEO
+ && av_log_get_level() >= AV_LOG_VERBOSE
+ && enc->refs)
+ av_bprintf(&bprint, ", %d reference frame%s",
+ enc->refs, enc->refs > 1 ? "s" : "");
+
+ if (enc->codec_tag)
+ av_bprintf(&bprint, " (%s / 0x%04X)",
+ av_fourcc2str(enc->codec_tag), enc->codec_tag);
+
+ switch (enc->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ {
+ unsigned len;
+
+ av_bprintf(&bprint, "%s%s", separator,
+ enc->pix_fmt == AV_PIX_FMT_NONE ? "none" :
+ unknown_if_null(av_get_pix_fmt_name(enc->pix_fmt)));
+
+ av_bprint_chars(&bprint, '(', 1);
+ len = bprint.len;
+
+ /* The following check ensures that '(' has been written
+ * and therefore allows us to erase it if it turns out
+ * to be unnecessary. */
+ if (!av_bprint_is_complete(&bprint))
+ return;
+
+ if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE &&
+ enc->bits_per_raw_sample < av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth)
+ av_bprintf(&bprint, "%d bpc, ", enc->bits_per_raw_sample);
+ if (enc->color_range != AVCOL_RANGE_UNSPECIFIED &&
+ (str = av_color_range_name(enc->color_range)))
+ av_bprintf(&bprint, "%s, ", str);
+
+ if (enc->colorspace != AVCOL_SPC_UNSPECIFIED ||
+ enc->color_primaries != AVCOL_PRI_UNSPECIFIED ||
+ enc->color_trc != AVCOL_TRC_UNSPECIFIED) {
+ const char *col = unknown_if_null(av_color_space_name(enc->colorspace));
+ const char *pri = unknown_if_null(av_color_primaries_name(enc->color_primaries));
+ const char *trc = unknown_if_null(av_color_transfer_name(enc->color_trc));
+ if (strcmp(col, pri) || strcmp(col, trc)) {
+ new_line = 1;
+ av_bprintf(&bprint, "%s/%s/%s, ", col, pri, trc);
+ } else
+ av_bprintf(&bprint, "%s, ", col);
+ }
+
+ if (enc->field_order != AV_FIELD_UNKNOWN) {
+ const char *field_order = "progressive";
+ if (enc->field_order == AV_FIELD_TT)
+ field_order = "top first";
+ else if (enc->field_order == AV_FIELD_BB)
+ field_order = "bottom first";
+ else if (enc->field_order == AV_FIELD_TB)
+ field_order = "top coded first (swapped)";
+ else if (enc->field_order == AV_FIELD_BT)
+ field_order = "bottom coded first (swapped)";
+
+ av_bprintf(&bprint, "%s, ", field_order);
+ }
+
+ if (av_log_get_level() >= AV_LOG_VERBOSE &&
+ enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED &&
+ (str = av_chroma_location_name(enc->chroma_sample_location)))
+ av_bprintf(&bprint, "%s, ", str);
+
+ if (len == bprint.len) {
+ bprint.str[len - 1] = '\0';
+ bprint.len--;
+ } else {
+ if (bprint.len - 2 < bprint.size) {
+ /* Erase the last ", " */
+ bprint.len -= 2;
+ bprint.str[bprint.len] = '\0';
+ }
+ av_bprint_chars(&bprint, ')', 1);
+ }
+ }
+
+ if (enc->width) {
+ av_bprintf(&bprint, "%s%dx%d", new_line ? separator : ", ",
+ enc->width, enc->height);
+
+ if (av_log_get_level() >= AV_LOG_VERBOSE &&
+ (enc->width != enc->coded_width ||
+ enc->height != enc->coded_height))
+ av_bprintf(&bprint, " (%dx%d)",
+ enc->coded_width, enc->coded_height);
+
+ if (enc->sample_aspect_ratio.num) {
+ av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
+ enc->width * (int64_t)enc->sample_aspect_ratio.num,
+ enc->height * (int64_t)enc->sample_aspect_ratio.den,
+ 1024 * 1024);
+ av_bprintf(&bprint, " [SAR %d:%d DAR %d:%d]",
+ enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den,
+ display_aspect_ratio.num, display_aspect_ratio.den);
+ }
+ if (av_log_get_level() >= AV_LOG_DEBUG) {
+ int g = av_gcd(enc->time_base.num, enc->time_base.den);
+ av_bprintf(&bprint, ", %d/%d",
+ enc->time_base.num / g, enc->time_base.den / g);
+ }
+ }
+ if (encode) {
+ av_bprintf(&bprint, ", q=%d-%d", enc->qmin, enc->qmax);
+ } else {
+ if (enc->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS)
+ av_bprintf(&bprint, ", Closed Captions");
+ if (enc->properties & FF_CODEC_PROPERTY_FILM_GRAIN)
+ av_bprintf(&bprint, ", Film Grain");
+ if (enc->properties & FF_CODEC_PROPERTY_LOSSLESS)
+ av_bprintf(&bprint, ", lossless");
+ }
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ av_bprintf(&bprint, "%s", separator);
+
+ if (enc->sample_rate) {
+ av_bprintf(&bprint, "%d Hz, ", enc->sample_rate);
+ }
+ {
+ char buf[512];
+ int ret = av_channel_layout_describe(&enc->ch_layout, buf, sizeof(buf));
+ if (ret >= 0)
+ av_bprintf(&bprint, "%s", buf);
+ }
+ if (enc->sample_fmt != AV_SAMPLE_FMT_NONE &&
+ (str = av_get_sample_fmt_name(enc->sample_fmt))) {
+ av_bprintf(&bprint, ", %s", str);
+ }
+ if ( enc->bits_per_raw_sample > 0
+ && enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8)
+ av_bprintf(&bprint, " (%d bit)", enc->bits_per_raw_sample);
+ if (av_log_get_level() >= AV_LOG_VERBOSE) {
+ if (enc->initial_padding)
+ av_bprintf(&bprint, ", delay %d", enc->initial_padding);
+ if (enc->trailing_padding)
+ av_bprintf(&bprint, ", padding %d", enc->trailing_padding);
+ }
+ break;
+ case AVMEDIA_TYPE_DATA:
+ if (av_log_get_level() >= AV_LOG_DEBUG) {
+ int g = av_gcd(enc->time_base.num, enc->time_base.den);
+ if (g)
+ av_bprintf(&bprint, ", %d/%d",
+ enc->time_base.num / g, enc->time_base.den / g);
+ }
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ if (enc->width)
+ av_bprintf(&bprint, ", %dx%d", enc->width, enc->height);
+ break;
+ default:
+ return;
+ }
+ if (encode) {
+ if (enc->flags & AV_CODEC_FLAG_PASS1)
+ av_bprintf(&bprint, ", pass 1");
+ if (enc->flags & AV_CODEC_FLAG_PASS2)
+ av_bprintf(&bprint, ", pass 2");
+ }
+ bitrate = get_bit_rate(enc);
+ if (bitrate != 0) {
+ av_bprintf(&bprint, ", %"PRId64" kb/s", bitrate / 1000);
+ } else if (enc->rc_max_rate > 0) {
+ av_bprintf(&bprint, ", max. %"PRId64" kb/s", enc->rc_max_rate / 1000);
+ }
+}
+
+int avcodec_is_open(AVCodecContext *s)
+{
+ return !!s->internal;
+}
+
+int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ av_frame_unref(frame);
+
+ if (av_codec_is_decoder(avctx->codec))
+ return ff_decode_receive_frame(avctx, frame);
+ return ff_encode_receive_frame(avctx, frame);
+}
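
A rough usage sketch of the context lifecycle this file implements; avcodec_alloc_context3() and avcodec_free_context() are assumed from the same library but are not part of this file:

#include <libavcodec/avcodec.h>

static int open_close_example(void)
{
    const AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_VP9);
    AVCodecContext *ctx;
    int ret;

    if (!dec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(dec);      /* defaults + priv_data class */
    if (!ctx)
        return AVERROR(ENOMEM);

    ret = avcodec_open2(ctx, dec, NULL);    /* runs the init path above */
    if (ret >= 0)
        avcodec_flush_buffers(ctx);         /* safe on a freshly opened decoder */

    avcodec_free_context(&ctx);             /* closes the codec, then frees ctx */
    return ret < 0 ? ret : 0;
}
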
diff --git a/media/ffvpx/libavcodec/avcodec.h b/media/ffvpx/libavcodec/avcodec.h
index d234271c5b..39881a1d2b 100644
--- a/media/ffvpx/libavcodec/avcodec.h
+++ b/media/ffvpx/libavcodec/avcodec.h
@@ -27,21 +27,29 @@
* Libavcodec external API header
*/
-#include <errno.h>
#include "libavutil/samplefmt.h"
#include "libavutil/attributes.h"
#include "libavutil/avutil.h"
#include "libavutil/buffer.h"
-#include "libavutil/cpu.h"
-#include "libavutil/channel_layout.h"
#include "libavutil/dict.h"
#include "libavutil/frame.h"
-#include "libavutil/hwcontext.h"
#include "libavutil/log.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
+#include "codec.h"
+#include "codec_desc.h"
+#include "codec_par.h"
+#include "codec_id.h"
+#include "defs.h"
+#include "packet.h"
+#include "version_major.h"
+#ifndef HAVE_AV_CONFIG_H
+/* When included as part of the ffmpeg build, only include the major version
+ * to avoid unnecessary rebuilds. When included externally, keep including
+ * the full version information. */
#include "version.h"
+#endif
/**
* @defgroup libavc libavcodec
@@ -92,6 +100,7 @@
* compressed data in an AVPacket.
* - For encoding, call avcodec_send_frame() to give the encoder an AVFrame
* containing uncompressed audio or video.
+ *
* In both cases, it is recommended that AVPackets and AVFrames are
* refcounted, or libavcodec might have to copy the input data. (libavformat
* always returns refcounted AVPackets, and av_frame_get_buffer() allocates
@@ -102,6 +111,7 @@
* an AVFrame containing uncompressed audio or video data.
* - For encoding, call avcodec_receive_packet(). On success, it will return
* an AVPacket with a compressed frame.
+ *
* Repeat this call until it returns AVERROR(EAGAIN) or an error. The
* AVERROR(EAGAIN) return value means that new input data is required to
* return new output. In this case, continue with sending input. For each
@@ -147,29 +157,6 @@
* at least will not fail with AVERROR(EAGAIN). In general, no codec will
* permit unlimited buffering of input or output.
*
- * This API replaces the following legacy functions:
- * - avcodec_decode_video2() and avcodec_decode_audio4():
- * Use avcodec_send_packet() to feed input to the decoder, then use
- * avcodec_receive_frame() to receive decoded frames after each packet.
- * Unlike with the old video decoding API, multiple frames might result from
- * a packet. For audio, splitting the input packet into frames by partially
- * decoding packets becomes transparent to the API user. You never need to
- * feed an AVPacket to the API twice (unless it is rejected with AVERROR(EAGAIN) - then
- * no data was read from the packet).
- * Additionally, sending a flush/draining packet is required only once.
- * - avcodec_encode_video2()/avcodec_encode_audio2():
- * Use avcodec_send_frame() to feed input to the encoder, then use
- * avcodec_receive_packet() to receive encoded packets.
- * Providing user-allocated buffers for avcodec_receive_packet() is not
- * possible.
- * - The new API does not handle subtitles yet.
- *
- * Mixing new and old function calls on the same AVCodecContext is not allowed,
- * and will result in undefined behavior.
- *
- * Some codecs might require using the new API; using the old API will return
- * an error when calling it. All codecs support the new API.
- *
* A codec is not allowed to return AVERROR(EAGAIN) for both sending and receiving. This
* would be an invalid state, which could put the codec user into an endless
* loop. The API has no concept of time either: it cannot happen that trying to
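
A hedged sketch of the send/receive pattern the surviving documentation describes (demuxing and frame consumption omitted):

#include <libavcodec/avcodec.h>

/* Feed one packet and drain every frame it produces; pkt == NULL enters
 * draining mode as described above. */
static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;            /* more input needed, or fully drained */
        if (ret < 0)
            return ret;          /* genuine decoding error */

        /* ... consume frame ... */
        av_frame_unref(frame);
    }
    return 0;
}
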
@@ -196,599 +183,6 @@
* @{
*/
-
-/**
- * Identify the syntax and semantics of the bitstream.
- * The principle is roughly:
- * Two decoders with the same ID can decode the same streams.
- * Two encoders with the same ID can encode compatible streams.
- * There may be slight deviations from the principle due to implementation
- * details.
- *
- * If you add a codec ID to this list, add it so that
- * 1. no value of an existing codec ID changes (that would break ABI),
- * 2. it is as close as possible to similar codecs
- *
- * After adding new codec IDs, do not forget to add an entry to the codec
- * descriptor list and bump libavcodec minor version.
- */
-enum AVCodecID {
- AV_CODEC_ID_NONE,
-
- /* video codecs */
- AV_CODEC_ID_MPEG1VIDEO,
- AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
- AV_CODEC_ID_H261,
- AV_CODEC_ID_H263,
- AV_CODEC_ID_RV10,
- AV_CODEC_ID_RV20,
- AV_CODEC_ID_MJPEG,
- AV_CODEC_ID_MJPEGB,
- AV_CODEC_ID_LJPEG,
- AV_CODEC_ID_SP5X,
- AV_CODEC_ID_JPEGLS,
- AV_CODEC_ID_MPEG4,
- AV_CODEC_ID_RAWVIDEO,
- AV_CODEC_ID_MSMPEG4V1,
- AV_CODEC_ID_MSMPEG4V2,
- AV_CODEC_ID_MSMPEG4V3,
- AV_CODEC_ID_WMV1,
- AV_CODEC_ID_WMV2,
- AV_CODEC_ID_H263P,
- AV_CODEC_ID_H263I,
- AV_CODEC_ID_FLV1,
- AV_CODEC_ID_SVQ1,
- AV_CODEC_ID_SVQ3,
- AV_CODEC_ID_DVVIDEO,
- AV_CODEC_ID_HUFFYUV,
- AV_CODEC_ID_CYUV,
- AV_CODEC_ID_H264,
- AV_CODEC_ID_INDEO3,
- AV_CODEC_ID_VP3,
- AV_CODEC_ID_THEORA,
- AV_CODEC_ID_ASV1,
- AV_CODEC_ID_ASV2,
- AV_CODEC_ID_FFV1,
- AV_CODEC_ID_4XM,
- AV_CODEC_ID_VCR1,
- AV_CODEC_ID_CLJR,
- AV_CODEC_ID_MDEC,
- AV_CODEC_ID_ROQ,
- AV_CODEC_ID_INTERPLAY_VIDEO,
- AV_CODEC_ID_XAN_WC3,
- AV_CODEC_ID_XAN_WC4,
- AV_CODEC_ID_RPZA,
- AV_CODEC_ID_CINEPAK,
- AV_CODEC_ID_WS_VQA,
- AV_CODEC_ID_MSRLE,
- AV_CODEC_ID_MSVIDEO1,
- AV_CODEC_ID_IDCIN,
- AV_CODEC_ID_8BPS,
- AV_CODEC_ID_SMC,
- AV_CODEC_ID_FLIC,
- AV_CODEC_ID_TRUEMOTION1,
- AV_CODEC_ID_VMDVIDEO,
- AV_CODEC_ID_MSZH,
- AV_CODEC_ID_ZLIB,
- AV_CODEC_ID_QTRLE,
- AV_CODEC_ID_TSCC,
- AV_CODEC_ID_ULTI,
- AV_CODEC_ID_QDRAW,
- AV_CODEC_ID_VIXL,
- AV_CODEC_ID_QPEG,
- AV_CODEC_ID_PNG,
- AV_CODEC_ID_PPM,
- AV_CODEC_ID_PBM,
- AV_CODEC_ID_PGM,
- AV_CODEC_ID_PGMYUV,
- AV_CODEC_ID_PAM,
- AV_CODEC_ID_FFVHUFF,
- AV_CODEC_ID_RV30,
- AV_CODEC_ID_RV40,
- AV_CODEC_ID_VC1,
- AV_CODEC_ID_WMV3,
- AV_CODEC_ID_LOCO,
- AV_CODEC_ID_WNV1,
- AV_CODEC_ID_AASC,
- AV_CODEC_ID_INDEO2,
- AV_CODEC_ID_FRAPS,
- AV_CODEC_ID_TRUEMOTION2,
- AV_CODEC_ID_BMP,
- AV_CODEC_ID_CSCD,
- AV_CODEC_ID_MMVIDEO,
- AV_CODEC_ID_ZMBV,
- AV_CODEC_ID_AVS,
- AV_CODEC_ID_SMACKVIDEO,
- AV_CODEC_ID_NUV,
- AV_CODEC_ID_KMVC,
- AV_CODEC_ID_FLASHSV,
- AV_CODEC_ID_CAVS,
- AV_CODEC_ID_JPEG2000,
- AV_CODEC_ID_VMNC,
- AV_CODEC_ID_VP5,
- AV_CODEC_ID_VP6,
- AV_CODEC_ID_VP6F,
- AV_CODEC_ID_TARGA,
- AV_CODEC_ID_DSICINVIDEO,
- AV_CODEC_ID_TIERTEXSEQVIDEO,
- AV_CODEC_ID_TIFF,
- AV_CODEC_ID_GIF,
- AV_CODEC_ID_DXA,
- AV_CODEC_ID_DNXHD,
- AV_CODEC_ID_THP,
- AV_CODEC_ID_SGI,
- AV_CODEC_ID_C93,
- AV_CODEC_ID_BETHSOFTVID,
- AV_CODEC_ID_PTX,
- AV_CODEC_ID_TXD,
- AV_CODEC_ID_VP6A,
- AV_CODEC_ID_AMV,
- AV_CODEC_ID_VB,
- AV_CODEC_ID_PCX,
- AV_CODEC_ID_SUNRAST,
- AV_CODEC_ID_INDEO4,
- AV_CODEC_ID_INDEO5,
- AV_CODEC_ID_MIMIC,
- AV_CODEC_ID_RL2,
- AV_CODEC_ID_ESCAPE124,
- AV_CODEC_ID_DIRAC,
- AV_CODEC_ID_BFI,
- AV_CODEC_ID_CMV,
- AV_CODEC_ID_MOTIONPIXELS,
- AV_CODEC_ID_TGV,
- AV_CODEC_ID_TGQ,
- AV_CODEC_ID_TQI,
- AV_CODEC_ID_AURA,
- AV_CODEC_ID_AURA2,
- AV_CODEC_ID_V210X,
- AV_CODEC_ID_TMV,
- AV_CODEC_ID_V210,
- AV_CODEC_ID_DPX,
- AV_CODEC_ID_MAD,
- AV_CODEC_ID_FRWU,
- AV_CODEC_ID_FLASHSV2,
- AV_CODEC_ID_CDGRAPHICS,
- AV_CODEC_ID_R210,
- AV_CODEC_ID_ANM,
- AV_CODEC_ID_BINKVIDEO,
- AV_CODEC_ID_IFF_ILBM,
-#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM
- AV_CODEC_ID_KGV1,
- AV_CODEC_ID_YOP,
- AV_CODEC_ID_VP8,
- AV_CODEC_ID_PICTOR,
- AV_CODEC_ID_ANSI,
- AV_CODEC_ID_A64_MULTI,
- AV_CODEC_ID_A64_MULTI5,
- AV_CODEC_ID_R10K,
- AV_CODEC_ID_MXPEG,
- AV_CODEC_ID_LAGARITH,
- AV_CODEC_ID_PRORES,
- AV_CODEC_ID_JV,
- AV_CODEC_ID_DFA,
- AV_CODEC_ID_WMV3IMAGE,
- AV_CODEC_ID_VC1IMAGE,
- AV_CODEC_ID_UTVIDEO,
- AV_CODEC_ID_BMV_VIDEO,
- AV_CODEC_ID_VBLE,
- AV_CODEC_ID_DXTORY,
- AV_CODEC_ID_V410,
- AV_CODEC_ID_XWD,
- AV_CODEC_ID_CDXL,
- AV_CODEC_ID_XBM,
- AV_CODEC_ID_ZEROCODEC,
- AV_CODEC_ID_MSS1,
- AV_CODEC_ID_MSA1,
- AV_CODEC_ID_TSCC2,
- AV_CODEC_ID_MTS2,
- AV_CODEC_ID_CLLC,
- AV_CODEC_ID_MSS2,
- AV_CODEC_ID_VP9,
- AV_CODEC_ID_AIC,
- AV_CODEC_ID_ESCAPE130,
- AV_CODEC_ID_G2M,
- AV_CODEC_ID_WEBP,
- AV_CODEC_ID_HNM4_VIDEO,
- AV_CODEC_ID_HEVC,
-#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
- AV_CODEC_ID_FIC,
- AV_CODEC_ID_ALIAS_PIX,
- AV_CODEC_ID_BRENDER_PIX,
- AV_CODEC_ID_PAF_VIDEO,
- AV_CODEC_ID_EXR,
- AV_CODEC_ID_VP7,
- AV_CODEC_ID_SANM,
- AV_CODEC_ID_SGIRLE,
- AV_CODEC_ID_MVC1,
- AV_CODEC_ID_MVC2,
- AV_CODEC_ID_HQX,
- AV_CODEC_ID_TDSC,
- AV_CODEC_ID_HQ_HQA,
- AV_CODEC_ID_HAP,
- AV_CODEC_ID_DDS,
- AV_CODEC_ID_DXV,
- AV_CODEC_ID_SCREENPRESSO,
- AV_CODEC_ID_RSCC,
- AV_CODEC_ID_AVS2,
-
- AV_CODEC_ID_Y41P = 0x8000,
- AV_CODEC_ID_AVRP,
- AV_CODEC_ID_012V,
- AV_CODEC_ID_AVUI,
- AV_CODEC_ID_AYUV,
- AV_CODEC_ID_TARGA_Y216,
- AV_CODEC_ID_V308,
- AV_CODEC_ID_V408,
- AV_CODEC_ID_YUV4,
- AV_CODEC_ID_AVRN,
- AV_CODEC_ID_CPIA,
- AV_CODEC_ID_XFACE,
- AV_CODEC_ID_SNOW,
- AV_CODEC_ID_SMVJPEG,
- AV_CODEC_ID_APNG,
- AV_CODEC_ID_DAALA,
- AV_CODEC_ID_CFHD,
- AV_CODEC_ID_TRUEMOTION2RT,
- AV_CODEC_ID_M101,
- AV_CODEC_ID_MAGICYUV,
- AV_CODEC_ID_SHEERVIDEO,
- AV_CODEC_ID_YLC,
- AV_CODEC_ID_PSD,
- AV_CODEC_ID_PIXLET,
- AV_CODEC_ID_SPEEDHQ,
- AV_CODEC_ID_FMVC,
- AV_CODEC_ID_SCPR,
- AV_CODEC_ID_CLEARVIDEO,
- AV_CODEC_ID_XPM,
- AV_CODEC_ID_AV1,
- AV_CODEC_ID_BITPACKED,
- AV_CODEC_ID_MSCC,
- AV_CODEC_ID_SRGC,
- AV_CODEC_ID_SVG,
- AV_CODEC_ID_GDV,
- AV_CODEC_ID_FITS,
- AV_CODEC_ID_IMM4,
- AV_CODEC_ID_PROSUMER,
- AV_CODEC_ID_MWSC,
- AV_CODEC_ID_WCMV,
- AV_CODEC_ID_RASC,
- AV_CODEC_ID_HYMT,
- AV_CODEC_ID_ARBC,
- AV_CODEC_ID_AGM,
- AV_CODEC_ID_LSCR,
- AV_CODEC_ID_VP4,
-
- /* various PCM "codecs" */
- AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
- AV_CODEC_ID_PCM_S16LE = 0x10000,
- AV_CODEC_ID_PCM_S16BE,
- AV_CODEC_ID_PCM_U16LE,
- AV_CODEC_ID_PCM_U16BE,
- AV_CODEC_ID_PCM_S8,
- AV_CODEC_ID_PCM_U8,
- AV_CODEC_ID_PCM_MULAW,
- AV_CODEC_ID_PCM_ALAW,
- AV_CODEC_ID_PCM_S32LE,
- AV_CODEC_ID_PCM_S32BE,
- AV_CODEC_ID_PCM_U32LE,
- AV_CODEC_ID_PCM_U32BE,
- AV_CODEC_ID_PCM_S24LE,
- AV_CODEC_ID_PCM_S24BE,
- AV_CODEC_ID_PCM_U24LE,
- AV_CODEC_ID_PCM_U24BE,
- AV_CODEC_ID_PCM_S24DAUD,
- AV_CODEC_ID_PCM_ZORK,
- AV_CODEC_ID_PCM_S16LE_PLANAR,
- AV_CODEC_ID_PCM_DVD,
- AV_CODEC_ID_PCM_F32BE,
- AV_CODEC_ID_PCM_F32LE,
- AV_CODEC_ID_PCM_F64BE,
- AV_CODEC_ID_PCM_F64LE,
- AV_CODEC_ID_PCM_BLURAY,
- AV_CODEC_ID_PCM_LXF,
- AV_CODEC_ID_S302M,
- AV_CODEC_ID_PCM_S8_PLANAR,
- AV_CODEC_ID_PCM_S24LE_PLANAR,
- AV_CODEC_ID_PCM_S32LE_PLANAR,
- AV_CODEC_ID_PCM_S16BE_PLANAR,
-
- AV_CODEC_ID_PCM_S64LE = 0x10800,
- AV_CODEC_ID_PCM_S64BE,
- AV_CODEC_ID_PCM_F16LE,
- AV_CODEC_ID_PCM_F24LE,
- AV_CODEC_ID_PCM_VIDC,
-
- /* various ADPCM codecs */
- AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
- AV_CODEC_ID_ADPCM_IMA_WAV,
- AV_CODEC_ID_ADPCM_IMA_DK3,
- AV_CODEC_ID_ADPCM_IMA_DK4,
- AV_CODEC_ID_ADPCM_IMA_WS,
- AV_CODEC_ID_ADPCM_IMA_SMJPEG,
- AV_CODEC_ID_ADPCM_MS,
- AV_CODEC_ID_ADPCM_4XM,
- AV_CODEC_ID_ADPCM_XA,
- AV_CODEC_ID_ADPCM_ADX,
- AV_CODEC_ID_ADPCM_EA,
- AV_CODEC_ID_ADPCM_G726,
- AV_CODEC_ID_ADPCM_CT,
- AV_CODEC_ID_ADPCM_SWF,
- AV_CODEC_ID_ADPCM_YAMAHA,
- AV_CODEC_ID_ADPCM_SBPRO_4,
- AV_CODEC_ID_ADPCM_SBPRO_3,
- AV_CODEC_ID_ADPCM_SBPRO_2,
- AV_CODEC_ID_ADPCM_THP,
- AV_CODEC_ID_ADPCM_IMA_AMV,
- AV_CODEC_ID_ADPCM_EA_R1,
- AV_CODEC_ID_ADPCM_EA_R3,
- AV_CODEC_ID_ADPCM_EA_R2,
- AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
- AV_CODEC_ID_ADPCM_IMA_EA_EACS,
- AV_CODEC_ID_ADPCM_EA_XAS,
- AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
- AV_CODEC_ID_ADPCM_IMA_ISS,
- AV_CODEC_ID_ADPCM_G722,
- AV_CODEC_ID_ADPCM_IMA_APC,
- AV_CODEC_ID_ADPCM_VIMA,
-
- AV_CODEC_ID_ADPCM_AFC = 0x11800,
- AV_CODEC_ID_ADPCM_IMA_OKI,
- AV_CODEC_ID_ADPCM_DTK,
- AV_CODEC_ID_ADPCM_IMA_RAD,
- AV_CODEC_ID_ADPCM_G726LE,
- AV_CODEC_ID_ADPCM_THP_LE,
- AV_CODEC_ID_ADPCM_PSX,
- AV_CODEC_ID_ADPCM_AICA,
- AV_CODEC_ID_ADPCM_IMA_DAT4,
- AV_CODEC_ID_ADPCM_MTAF,
- AV_CODEC_ID_ADPCM_AGM,
-
- /* AMR */
- AV_CODEC_ID_AMR_NB = 0x12000,
- AV_CODEC_ID_AMR_WB,
-
- /* RealAudio codecs*/
- AV_CODEC_ID_RA_144 = 0x13000,
- AV_CODEC_ID_RA_288,
-
- /* various DPCM codecs */
- AV_CODEC_ID_ROQ_DPCM = 0x14000,
- AV_CODEC_ID_INTERPLAY_DPCM,
- AV_CODEC_ID_XAN_DPCM,
- AV_CODEC_ID_SOL_DPCM,
-
- AV_CODEC_ID_SDX2_DPCM = 0x14800,
- AV_CODEC_ID_GREMLIN_DPCM,
-
- /* audio codecs */
- AV_CODEC_ID_MP2 = 0x15000,
- AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
- AV_CODEC_ID_AAC,
- AV_CODEC_ID_AC3,
- AV_CODEC_ID_DTS,
- AV_CODEC_ID_VORBIS,
- AV_CODEC_ID_DVAUDIO,
- AV_CODEC_ID_WMAV1,
- AV_CODEC_ID_WMAV2,
- AV_CODEC_ID_MACE3,
- AV_CODEC_ID_MACE6,
- AV_CODEC_ID_VMDAUDIO,
- AV_CODEC_ID_FLAC,
- AV_CODEC_ID_MP3ADU,
- AV_CODEC_ID_MP3ON4,
- AV_CODEC_ID_SHORTEN,
- AV_CODEC_ID_ALAC,
- AV_CODEC_ID_WESTWOOD_SND1,
- AV_CODEC_ID_GSM, ///< as in Berlin toast format
- AV_CODEC_ID_QDM2,
- AV_CODEC_ID_COOK,
- AV_CODEC_ID_TRUESPEECH,
- AV_CODEC_ID_TTA,
- AV_CODEC_ID_SMACKAUDIO,
- AV_CODEC_ID_QCELP,
- AV_CODEC_ID_WAVPACK,
- AV_CODEC_ID_DSICINAUDIO,
- AV_CODEC_ID_IMC,
- AV_CODEC_ID_MUSEPACK7,
- AV_CODEC_ID_MLP,
- AV_CODEC_ID_GSM_MS, /* as found in WAV */
- AV_CODEC_ID_ATRAC3,
- AV_CODEC_ID_APE,
- AV_CODEC_ID_NELLYMOSER,
- AV_CODEC_ID_MUSEPACK8,
- AV_CODEC_ID_SPEEX,
- AV_CODEC_ID_WMAVOICE,
- AV_CODEC_ID_WMAPRO,
- AV_CODEC_ID_WMALOSSLESS,
- AV_CODEC_ID_ATRAC3P,
- AV_CODEC_ID_EAC3,
- AV_CODEC_ID_SIPR,
- AV_CODEC_ID_MP1,
- AV_CODEC_ID_TWINVQ,
- AV_CODEC_ID_TRUEHD,
- AV_CODEC_ID_MP4ALS,
- AV_CODEC_ID_ATRAC1,
- AV_CODEC_ID_BINKAUDIO_RDFT,
- AV_CODEC_ID_BINKAUDIO_DCT,
- AV_CODEC_ID_AAC_LATM,
- AV_CODEC_ID_QDMC,
- AV_CODEC_ID_CELT,
- AV_CODEC_ID_G723_1,
- AV_CODEC_ID_G729,
- AV_CODEC_ID_8SVX_EXP,
- AV_CODEC_ID_8SVX_FIB,
- AV_CODEC_ID_BMV_AUDIO,
- AV_CODEC_ID_RALF,
- AV_CODEC_ID_IAC,
- AV_CODEC_ID_ILBC,
- AV_CODEC_ID_OPUS,
- AV_CODEC_ID_COMFORT_NOISE,
- AV_CODEC_ID_TAK,
- AV_CODEC_ID_METASOUND,
- AV_CODEC_ID_PAF_AUDIO,
- AV_CODEC_ID_ON2AVC,
- AV_CODEC_ID_DSS_SP,
- AV_CODEC_ID_CODEC2,
-
- AV_CODEC_ID_FFWAVESYNTH = 0x15800,
- AV_CODEC_ID_SONIC,
- AV_CODEC_ID_SONIC_LS,
- AV_CODEC_ID_EVRC,
- AV_CODEC_ID_SMV,
- AV_CODEC_ID_DSD_LSBF,
- AV_CODEC_ID_DSD_MSBF,
- AV_CODEC_ID_DSD_LSBF_PLANAR,
- AV_CODEC_ID_DSD_MSBF_PLANAR,
- AV_CODEC_ID_4GV,
- AV_CODEC_ID_INTERPLAY_ACM,
- AV_CODEC_ID_XMA1,
- AV_CODEC_ID_XMA2,
- AV_CODEC_ID_DST,
- AV_CODEC_ID_ATRAC3AL,
- AV_CODEC_ID_ATRAC3PAL,
- AV_CODEC_ID_DOLBY_E,
- AV_CODEC_ID_APTX,
- AV_CODEC_ID_APTX_HD,
- AV_CODEC_ID_SBC,
- AV_CODEC_ID_ATRAC9,
- AV_CODEC_ID_HCOM,
-
- /* subtitle codecs */
- AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
- AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
- AV_CODEC_ID_DVB_SUBTITLE,
- AV_CODEC_ID_TEXT, ///< raw UTF-8 text
- AV_CODEC_ID_XSUB,
- AV_CODEC_ID_SSA,
- AV_CODEC_ID_MOV_TEXT,
- AV_CODEC_ID_HDMV_PGS_SUBTITLE,
- AV_CODEC_ID_DVB_TELETEXT,
- AV_CODEC_ID_SRT,
-
- AV_CODEC_ID_MICRODVD = 0x17800,
- AV_CODEC_ID_EIA_608,
- AV_CODEC_ID_JACOSUB,
- AV_CODEC_ID_SAMI,
- AV_CODEC_ID_REALTEXT,
- AV_CODEC_ID_STL,
- AV_CODEC_ID_SUBVIEWER1,
- AV_CODEC_ID_SUBVIEWER,
- AV_CODEC_ID_SUBRIP,
- AV_CODEC_ID_WEBVTT,
- AV_CODEC_ID_MPL2,
- AV_CODEC_ID_VPLAYER,
- AV_CODEC_ID_PJS,
- AV_CODEC_ID_ASS,
- AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
- AV_CODEC_ID_TTML,
- AV_CODEC_ID_ARIB_CAPTION,
-
- /* other specific kind of codecs (generally used for attachments) */
- AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
- AV_CODEC_ID_TTF = 0x18000,
-
-    AV_CODEC_ID_SCTE_35, ///< Contains timestamps estimated through the PCR of the program stream.
- AV_CODEC_ID_BINTEXT = 0x18800,
- AV_CODEC_ID_XBIN,
- AV_CODEC_ID_IDF,
- AV_CODEC_ID_OTF,
- AV_CODEC_ID_SMPTE_KLV,
- AV_CODEC_ID_DVD_NAV,
- AV_CODEC_ID_TIMED_ID3,
- AV_CODEC_ID_BIN_DATA,
-
-
- AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
-
- AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
- * stream (only used by libavformat) */
- AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
- * stream (only used by libavformat) */
- AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
- AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket
-};
-
-/**
- * This struct describes the properties of a single codec described by an
- * AVCodecID.
- * @see avcodec_descriptor_get()
- */
-typedef struct AVCodecDescriptor {
- enum AVCodecID id;
- enum AVMediaType type;
- /**
- * Name of the codec described by this descriptor. It is non-empty and
- * unique for each codec descriptor. It should contain alphanumeric
- * characters and '_' only.
- */
- const char *name;
- /**
- * A more descriptive name for this codec. May be NULL.
- */
- const char *long_name;
- /**
- * Codec properties, a combination of AV_CODEC_PROP_* flags.
- */
- int props;
- /**
- * MIME type(s) associated with the codec.
- * May be NULL; if not, a NULL-terminated array of MIME types.
- * The first item is always non-NULL and is the preferred MIME type.
- */
- const char *const *mime_types;
- /**
- * If non-NULL, an array of profiles recognized for this codec.
- * Terminated with FF_PROFILE_UNKNOWN.
- */
- const struct AVProfile *profiles;
-} AVCodecDescriptor;
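/*
 * Illustrative sketch (not part of the header): looking up a descriptor with
 * avcodec_descriptor_get() and inspecting its properties. Assumes the usual
 * libavcodec headers have been included; print_codec_props() is hypothetical.
 */
#include <stdio.h>

static void print_codec_props(enum AVCodecID id)
{
    const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
    if (!desc)
        return;                     /* unknown codec ID */
    printf("%s (%s): %s\n", desc->name,
           desc->long_name ? desc->long_name : "no long name",
           (desc->props & AV_CODEC_PROP_LOSSLESS) ? "supports lossless" : "lossy only");
}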
-
-/**
- * Codec uses only intra compression.
- * Video and audio codecs only.
- */
-#define AV_CODEC_PROP_INTRA_ONLY (1 << 0)
-/**
- * Codec supports lossy compression. Audio and video codecs only.
- * @note a codec may support both lossy and lossless
- * compression modes
- */
-#define AV_CODEC_PROP_LOSSY (1 << 1)
-/**
- * Codec supports lossless compression. Audio and video codecs only.
- */
-#define AV_CODEC_PROP_LOSSLESS (1 << 2)
-/**
- * Codec supports frame reordering. That is, the coded order (the order in which
- * the encoded packets are output by the encoders / stored / input to the
- * decoders) may be different from the presentation order of the corresponding
- * frames.
- *
- * For codecs that do not have this property set, PTS and DTS should always be
- * equal.
- */
-#define AV_CODEC_PROP_REORDER (1 << 3)
-/**
- * Subtitle codec is bitmap based
- * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.
- */
-#define AV_CODEC_PROP_BITMAP_SUB (1 << 16)
-/**
- * Subtitle codec is text based.
- * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field.
- */
-#define AV_CODEC_PROP_TEXT_SUB (1 << 17)
-
-/**
- * @ingroup lavc_decoding
- * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
- * This is mainly needed because some optimized bitstream readers read
- * 32 or 64 bits at once and could read over the end.<br>
- * Note: If the first 23 bits of the additional bytes are not 0, then damaged
- * MPEG bitstreams could cause overread and segfault.
- */
-#define AV_INPUT_BUFFER_PADDING_SIZE 64
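/*
 * Illustrative sketch (not part of the header): one way a caller might honor
 * the padding requirement above before feeding raw data to a decoder. The
 * helper name alloc_padded_input() is hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include "libavutil/mem.h"

static uint8_t *alloc_padded_input(const uint8_t *src, size_t size)
{
    /* Over-allocate so optimized bitstream readers may safely overread. */
    uint8_t *buf = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return NULL;
    memcpy(buf, src, size);
    /* Zero the padding so damaged streams cannot expose uninitialized data. */
    memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    return buf;
}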
-
/**
* @ingroup lavc_encoding
* minimum encoding buffer size
@@ -797,34 +191,6 @@ typedef struct AVCodecDescriptor {
#define AV_INPUT_BUFFER_MIN_SIZE 16384
/**
- * @ingroup lavc_decoding
- */
-enum AVDiscard{
- /* We leave some space between them for extensions (drop some
- * keyframes for intra-only or drop just some bidir frames). */
- AVDISCARD_NONE =-16, ///< discard nothing
- AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
- AVDISCARD_NONREF = 8, ///< discard all non reference
- AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
- AVDISCARD_NONINTRA= 24, ///< discard all non intra frames
- AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
- AVDISCARD_ALL = 48, ///< discard all
-};
-
-enum AVAudioServiceType {
- AV_AUDIO_SERVICE_TYPE_MAIN = 0,
- AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
- AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
- AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
- AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
- AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
- AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
- AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
- AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
- AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI
-};
-
-/**
* @ingroup lavc_encoding
*/
typedef struct RcOverride{
@@ -866,6 +232,58 @@ typedef struct RcOverride{
*/
#define AV_CODEC_FLAG_DROPCHANGED (1 << 5)
/**
+ * Request the encoder to output reconstructed frames, i.e.\ frames that would
+ * be produced by decoding the encoded bitstream. These frames may be retrieved
+ * by calling avcodec_receive_frame() immediately after a successful call to
+ * avcodec_receive_packet().
+ *
+ * Should only be used with encoders flagged with the
+ * @ref AV_CODEC_CAP_ENCODER_RECON_FRAME capability.
+ */
+#define AV_CODEC_FLAG_RECON_FRAME (1 << 6)
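/*
 * Illustrative sketch (not part of the header): draining one packet and its
 * reconstructed frame. enc_ctx must have been opened with
 * AV_CODEC_FLAG_RECON_FRAME set on an encoder advertising
 * AV_CODEC_CAP_ENCODER_RECON_FRAME; error handling is reduced to the minimum.
 */
static void receive_packet_and_recon(AVCodecContext *enc_ctx,
                                     AVPacket *pkt, AVFrame *recon)
{
    if (avcodec_receive_packet(enc_ctx, pkt) == 0) {
        /* The reconstructed frame matching this packet, as described above. */
        if (avcodec_receive_frame(enc_ctx, recon) == 0) {
            /* ... compare against the source frame, compute PSNR, ... */
            av_frame_unref(recon);
        }
        av_packet_unref(pkt);
    }
}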
+/**
+ * @par decoding:
+ * Request the decoder to propagate each packet's AVPacket.opaque and
+ * AVPacket.opaque_ref to its corresponding output AVFrame.
+ *
+ * @par encoding:
+ * Request the encoder to propagate each frame's AVFrame.opaque and
+ * AVFrame.opaque_ref values to its corresponding output AVPacket.
+ *
+ * @par
+ * May only be set on encoders that have the
+ * @ref AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability flag.
+ *
+ * @note
+ * While in typical cases one input frame produces exactly one output packet
+ * (perhaps after a delay), in general the mapping of frames to packets is
+ * M-to-N, so
+ * - Any number of input frames may be associated with any given output packet.
+ * This includes zero - e.g. some encoders may output packets that carry only
+ * metadata about the whole stream.
+ * - A given input frame may be associated with any number of output packets.
+ * Again this includes zero - e.g. some encoders may drop frames under certain
+ * conditions.
+ * .
+ * This implies that when using this flag, the caller must NOT assume that
+ * - a given input frame's opaques will necessarily appear on some output packet;
+ * - every output packet will have some non-NULL opaque value.
+ * .
+ * When an output packet contains multiple frames, the opaque values will be
+ * taken from the first of those.
+ *
+ * @note
+ * The converse holds for decoders, with frames and packets switched.
+ */
+#define AV_CODEC_FLAG_COPY_OPAQUE (1 << 7)
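/*
 * Illustrative sketch (not part of the header): propagating a caller-owned
 * tag from input frames to output packets with AV_CODEC_FLAG_COPY_OPAQUE.
 * The flag must have been set on enc_ctx before avcodec_open2(); my_tag is a
 * hypothetical caller pointer and error handling is minimal.
 */
static int encode_tagged(AVCodecContext *enc_ctx, AVFrame *frame,
                         AVPacket *pkt, void *my_tag)
{
    int ret;

    frame->opaque = my_tag;                 /* copied to pkt->opaque */
    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_packet(enc_ctx, pkt)) == 0) {
        void *tag = pkt->opaque;            /* may be NULL, see the note above */
        (void)tag;                          /* ... route the packet by tag ... */
        av_packet_unref(pkt);
    }
    return ret == AVERROR(EAGAIN) ? 0 : ret;
}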
+/**
+ * Signal to the encoder that the values of AVFrame.duration are valid and
+ * should be used (typically for transferring them to output packets).
+ *
+ * If this flag is not set, frame durations are ignored.
+ */
+#define AV_CODEC_FLAG_FRAME_DURATION (1 << 8)
+/**
* Use internal 2pass ratecontrol in first pass mode.
*/
#define AV_CODEC_FLAG_PASS1 (1 << 9)
@@ -886,11 +304,6 @@ typedef struct RcOverride{
*/
#define AV_CODEC_FLAG_PSNR (1 << 15)
/**
- * Input bitstream might be truncated at a random location
- * instead of only at frame boundaries.
- */
-#define AV_CODEC_FLAG_TRUNCATED (1 << 16)
-/**
* Use interlaced DCT.
*/
#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18)
@@ -931,11 +344,6 @@ typedef struct RcOverride{
#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3)
/**
- * timecode is in drop frame format. DEPRECATED!!!!
- */
-#define AV_CODEC_FLAG2_DROP_FRAME_TIMECODE (1 << 13)
-
-/**
* Input bitstream might be truncated at a packet boundaries
* instead of only at frame boundaries.
*/
@@ -961,214 +369,34 @@ typedef struct RcOverride{
* Do not reset ASS ReadOrder field on flush (subtitles decoding)
*/
#define AV_CODEC_FLAG2_RO_FLUSH_NOOP (1 << 30)
-
-/* Unsupported options :
- * Syntax Arithmetic coding (SAC)
- * Reference Picture Selection
- * Independent Segment Decoding */
-/* /Fx */
-/* codec capabilities */
-
-/**
- * Decoder can use draw_horiz_band callback.
- */
-#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
/**
- * Codec uses get_buffer() for allocating buffers and supports custom allocators.
- * If not set, it might not use get_buffer() at all or use operations that
- * assume the buffer was allocated by avcodec_default_get_buffer.
+ * Generate/parse ICC profiles on encode/decode, as appropriate for the type of
+ * file. No effect on codecs which cannot contain embedded ICC profiles, or
+ * when compiled without support for lcms2.
*/
-#define AV_CODEC_CAP_DR1 (1 << 1)
-#define AV_CODEC_CAP_TRUNCATED (1 << 3)
-/**
- * Encoder or decoder requires flushing with NULL input at the end in order to
- * give the complete and correct output.
- *
- * NOTE: If this flag is not set, the codec is guaranteed to never be fed
- * with NULL data. The user can still send NULL data to the public encode
- * or decode function, but libavcodec will not pass it along to the codec
- * unless this flag is set.
- *
- * Decoders:
- * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
- * avpkt->size=0 at the end to get the delayed data until the decoder no longer
- * returns frames.
- *
- * Encoders:
- * The encoder needs to be fed with NULL data at the end of encoding until the
- * encoder no longer returns data.
- *
- * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
- * flag also means that the encoder must set the pts and duration for
- * each output packet. If this flag is not set, the pts and duration will
- * be determined by libavcodec from the input frame.
- */
-#define AV_CODEC_CAP_DELAY (1 << 5)
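/*
 * Illustrative sketch (not part of the header): draining a decoder that has
 * AV_CODEC_CAP_DELAY using the send/receive API. Error handling is reduced
 * to the minimum; frame is caller-allocated.
 */
static int flush_decoder(AVCodecContext *dec_ctx, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, NULL);   /* enter draining mode */
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(dec_ctx, frame)) == 0) {
        /* ... consume the delayed frame ... */
        av_frame_unref(frame);
    }
    return ret == AVERROR_EOF ? 0 : ret;
}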
-/**
- * Codec can be fed a final frame with a smaller size.
- * This can be used to prevent truncation of the last audio samples.
- */
-#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
+#define AV_CODEC_FLAG2_ICC_PROFILES (1U << 31)
+/* Exported side data.
+ These flags can be passed in AVCodecContext.export_side_data before initialization.
+*/
/**
- * Codec can output multiple frames per AVPacket.
- * Normally demuxers return one frame at a time; demuxers which do not do so
- * are connected to a parser to split what they return into proper frames.
- * This flag is reserved for the very rare category of codecs which have a
- * bitstream that cannot be split into frames without time-consuming
- * operations like full decoding. Demuxers carrying such bitstreams thus
- * may return multiple frames in a packet. This has many disadvantages, like
- * prohibiting stream copy in many cases, so it should only be considered
- * as a last resort.
- */
-#define AV_CODEC_CAP_SUBFRAMES (1 << 8)
-/**
- * Codec is experimental and is thus avoided in favor of non-experimental
- * encoders.
- */
-#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
-/**
- * Codec should fill in channel configuration and sample rate instead of the container.
- */
-#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
-/**
- * Codec supports frame-level multithreading.
- */
-#define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
-/**
- * Codec supports slice-based (or partition-based) multithreading.
- */
-#define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
-/**
- * Codec supports changed parameters at any point.
- */
-#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
-/**
- * Codec supports avctx->thread_count == 0 (auto).
- */
-#define AV_CODEC_CAP_AUTO_THREADS (1 << 15)
-/**
- * Audio encoder supports receiving a different number of samples in each call.
- */
-#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
-/**
- * Decoder is not a preferred choice for probing.
- * It could, for example, be a hardware decoder that is expensive to spin up,
- * or it could simply not provide much useful information about the stream.
- * A decoder marked with this flag should only be used as a last-resort
- * choice for probing.
- */
-#define AV_CODEC_CAP_AVOID_PROBING (1 << 17)
-/**
- * Codec is intra only.
- */
-#define AV_CODEC_CAP_INTRA_ONLY 0x40000000
-/**
- * Codec is lossless.
+ * Export motion vectors through frame side data
*/
-#define AV_CODEC_CAP_LOSSLESS 0x80000000
-
+#define AV_CODEC_EXPORT_DATA_MVS (1 << 0)
/**
- * Codec is backed by a hardware implementation. Typically used to
- * identify a non-hwaccel hardware decoder. For information about hwaccels, use
- * avcodec_get_hw_config() instead.
+ * Export encoder Producer Reference Time through packet side data
*/
-#define AV_CODEC_CAP_HARDWARE (1 << 18)
-
+#define AV_CODEC_EXPORT_DATA_PRFT (1 << 1)
/**
- * Codec is potentially backed by a hardware implementation, but not
- * necessarily. This is used instead of AV_CODEC_CAP_HARDWARE, if the
- * implementation provides some sort of internal fallback.
+ * Decoding only.
+ * Export the AVVideoEncParams structure through frame side data.
*/
-#define AV_CODEC_CAP_HYBRID (1 << 19)
-
+#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS (1 << 2)
/**
- * This codec takes the reordered_opaque field from input AVFrames
- * and returns it in the corresponding field in AVCodecContext after
- * encoding.
+ * Decoding only.
+ * Do not apply film grain, export it instead.
*/
-#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20)
-
-/**
- * Pan Scan area.
- * This specifies the area which should be displayed.
- * Note there may be multiple such areas for one frame.
- */
-typedef struct AVPanScan {
- /**
- * id
- * - encoding: Set by user.
- * - decoding: Set by libavcodec.
- */
- int id;
-
- /**
- * width and height in 1/16 pel
- * - encoding: Set by user.
- * - decoding: Set by libavcodec.
- */
- int width;
- int height;
-
- /**
- * position of the top left corner in 1/16 pel for up to 3 fields/frames
- * - encoding: Set by user.
- * - decoding: Set by libavcodec.
- */
- int16_t position[3][2];
-} AVPanScan;
-
-/**
- * This structure describes the bitrate properties of an encoded bitstream. It
- * roughly corresponds to a subset of the VBV parameters for MPEG-2 or HRD
- * parameters for H.264/HEVC.
- */
-typedef struct AVCPBProperties {
- /**
- * Maximum bitrate of the stream, in bits per second.
- * Zero if unknown or unspecified.
- */
-#if FF_API_UNSANITIZED_BITRATES
- int max_bitrate;
-#else
- int64_t max_bitrate;
-#endif
- /**
- * Minimum bitrate of the stream, in bits per second.
- * Zero if unknown or unspecified.
- */
-#if FF_API_UNSANITIZED_BITRATES
- int min_bitrate;
-#else
- int64_t min_bitrate;
-#endif
- /**
- * Average bitrate of the stream, in bits per second.
- * Zero if unknown or unspecified.
- */
-#if FF_API_UNSANITIZED_BITRATES
- int avg_bitrate;
-#else
- int64_t avg_bitrate;
-#endif
-
- /**
- * The size of the buffer to which the ratecontrol is applied, in bits.
- * Zero if unknown or unspecified.
- */
- int buffer_size;
-
- /**
- * The delay between the time the packet this structure is associated with
- * is received and the time when it should be decoded, in periods of a 27MHz
- * clock.
- *
- * UINT64_MAX when unknown or unspecified.
- */
- uint64_t vbv_delay;
-} AVCPBProperties;
+#define AV_CODEC_EXPORT_DATA_FILM_GRAIN (1 << 3)
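/*
 * Illustrative sketch (not part of the header): opting in to extra side data
 * before avcodec_open2() and checking for it on decoded frames. The frame
 * side-data types used here are declared in libavutil/frame.h.
 */
#include "libavutil/frame.h"

static void enable_side_data_exports(AVCodecContext *dec_ctx)
{
    dec_ctx->export_side_data |= AV_CODEC_EXPORT_DATA_MVS |
                                 AV_CODEC_EXPORT_DATA_FILM_GRAIN;
}

static int frame_has_motion_vectors(const AVFrame *frame)
{
    return av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS) != NULL;
}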
/**
* The decoder will keep a reference to the frame and may reuse it later.
@@ -1176,379 +404,12 @@ typedef struct AVCPBProperties {
#define AV_GET_BUFFER_FLAG_REF (1 << 0)
/**
- * @defgroup lavc_packet AVPacket
- *
- * Types and functions for working with AVPacket.
- * @{
- */
-enum AVPacketSideDataType {
- /**
- * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE
- * bytes worth of palette. This side data signals that a new palette is
- * present.
- */
- AV_PKT_DATA_PALETTE,
-
- /**
- * The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format
- * that the extradata buffer was changed and the receiving side should
- * act upon it appropriately. The new extradata is embedded in the side
- * data buffer and should be immediately used for processing the current
- * frame or packet.
- */
- AV_PKT_DATA_NEW_EXTRADATA,
-
- /**
- * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
- * @code
- * u32le param_flags
- * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
- * s32le channel_count
- * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
- * u64le channel_layout
- * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
- * s32le sample_rate
- * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
- * s32le width
- * s32le height
- * @endcode
- */
- AV_PKT_DATA_PARAM_CHANGE,
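/*
 * Illustrative sketch (not part of the header): decoding the layout above
 * from a raw AV_PKT_DATA_PARAM_CHANGE buffer. Only the channel-count and
 * sample-rate fields are extracted; a real parser would validate every field.
 */
#include <stdint.h>
#include "libavutil/intreadwrite.h"

static void parse_param_change(const uint8_t *data, size_t size,
                               int *channel_count, int *sample_rate)
{
    size_t pos = 4;
    uint32_t param_flags;

    if (size < 4)
        return;
    param_flags = AV_RL32(data);
    if ((param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) && pos + 4 <= size) {
        *channel_count = (int)AV_RL32(data + pos);
        pos += 4;
    }
    if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
        pos += 8;                                   /* skip u64le channel_layout */
    if ((param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) && pos + 4 <= size)
        *sample_rate = (int)AV_RL32(data + pos);
}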
-
- /**
- * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
- * structures with info about macroblocks relevant to splitting the
- * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
- * That is, it does not necessarily contain info about all macroblocks,
- * as long as the distance between macroblocks in the info is smaller
- * than the target payload size.
- * Each MB info structure is 12 bytes, and is laid out as follows:
- * @code
- * u32le bit offset from the start of the packet
- * u8 current quantizer at the start of the macroblock
- * u8 GOB number
- * u16le macroblock address within the GOB
- * u8 horizontal MV predictor
- * u8 vertical MV predictor
- * u8 horizontal MV predictor for block number 3
- * u8 vertical MV predictor for block number 3
- * @endcode
- */
- AV_PKT_DATA_H263_MB_INFO,
-
- /**
- * This side data should be associated with an audio stream and contains
- * ReplayGain information in form of the AVReplayGain struct.
- */
- AV_PKT_DATA_REPLAYGAIN,
-
- /**
- * This side data contains a 3x3 transformation matrix describing an affine
- * transformation that needs to be applied to the decoded video frames for
- * correct presentation.
- *
- * See libavutil/display.h for a detailed description of the data.
- */
- AV_PKT_DATA_DISPLAYMATRIX,
-
- /**
- * This side data should be associated with a video stream and contains
- * Stereoscopic 3D information in form of the AVStereo3D struct.
- */
- AV_PKT_DATA_STEREO3D,
-
- /**
- * This side data should be associated with an audio stream and corresponds
- * to enum AVAudioServiceType.
- */
- AV_PKT_DATA_AUDIO_SERVICE_TYPE,
-
- /**
- * This side data contains quality related information from the encoder.
- * @code
- * u32le quality factor of the compressed frame. Allowed range is between 1 (good) and FF_LAMBDA_MAX (bad).
- * u8 picture type
- * u8 error count
- * u16 reserved
- * u64le[error count] sum of squared differences between encoder in and output
- * @endcode
- */
- AV_PKT_DATA_QUALITY_STATS,
-
- /**
- * This side data contains an integer value representing the stream index
- * of a "fallback" track. A fallback track indicates an alternate
- * track to use when the current track can not be decoded for some reason.
- * e.g. no decoder available for codec.
- */
- AV_PKT_DATA_FALLBACK_TRACK,
-
- /**
- * This side data corresponds to the AVCPBProperties struct.
- */
- AV_PKT_DATA_CPB_PROPERTIES,
-
- /**
- * Recommends skipping the specified number of samples.
- * @code
- * u32le number of samples to skip from start of this packet
- * u32le number of samples to skip from end of this packet
- * u8 reason for start skip
- * u8 reason for end skip (0=padding silence, 1=convergence)
- * @endcode
- */
- AV_PKT_DATA_SKIP_SAMPLES,
-
- /**
- * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
- * the packet may contain "dual mono" audio specific to Japanese DTV
- * and, if it is true, recommends that only the selected channel be used.
- * @code
- * u8 selected channels (0=main/left, 1=sub/right, 2=both)
- * @endcode
- */
- AV_PKT_DATA_JP_DUALMONO,
-
- /**
- * A list of zero terminated key/value strings. There is no end marker for
- * the list, so it is required to rely on the side data size to stop.
- */
- AV_PKT_DATA_STRINGS_METADATA,
-
- /**
- * Subtitle event position
- * @code
- * u32le x1
- * u32le y1
- * u32le x2
- * u32le y2
- * @endcode
- */
- AV_PKT_DATA_SUBTITLE_POSITION,
-
- /**
- * Data found in BlockAdditional element of matroska container. There is
- * no end marker for the data, so it is required to rely on the side data
- * size to recognize the end. 8 byte id (as found in BlockAddId) followed
- * by data.
- */
- AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
-
- /**
- * The optional first identifier line of a WebVTT cue.
- */
- AV_PKT_DATA_WEBVTT_IDENTIFIER,
-
- /**
- * The optional settings (rendering instructions) that immediately
- * follow the timestamp specifier of a WebVTT cue.
- */
- AV_PKT_DATA_WEBVTT_SETTINGS,
-
- /**
- * A list of zero terminated key/value strings. There is no end marker for
- * the list, so it is required to rely on the side data size to stop. This
- * side data includes updated metadata which appeared in the stream.
- */
- AV_PKT_DATA_METADATA_UPDATE,
-
- /**
- * MPEGTS stream ID as uint8_t, this is required to pass the stream ID
- * information from the demuxer to the corresponding muxer.
- */
- AV_PKT_DATA_MPEGTS_STREAM_ID,
-
- /**
- * Mastering display metadata (based on SMPTE-2086:2014). This metadata
- * should be associated with a video stream and contains data in the form
- * of the AVMasteringDisplayMetadata struct.
- */
- AV_PKT_DATA_MASTERING_DISPLAY_METADATA,
-
- /**
- * This side data should be associated with a video stream and corresponds
- * to the AVSphericalMapping structure.
- */
- AV_PKT_DATA_SPHERICAL,
-
- /**
- * Content light level (based on CTA-861.3). This metadata should be
- * associated with a video stream and contains data in the form of the
- * AVContentLightMetadata struct.
- */
- AV_PKT_DATA_CONTENT_LIGHT_LEVEL,
-
- /**
- * ATSC A53 Part 4 Closed Captions. This metadata should be associated with
- * a video stream. A53 CC bitstream is stored as uint8_t in AVPacketSideData.data.
- * The number of bytes of CC data is AVPacketSideData.size.
- */
- AV_PKT_DATA_A53_CC,
-
- /**
- * This side data is encryption initialization data.
- * The format is not part of ABI, use av_encryption_init_info_* methods to
- * access.
- */
- AV_PKT_DATA_ENCRYPTION_INIT_INFO,
-
- /**
- * This side data contains encryption info for how to decrypt the packet.
- * The format is not part of ABI, use av_encryption_info_* methods to access.
- */
- AV_PKT_DATA_ENCRYPTION_INFO,
-
- /**
- * Active Format Description data consisting of a single byte as specified
- * in ETSI TS 101 154 using AVActiveFormatDescription enum.
- */
- AV_PKT_DATA_AFD,
-
- /**
- * The number of side data types.
- * This is not part of the public API/ABI in the sense that it may
- * change when new side data types are added.
- * This must stay the last enum value.
- * If its value becomes huge, some code using it
- * needs to be updated as it assumes it to be smaller than other limits.
- */
- AV_PKT_DATA_NB
-};
-
-#define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS //DEPRECATED
-
-typedef struct AVPacketSideData {
- uint8_t *data;
- int size;
- enum AVPacketSideDataType type;
-} AVPacketSideData;
-
-/**
- * This structure stores compressed data. It is typically exported by demuxers
- * and then passed as input to decoders, or received as output from encoders and
- * then passed to muxers.
- *
- * For video, it should typically contain one compressed frame. For audio it may
- * contain several compressed frames. Encoders are allowed to output empty
- * packets, with no compressed data, containing only side data
- * (e.g. to update some stream parameters at the end of encoding).
- *
- * AVPacket is one of the few structs in FFmpeg whose size is a part of the
- * public ABI. Thus it may be allocated on the stack and no new fields can be
- * added to it without a libavcodec and libavformat major bump.
- *
- * The semantics of data ownership depends on the buf field.
- * If it is set, the packet data is dynamically allocated and is
- * valid indefinitely until a call to av_packet_unref() reduces the
- * reference count to 0.
- *
- * If the buf field is not set, av_packet_ref() will make a copy instead
- * of increasing the reference count.
- *
- * The side data is always allocated with av_malloc(), copied by
- * av_packet_ref() and freed by av_packet_unref().
- *
- * @see av_packet_ref
- * @see av_packet_unref
- */
-typedef struct AVPacket {
- /**
- * A reference to the reference-counted buffer where the packet data is
- * stored.
- * May be NULL, then the packet data is not reference-counted.
- */
- AVBufferRef *buf;
- /**
- * Presentation timestamp in AVStream->time_base units; the time at which
- * the decompressed packet will be presented to the user.
- * Can be AV_NOPTS_VALUE if it is not stored in the file.
- * pts MUST be larger than or equal to dts as presentation cannot happen before
- * decompression, unless one wants to view hex dumps. Some formats misuse
- * the terms dts and pts/cts to mean something different. Such timestamps
- * must be converted to true pts/dts before they are stored in AVPacket.
- */
- int64_t pts;
- /**
- * Decompression timestamp in AVStream->time_base units; the time at which
- * the packet is decompressed.
- * Can be AV_NOPTS_VALUE if it is not stored in the file.
- */
- int64_t dts;
- uint8_t *data;
- int size;
- int stream_index;
- /**
- * A combination of AV_PKT_FLAG values
- */
- int flags;
- /**
- * Additional packet data that can be provided by the container.
- * Packet can contain several types of side information.
- */
- AVPacketSideData *side_data;
- int side_data_elems;
-
- /**
- * Duration of this packet in AVStream->time_base units, 0 if unknown.
- * Equals next_pts - this_pts in presentation order.
- */
- int64_t duration;
-
- int64_t pos; ///< byte position in stream, -1 if unknown
-
-#if FF_API_CONVERGENCE_DURATION
- /**
- * @deprecated Same as the duration field, but as int64_t. This was required
- * for Matroska subtitles, whose duration values could overflow when the
- * duration field was still an int.
- */
- attribute_deprecated
- int64_t convergence_duration;
-#endif
-} AVPacket;
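/*
 * Illustrative sketch (not part of the header): the ownership rules described
 * above in practice. av_packet_ref() shares the underlying buffer when
 * src->buf is set and silently falls back to a copy when it is not.
 */
static int keep_packet(AVPacket *dst, const AVPacket *src)
{
    int ret = av_packet_ref(dst, src);
    if (ret < 0)
        return ret;
    /* ... use dst independently of src ... */
    av_packet_unref(dst);       /* drops the reference or frees the copy */
    return 0;
}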
-#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
-#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
-/**
- * Flag is used to discard packets which are required to maintain valid
- * decoder state but are not required for output and should be dropped
- * after decoding.
- **/
-#define AV_PKT_FLAG_DISCARD 0x0004
-/**
- * The packet comes from a trusted source.
- *
- * Otherwise-unsafe constructs such as arbitrary pointers to data
- * outside the packet may be followed.
- */
-#define AV_PKT_FLAG_TRUSTED 0x0008
-/**
- * Flag is used to indicate packets that contain frames that can
- * be discarded by the decoder. I.e. Non-reference frames.
- */
-#define AV_PKT_FLAG_DISPOSABLE 0x0010
-
-
-enum AVSideDataParamChangeFlags {
- AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
- AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
- AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
- AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
-};
-/**
- * @}
+ * The encoder will keep a reference to the packet and may reuse it later.
*/
+#define AV_GET_ENCODE_BUFFER_FLAG_REF (1 << 0)
struct AVCodecInternal;
-enum AVFieldOrder {
- AV_FIELD_UNKNOWN,
- AV_FIELD_PROGRESSIVE,
-    AV_FIELD_TT,          //< Top coded first, top displayed first
- AV_FIELD_BB, //< Bottom coded first, bottom displayed first
- AV_FIELD_TB, //< Top coded first, bottom displayed first
- AV_FIELD_BT, //< Bottom coded first, top displayed first
-};
-
/**
* main external API structure.
* New fields can be added to the end with minor version bumps.
@@ -1682,8 +543,7 @@ typedef struct AVCodecContext {
* (fixed_vop_rate == 0 implies that it is different from the framerate)
*
* - encoding: MUST be set by user.
- * - decoding: the use of this field for decoding is deprecated.
- * Use framerate instead.
+ * - decoding: unused.
*/
AVRational time_base;
@@ -1726,7 +586,7 @@ typedef struct AVCodecContext {
* picture width / height.
*
* @note Those fields may not match the values of the last
- * AVFrame output by avcodec_decode_video2 due frame
+ * AVFrame output by avcodec_receive_frame() due to frame
* reordering.
*
* - encoding: MUST be set by user.
@@ -1802,17 +662,29 @@ typedef struct AVCodecContext {
int y, int type, int height);
/**
- * callback to negotiate the pixelFormat
- * @param fmt is the list of formats which are supported by the codec,
- * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality.
- * The first is always the native one.
- * @note The callback may be called again immediately if initialization for
- * the selected (hardware-accelerated) pixel format failed.
- * @warning Behavior is undefined if the callback returns a value not
- * in the fmt list of formats.
- * @return the chosen format
- * - encoding: unused
- * - decoding: Set by user, if not set the native format will be chosen.
+ * Callback to negotiate the pixel format. Decoding only, may be set by the
+ * caller before avcodec_open2().
+ *
+ * Called by some decoders to select the pixel format that will be used for
+ * the output frames. This is mainly used to set up hardware acceleration;
+ * in that case the provided format list contains the corresponding hwaccel
+ * pixel formats alongside the "software" one. The software pixel format may also
+ * be retrieved from \ref sw_pix_fmt.
+ *
+ * This callback will be called when the coded frame properties (such as
+ * resolution, pixel format, etc.) change and more than one output format is
+ * supported for those new properties. If a hardware pixel format is chosen
+ * and initialization for it fails, the callback may be called again
+ * immediately.
+ *
+ * This callback may be called from different threads if the decoder is
+ * multi-threaded, but not from more than one thread simultaneously.
+ *
+ * @param fmt list of formats which may be used in the current
+ * configuration, terminated by AV_PIX_FMT_NONE.
+ * @warning Behavior is undefined if the callback returns a value other
+ * than one of the formats in fmt or AV_PIX_FMT_NONE.
+ * @return the chosen format or AV_PIX_FMT_NONE
*/
enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
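/*
 * Illustrative sketch (not part of the header): a get_format callback that
 * takes a hardware pixel format when it is offered and otherwise returns the
 * first offered format. AV_PIX_FMT_VAAPI is only an example; the matching
 * hw_device_ctx setup is not shown.
 */
static enum AVPixelFormat pick_pixel_format(struct AVCodecContext *s,
                                            const enum AVPixelFormat *fmt)
{
    for (const enum AVPixelFormat *p = fmt; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == AV_PIX_FMT_VAAPI)
            return *p;          /* take the hardware path if it is offered */
    }
    return fmt[0];              /* otherwise fall back to the first offered format */
}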
@@ -1833,12 +705,6 @@ typedef struct AVCodecContext {
*/
float b_quant_factor;
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int b_frame_strategy;
-#endif
-
/**
* qscale offset between IP and B-frames
* - encoding: Set by user.
@@ -1854,12 +720,6 @@ typedef struct AVCodecContext {
*/
int has_b_frames;
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int mpeg_quant;
-#endif
-
/**
* qscale factor between P- and I-frames
* If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor + offset).
@@ -1918,15 +778,6 @@ typedef struct AVCodecContext {
*/
int slice_count;
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int prediction_method;
-#define FF_PRED_LEFT 0
-#define FF_PRED_PLANE 1
-#define FF_PRED_MEDIAN 2
-#endif
-
/**
* slice offsets in the frame in bytes
* - encoding: Set/allocated by libavcodec.
@@ -1999,12 +850,6 @@ typedef struct AVCodecContext {
*/
int last_predictor_count;
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int pre_me;
-#endif
-
/**
* motion estimation prepass comparison function
* - encoding: Set by user.
@@ -2073,16 +918,6 @@ typedef struct AVCodecContext {
*/
uint16_t *inter_matrix;
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int scenechange_threshold;
-
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int noise_reduction;
-#endif
-
/**
* precision of the intra DC coefficient - 8
* - encoding: Set by user.
@@ -2118,26 +953,12 @@ typedef struct AVCodecContext {
*/
int mb_lmax;
-#if FF_API_PRIVATE_OPT
- /**
- * @deprecated use encoder private options instead
- */
- attribute_deprecated
- int me_penalty_compensation;
-#endif
-
/**
* - encoding: Set by user.
* - decoding: unused
*/
int bidir_refine;
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int brd_scale;
-#endif
-
/**
* minimum GOP size
* - encoding: Set by user.
@@ -2152,12 +973,6 @@ typedef struct AVCodecContext {
*/
int refs;
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int chromaoffset;
-#endif
-
/**
* Note: Value depends upon the compare function used for fullpel ME.
* - encoding: Set by user.
@@ -2165,12 +980,6 @@ typedef struct AVCodecContext {
*/
int mv0_threshold;
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int b_sensitivity;
-#endif
-
/**
* Chromaticity coordinates of the source primaries.
* - encoding: Set by user
@@ -2223,7 +1032,15 @@ typedef struct AVCodecContext {
/* audio only */
int sample_rate; ///< samples per second
- int channels; ///< number of audio channels
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * number of audio channels
+ * @deprecated use ch_layout.nb_channels
+ */
+ attribute_deprecated
+ int channels;
+#endif
/**
* audio sample format
@@ -2244,6 +1061,7 @@ typedef struct AVCodecContext {
*/
int frame_size;
+#if FF_API_AVCTX_FRAME_NUMBER
/**
* Frame counter, set by libavcodec.
*
@@ -2252,8 +1070,11 @@ typedef struct AVCodecContext {
*
* @note the counter is not incremented if encoding/decoding resulted in
* an error.
+ * @deprecated use frame_num instead
*/
+ attribute_deprecated
int frame_number;
+#endif
/**
* number of bytes per packet if constant and known or 0
@@ -2268,19 +1089,25 @@ typedef struct AVCodecContext {
*/
int cutoff;
+#if FF_API_OLD_CHANNEL_LAYOUT
/**
* Audio channel layout.
* - encoding: set by user.
* - decoding: set by user, may be overwritten by libavcodec.
+ * @deprecated use ch_layout
*/
+ attribute_deprecated
uint64_t channel_layout;
/**
* Request decoder to use this channel layout if it can (0 for default)
* - encoding: unused
* - decoding: Set by user.
+ * @deprecated use "downmix" codec private option
*/
+ attribute_deprecated
uint64_t request_channel_layout;
+#endif
/**
* Type of service that the audio stream conveys.
@@ -2353,9 +1180,9 @@ typedef struct AVCodecContext {
*
* Some decoders do not support linesizes changing between frames.
*
- * If frame multithreading is used and thread_safe_callbacks is set,
- * this callback may be called from a different thread, but not from more
- * than one at once. Does not need to be reentrant.
+ * If frame multithreading is used, this callback may be called from a
+ * different thread, but not from more than one at once. Does not need to be
+ * reentrant.
*
* @see avcodec_align_dimensions2()
*
@@ -2379,22 +1206,6 @@ typedef struct AVCodecContext {
*/
int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
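/*
 * Illustrative sketch (not part of the header): a get_buffer2 override that
 * simply delegates to avcodec_default_get_buffer2() and logs the allocation.
 * A real custom allocator would attach its own AVBufferRefs instead.
 */
static int logging_get_buffer2(struct AVCodecContext *s, AVFrame *frame, int flags)
{
    int ret = avcodec_default_get_buffer2(s, frame, flags);
    if (ret < 0)
        return ret;
    av_log(s, AV_LOG_DEBUG, "allocated a %dx%d buffer\n",
           frame->width, frame->height);
    return 0;
}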
- /**
- * If non-zero, the decoded audio and video frames returned from
- * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
- * and are valid indefinitely. The caller must free them with
- * av_frame_unref() when they are not needed anymore.
- * Otherwise, the decoded frames must not be freed by the caller and are
- * only valid until the next decode call.
- *
- * This is always automatically enabled if avcodec_receive_frame() is used.
- *
- * - encoding: unused
- * - decoding: set by the caller before avcodec_open2().
- */
- attribute_deprecated
- int refcounted_frames;
-
/* - encoding parameters */
float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
@@ -2470,42 +1281,6 @@ typedef struct AVCodecContext {
*/
int rc_initial_buffer_occupancy;
-#if FF_API_CODER_TYPE
-#define FF_CODER_TYPE_VLC 0
-#define FF_CODER_TYPE_AC 1
-#define FF_CODER_TYPE_RAW 2
-#define FF_CODER_TYPE_RLE 3
- /**
- * @deprecated use encoder private options instead
- */
- attribute_deprecated
- int coder_type;
-#endif /* FF_API_CODER_TYPE */
-
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int context_model;
-#endif
-
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int frame_skip_threshold;
-
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int frame_skip_factor;
-
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int frame_skip_exp;
-
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int frame_skip_cmp;
-#endif /* FF_API_PRIVATE_OPT */
-
/**
* trellis RD quantization
* - encoding: Set by user.
@@ -2513,69 +1288,6 @@ typedef struct AVCodecContext {
*/
int trellis;
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int min_prediction_order;
-
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int max_prediction_order;
-
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int64_t timecode_frame_start;
-#endif
-
-#if FF_API_RTP_CALLBACK
- /**
- * @deprecated unused
- */
- /* The RTP callback: This function is called */
- /* every time the encoder has a packet to send. */
- /* It depends on the encoder if the data starts */
- /* with a Start Code (it should). H.263 does. */
- /* mb_nb contains the number of macroblocks */
- /* encoded in the RTP payload. */
- attribute_deprecated
- void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);
-#endif
-
-#if FF_API_PRIVATE_OPT
- /** @deprecated use encoder private options instead */
- attribute_deprecated
- int rtp_payload_size; /* The size of the RTP payload: the coder will */
- /* do its best to deliver a chunk with size */
- /* below rtp_payload_size, the chunk will start */
- /* with a start code on some codecs like H.263. */
- /* This doesn't take account of any particular */
- /* headers inside the transmitted RTP payload. */
-#endif
-
-#if FF_API_STAT_BITS
- /* statistics, used for 2-pass encoding */
- attribute_deprecated
- int mv_bits;
- attribute_deprecated
- int header_bits;
- attribute_deprecated
- int i_tex_bits;
- attribute_deprecated
- int p_tex_bits;
- attribute_deprecated
- int i_count;
- attribute_deprecated
- int p_count;
- attribute_deprecated
- int skip_count;
- attribute_deprecated
- int misc_bits;
-
- /** @deprecated this field is unused */
- attribute_deprecated
- int frame_bits;
-#endif
-
/**
* pass1 encoding statistics output buffer
* - encoding: Set by libavcodec.
@@ -2624,13 +1336,9 @@ typedef struct AVCodecContext {
* unofficial and experimental (that is, they always try to decode things
* when they can) unless they are explicitly asked to behave stupidly
* (=strictly conform to the specs)
+ * This may only be set to one of the FF_COMPLIANCE_* values in defs.h.
*/
int strict_std_compliance;
-#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
-#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
-#define FF_COMPLIANCE_NORMAL 0
-#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
-#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
/**
* error concealment flags
@@ -2653,63 +1361,27 @@ typedef struct AVCodecContext {
#define FF_DEBUG_BITSTREAM 4
#define FF_DEBUG_MB_TYPE 8
#define FF_DEBUG_QP 16
-#if FF_API_DEBUG_MV
-/**
- * @deprecated this option does nothing
- */
-#define FF_DEBUG_MV 32
-#endif
#define FF_DEBUG_DCT_COEFF 0x00000040
#define FF_DEBUG_SKIP 0x00000080
#define FF_DEBUG_STARTCODE 0x00000100
#define FF_DEBUG_ER 0x00000400
#define FF_DEBUG_MMCO 0x00000800
#define FF_DEBUG_BUGS 0x00001000
-#if FF_API_DEBUG_MV
-#define FF_DEBUG_VIS_QP 0x00002000
-#define FF_DEBUG_VIS_MB_TYPE 0x00004000
-#endif
#define FF_DEBUG_BUFFERS 0x00008000
#define FF_DEBUG_THREADS 0x00010000
#define FF_DEBUG_GREEN_MD 0x00800000
#define FF_DEBUG_NOMC 0x01000000
-#if FF_API_DEBUG_MV
- /**
- * debug
- * - encoding: Set by user.
- * - decoding: Set by user.
- */
- int debug_mv;
-#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 // visualize forward predicted MVs of P-frames
-#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 // visualize forward predicted MVs of B-frames
-#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 // visualize backward predicted MVs of B-frames
-#endif
-
/**
* Error recognition; may misdetect some more or less valid parts as errors.
- * - encoding: unused
+ * This is a bitfield of the AV_EF_* values defined in defs.h.
+ *
+ * - encoding: Set by user.
* - decoding: Set by user.
*/
int err_recognition;
-/**
- * Verify checksums embedded in the bitstream (could be of either encoded or
- * decoded data, depending on the codec) and print an error message on mismatch.
- * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
- * decoder returning an error.
- */
-#define AV_EF_CRCCHECK (1<<0)
-#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations
-#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length
-#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection
-
-#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue
-#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
-#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliances as errors
-#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error
-
-
+#if FF_API_REORDERED_OPAQUE
/**
* opaque 64-bit number (generally a PTS) that will be reordered and
* output in AVFrame.reordered_opaque
@@ -2718,8 +1390,12 @@ typedef struct AVCodecContext {
* supported by encoders with the
* AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability.
* - decoding: Set by user.
+ *
+ * @deprecated Use AV_CODEC_FLAG_COPY_OPAQUE instead
*/
+ attribute_deprecated
int64_t reordered_opaque;
+#endif
/**
* Hardware accelerator in use
@@ -2729,14 +1405,26 @@ typedef struct AVCodecContext {
const struct AVHWAccel *hwaccel;
/**
- * Hardware accelerator context.
- * For some hardware accelerators, a global context needs to be
- * provided by the user. In that case, this holds display-dependent
- * data FFmpeg cannot instantiate itself. Please refer to the
- * FFmpeg HW accelerator documentation to know how to fill this
- * in; e.g. for VA API, this is a struct vaapi_context.
- * - encoding: unused
- * - decoding: Set by user
+ * Legacy hardware accelerator context.
+ *
+ * For some hardware acceleration methods, the caller may use this field to
+ * signal hwaccel-specific data to the codec. The struct pointed to by this
+ * pointer is hwaccel-dependent and defined in the respective header. Please
+ * refer to the FFmpeg HW accelerator documentation to know how to fill
+ * this.
+ *
+ * In most cases this field is optional - the necessary information may also
+ * be provided to libavcodec through @ref hw_frames_ctx or @ref
+ * hw_device_ctx (see avcodec_get_hw_config()). However, in some cases it
+ * may be the only method of signalling some (optional) information.
+ *
+ * The struct and its contents are owned by the caller.
+ *
+ * - encoding: May be set by the caller before avcodec_open2(). Must remain
+ * valid until avcodec_free_context().
+ * - decoding: May be set by the caller in the get_format() callback.
+ * Must remain valid until the next get_format() call,
+ * or avcodec_free_context() (whichever comes first).
*/
void *hwaccel_context;
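/*
 * Illustrative sketch (not part of the header): the hw_device_ctx route
 * mentioned above, which is usually preferable to filling hwaccel_context
 * directly. AV_HWDEVICE_TYPE_VAAPI is only an example device type.
 */
#include "libavutil/hwcontext.h"

static int attach_hw_device(AVCodecContext *dec_ctx)
{
    AVBufferRef *device_ref = NULL;
    int ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                     NULL, NULL, 0);
    if (ret < 0)
        return ret;
    dec_ctx->hw_device_ctx = device_ref;    /* the codec context owns this reference now */
    return 0;
}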
@@ -2778,7 +1466,10 @@ typedef struct AVCodecContext {
#define FF_IDCT_SIMPLEARMV6 17
#define FF_IDCT_FAAN 20
#define FF_IDCT_SIMPLENEON 22
-#define FF_IDCT_NONE 24 /* Used by XvMC to extract IDCT coefficients with FF_IDCT_PERM_NONE */
+#if FF_API_IDCT_NONE
+// formerly used by xvmc
+#define FF_IDCT_NONE 24
+#endif
#define FF_IDCT_SIMPLEAUTO 128
/**
@@ -2795,25 +1486,12 @@ typedef struct AVCodecContext {
*/
int bits_per_raw_sample;
-#if FF_API_LOWRES
/**
* low resolution decoding, 1-> 1/2 size, 2->1/4 size
* - encoding: unused
* - decoding: Set by user.
*/
int lowres;
-#endif
-
-#if FF_API_CODED_FRAME
- /**
- * the picture in the bitstream
- * - encoding: Set by libavcodec.
- * - decoding: unused
- *
- * @deprecated use the quality factor packet side data instead
- */
- attribute_deprecated AVFrame *coded_frame;
-#endif
/**
* thread count
@@ -2843,16 +1521,6 @@ typedef struct AVCodecContext {
int active_thread_type;
/**
- * Set by the client if its custom get_buffer() callback can be called
- * synchronously from another thread, which allows faster multithreaded decoding.
- * draw_horiz_band() will be called from other threads regardless of this setting.
- * Ignored if the default get_buffer() is used.
- * - encoding: Set by user.
- * - decoding: Set by user.
- */
- int thread_safe_callbacks;
-
- /**
* The codec may call this to execute several independent things.
* It will return only after finishing all tasks.
* The user may replace this with some multithreaded implementation,
@@ -2868,7 +1536,6 @@ typedef struct AVCodecContext {
* It will return only after finishing all tasks.
* The user may replace this with some multithreaded implementation,
* the default implementation will execute the parts serially.
- * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
* @param c context passed also to func
* @param count the number of things to execute
* @param arg2 argument passed unchanged to func
@@ -2988,6 +1655,9 @@ typedef struct AVCodecContext {
#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
#define FF_PROFILE_HEVC_REXT 4
+#define FF_PROFILE_VVC_MAIN_10 1
+#define FF_PROFILE_VVC_MAIN_10_444 33
+
#define FF_PROFILE_AV1_MAIN 0
#define FF_PROFILE_AV1_HIGH 1
#define FF_PROFILE_AV1_PROFESSIONAL 2
@@ -3010,6 +1680,9 @@ typedef struct AVCodecContext {
#define FF_PROFILE_ARIB_PROFILE_A 0
#define FF_PROFILE_ARIB_PROFILE_C 1
+#define FF_PROFILE_KLVA_SYNC 0
+#define FF_PROFILE_KLVA_ASYNC 1
+
/**
* level
* - encoding: Set by user.
@@ -3050,34 +1723,6 @@ typedef struct AVCodecContext {
uint8_t *subtitle_header;
int subtitle_header_size;
-#if FF_API_VBV_DELAY
- /**
- * VBV delay coded in the last frame (in periods of a 27 MHz clock).
- * Used for compliant TS muxing.
- * - encoding: Set by libavcodec.
- * - decoding: unused.
- * @deprecated this value is now exported as a part of
- * AV_PKT_DATA_CPB_PROPERTIES packet side data
- */
- attribute_deprecated
- uint64_t vbv_delay;
-#endif
-
-#if FF_API_SIDEDATA_ONLY_PKT
- /**
- * Encoding only and set by default. Allow encoders to output packets
- * that do not contain any encoded data, only side data.
- *
- * Some encoders need to output such packets, e.g. to update some stream
- * parameters at the end of encoding.
- *
- * @deprecated this field disables the default behaviour and
- * it is kept only for compatibility.
- */
- attribute_deprecated
- int side_data_only_packets;
-#endif
-
/**
* Audio only. The number of "priming" samples (padding) inserted by the
* encoder at the beginning of the audio. I.e. this number of leading
@@ -3125,15 +1770,6 @@ typedef struct AVCodecContext {
*/
const AVCodecDescriptor *codec_descriptor;
-#if !FF_API_LOWRES
- /**
- * low resolution decoding, 1-> 1/2 size, 2->1/4 size
- * - encoding: unused
- * - decoding: Set by user.
- */
- int lowres;
-#endif
-
/**
* Current statistics for PTS correction.
* - decoding: maintained and used by libavcodec, not intended to be used by user apps
@@ -3184,18 +1820,6 @@ typedef struct AVCodecContext {
*/
int seek_preroll;
-#if !FF_API_DEBUG_MV
- /**
- * debug motion vectors
- * - encoding: Set by user.
- * - decoding: Set by user.
- */
- int debug_mv;
-#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
-#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
-#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
-#endif
-
/**
* custom intra quantization matrix
* - encoding: Set by user, can be NULL.
@@ -3227,6 +1851,7 @@ typedef struct AVCodecContext {
unsigned properties;
#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002
+#define FF_CODEC_PROPERTY_FILM_GRAIN 0x00000004
/**
* Additional data associated with the entire coded stream.
@@ -3262,17 +1887,6 @@ typedef struct AVCodecContext {
AVBufferRef *hw_frames_ctx;
/**
- * Control the form of AVSubtitle.rects[N]->ass
- * - decoding: set by user
- * - encoding: unused
- */
- int sub_text_format;
-#define FF_SUB_TEXT_FMT_ASS 0
-#if FF_API_ASS_TIMING
-#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS 1
-#endif
-
- /**
* Audio only. The amount of padding (in samples) appended by the encoder to
* the end of the audio. I.e. this number of decoded samples must be
* discarded by the caller from the end of the stream to get the original
@@ -3370,273 +1984,86 @@ typedef struct AVCodecContext {
* - encoding: unused
*/
int discard_damaged_percentage;
-} AVCodecContext;
-
-#if FF_API_CODEC_GET_SET
-/**
- * Accessors for some AVCodecContext fields. These used to be provided for ABI
- * compatibility, and do not need to be used anymore.
- */
-attribute_deprecated
-AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx);
-attribute_deprecated
-void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val);
-
-attribute_deprecated
-const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx);
-attribute_deprecated
-void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc);
-
-attribute_deprecated
-unsigned av_codec_get_codec_properties(const AVCodecContext *avctx);
-
-#if FF_API_LOWRES
-attribute_deprecated
-int av_codec_get_lowres(const AVCodecContext *avctx);
-attribute_deprecated
-void av_codec_set_lowres(AVCodecContext *avctx, int val);
-#endif
-
-attribute_deprecated
-int av_codec_get_seek_preroll(const AVCodecContext *avctx);
-attribute_deprecated
-void av_codec_set_seek_preroll(AVCodecContext *avctx, int val);
-
-attribute_deprecated
-uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx);
-attribute_deprecated
-void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val);
-#endif
-/**
- * AVProfile.
- */
-typedef struct AVProfile {
- int profile;
- const char *name; ///< short name for the profile
-} AVProfile;
-
-enum {
/**
- * The codec supports this format via the hw_device_ctx interface.
+ * The maximum number of samples per frame to accept.
*
- * When selecting this format, AVCodecContext.hw_device_ctx should
- * have been set to a device of the specified type before calling
- * avcodec_open2().
+ * - decoding: set by user
+ * - encoding: set by user
*/
- AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX = 0x01,
+ int64_t max_samples;
+
/**
- * The codec supports this format via the hw_frames_ctx interface.
+ * Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of
+ * metadata exported in frame, packet, or coded stream side data by
+ * decoders and encoders.
*
- * When selecting this format for a decoder,
- * AVCodecContext.hw_frames_ctx should be set to a suitable frames
- * context inside the get_format() callback. The frames context
- * must have been created on a device of the specified type.
+ * - decoding: set by user
+ * - encoding: set by user
*/
- AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX = 0x02,
+ int export_side_data;
+
/**
- * The codec supports this format by some internal method.
+ * This callback is called at the beginning of each packet to get a data
+ * buffer for it.
*
- * This format can be selected without any additional configuration -
- * no device or frames context is required.
- */
- AV_CODEC_HW_CONFIG_METHOD_INTERNAL = 0x04,
- /**
- * The codec supports this format by some ad-hoc method.
+ * The following field will be set in the packet before this callback is
+ * called:
+ * - size
+ * This callback must use the above value to calculate the required buffer size,
+ * which must be padded by at least AV_INPUT_BUFFER_PADDING_SIZE bytes.
+ *
+ * In some specific cases, the encoder may not use the entire buffer allocated by this
+ * callback. This will be reflected in the size value in the packet once returned by
+ * avcodec_receive_packet().
+ *
+ * This callback must fill the following fields in the packet:
+ * - data: alignment requirements for AVPacket apply, if any. Some architectures and
+ * encoders may benefit from having aligned data.
+ * - buf: must contain a pointer to an AVBufferRef structure. The packet's
+ * data pointer must be contained in it. See: av_buffer_create(), av_buffer_alloc(),
+ * and av_buffer_ref().
*
- * Additional settings and/or function calls are required. See the
- * codec-specific documentation for details. (Methods requiring
- * this sort of configuration are deprecated and others should be
- * used in preference.)
- */
- AV_CODEC_HW_CONFIG_METHOD_AD_HOC = 0x08,
-};
-
-typedef struct AVCodecHWConfig {
- /**
- * A hardware pixel format which the codec can use.
- */
- enum AVPixelFormat pix_fmt;
- /**
- * Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible
- * setup methods which can be used with this configuration.
- */
- int methods;
- /**
- * The device type associated with the configuration.
+ * If AV_CODEC_CAP_DR1 is not set then get_encode_buffer() must call
+ * avcodec_default_get_encode_buffer() instead of providing a buffer allocated by
+ * some other means.
*
- * Must be set for AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX and
- * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, otherwise unused.
- */
- enum AVHWDeviceType device_type;
-} AVCodecHWConfig;
-
-typedef struct AVCodecDefault AVCodecDefault;
-
-struct AVSubtitle;
-
-/**
- * AVCodec.
- */
-typedef struct AVCodec {
- /**
- * Name of the codec implementation.
- * The name is globally unique among encoders and among decoders (but an
- * encoder and a decoder can share the same name).
- * This is the primary way to find a codec from the user perspective.
- */
- const char *name;
- /**
- * Descriptive name for the codec, meant to be more human readable than name.
- * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
- */
- const char *long_name;
- enum AVMediaType type;
- enum AVCodecID id;
- /**
- * Codec capabilities.
- * see AV_CODEC_CAP_*
- */
- int capabilities;
- const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
- const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
- const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
- const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
- const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0
- uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
- const AVClass *priv_class; ///< AVClass for the private context
- const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
-
- /**
- * Group name of the codec implementation.
- * This is a short symbolic name of the wrapper backing this codec. A
- * wrapper uses some kind of external implementation for the codec, such
- * as an external library, or a codec implementation provided by the OS or
- * the hardware.
- * If this field is NULL, this is a builtin, libavcodec native codec.
- * If non-NULL, this will be the suffix in AVCodec.name in most cases
- * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>").
- */
- const char *wrapper_name;
-
- /*****************************************************************
- * No fields below this line are part of the public API. They
- * may not be used outside of libavcodec and can be changed and
- * removed at will.
- * New public fields should be added right above.
- *****************************************************************
- */
- int priv_data_size;
- struct AVCodec *next;
- /**
- * @name Frame-level threading support functions
- * @{
- */
- /**
- * If defined, called on thread contexts when they are created.
- * If the codec allocates writable tables in init(), re-allocate them here.
- * priv_data will be set to a copy of the original.
- */
- int (*init_thread_copy)(AVCodecContext *);
- /**
- * Copy necessary context variables from a previous thread context to the current one.
- * If not defined, the next thread will start automatically; otherwise, the codec
- * must call ff_thread_finish_setup().
+ * The flags field may contain a combination of AV_GET_ENCODE_BUFFER_FLAG_ flags.
+ * They may be used for example to hint what use the buffer may get after being
+ * created.
+ * Implementations of this callback may ignore flags they don't understand.
+ * If AV_GET_ENCODE_BUFFER_FLAG_REF is set in flags then the packet may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
*
- * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
- */
- int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);
- /** @} */
-
- /**
- * Private codec-specific defaults.
- */
- const AVCodecDefault *defaults;
-
- /**
- * Initialize codec static data, called from avcodec_register().
+ * This callback must be thread-safe, as when frame threading is used, it may
+ * be called from multiple threads simultaneously.
*
- * This is not intended for time consuming operations as it is
- * run for every codec regardless of that codec being used.
- */
- void (*init_static_data)(struct AVCodec *codec);
-
- int (*init)(AVCodecContext *);
- int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size,
- const struct AVSubtitle *sub);
- /**
- * Encode data to an AVPacket.
+ * @see avcodec_default_get_encode_buffer()
*
- * @param avctx codec context
- * @param avpkt output AVPacket (may contain a user-provided buffer)
- * @param[in] frame AVFrame containing the raw data to be encoded
- * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
- * non-empty packet was returned in avpkt.
- * @return 0 on success, negative error code on failure
- */
- int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
- int *got_packet_ptr);
- int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
- int (*close)(AVCodecContext *);
- /**
- * Encode API with decoupled packet/frame dataflow. The API is the
- * same as the avcodec_ prefixed APIs (avcodec_send_frame() etc.), except
- * that:
- * - never called if the codec is closed or the wrong type,
- * - if AV_CODEC_CAP_DELAY is not set, drain frames are never sent,
- * - only one drain frame is ever passed down,
- */
- int (*send_frame)(AVCodecContext *avctx, const AVFrame *frame);
- int (*receive_packet)(AVCodecContext *avctx, AVPacket *avpkt);
-
- /**
- * Decode API with decoupled packet/frame dataflow. This function is called
- * to get one output frame. It should call ff_decode_get_packet() to obtain
- * input data.
- */
- int (*receive_frame)(AVCodecContext *avctx, AVFrame *frame);
- /**
- * Flush buffers.
- * Will be called when seeking
- */
- void (*flush)(AVCodecContext *);
- /**
- * Internal codec capabilities.
- * See FF_CODEC_CAP_* in internal.h
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: unused
*/
- int caps_internal;
+ int (*get_encode_buffer)(struct AVCodecContext *s, AVPacket *pkt, int flags);
/**
- * Decoding only, a comma-separated list of bitstream filters to apply to
- * packets before decoding.
+ * Audio channel layout.
+ * - encoding: must be set by the caller, to one of AVCodec.ch_layouts.
+ * - decoding: may be set by the caller if known e.g. from the container.
+ * The decoder can then override during decoding as needed.
*/
- const char *bsfs;
+ AVChannelLayout ch_layout;
/**
- * Array of pointers to hardware configurations supported by the codec,
- * or NULL if no hardware supported. The array is terminated by a NULL
- * pointer.
+ * Frame counter, set by libavcodec.
*
- * The user can only access this field via avcodec_get_hw_config().
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
*/
- const struct AVCodecHWConfigInternal **hw_configs;
-} AVCodec;
-
-#if FF_API_CODEC_GET_SET
-attribute_deprecated
-int av_codec_get_max_lowres(const AVCodec *codec);
-#endif
-
-struct MpegEncContext;
-
-/**
- * Retrieve supported hardware configurations for a codec.
- *
- * Values of index from zero to some maximum return the indexed configuration
- * descriptor; all other values return NULL. If the codec does not support
- * any hardware configurations then it will always return NULL.
- */
-const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index);
+ int64_t frame_num;
+} AVCodecContext;
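The get_encode_buffer callback added above is the encode-side analogue of get_buffer2(). A minimal sketch of a caller-supplied implementation follows; the function name is illustrative, and it assumes the encoder advertises AV_CODEC_CAP_DR1 (otherwise it must defer to avcodec_default_get_encode_buffer(), as documented).

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>

static int my_get_encode_buffer(struct AVCodecContext *s, AVPacket *pkt, int flags)
{
    /* Without AV_CODEC_CAP_DR1 the default allocator must be used. */
    if (!(s->codec->capabilities & AV_CODEC_CAP_DR1))
        return avcodec_default_get_encode_buffer(s, pkt, flags);

    /* pkt->size is already set by libavcodec; the buffer must carry
     * AV_INPUT_BUFFER_PADDING_SIZE bytes of zeroed padding. */
    pkt->buf = av_buffer_alloc(pkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!pkt->buf)
        return AVERROR(ENOMEM);
    memset(pkt->buf->data + pkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    pkt->data = pkt->buf->data;
    return 0;
}

It would be installed with avctx->get_encode_buffer = my_get_encode_buffer; before avcodec_open2().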
/**
* @defgroup lavc_hwaccel AVHWAccel
@@ -3729,7 +2156,6 @@ typedef struct AVHWAccel {
*
* Meaningful slice information (codec specific) is guaranteed to
* be parsed at this point. This function is mandatory.
- * The only exception is XvMC, that works on MB level.
*
* @param avctx the codec context
* @param buf the slice data buffer base
@@ -3759,17 +2185,6 @@ typedef struct AVHWAccel {
int frame_priv_data_size;
/**
- * Called for every Macroblock in a slice.
- *
- * XvMC uses it to replace the ff_mpv_reconstruct_mb().
- * Instead of decoding to raw picture, MB parameters are
- * stored in an array provided by the video driver.
- *
- * @param s the mpeg context
- */
- void (*decode_mb)(struct MpegEncContext *s);
-
- /**
* Initialize the hwaccel private data.
*
* This will be called from ff_get_format(), after hwaccel and
@@ -3845,35 +2260,24 @@ typedef struct AVHWAccel {
#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH (1 << 2)
/**
- * @}
- */
-
-#if FF_API_AVPICTURE
-/**
- * @defgroup lavc_picture AVPicture
+ * Some hardware decoders (namely nvdec) can either output direct decoder
+ * surfaces, or make an on-device copy and return said copy.
+ * There is a hard limit on how many decoder surfaces there can be, and it
+ * cannot be accurately guessed ahead of time.
+ * For some processing chains, this can be okay, but others will run into the
+ * limit and in turn produce very confusing errors that require fine tuning of
+ * more or less obscure options by the user, or in extreme cases cannot be
+ * resolved at all without inserting an avfilter that forces a copy.
*
- * Functions for working with AVPicture
- * @{
+ * Thus, the hwaccel will by default make a copy for safety and resilience.
+ * If a user really wants to minimize the number of copies, they can set this
+ * flag and ensure their processing chain does not exhaust the surface pool.
*/
-
-/**
- * Picture data structure.
- *
- * Up to four components can be stored into it, the last component is
- * alpha.
- * @deprecated use AVFrame or imgutils functions instead
- */
-typedef struct AVPicture {
- attribute_deprecated
- uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes
- attribute_deprecated
- int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line
-} AVPicture;
+#define AV_HWACCEL_FLAG_UNSAFE_OUTPUT (1 << 3)
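As a rough illustration of the flag described above, a caller that knows its processing chain will not exhaust the surface pool might request direct decoder output like this (device setup and error handling abbreviated; the helper name is hypothetical):

#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>

static int open_hw_decoder_unsafe_output(const AVCodec *decoder, AVBufferRef *device_ref,
                                         AVCodecContext **out)
{
    AVCodecContext *avctx = avcodec_alloc_context3(decoder);
    if (!avctx)
        return AVERROR(ENOMEM);
    avctx->hw_device_ctx = av_buffer_ref(device_ref);   /* NULL check omitted */
    avctx->hwaccel_flags |= AV_HWACCEL_FLAG_UNSAFE_OUTPUT;
    *out = avctx;
    return avcodec_open2(avctx, decoder, NULL);
}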
/**
* @}
*/
-#endif
enum AVSubtitleType {
SUBTITLE_NONE,
@@ -3902,13 +2306,6 @@ typedef struct AVSubtitleRect {
int h; ///< height of pict, undefined when pict is not set
int nb_colors; ///< number of colors in pict, undefined when pict is not set
-#if FF_API_AVPICTURE
- /**
- * @deprecated unused
- */
- attribute_deprecated
- AVPicture pict;
-#endif
/**
* data+linesize for the bitmap of this subtitle.
* Can be set for text/ass as well once they are rendered.
@@ -3940,185 +2337,6 @@ typedef struct AVSubtitle {
} AVSubtitle;
/**
- * This struct describes the properties of an encoded stream.
- *
- * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must
- * be allocated with avcodec_parameters_alloc() and freed with
- * avcodec_parameters_free().
- */
-typedef struct AVCodecParameters {
- /**
- * General type of the encoded data.
- */
- enum AVMediaType codec_type;
- /**
- * Specific type of the encoded data (the codec used).
- */
- enum AVCodecID codec_id;
- /**
- * Additional information about the codec (corresponds to the AVI FOURCC).
- */
- uint32_t codec_tag;
-
- /**
- * Extra binary data needed for initializing the decoder, codec-dependent.
- *
- * Must be allocated with av_malloc() and will be freed by
- * avcodec_parameters_free(). The allocated size of extradata must be at
- * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding
- * bytes zeroed.
- */
- uint8_t *extradata;
- /**
- * Size of the extradata content in bytes.
- */
- int extradata_size;
-
- /**
- * - video: the pixel format, the value corresponds to enum AVPixelFormat.
- * - audio: the sample format, the value corresponds to enum AVSampleFormat.
- */
- int format;
-
- /**
- * The average bitrate of the encoded data (in bits per second).
- */
- int64_t bit_rate;
-
- /**
- * The number of bits per sample in the codedwords.
- *
- * This is basically the bitrate per sample. It is mandatory for a bunch of
- * formats to actually decode them. It's the number of bits for one sample in
- * the actual coded bitstream.
- *
- * This could be for example 4 for ADPCM
- * For PCM formats this matches bits_per_raw_sample
- * Can be 0
- */
- int bits_per_coded_sample;
-
- /**
- * This is the number of valid bits in each output sample. If the
- * sample format has more bits, the least significant bits are additional
- * padding bits, which are always 0. Use right shifts to reduce the sample
- * to its actual size. For example, audio formats with 24 bit samples will
- * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32.
- * To get the original sample use "(int32_t)sample >> 8"."
- *
- * For ADPCM this might be 12 or 16 or similar
- * Can be 0
- */
- int bits_per_raw_sample;
-
- /**
- * Codec-specific bitstream restrictions that the stream conforms to.
- */
- int profile;
- int level;
-
- /**
- * Video only. The dimensions of the video frame in pixels.
- */
- int width;
- int height;
-
- /**
- * Video only. The aspect ratio (width / height) which a single pixel
- * should have when displayed.
- *
- * When the aspect ratio is unknown / undefined, the numerator should be
- * set to 0 (the denominator may have any value).
- */
- AVRational sample_aspect_ratio;
-
- /**
- * Video only. The order of the fields in interlaced video.
- */
- enum AVFieldOrder field_order;
-
- /**
- * Video only. Additional colorspace characteristics.
- */
- enum AVColorRange color_range;
- enum AVColorPrimaries color_primaries;
- enum AVColorTransferCharacteristic color_trc;
- enum AVColorSpace color_space;
- enum AVChromaLocation chroma_location;
-
- /**
- * Video only. Number of delayed frames.
- */
- int video_delay;
-
- /**
- * Audio only. The channel layout bitmask. May be 0 if the channel layout is
- * unknown or unspecified, otherwise the number of bits set must be equal to
- * the channels field.
- */
- uint64_t channel_layout;
- /**
- * Audio only. The number of audio channels.
- */
- int channels;
- /**
- * Audio only. The number of audio samples per second.
- */
- int sample_rate;
- /**
- * Audio only. The number of bytes per coded audio frame, required by some
- * formats.
- *
- * Corresponds to nBlockAlign in WAVEFORMATEX.
- */
- int block_align;
- /**
- * Audio only. Audio frame size, if known. Required by some formats to be static.
- */
- int frame_size;
-
- /**
- * Audio only. The amount of padding (in samples) inserted by the encoder at
- * the beginning of the audio. I.e. this number of leading decoded samples
- * must be discarded by the caller to get the original audio without leading
- * padding.
- */
- int initial_padding;
- /**
- * Audio only. The amount of padding (in samples) appended by the encoder to
- * the end of the audio. I.e. this number of decoded samples must be
- * discarded by the caller from the end of the stream to get the original
- * audio without any trailing padding.
- */
- int trailing_padding;
- /**
- * Audio only. Number of samples to skip after a discontinuity.
- */
- int seek_preroll;
-} AVCodecParameters;
-
-/**
- * Iterate over all registered codecs.
- *
- * @param opaque a pointer where libavcodec will store the iteration state. Must
- * point to NULL to start the iteration.
- *
- * @return the next registered codec or NULL when the iteration is
- * finished
- */
-const AVCodec *av_codec_iterate(void **opaque);
-
-#if FF_API_NEXT
-/**
- * If c is NULL, returns the first registered codec,
- * if c is non-NULL, returns the next registered codec after c,
- * or NULL if c is the last one.
- */
-attribute_deprecated
-AVCodec *av_codec_next(const AVCodec *c);
-#endif
-
-/**
* Return the LIBAVCODEC_VERSION_INT constant.
*/
unsigned avcodec_version(void);
@@ -4133,32 +2351,6 @@ const char *avcodec_configuration(void);
*/
const char *avcodec_license(void);
-#if FF_API_NEXT
-/**
- * Register the codec codec and initialize libavcodec.
- *
- * @warning either this function or avcodec_register_all() must be called
- * before any other libavcodec functions.
- *
- * @see avcodec_register_all()
- */
-attribute_deprecated
-void avcodec_register(AVCodec *codec);
-
-/**
- * Register all the codecs, parsers and bitstream filters which were enabled at
- * configuration time. If you do not call this function you can select exactly
- * which formats you want to support, by using the individual registration
- * functions.
- *
- * @see avcodec_register
- * @see av_register_codec_parser
- * @see av_register_bitstream_filter
- */
-attribute_deprecated
-void avcodec_register_all(void);
-#endif
-
/**
* Allocate an AVCodecContext and set its fields to default values. The
* resulting struct should be freed with avcodec_free_context().
@@ -4180,15 +2372,6 @@ AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
*/
void avcodec_free_context(AVCodecContext **avctx);
-#if FF_API_GET_CONTEXT_DEFAULTS
-/**
- * @deprecated This function should not be used, as closing and opening a codec
- * context multiple time is not supported. A new codec context should be
- * allocated for each new use.
- */
-int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec);
-#endif
-
/**
* Get the AVClass for AVCodecContext. It can be used in combination with
* AV_OPT_SEARCH_FAKE_OBJ for examining options.
@@ -4197,15 +2380,6 @@ int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec);
*/
const AVClass *avcodec_get_class(void);
-#if FF_API_COPY_CONTEXT
-/**
- * Get the AVClass for AVFrame. It can be used in combination with
- * AV_OPT_SEARCH_FAKE_OBJ for examining options.
- *
- * @see av_opt_find().
- */
-const AVClass *avcodec_get_frame_class(void);
-
/**
* Get the AVClass for AVSubtitleRect. It can be used in combination with
* AV_OPT_SEARCH_FAKE_OBJ for examining options.
@@ -4215,48 +2389,6 @@ const AVClass *avcodec_get_frame_class(void);
const AVClass *avcodec_get_subtitle_rect_class(void);
/**
- * Copy the settings of the source AVCodecContext into the destination
- * AVCodecContext. The resulting destination codec context will be
- * unopened, i.e. you are required to call avcodec_open2() before you
- * can use this AVCodecContext to decode/encode video/audio data.
- *
- * @param dest target codec context, should be initialized with
- * avcodec_alloc_context3(NULL), but otherwise uninitialized
- * @param src source codec context
- * @return AVERROR() on error (e.g. memory allocation error), 0 on success
- *
- * @deprecated The semantics of this function are ill-defined and it should not
- * be used. If you need to transfer the stream parameters from one codec context
- * to another, use an intermediate AVCodecParameters instance and the
- * avcodec_parameters_from_context() / avcodec_parameters_to_context()
- * functions.
- */
-attribute_deprecated
-int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
-#endif
-
-/**
- * Allocate a new AVCodecParameters and set its fields to default values
- * (unknown/invalid/0). The returned struct must be freed with
- * avcodec_parameters_free().
- */
-AVCodecParameters *avcodec_parameters_alloc(void);
-
-/**
- * Free an AVCodecParameters instance and everything associated with it and
- * write NULL to the supplied pointer.
- */
-void avcodec_parameters_free(AVCodecParameters **par);
-
-/**
- * Copy the contents of src to dst. Any allocated fields in dst are freed and
- * replaced with newly allocated duplicates of the corresponding fields in src.
- *
- * @return >= 0 on success, a negative AVERROR code on failure.
- */
-int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src);
-
-/**
* Fill the parameters struct based on the values from the supplied codec
* context. Any allocated fields in par are freed and replaced with duplicates
* of the corresponding fields in codec.
@@ -4285,13 +2417,10 @@ int avcodec_parameters_to_context(AVCodecContext *codec,
* avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
* retrieving a codec.
*
- * @warning This function is not thread safe!
- *
* @note Always call this function before using decoding routines (such as
* @ref avcodec_receive_frame()).
*
* @code
- * avcodec_register_all();
* av_dict_set(&opts, "b", "2.5M", 0);
* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
* if (!codec)
@@ -4344,337 +2473,11 @@ void avsubtitle_free(AVSubtitle *sub);
*/
/**
- * @addtogroup lavc_packet
- * @{
- */
-
-/**
- * Allocate an AVPacket and set its fields to default values. The resulting
- * struct must be freed using av_packet_free().
- *
- * @return An AVPacket filled with default values or NULL on failure.
- *
- * @note this only allocates the AVPacket itself, not the data buffers. Those
- * must be allocated through other means such as av_new_packet.
- *
- * @see av_new_packet
- */
-AVPacket *av_packet_alloc(void);
-
-/**
- * Create a new packet that references the same data as src.
- *
- * This is a shortcut for av_packet_alloc()+av_packet_ref().
- *
- * @return newly created AVPacket on success, NULL on error.
- *
- * @see av_packet_alloc
- * @see av_packet_ref
- */
-AVPacket *av_packet_clone(const AVPacket *src);
-
-/**
- * Free the packet, if the packet is reference counted, it will be
- * unreferenced first.
- *
- * @param pkt packet to be freed. The pointer will be set to NULL.
- * @note passing NULL is a no-op.
- */
-void av_packet_free(AVPacket **pkt);
-
-/**
- * Initialize optional fields of a packet with default values.
- *
- * Note, this does not touch the data and size members, which have to be
- * initialized separately.
- *
- * @param pkt packet
- */
-void av_init_packet(AVPacket *pkt);
-
-/**
- * Allocate the payload of a packet and initialize its fields with
- * default values.
- *
- * @param pkt packet
- * @param size wanted payload size
- * @return 0 if OK, AVERROR_xxx otherwise
- */
-int av_new_packet(AVPacket *pkt, int size);
-
-/**
- * Reduce packet size, correctly zeroing padding
- *
- * @param pkt packet
- * @param size new size
- */
-void av_shrink_packet(AVPacket *pkt, int size);
-
-/**
- * Increase packet size, correctly zeroing padding
- *
- * @param pkt packet
- * @param grow_by number of bytes by which to increase the size of the packet
- */
-int av_grow_packet(AVPacket *pkt, int grow_by);
-
-/**
- * Initialize a reference-counted packet from av_malloc()ed data.
- *
- * @param pkt packet to be initialized. This function will set the data, size,
- * and buf fields, all others are left untouched.
- * @param data Data allocated by av_malloc() to be used as packet data. If this
- * function returns successfully, the data is owned by the underlying AVBuffer.
- * The caller may not access the data through other means.
- * @param size size of data in bytes, without the padding. I.e. the full buffer
- * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE.
- *
- * @return 0 on success, a negative AVERROR on error
- */
-int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
-
-#if FF_API_AVPACKET_OLD_API
-/**
- * @warning This is a hack - the packet memory allocation stuff is broken. The
- * packet is allocated if it was not really allocated.
- *
- * @deprecated Use av_packet_ref or av_packet_make_refcounted
- */
-attribute_deprecated
-int av_dup_packet(AVPacket *pkt);
-/**
- * Copy packet, including contents
- *
- * @return 0 on success, negative AVERROR on fail
- *
- * @deprecated Use av_packet_ref
- */
-attribute_deprecated
-int av_copy_packet(AVPacket *dst, const AVPacket *src);
-
-/**
- * Copy packet side data
- *
- * @return 0 on success, negative AVERROR on fail
- *
- * @deprecated Use av_packet_copy_props
- */
-attribute_deprecated
-int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src);
-
-/**
- * Free a packet.
- *
- * @deprecated Use av_packet_unref
- *
- * @param pkt packet to free
- */
-attribute_deprecated
-void av_free_packet(AVPacket *pkt);
-#endif
-/**
- * Allocate new information of a packet.
- *
- * @param pkt packet
- * @param type side information type
- * @param size side information size
- * @return pointer to fresh allocated data or NULL otherwise
- */
-uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
- int size);
-
-/**
- * Wrap an existing array as a packet side data.
- *
- * @param pkt packet
- * @param type side information type
- * @param data the side data array. It must be allocated with the av_malloc()
- * family of functions. The ownership of the data is transferred to
- * pkt.
- * @param size side information size
- * @return a non-negative number on success, a negative AVERROR code on
- * failure. On failure, the packet is unchanged and the data remains
- * owned by the caller.
- */
-int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
- uint8_t *data, size_t size);
-
-/**
- * Shrink the already allocated side data buffer
- *
- * @param pkt packet
- * @param type side information type
- * @param size new side information size
- * @return 0 on success, < 0 on failure
- */
-int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
- int size);
-
-/**
- * Get side information from packet.
- *
- * @param pkt packet
- * @param type desired side information type
- * @param size pointer for side information size to store (optional)
- * @return pointer to data if present or NULL otherwise
- */
-uint8_t* av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type,
- int *size);
-
-#if FF_API_MERGE_SD_API
-attribute_deprecated
-int av_packet_merge_side_data(AVPacket *pkt);
-
-attribute_deprecated
-int av_packet_split_side_data(AVPacket *pkt);
-#endif
-
-const char *av_packet_side_data_name(enum AVPacketSideDataType type);
-
-/**
- * Pack a dictionary for use in side_data.
- *
- * @param dict The dictionary to pack.
- * @param size pointer to store the size of the returned data
- * @return pointer to data if successful, NULL otherwise
- */
-uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size);
-/**
- * Unpack a dictionary from side_data.
- *
- * @param data data from side_data
- * @param size size of the data
- * @param dict the metadata storage dictionary
- * @return 0 on success, < 0 on failure
- */
-int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict);
-
-
-/**
- * Convenience function to free all the side data stored.
- * All the other fields stay untouched.
- *
- * @param pkt packet
- */
-void av_packet_free_side_data(AVPacket *pkt);
-
-/**
- * Setup a new reference to the data described by a given packet
- *
- * If src is reference-counted, setup dst as a new reference to the
- * buffer in src. Otherwise allocate a new buffer in dst and copy the
- * data from src into it.
- *
- * All the other fields are copied from src.
- *
- * @see av_packet_unref
- *
- * @param dst Destination packet
- * @param src Source packet
- *
- * @return 0 on success, a negative AVERROR on error.
- */
-int av_packet_ref(AVPacket *dst, const AVPacket *src);
-
-/**
- * Wipe the packet.
- *
- * Unreference the buffer referenced by the packet and reset the
- * remaining packet fields to their default values.
- *
- * @param pkt The packet to be unreferenced.
- */
-void av_packet_unref(AVPacket *pkt);
-
-/**
- * Move every field in src to dst and reset src.
- *
- * @see av_packet_unref
- *
- * @param src Source packet, will be reset
- * @param dst Destination packet
- */
-void av_packet_move_ref(AVPacket *dst, AVPacket *src);
-
-/**
- * Copy only "properties" fields from src to dst.
- *
- * Properties for the purpose of this function are all the fields
- * beside those related to the packet data (buf, data, size)
- *
- * @param dst Destination packet
- * @param src Source packet
- *
- * @return 0 on success AVERROR on failure.
- */
-int av_packet_copy_props(AVPacket *dst, const AVPacket *src);
-
-/**
- * Ensure the data described by a given packet is reference counted.
- *
- * @note This function does not ensure that the reference will be writable.
- * Use av_packet_make_writable instead for that purpose.
- *
- * @see av_packet_ref
- * @see av_packet_make_writable
- *
- * @param pkt packet whose data should be made reference counted.
- *
- * @return 0 on success, a negative AVERROR on error. On failure, the
- * packet is unchanged.
- */
-int av_packet_make_refcounted(AVPacket *pkt);
-
-/**
- * Create a writable reference for the data described by a given packet,
- * avoiding data copy if possible.
- *
- * @param pkt Packet whose data should be made writable.
- *
- * @return 0 on success, a negative AVERROR on failure. On failure, the
- * packet is unchanged.
- */
-int av_packet_make_writable(AVPacket *pkt);
-
-/**
- * Convert valid timing fields (timestamps / durations) in a packet from one
- * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
- * ignored.
- *
- * @param pkt packet on which the conversion will be performed
- * @param tb_src source timebase, in which the timing fields in pkt are
- * expressed
- * @param tb_dst destination timebase, to which the timing fields will be
- * converted
- */
-void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);
-
-/**
- * @}
- */
-
-/**
* @addtogroup lavc_decoding
* @{
*/
/**
- * Find a registered decoder with a matching codec ID.
- *
- * @param id AVCodecID of the requested decoder
- * @return A decoder if one was found, NULL otherwise.
- */
-AVCodec *avcodec_find_decoder(enum AVCodecID id);
-
-/**
- * Find a registered decoder with the specified name.
- *
- * @param name name of the requested decoder
- * @return A decoder if one was found, NULL otherwise.
- */
-AVCodec *avcodec_find_decoder_by_name(const char *name);
-
-/**
* The default callback for AVCodecContext.get_buffer2(). It is made public so
* it can be called by custom get_buffer2() implementations for decoders without
* AV_CODEC_CAP_DR1 set.
@@ -4682,6 +2485,13 @@ AVCodec *avcodec_find_decoder_by_name(const char *name);
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
/**
+ * The default callback for AVCodecContext.get_encode_buffer(). It is made public so
+ * it can be called by custom get_encode_buffer() implementations for encoders without
+ * AV_CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_encode_buffer(AVCodecContext *s, AVPacket *pkt, int flags);
+
+/**
* Modify width and height values so that they will result in a memory
* buffer that is acceptable for the codec if you do not use any horizontal
* padding.
@@ -4700,6 +2510,7 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
int linesize_align[AV_NUM_DATA_POINTERS]);
+#if FF_API_AVCODEC_CHROMA_POS
/**
* Converts AVChromaLocation to swscale x/y chroma position.
*
@@ -4708,7 +2519,9 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
*
* @param xpos horizontal chroma sample position
* @param ypos vertical chroma sample position
+ * @deprecated Use av_chroma_location_enum_to_pos() instead.
*/
+ attribute_deprecated
int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos);
/**
@@ -4719,115 +2532,11 @@ int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos);
*
* @param xpos horizontal chroma sample position
* @param ypos vertical chroma sample position
+ * @deprecated Use av_chroma_location_pos_to_enum() instead.
*/
+ attribute_deprecated
enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos);
-
-/**
- * Decode the audio frame of size avpkt->size from avpkt->data into frame.
- *
- * Some decoders may support multiple frames in a single AVPacket. Such
- * decoders would then just decode the first frame and the return value would be
- * less than the packet size. In this case, avcodec_decode_audio4 has to be
- * called again with an AVPacket containing the remaining data in order to
- * decode the second frame, etc... Even if no frames are returned, the packet
- * needs to be fed to the decoder with remaining data until it is completely
- * consumed or an error occurs.
- *
- * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input
- * and output. This means that for some packets they will not immediately
- * produce decoded output and need to be flushed at the end of decoding to get
- * all the decoded data. Flushing is done by calling this function with packets
- * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
- * returning samples. It is safe to flush even those decoders that are not
- * marked with AV_CODEC_CAP_DELAY, then no samples will be returned.
- *
- * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE
- * larger than the actual read bytes because some optimized bitstream
- * readers read 32 or 64 bits at once and could read over the end.
- *
- * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
- * before packets may be fed to the decoder.
- *
- * @param avctx the codec context
- * @param[out] frame The AVFrame in which to store decoded audio samples.
- * The decoder will allocate a buffer for the decoded frame by
- * calling the AVCodecContext.get_buffer2() callback.
- * When AVCodecContext.refcounted_frames is set to 1, the frame is
- * reference counted and the returned reference belongs to the
- * caller. The caller must release the frame using av_frame_unref()
- * when the frame is no longer needed. The caller may safely write
- * to the frame if av_frame_is_writable() returns 1.
- * When AVCodecContext.refcounted_frames is set to 0, the returned
- * reference belongs to the decoder and is valid only until the
- * next call to this function or until closing or flushing the
- * decoder. The caller may not write to it.
- * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
- * non-zero. Note that this field being set to zero
- * does not mean that an error has occurred. For
- * decoders with AV_CODEC_CAP_DELAY set, no given decode
- * call is guaranteed to produce a frame.
- * @param[in] avpkt The input AVPacket containing the input buffer.
- * At least avpkt->data and avpkt->size should be set. Some
- * decoders might also require additional fields to be set.
- * @return A negative error code is returned if an error occurred during
- * decoding, otherwise the number of bytes consumed from the input
- * AVPacket is returned.
- *
-* @deprecated Use avcodec_send_packet() and avcodec_receive_frame().
- */
-attribute_deprecated
-int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
- int *got_frame_ptr, const AVPacket *avpkt);
-
-/**
- * Decode the video frame of size avpkt->size from avpkt->data into picture.
- * Some decoders may support multiple frames in a single AVPacket, such
- * decoders would then just decode the first frame.
- *
- * @warning The input buffer must be AV_INPUT_BUFFER_PADDING_SIZE larger than
- * the actual read bytes because some optimized bitstream readers read 32 or 64
- * bits at once and could read over the end.
- *
- * @warning The end of the input buffer buf should be set to 0 to ensure that
- * no overreading happens for damaged MPEG streams.
- *
- * @note Codecs which have the AV_CODEC_CAP_DELAY capability set have a delay
- * between input and output, these need to be fed with avpkt->data=NULL,
- * avpkt->size=0 at the end to return the remaining frames.
- *
- * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
- * before packets may be fed to the decoder.
- *
- * @param avctx the codec context
- * @param[out] picture The AVFrame in which the decoded video frame will be stored.
- * Use av_frame_alloc() to get an AVFrame. The codec will
- * allocate memory for the actual bitmap by calling the
- * AVCodecContext.get_buffer2() callback.
- * When AVCodecContext.refcounted_frames is set to 1, the frame is
- * reference counted and the returned reference belongs to the
- * caller. The caller must release the frame using av_frame_unref()
- * when the frame is no longer needed. The caller may safely write
- * to the frame if av_frame_is_writable() returns 1.
- * When AVCodecContext.refcounted_frames is set to 0, the returned
- * reference belongs to the decoder and is valid only until the
- * next call to this function or until closing or flushing the
- * decoder. The caller may not write to it.
- *
- * @param[in] avpkt The input AVPacket containing the input buffer.
- * You can create such packet with av_init_packet() and by then setting
- * data and size, some decoders might in addition need other fields like
- * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least
- * fields possible.
- * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
- * @return On error a negative value is returned, otherwise the number of bytes
- * used or zero if no frame could be decompressed.
- *
- * @deprecated Use avcodec_send_packet() and avcodec_receive_frame().
- */
-attribute_deprecated
-int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
- int *got_picture_ptr,
- const AVPacket *avpkt);
+#endif
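Migrating a call site away from the deprecated functions is mechanical; a hedged sketch, assuming av_chroma_location_enum_to_pos() declared in libavutil/pixdesc.h:

#include <libavutil/pixdesc.h>

/* Returns 0 on success, a negative value if the location cannot be mapped. */
static int chroma_pos_for(enum AVChromaLocation loc, int *xpos, int *ypos)
{
    return av_chroma_location_enum_to_pos(xpos, ypos, loc);
}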
/**
* Decode a subtitle message.
@@ -4835,7 +2544,7 @@ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
* If no subtitle could be decompressed, got_sub_ptr is zero.
* Otherwise, the subtitle is stored in *sub.
* Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for
- * simplicity, because the performance difference is expect to be negligible
+ * simplicity, because the performance difference is expected to be negligible
* and reusing a get_buffer written for video codecs would probably perform badly
* due to a potentially very different allocation pattern.
*
@@ -4851,14 +2560,13 @@ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
* before packets may be fed to the decoder.
*
* @param avctx the codec context
- * @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored,
+ * @param[out] sub The preallocated AVSubtitle in which the decoded subtitle will be stored,
* must be freed with avsubtitle_free if *got_sub_ptr is set.
* @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.
* @param[in] avpkt The input AVPacket containing the input buffer.
*/
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
- int *got_sub_ptr,
- AVPacket *avpkt);
+ int *got_sub_ptr, const AVPacket *avpkt);
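A short usage sketch of the updated signature (the packet is now const-qualified); rendering of the decoded rects is left out:

#include <libavcodec/avcodec.h>

static int decode_one_subtitle(AVCodecContext *avctx, const AVPacket *pkt)
{
    AVSubtitle sub;
    int got_sub = 0;
    int ret = avcodec_decode_subtitle2(avctx, &sub, &got_sub, pkt);
    if (ret < 0)
        return ret;
    if (got_sub) {
        /* ... use sub.rects[0 .. sub.num_rects-1] ... */
        avsubtitle_free(&sub);   /* mandatory once *got_sub_ptr is nonzero */
    }
    return 0;
}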
/**
* Supply raw packet data as input to a decoder.
@@ -4872,10 +2580,6 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
* larger than the actual read bytes because some optimized bitstream
* readers read 32 or 64 bits at once and could read over the end.
*
- * @warning Do not mix this API with the legacy API (like avcodec_decode_video2())
- * on the same AVCodecContext. It will return unexpected results now
- * or in future libavcodec versions.
- *
* @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
* before packets may be fed to the decoder.
*
@@ -4898,40 +2602,41 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
* still has frames buffered, it will return them after sending
* a flush packet.
*
- * @return 0 on success, otherwise negative error code:
- * AVERROR(EAGAIN): input is not accepted in the current state - user
- * must read output with avcodec_receive_frame() (once
- * all output is read, the packet should be resent, and
- * the call will not fail with EAGAIN).
- * AVERROR_EOF: the decoder has been flushed, and no new packets can
- * be sent to it (also returned if more than 1 flush
- * packet is sent)
- * AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush
- * AVERROR(ENOMEM): failed to add packet to internal queue, or similar
- * other errors: legitimate decoding errors
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user
+ * must read output with avcodec_receive_frame() (once
+ * all output is read, the packet should be resent,
+ * and the call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the decoder has been flushed, and no new packets can be
+ * sent to it (also returned if more than 1 flush
+ * packet is sent)
+ * @retval AVERROR(EINVAL) codec not opened, it is an encoder, or requires flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate decoding errors
*/
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
/**
- * Return decoded output data from a decoder.
+ * Return decoded output data from a decoder or encoder (when the
+ * AV_CODEC_FLAG_RECON_FRAME flag is used).
*
* @param avctx codec context
* @param frame This will be set to a reference-counted video or audio
* frame (depending on the decoder type) allocated by the
- * decoder. Note that the function will always call
+ * codec. Note that the function will always call
* av_frame_unref(frame) before doing anything else.
*
- * @return
- * 0: success, a frame was returned
- * AVERROR(EAGAIN): output is not available in this state - user must try
- * to send new input
- * AVERROR_EOF: the decoder has been fully flushed, and there will be
- * no more output frames
- * AVERROR(EINVAL): codec not opened, or it is an encoder
- * AVERROR_INPUT_CHANGED: current decoded frame has changed parameters
- * with respect to first decoded frame. Applicable
- * when flag AV_CODEC_FLAG_DROPCHANGED is set.
- * other negative values: legitimate decoding errors
+ * @retval 0 success, a frame was returned
+ * @retval AVERROR(EAGAIN) output is not available in this state - user must
+ * try to send new input
+ * @retval AVERROR_EOF the codec has been fully flushed, and there will be
+ * no more output frames
+ * @retval AVERROR(EINVAL) codec not opened, or it is an encoder without the
+ * AV_CODEC_FLAG_RECON_FRAME flag enabled
+ * @retval AVERROR_INPUT_CHANGED current decoded frame has changed parameters with
+ * respect to first decoded frame. Applicable when flag
+ * AV_CODEC_FLAG_DROPCHANGED is set.
+ * @retval "other negative error code" legitimate decoding errors
*/
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);
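Taken together with avcodec_send_packet() above, the usual decode loop looks roughly like the following sketch (frame consumption elided; a complete implementation would also resend the packet if avcodec_send_packet() returned AVERROR(EAGAIN)):

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static int decode_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, pkt);   /* pkt == NULL enters draining mode */
    if (ret < 0)
        return ret;

    for (;;) {
        ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                          /* needs more input, or fully flushed */
        if (ret < 0)
            return ret;                        /* legitimate decoding error */
        /* ... consume the frame ... */
        av_frame_unref(frame);
    }
}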
@@ -4958,17 +2663,16 @@ int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);
* If it is not set, frame->nb_samples must be equal to
* avctx->frame_size for all frames except the last.
* The final frame may be smaller than avctx->frame_size.
- * @return 0 on success, otherwise negative error code:
- * AVERROR(EAGAIN): input is not accepted in the current state - user
- * must read output with avcodec_receive_packet() (once
- * all output is read, the packet should be resent, and
- * the call will not fail with EAGAIN).
- * AVERROR_EOF: the encoder has been flushed, and no new frames can
- * be sent to it
- * AVERROR(EINVAL): codec not opened, refcounted_frames not set, it is a
- * decoder, or requires flush
- * AVERROR(ENOMEM): failed to add packet to internal queue, or similar
- * other errors: legitimate decoding errors
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) input is not accepted in the current state - user must
+ * read output with avcodec_receive_packet() (once all
+ * output is read, the packet should be resent, and the
+ * call will not fail with EAGAIN).
+ * @retval AVERROR_EOF the encoder has been flushed, and no new frames can
+ * be sent to it
+ * @retval AVERROR(EINVAL) codec not opened, it is a decoder, or requires flush
+ * @retval AVERROR(ENOMEM) failed to add packet to internal queue, or similar
+ * @retval "another negative error code" legitimate encoding errors
*/
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame);
@@ -4978,14 +2682,14 @@ int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame);
* @param avctx codec context
* @param avpkt This will be set to a reference-counted packet allocated by the
* encoder. Note that the function will always call
- * av_frame_unref(frame) before doing anything else.
- * @return 0 on success, otherwise negative error code:
- * AVERROR(EAGAIN): output is not available in the current state - user
- * must try to send input
- * AVERROR_EOF: the encoder has been fully flushed, and there will be
- * no more output packets
- * AVERROR(EINVAL): codec not opened, or it is an encoder
- * other errors: legitimate decoding errors
+ * av_packet_unref(avpkt) before doing anything else.
+ * @retval 0 success
+ * @retval AVERROR(EAGAIN) output is not available in the current state - user must
+ * try to send input
+ * @retval AVERROR_EOF the encoder has been fully flushed, and there will be no
+ * more output packets
+ * @retval AVERROR(EINVAL) codec not opened, or it is a decoder
+ * @retval "another negative error code" legitimate encoding errors
*/
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt);
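The encode direction mirrors that loop; a sketch, where write_pkt is a hypothetical caller-supplied sink:

#include <libavcodec/avcodec.h>

static int encode_frame(AVCodecContext *enc, const AVFrame *frame, AVPacket *pkt,
                        int (*write_pkt)(const AVPacket *, void *), void *opaque)
{
    int ret = avcodec_send_frame(enc, frame);  /* frame == NULL starts draining */
    if (ret < 0)
        return ret;

    for (;;) {
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                          /* needs more frames, or fully flushed */
        if (ret < 0)
            return ret;
        ret = write_pkt(pkt, opaque);
        av_packet_unref(pkt);
        if (ret < 0)
            return ret;
    }
}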
@@ -5099,15 +2803,15 @@ int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
*/
enum AVPictureStructure {
- AV_PICTURE_STRUCTURE_UNKNOWN, //< unknown
- AV_PICTURE_STRUCTURE_TOP_FIELD, //< coded as top field
- AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field
- AV_PICTURE_STRUCTURE_FRAME, //< coded as frame
+ AV_PICTURE_STRUCTURE_UNKNOWN, ///< unknown
+ AV_PICTURE_STRUCTURE_TOP_FIELD, ///< coded as top field
+ AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field
+ AV_PICTURE_STRUCTURE_FRAME, ///< coded as frame
};
typedef struct AVCodecParserContext {
void *priv_data;
- struct AVCodecParser *parser;
+ const struct AVCodecParser *parser;
int64_t frame_offset; /* offset of the current frame */
int64_t cur_offset; /* current offset
(incremented by each av_parser_parse()) */
@@ -5156,14 +2860,6 @@ typedef struct AVCodecParserContext {
*/
int key_frame;
-#if FF_API_CONVERGENCE_DURATION
- /**
- * @deprecated unused
- */
- attribute_deprecated
- int64_t convergence_duration;
-#endif
-
// Timestamp generation support:
/**
* Synchronization point for start of timestamp generation.
@@ -5273,7 +2969,7 @@ typedef struct AVCodecParserContext {
} AVCodecParserContext;
typedef struct AVCodecParser {
- int codec_ids[5]; /* several codec IDs are permitted */
+ int codec_ids[7]; /* several codec IDs are permitted */
int priv_data_size;
int (*parser_init)(AVCodecParserContext *s);
/* This callback never returns an error, a negative value means that
@@ -5284,7 +2980,6 @@ typedef struct AVCodecParser {
const uint8_t *buf, int buf_size);
void (*parser_close)(AVCodecParserContext *s);
int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
- struct AVCodecParser *next;
} AVCodecParser;
/**
@@ -5298,11 +2993,6 @@ typedef struct AVCodecParser {
*/
const AVCodecParser *av_parser_iterate(void **opaque);
-attribute_deprecated
-AVCodecParser *av_parser_next(const AVCodecParser *c);
-
-attribute_deprecated
-void av_register_codec_parser(AVCodecParser *parser);
AVCodecParserContext *av_parser_init(int codec_id);
/**
@@ -5343,14 +3033,6 @@ int av_parser_parse2(AVCodecParserContext *s,
int64_t pts, int64_t dts,
int64_t pos);
-/**
- * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed
- * @deprecated use AVBitStreamFilter
- */
-int av_parser_change(AVCodecParserContext *s,
- AVCodecContext *avctx,
- uint8_t **poutbuf, int *poutbuf_size,
- const uint8_t *buf, int buf_size, int keyframe);
void av_parser_close(AVCodecParserContext *s);
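For reference, the parser API shown here is typically driven as in the following sketch; handle_packet is a hypothetical callback, and a real caller would also make a final call with buf_size == 0 to flush the parser:

#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>

static int parse_buffer(AVCodecParserContext *parser, AVCodecContext *avctx,
                        const uint8_t *data, int size,
                        void (*handle_packet)(const uint8_t *, int))
{
    while (size > 0) {
        uint8_t *out = NULL;
        int out_size = 0;
        int consumed = av_parser_parse2(parser, avctx, &out, &out_size,
                                        data, size,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (consumed < 0)
            return consumed;
        data += consumed;
        size -= consumed;
        if (out_size > 0)
            handle_packet(out, out_size);      /* out points into the parser's buffer */
    }
    return 0;
}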
/**
@@ -5363,105 +3045,6 @@ void av_parser_close(AVCodecParserContext *s);
* @{
*/
-/**
- * Find a registered encoder with a matching codec ID.
- *
- * @param id AVCodecID of the requested encoder
- * @return An encoder if one was found, NULL otherwise.
- */
-AVCodec *avcodec_find_encoder(enum AVCodecID id);
-
-/**
- * Find a registered encoder with the specified name.
- *
- * @param name name of the requested encoder
- * @return An encoder if one was found, NULL otherwise.
- */
-AVCodec *avcodec_find_encoder_by_name(const char *name);
-
-/**
- * Encode a frame of audio.
- *
- * Takes input samples from frame and writes the next output packet, if
- * available, to avpkt. The output packet does not necessarily contain data for
- * the most recent frame, as encoders can delay, split, and combine input frames
- * internally as needed.
- *
- * @param avctx codec context
- * @param avpkt output AVPacket.
- * The user can supply an output buffer by setting
- * avpkt->data and avpkt->size prior to calling the
- * function, but if the size of the user-provided data is not
- * large enough, encoding will fail. If avpkt->data and
- * avpkt->size are set, avpkt->destruct must also be set. All
- * other AVPacket fields will be reset by the encoder using
- * av_init_packet(). If avpkt->data is NULL, the encoder will
- * allocate it. The encoder will set avpkt->size to the size
- * of the output packet.
- *
- * If this function fails or produces no output, avpkt will be
- * freed using av_packet_unref().
- * @param[in] frame AVFrame containing the raw audio data to be encoded.
- * May be NULL when flushing an encoder that has the
- * AV_CODEC_CAP_DELAY capability set.
- * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
- * can have any number of samples.
- * If it is not set, frame->nb_samples must be equal to
- * avctx->frame_size for all frames except the last.
- * The final frame may be smaller than avctx->frame_size.
- * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
- * output packet is non-empty, and to 0 if it is
- * empty. If the function returns an error, the
- * packet can be assumed to be invalid, and the
- * value of got_packet_ptr is undefined and should
- * not be used.
- * @return 0 on success, negative error code on failure
- *
- * @deprecated use avcodec_send_frame()/avcodec_receive_packet() instead
- */
-attribute_deprecated
-int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
- const AVFrame *frame, int *got_packet_ptr);
-
-/**
- * Encode a frame of video.
- *
- * Takes input raw video data from frame and writes the next output packet, if
- * available, to avpkt. The output packet does not necessarily contain data for
- * the most recent frame, as encoders can delay and reorder input frames
- * internally as needed.
- *
- * @param avctx codec context
- * @param avpkt output AVPacket.
- * The user can supply an output buffer by setting
- * avpkt->data and avpkt->size prior to calling the
- * function, but if the size of the user-provided data is not
- * large enough, encoding will fail. All other AVPacket fields
- * will be reset by the encoder using av_init_packet(). If
- * avpkt->data is NULL, the encoder will allocate it.
- * The encoder will set avpkt->size to the size of the
- * output packet. The returned data (if any) belongs to the
- * caller, he is responsible for freeing it.
- *
- * If this function fails or produces no output, avpkt will be
- * freed using av_packet_unref().
- * @param[in] frame AVFrame containing the raw video data to be encoded.
- * May be NULL when flushing an encoder that has the
- * AV_CODEC_CAP_DELAY capability set.
- * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
- * output packet is non-empty, and to 0 if it is
- * empty. If the function returns an error, the
- * packet can be assumed to be invalid, and the
- * value of got_packet_ptr is undefined and should
- * not be used.
- * @return 0 on success, negative error code on failure
- *
- * @deprecated use avcodec_send_frame()/avcodec_receive_packet() instead
- */
-attribute_deprecated
-int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
- const AVFrame *frame, int *got_packet_ptr);
-
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
const AVSubtitle *sub);
@@ -5470,71 +3053,6 @@ int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
* @}
*/
-#if FF_API_AVPICTURE
-/**
- * @addtogroup lavc_picture
- * @{
- */
-
-/**
- * @deprecated unused
- */
-attribute_deprecated
-int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height);
-
-/**
- * @deprecated unused
- */
-attribute_deprecated
-void avpicture_free(AVPicture *picture);
-
-/**
- * @deprecated use av_image_fill_arrays() instead.
- */
-attribute_deprecated
-int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
- enum AVPixelFormat pix_fmt, int width, int height);
-
-/**
- * @deprecated use av_image_copy_to_buffer() instead.
- */
-attribute_deprecated
-int avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt,
- int width, int height,
- unsigned char *dest, int dest_size);
-
-/**
- * @deprecated use av_image_get_buffer_size() instead.
- */
-attribute_deprecated
-int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);
-
-/**
- * @deprecated av_image_copy() instead.
- */
-attribute_deprecated
-void av_picture_copy(AVPicture *dst, const AVPicture *src,
- enum AVPixelFormat pix_fmt, int width, int height);
-
-/**
- * @deprecated unused
- */
-attribute_deprecated
-int av_picture_crop(AVPicture *dst, const AVPicture *src,
- enum AVPixelFormat pix_fmt, int top_band, int left_band);
-
-/**
- * @deprecated unused
- */
-attribute_deprecated
-int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt,
- int padtop, int padbottom, int padleft, int padright, int *color);
-
-/**
- * @}
- */
-#endif
-
/**
* @defgroup lavc_misc Utility functions
* @ingroup libavc
@@ -5551,15 +3069,6 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
* @{
*/
-#if FF_API_GETCHROMA
-/**
- * @deprecated Use av_pix_fmt_get_chroma_sub_sample
- */
-
-attribute_deprecated
-void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift);
-#endif
-
/**
* Return a value representing the fourCC code associated to the
* pixel format pix_fmt, or 0 if no associated fourCC code can be
@@ -5568,12 +3077,6 @@ void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int
unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
/**
- * @deprecated see av_get_pix_fmt_loss()
- */
-int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt,
- int has_alpha);
-
-/**
* Find the best pixel format to convert to given a certain source pixel
* format. When converting from one pixel format to another, information loss
* may occur. For example, when converting from RGB24 to GRAY, the color
@@ -5594,62 +3097,14 @@ enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *p
enum AVPixelFormat src_pix_fmt,
int has_alpha, int *loss_ptr);
-/**
- * @deprecated see av_find_best_pix_fmt_of_2()
- */
-enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
- enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
-
-attribute_deprecated
-enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
- enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
-
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
/**
* @}
*/
-#if FF_API_TAG_STRING
-/**
- * Put a string representing the codec tag codec_tag in buf.
- *
- * @param buf buffer to place codec tag in
- * @param buf_size size in bytes of buf
- * @param codec_tag codec tag to assign
- * @return the length of the string that would have been generated if
- * enough space had been available, excluding the trailing null
- *
- * @deprecated see av_fourcc_make_string() and av_fourcc2str().
- */
-attribute_deprecated
-size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);
-#endif
-
void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
-/**
- * Return a name for the specified profile, if available.
- *
- * @param codec the codec that is searched for the given profile
- * @param profile the profile value for which a name is requested
- * @return A name for the profile if found, NULL otherwise.
- */
-const char *av_get_profile_name(const AVCodec *codec, int profile);
-
-/**
- * Return a name for the specified profile, if available.
- *
- * @param codec_id the ID of the codec to which the requested profile belongs
- * @param profile the profile value for which a name is requested
- * @return A name for the profile if found, NULL otherwise.
- *
- * @note unlike av_get_profile_name(), which searches a list of profiles
- * supported by a specific decoder or encoder implementation, this
- * function searches the list of profiles from the AVCodecDescriptor
- */
-const char *avcodec_profile_name(enum AVCodecID codec_id, int profile);
-
int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
//FIXME func typedef
@@ -5682,41 +3137,20 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
int buf_size, int align);
/**
- * Reset the internal decoder state / flush internal buffers. Should be called
+ * Reset the internal codec state / flush internal buffers. Should be called
* e.g. when seeking or when switching to a different stream.
*
- * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0),
- * this invalidates the frames previously returned from the decoder. When
- * refcounted frames are used, the decoder just releases any references it might
- * keep internally, but the caller's reference remains valid.
- */
-void avcodec_flush_buffers(AVCodecContext *avctx);
-
-/**
- * Return codec bits per sample.
- *
- * @param[in] codec_id the codec
- * @return Number of bits per sample or zero if unknown for the given codec.
- */
-int av_get_bits_per_sample(enum AVCodecID codec_id);
-
-/**
- * Return the PCM codec associated with a sample format.
- * @param be endianness, 0 for little, 1 for big,
- * -1 (or anything else) for native
- * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
- */
-enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
-
-/**
- * Return codec bits per sample.
- * Only return non-zero if the bits per sample is exactly correct, not an
- * approximation.
+ * @note for decoders, this function just releases any references the decoder
+ * might keep internally, but the caller's references remain valid.
*
- * @param[in] codec_id the codec
- * @return Number of bits per sample or zero if unknown for the given codec.
+ * @note for encoders, this function will only do something if the encoder
+ * declares support for AV_CODEC_CAP_ENCODER_FLUSH. When called, the encoder
+ * will drain any remaining packets, and can then be re-used for a different
+ * stream (as opposed to sending a null frame which will leave the encoder
+ * in a permanent EOF state after draining). This can be desirable if the
+ * cost of tearing down and replacing the encoder instance is high.
*/
-int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+void avcodec_flush_buffers(AVCodecContext *avctx);
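/*
 * A minimal usage sketch (not from the original header): a decoder is
 * typically flushed right after seeking, before packets from the new
 * position are submitted. The helper name is an illustrative assumption;
 * dec_ctx is assumed to be an opened decoder context.
 */
static void reset_decoder_after_seek(AVCodecContext *dec_ctx)
{
    /* drops the decoder's internal references; frames already returned
     * to the caller remain valid */
    avcodec_flush_buffers(dec_ctx);
}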
/**
* Return audio frame duration.
@@ -5728,351 +3162,6 @@ int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
*/
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
-/**
- * This function is the same as av_get_audio_frame_duration(), except it works
- * with AVCodecParameters instead of an AVCodecContext.
- */
-int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes);
-
-#if FF_API_OLD_BSF
-typedef struct AVBitStreamFilterContext {
- void *priv_data;
- const struct AVBitStreamFilter *filter;
- AVCodecParserContext *parser;
- struct AVBitStreamFilterContext *next;
- /**
- * Internal default arguments, used if NULL is passed to av_bitstream_filter_filter().
- * Not for access by library users.
- */
- char *args;
-} AVBitStreamFilterContext;
-#endif
-
-typedef struct AVBSFInternal AVBSFInternal;
-
-/**
- * The bitstream filter state.
- *
- * This struct must be allocated with av_bsf_alloc() and freed with
- * av_bsf_free().
- *
- * The fields in the struct will only be changed (by the caller or by the
- * filter) as described in their documentation, and are to be considered
- * immutable otherwise.
- */
-typedef struct AVBSFContext {
- /**
- * A class for logging and AVOptions
- */
- const AVClass *av_class;
-
- /**
- * The bitstream filter this context is an instance of.
- */
- const struct AVBitStreamFilter *filter;
-
- /**
- * Opaque libavcodec internal data. Must not be touched by the caller in any
- * way.
- */
- AVBSFInternal *internal;
-
- /**
- * Opaque filter-specific private data. If filter->priv_class is non-NULL,
- * this is an AVOptions-enabled struct.
- */
- void *priv_data;
-
- /**
- * Parameters of the input stream. This field is allocated in
- * av_bsf_alloc(), it needs to be filled by the caller before
- * av_bsf_init().
- */
- AVCodecParameters *par_in;
-
- /**
- * Parameters of the output stream. This field is allocated in
- * av_bsf_alloc(), it is set by the filter in av_bsf_init().
- */
- AVCodecParameters *par_out;
-
- /**
- * The timebase used for the timestamps of the input packets. Set by the
- * caller before av_bsf_init().
- */
- AVRational time_base_in;
-
- /**
- * The timebase used for the timestamps of the output packets. Set by the
- * filter in av_bsf_init().
- */
- AVRational time_base_out;
-} AVBSFContext;
-
-typedef struct AVBitStreamFilter {
- const char *name;
-
- /**
- * A list of codec ids supported by the filter, terminated by
- * AV_CODEC_ID_NONE.
- * May be NULL, in that case the bitstream filter works with any codec id.
- */
- const enum AVCodecID *codec_ids;
-
- /**
- * A class for the private data, used to declare bitstream filter private
- * AVOptions. This field is NULL for bitstream filters that do not declare
- * any options.
- *
- * If this field is non-NULL, the first member of the filter private data
- * must be a pointer to AVClass, which will be set by libavcodec generic
- * code to this class.
- */
- const AVClass *priv_class;
-
- /*****************************************************************
- * No fields below this line are part of the public API. They
- * may not be used outside of libavcodec and can be changed and
- * removed at will.
- * New public fields should be added right above.
- *****************************************************************
- */
-
- int priv_data_size;
- int (*init)(AVBSFContext *ctx);
- int (*filter)(AVBSFContext *ctx, AVPacket *pkt);
- void (*close)(AVBSFContext *ctx);
- void (*flush)(AVBSFContext *ctx);
-} AVBitStreamFilter;
-
-#if FF_API_OLD_BSF
-/**
- * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext)
- * is deprecated. Use the new bitstream filtering API (using AVBSFContext).
- */
-attribute_deprecated
-void av_register_bitstream_filter(AVBitStreamFilter *bsf);
-/**
- * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext)
- * is deprecated. Use av_bsf_get_by_name(), av_bsf_alloc(), and av_bsf_init()
- * from the new bitstream filtering API (using AVBSFContext).
- */
-attribute_deprecated
-AVBitStreamFilterContext *av_bitstream_filter_init(const char *name);
-/**
- * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext)
- * is deprecated. Use av_bsf_send_packet() and av_bsf_receive_packet() from the
- * new bitstream filtering API (using AVBSFContext).
- */
-attribute_deprecated
-int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
- AVCodecContext *avctx, const char *args,
- uint8_t **poutbuf, int *poutbuf_size,
- const uint8_t *buf, int buf_size, int keyframe);
-/**
- * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext)
- * is deprecated. Use av_bsf_free() from the new bitstream filtering API (using
- * AVBSFContext).
- */
-attribute_deprecated
-void av_bitstream_filter_close(AVBitStreamFilterContext *bsf);
-/**
- * @deprecated the old bitstream filtering API (using AVBitStreamFilterContext)
- * is deprecated. Use av_bsf_iterate() from the new bitstream filtering API (using
- * AVBSFContext).
- */
-attribute_deprecated
-const AVBitStreamFilter *av_bitstream_filter_next(const AVBitStreamFilter *f);
-#endif
-
-/**
- * @return a bitstream filter with the specified name or NULL if no such
- * bitstream filter exists.
- */
-const AVBitStreamFilter *av_bsf_get_by_name(const char *name);
-
-/**
- * Iterate over all registered bitstream filters.
- *
- * @param opaque a pointer where libavcodec will store the iteration state. Must
- * point to NULL to start the iteration.
- *
- * @return the next registered bitstream filter or NULL when the iteration is
- * finished
- */
-const AVBitStreamFilter *av_bsf_iterate(void **opaque);
-#if FF_API_NEXT
-attribute_deprecated
-const AVBitStreamFilter *av_bsf_next(void **opaque);
-#endif
-
-/**
- * Allocate a context for a given bitstream filter. The caller must fill in the
- * context parameters as described in the documentation and then call
- * av_bsf_init() before sending any data to the filter.
- *
- * @param filter the filter for which to allocate an instance.
- * @param ctx a pointer into which the pointer to the newly-allocated context
- * will be written. It must be freed with av_bsf_free() after the
- * filtering is done.
- *
- * @return 0 on success, a negative AVERROR code on failure
- */
-int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx);
-
-/**
- * Prepare the filter for use, after all the parameters and options have been
- * set.
- */
-int av_bsf_init(AVBSFContext *ctx);
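/*
 * A minimal setup sketch (not from the original header): look up a filter,
 * allocate a context, fill par_in and initialize it. The helper name, the
 * filter name and the abbreviated error handling are illustrative
 * assumptions; par is the input stream's codec parameters.
 */
static int open_bsf(const AVCodecParameters *par, AVBSFContext **out)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
    AVBSFContext *ctx;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &ctx)) < 0)
        return ret;
    if ((ret = avcodec_parameters_copy(ctx->par_in, par)) < 0 ||
        (ret = av_bsf_init(ctx)) < 0) {
        av_bsf_free(&ctx);
        return ret;
    }
    *out = ctx;
    return 0;
}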
-
-/**
- * Submit a packet for filtering.
- *
- * After sending each packet, the filter must be completely drained by calling
- * av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or
- * AVERROR_EOF.
- *
- * @param pkt the packet to filter. The bitstream filter will take ownership of
- * the packet and reset the contents of pkt. pkt is not touched if an error occurs.
- * This parameter may be NULL, which signals the end of the stream (i.e. no more
- * packets will be sent). That will cause the filter to output any packets it
- * may have buffered internally.
- *
- * @return 0 on success, a negative AVERROR on error.
- */
-int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);
-
-/**
- * Retrieve a filtered packet.
- *
- * @param[out] pkt this struct will be filled with the contents of the filtered
- * packet. It is owned by the caller and must be freed using
- * av_packet_unref() when it is no longer needed.
- * This parameter should be "clean" (i.e. freshly allocated
- * with av_packet_alloc() or unreffed with av_packet_unref())
- * when this function is called. If this function returns
- * successfully, the contents of pkt will be completely
- * overwritten by the returned data. On failure, pkt is not
- * touched.
- *
- * @return 0 on success. AVERROR(EAGAIN) if more packets need to be sent to the
- * filter (using av_bsf_send_packet()) to get more output. AVERROR_EOF if there
- * will be no further output from the filter. Another negative AVERROR value if
- * an error occurs.
- *
- * @note one input packet may result in several output packets, so after sending
- * a packet with av_bsf_send_packet(), this function needs to be called
- * repeatedly until it stops returning 0. It is also possible for a filter to
- * output fewer packets than were sent to it, so this function may return
- * AVERROR(EAGAIN) immediately after a successful av_bsf_send_packet() call.
- */
-int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt);
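/*
 * A minimal filtering-loop sketch (not from the original header): one input
 * packet is sent, then the filter is drained until it asks for more input.
 * The helper name is an illustrative assumption; ctx is an initialized
 * AVBSFContext, in/out are caller-allocated packets.
 */
static int filter_packet(AVBSFContext *ctx, AVPacket *in, AVPacket *out)
{
    int ret = av_bsf_send_packet(ctx, in); /* the filter takes ownership of in */
    if (ret < 0)
        return ret;
    while ((ret = av_bsf_receive_packet(ctx, out)) == 0) {
        /* ... consume the filtered packet ... */
        av_packet_unref(out);
    }
    /* AVERROR(EAGAIN) only means the filter wants more input */
    return ret == AVERROR(EAGAIN) ? 0 : ret;
}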
-
-/**
- * Reset the internal bitstream filter state / flush internal buffers.
- */
-void av_bsf_flush(AVBSFContext *ctx);
-
-/**
- * Free a bitstream filter context and everything associated with it; write NULL
- * into the supplied pointer.
- */
-void av_bsf_free(AVBSFContext **ctx);
-
-/**
- * Get the AVClass for AVBSFContext. It can be used in combination with
- * AV_OPT_SEARCH_FAKE_OBJ for examining options.
- *
- * @see av_opt_find().
- */
-const AVClass *av_bsf_get_class(void);
-
-/**
- * Structure for chain/list of bitstream filters.
- * Empty list can be allocated by av_bsf_list_alloc().
- */
-typedef struct AVBSFList AVBSFList;
-
-/**
- * Allocate empty list of bitstream filters.
- * The list must be later freed by av_bsf_list_free()
- * or finalized by av_bsf_list_finalize().
- *
- * @return Pointer to @ref AVBSFList on success, NULL in case of failure
- */
-AVBSFList *av_bsf_list_alloc(void);
-
-/**
- * Free list of bitstream filters.
- *
- * @param lst Pointer to pointer returned by av_bsf_list_alloc()
- */
-void av_bsf_list_free(AVBSFList **lst);
-
-/**
- * Append bitstream filter to the list of bitstream filters.
- *
- * @param lst List to append to
- * @param bsf Filter context to be appended
- *
- * @return >=0 on success, negative AVERROR in case of failure
- */
-int av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf);
-
-/**
- * Construct a new bitstream filter context given its name and options
- * and append it to the list of bitstream filters.
- *
- * @param lst List to append to
- * @param bsf_name Name of the bitstream filter
- * @param options Options for the bitstream filter, can be set to NULL
- *
- * @return >=0 on success, negative AVERROR in case of failure
- */
-int av_bsf_list_append2(AVBSFList *lst, const char * bsf_name, AVDictionary **options);
-/**
- * Finalize list of bitstream filters.
- *
- * This function will transform @ref AVBSFList to single @ref AVBSFContext,
- * so the whole chain of bitstream filters can be treated as single filter
- * freshly allocated by av_bsf_alloc().
- * If the call is successful, @ref AVBSFList structure is freed and lst
- * will be set to NULL. In case of failure, caller is responsible for
- * freeing the structure by av_bsf_list_free()
- *
- * @param lst Filter list structure to be transformed
- * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure
- * representing the chain of bitstream filters
- *
- * @return >=0 on success, negative AVERROR in case of failure
- */
-int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf);
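/*
 * A minimal sketch (not from the original header) of assembling a chain by
 * hand and collapsing it into a single AVBSFContext. The helper name and the
 * filter names are illustrative assumptions.
 */
static int build_chain(AVBSFContext **out)
{
    AVBSFList *lst = av_bsf_list_alloc();
    int ret;

    if (!lst)
        return AVERROR(ENOMEM);
    if ((ret = av_bsf_list_append2(lst, "h264_mp4toannexb", NULL)) < 0 ||
        (ret = av_bsf_list_append2(lst, "dump_extra", NULL)) < 0 ||
        (ret = av_bsf_list_finalize(&lst, out)) < 0)
        av_bsf_list_free(&lst); /* only needed if finalize did not succeed */
    return ret;
}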
-
-/**
- * Parse string describing list of bitstream filters and create single
- * @ref AVBSFContext describing the whole chain of bitstream filters.
- * Resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext freshly
- * allocated by av_bsf_alloc().
- *
- * @param str String describing chain of bitstream filters in format
- * `bsf1[=opt1=val1:opt2=val2][,bsf2]`
- * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure
- * representing the chain of bitstream filters
- *
- * @return >=0 on success, negative AVERROR in case of failure
- */
-int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf);
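/*
 * A minimal sketch (not from the original header): the same chain as above
 * can also be built directly from its string description. The filter names
 * are illustrative assumptions.
 */
static int build_chain_from_string(AVBSFContext **out)
{
    return av_bsf_list_parse_str("h264_mp4toannexb,dump_extra", out);
}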
-
-/**
- * Get null/pass-through bitstream filter.
- *
- * @param[out] bsf Pointer to be set to new instance of pass-through bitstream filter
- *
- * @return 0 on success, a negative AVERROR code on failure
- */
-int av_bsf_get_null_filter(AVBSFContext **bsf);
-
/* memory */
/**
@@ -6091,137 +3180,12 @@ void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size);
/**
- * Encode extradata length to a buffer. Used by xiph codecs.
- *
- * @param s buffer to write to; must be at least (v/255+1) bytes long
- * @param v size of extradata in bytes
- * @return number of bytes written to the buffer.
- */
-unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
-
-#if FF_API_USER_VISIBLE_AVHWACCEL
-/**
- * Register the hardware accelerator hwaccel.
- *
- * @deprecated This function doesn't do anything.
- */
-attribute_deprecated
-void av_register_hwaccel(AVHWAccel *hwaccel);
-
-/**
- * If hwaccel is NULL, returns the first registered hardware accelerator,
- * if hwaccel is non-NULL, returns the next registered hardware accelerator
- * after hwaccel, or NULL if hwaccel is the last one.
- *
- * @deprecated AVHWaccel structures contain no user-serviceable parts, so
- * this function should not be used.
- */
-attribute_deprecated
-AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel);
-#endif
-
-#if FF_API_LOCKMGR
-/**
- * Lock operation used by lockmgr
- *
- * @deprecated Deprecated together with av_lockmgr_register().
- */
-enum AVLockOp {
- AV_LOCK_CREATE, ///< Create a mutex
- AV_LOCK_OBTAIN, ///< Lock the mutex
- AV_LOCK_RELEASE, ///< Unlock the mutex
- AV_LOCK_DESTROY, ///< Free mutex resources
-};
-
-/**
- * Register a user provided lock manager supporting the operations
- * specified by AVLockOp. The "mutex" argument to the function points
- * to a (void *) where the lockmgr should store/get a pointer to a user
- * allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the
- * value left by the last call for all other ops. If the lock manager is
- * unable to perform the op then it should leave the mutex in the same
- * state as when it was called and return a non-zero value. However,
- * when called with AV_LOCK_DESTROY the mutex will always be assumed to
- * have been successfully destroyed. If av_lockmgr_register succeeds
- * it will return a non-negative value, if it fails it will return a
- * negative value, destroy all mutexes and unregister all callbacks.
- * av_lockmgr_register is not thread-safe, it must be called from a
- * single thread before any calls which make use of locking are used.
- *
- * @param cb User defined callback. av_lockmgr_register invokes calls
- * to this callback and the previously registered callback.
- * The callback will be used to create more than one mutex
- * each of which must be backed by its own underlying locking
- * mechanism (i.e. do not use a single static object to
- * implement your lock manager). If cb is set to NULL the
- * lockmgr will be unregistered.
- *
- * @deprecated This function does nothing, and always returns 0. Be sure to
- * build with thread support to get basic thread safety.
- */
-attribute_deprecated
-int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));
-#endif
-
-/**
- * Get the type of the given codec.
- */
-enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
-
-/**
- * Get the name of a codec.
- * @return a static string identifying the codec; never NULL
- */
-const char *avcodec_get_name(enum AVCodecID id);
-
-/**
* @return a positive value if s is open (i.e. avcodec_open2() was called on it
* with no corresponding avcodec_close()), 0 otherwise.
*/
int avcodec_is_open(AVCodecContext *s);
/**
- * @return a non-zero number if codec is an encoder, zero otherwise
- */
-int av_codec_is_encoder(const AVCodec *codec);
-
-/**
- * @return a non-zero number if codec is a decoder, zero otherwise
- */
-int av_codec_is_decoder(const AVCodec *codec);
-
-/**
- * @return descriptor for given codec ID or NULL if no descriptor exists.
- */
-const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);
-
-/**
- * Iterate over all codec descriptors known to libavcodec.
- *
- * @param prev previous descriptor. NULL to get the first descriptor.
- *
- * @return next descriptor or NULL after the last descriptor
- */
-const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);
-
-/**
- * @return codec descriptor with the given name or NULL if no such descriptor
- * exists.
- */
-const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);
-
-/**
- * Allocate a CPB properties structure and initialize its fields to default
- * values.
- *
- * @param size if non-NULL, the size of the allocated struct will be written
- * here. This is useful for embedding it in side data.
- *
- * @return the newly allocated struct or NULL on failure
- */
-AVCPBProperties *av_cpb_properties_alloc(size_t *size);
-
-/**
* @}
*/
diff --git a/media/ffvpx/libavcodec/avpacket.c b/media/ffvpx/libavcodec/avpacket.c
index 2b20067211..5fef65e97a 100644
--- a/media/ffvpx/libavcodec/avpacket.c
+++ b/media/ffvpx/libavcodec/avpacket.c
@@ -22,39 +22,50 @@
#include <string.h>
#include "libavutil/avassert.h"
-#include "libavutil/common.h"
-#include "libavutil/internal.h"
+#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
-#include "avcodec.h"
-#include "bytestream.h"
-#include "internal.h"
+#include "libavutil/rational.h"
+#include "defs.h"
+#include "packet.h"
+#include "packet_internal.h"
+
+#if FF_API_INIT_PACKET
void av_init_packet(AVPacket *pkt)
{
pkt->pts = AV_NOPTS_VALUE;
pkt->dts = AV_NOPTS_VALUE;
pkt->pos = -1;
pkt->duration = 0;
-#if FF_API_CONVERGENCE_DURATION
-FF_DISABLE_DEPRECATION_WARNINGS
- pkt->convergence_duration = 0;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
pkt->flags = 0;
pkt->stream_index = 0;
pkt->buf = NULL;
pkt->side_data = NULL;
pkt->side_data_elems = 0;
+ pkt->opaque = NULL;
+ pkt->opaque_ref = NULL;
+ pkt->time_base = av_make_q(0, 1);
+}
+#endif
+
+static void get_packet_defaults(AVPacket *pkt)
+{
+ memset(pkt, 0, sizeof(*pkt));
+
+ pkt->pts = AV_NOPTS_VALUE;
+ pkt->dts = AV_NOPTS_VALUE;
+ pkt->pos = -1;
+ pkt->time_base = av_make_q(0, 1);
}
AVPacket *av_packet_alloc(void)
{
- AVPacket *pkt = av_mallocz(sizeof(AVPacket));
+ AVPacket *pkt = av_malloc(sizeof(AVPacket));
if (!pkt)
return pkt;
- av_packet_unref(pkt);
+ get_packet_defaults(pkt);
return pkt;
}
@@ -90,7 +101,7 @@ int av_new_packet(AVPacket *pkt, int size)
if (ret < 0)
return ret;
- av_init_packet(pkt);
+ get_packet_defaults(pkt);
pkt->buf = buf;
pkt->data = buf->data;
pkt->size = size;
@@ -127,8 +138,16 @@ int av_grow_packet(AVPacket *pkt, int grow_by)
return AVERROR(ENOMEM);
}
- if (new_size + data_offset > pkt->buf->size) {
- int ret = av_buffer_realloc(&pkt->buf, new_size + data_offset);
+ if (new_size + data_offset > pkt->buf->size ||
+ !av_buffer_is_writable(pkt->buf)) {
+ int ret;
+
+ // allocate slightly more than requested to avoid excessive
+ // reallocations
+ if (new_size + data_offset < INT_MAX - new_size/16)
+ new_size += new_size/16;
+
+ ret = av_buffer_realloc(&pkt->buf, new_size + data_offset);
if (ret < 0) {
pkt->data = old_data;
return ret;
@@ -165,108 +184,6 @@ int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size)
return 0;
}
-#if FF_API_AVPACKET_OLD_API
-FF_DISABLE_DEPRECATION_WARNINGS
-#define ALLOC_MALLOC(data, size) data = av_malloc(size)
-#define ALLOC_BUF(data, size) \
-do { \
- av_buffer_realloc(&pkt->buf, size); \
- data = pkt->buf ? pkt->buf->data : NULL; \
-} while (0)
-
-#define DUP_DATA(dst, src, size, padding, ALLOC) \
- do { \
- void *data; \
- if (padding) { \
- if ((unsigned)(size) > \
- (unsigned)(size) + AV_INPUT_BUFFER_PADDING_SIZE) \
- goto failed_alloc; \
- ALLOC(data, size + AV_INPUT_BUFFER_PADDING_SIZE); \
- } else { \
- ALLOC(data, size); \
- } \
- if (!data) \
- goto failed_alloc; \
- memcpy(data, src, size); \
- if (padding) \
- memset((uint8_t *)data + size, 0, \
- AV_INPUT_BUFFER_PADDING_SIZE); \
- dst = data; \
- } while (0)
-
-/* Makes duplicates of data, side_data, but does not copy any other fields */
-static int copy_packet_data(AVPacket *pkt, const AVPacket *src, int dup)
-{
- pkt->data = NULL;
- pkt->side_data = NULL;
- pkt->side_data_elems = 0;
- if (pkt->buf) {
- AVBufferRef *ref = av_buffer_ref(src->buf);
- if (!ref)
- return AVERROR(ENOMEM);
- pkt->buf = ref;
- pkt->data = ref->data;
- } else {
- DUP_DATA(pkt->data, src->data, pkt->size, 1, ALLOC_BUF);
- }
- if (src->side_data_elems && dup) {
- pkt->side_data = src->side_data;
- pkt->side_data_elems = src->side_data_elems;
- }
- if (src->side_data_elems && !dup) {
- return av_copy_packet_side_data(pkt, src);
- }
- return 0;
-
-failed_alloc:
- av_packet_unref(pkt);
- return AVERROR(ENOMEM);
-}
-
-int av_copy_packet_side_data(AVPacket *pkt, const AVPacket *src)
-{
- if (src->side_data_elems) {
- int i;
- DUP_DATA(pkt->side_data, src->side_data,
- src->side_data_elems * sizeof(*src->side_data), 0, ALLOC_MALLOC);
- if (src != pkt) {
- memset(pkt->side_data, 0,
- src->side_data_elems * sizeof(*src->side_data));
- }
- for (i = 0; i < src->side_data_elems; i++) {
- DUP_DATA(pkt->side_data[i].data, src->side_data[i].data,
- src->side_data[i].size, 1, ALLOC_MALLOC);
- pkt->side_data[i].size = src->side_data[i].size;
- pkt->side_data[i].type = src->side_data[i].type;
- }
- }
- pkt->side_data_elems = src->side_data_elems;
- return 0;
-
-failed_alloc:
- av_packet_unref(pkt);
- return AVERROR(ENOMEM);
-}
-
-int av_dup_packet(AVPacket *pkt)
-{
- AVPacket tmp_pkt;
-
- if (!pkt->buf && pkt->data) {
- tmp_pkt = *pkt;
- return copy_packet_data(pkt, &tmp_pkt, 1);
- }
- return 0;
-}
-
-int av_copy_packet(AVPacket *dst, const AVPacket *src)
-{
- *dst = *src;
- return copy_packet_data(dst, src, 0);
-}
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
void av_packet_free_side_data(AVPacket *pkt)
{
int i;
@@ -276,22 +193,6 @@ void av_packet_free_side_data(AVPacket *pkt)
pkt->side_data_elems = 0;
}
-#if FF_API_AVPACKET_OLD_API
-FF_DISABLE_DEPRECATION_WARNINGS
-void av_free_packet(AVPacket *pkt)
-{
- if (pkt) {
- if (pkt->buf)
- av_buffer_unref(&pkt->buf);
- pkt->data = NULL;
- pkt->size = 0;
-
- av_packet_free_side_data(pkt);
- }
-}
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
uint8_t *data, size_t size)
{
@@ -327,12 +228,12 @@ int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
uint8_t *av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
- int size)
+ size_t size)
{
int ret;
uint8_t *data;
- if ((unsigned)size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ if (size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
return NULL;
data = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!data)
@@ -348,7 +249,7 @@ uint8_t *av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
}
uint8_t *av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type,
- int *size)
+ size_t *size)
{
int i;
@@ -394,139 +295,58 @@ const char *av_packet_side_data_name(enum AVPacketSideDataType type)
case AV_PKT_DATA_ENCRYPTION_INIT_INFO: return "Encryption initialization data";
case AV_PKT_DATA_ENCRYPTION_INFO: return "Encryption info";
case AV_PKT_DATA_AFD: return "Active Format Description data";
+ case AV_PKT_DATA_PRFT: return "Producer Reference Time";
+ case AV_PKT_DATA_ICC_PROFILE: return "ICC Profile";
+ case AV_PKT_DATA_DOVI_CONF: return "DOVI configuration record";
+ case AV_PKT_DATA_S12M_TIMECODE: return "SMPTE ST 12-1:2014 timecode";
+ case AV_PKT_DATA_DYNAMIC_HDR10_PLUS: return "HDR10+ Dynamic Metadata (SMPTE 2094-40)";
}
return NULL;
}
-#if FF_API_MERGE_SD_API
-
-#define FF_MERGE_MARKER 0x8c4d9d108e25e9feULL
-
-int av_packet_merge_side_data(AVPacket *pkt){
- if(pkt->side_data_elems){
- AVBufferRef *buf;
- int i;
- uint8_t *p;
- uint64_t size= pkt->size + 8LL + AV_INPUT_BUFFER_PADDING_SIZE;
- AVPacket old= *pkt;
- for (i=0; i<old.side_data_elems; i++) {
- size += old.side_data[i].size + 5LL;
- }
- if (size > INT_MAX)
- return AVERROR(EINVAL);
- buf = av_buffer_alloc(size);
- if (!buf)
- return AVERROR(ENOMEM);
- pkt->buf = buf;
- pkt->data = p = buf->data;
- pkt->size = size - AV_INPUT_BUFFER_PADDING_SIZE;
- bytestream_put_buffer(&p, old.data, old.size);
- for (i=old.side_data_elems-1; i>=0; i--) {
- bytestream_put_buffer(&p, old.side_data[i].data, old.side_data[i].size);
- bytestream_put_be32(&p, old.side_data[i].size);
- *p++ = old.side_data[i].type | ((i==old.side_data_elems-1)*128);
- }
- bytestream_put_be64(&p, FF_MERGE_MARKER);
- av_assert0(p-pkt->data == pkt->size);
- memset(p, 0, AV_INPUT_BUFFER_PADDING_SIZE);
- av_packet_unref(&old);
- pkt->side_data_elems = 0;
- pkt->side_data = NULL;
- return 1;
- }
- return 0;
-}
-
-int av_packet_split_side_data(AVPacket *pkt){
- if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
- int i;
- unsigned int size;
- uint8_t *p;
-
- p = pkt->data + pkt->size - 8 - 5;
- for (i=1; ; i++){
- size = AV_RB32(p);
- if (size>INT_MAX - 5 || p - pkt->data < size)
- return 0;
- if (p[4]&128)
- break;
- if (p - pkt->data < size + 5)
- return 0;
- p-= size+5;
- }
-
- if (i > AV_PKT_DATA_NB)
- return AVERROR(ERANGE);
-
- pkt->side_data = av_malloc_array(i, sizeof(*pkt->side_data));
- if (!pkt->side_data)
- return AVERROR(ENOMEM);
-
- p= pkt->data + pkt->size - 8 - 5;
- for (i=0; ; i++){
- size= AV_RB32(p);
- av_assert0(size<=INT_MAX - 5 && p - pkt->data >= size);
- pkt->side_data[i].data = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
- pkt->side_data[i].size = size;
- pkt->side_data[i].type = p[4]&127;
- if (!pkt->side_data[i].data)
- return AVERROR(ENOMEM);
- memcpy(pkt->side_data[i].data, p-size, size);
- pkt->size -= size + 5;
- if(p[4]&128)
- break;
- p-= size+5;
- }
- pkt->size -= 8;
- pkt->side_data_elems = i+1;
- return 1;
- }
- return 0;
-}
-#endif
-
-uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size)
+uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size)
{
- AVDictionaryEntry *t = NULL;
uint8_t *data = NULL;
*size = 0;
if (!dict)
return NULL;
- while ((t = av_dict_get(dict, "", t, AV_DICT_IGNORE_SUFFIX))) {
- const size_t keylen = strlen(t->key);
- const size_t valuelen = strlen(t->value);
- const size_t new_size = *size + keylen + 1 + valuelen + 1;
- uint8_t *const new_data = av_realloc(data, new_size);
+ for (int pass = 0; pass < 2; pass++) {
+ const AVDictionaryEntry *t = NULL;
+ size_t total_length = 0;
- if (!new_data)
- goto fail;
- data = new_data;
- if (new_size > INT_MAX)
- goto fail;
+ while ((t = av_dict_iterate(dict, t))) {
+ for (int i = 0; i < 2; i++) {
+ const char *str = i ? t->value : t->key;
+ const size_t len = strlen(str) + 1;
- memcpy(data + *size, t->key, keylen + 1);
- memcpy(data + *size + keylen + 1, t->value, valuelen + 1);
-
- *size = new_size;
+ if (pass)
+ memcpy(data + total_length, str, len);
+ else if (len > SIZE_MAX - total_length)
+ return NULL;
+ total_length += len;
+ }
+ }
+ if (pass)
+ break;
+ data = av_malloc(total_length);
+ if (!data)
+ return NULL;
+ *size = total_length;
}
return data;
-
-fail:
- av_freep(&data);
- *size = 0;
- return NULL;
}
-int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
+int av_packet_unpack_dictionary(const uint8_t *data, size_t size,
+ AVDictionary **dict)
{
const uint8_t *end;
- int ret = 0;
+ int ret;
if (!dict || !data || !size)
- return ret;
+ return 0;
end = data + size;
if (size && end[-1])
return AVERROR_INVALIDDATA;
@@ -539,15 +359,15 @@ int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **di
ret = av_dict_set(dict, key, val, 0);
if (ret < 0)
- break;
+ return ret;
data = val + strlen(val) + 1;
}
- return ret;
+ return 0;
}
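/*
 * A minimal usage sketch (not from the original file): the packed
 * "key\0value\0..." buffer produced above is typically attached to a packet
 * as strings-metadata side data. The helper name is an illustrative
 * assumption; meta is assumed to be a non-empty dictionary.
 */
static int attach_metadata(AVPacket *pkt, AVDictionary *meta)
{
    size_t size;
    uint8_t *packed = av_packet_pack_dictionary(meta, &size);
    int ret;

    if (!packed)
        return AVERROR(ENOMEM);
    ret = av_packet_add_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, packed, size);
    if (ret < 0)
        av_free(packed); /* on failure, ownership stays with the caller */
    return ret;
}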
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
- int size)
+ size_t size)
{
int i;
@@ -564,29 +384,32 @@ int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
{
- int i;
+ int i, ret;
dst->pts = src->pts;
dst->dts = src->dts;
dst->pos = src->pos;
dst->duration = src->duration;
-#if FF_API_CONVERGENCE_DURATION
-FF_DISABLE_DEPRECATION_WARNINGS
- dst->convergence_duration = src->convergence_duration;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
dst->flags = src->flags;
dst->stream_index = src->stream_index;
-
+ dst->opaque = src->opaque;
+ dst->time_base = src->time_base;
+ dst->opaque_ref = NULL;
dst->side_data = NULL;
dst->side_data_elems = 0;
+
+ ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < src->side_data_elems; i++) {
enum AVPacketSideDataType type = src->side_data[i].type;
- int size = src->side_data[i].size;
+ size_t size = src->side_data[i].size;
uint8_t *src_data = src->side_data[i].data;
uint8_t *dst_data = av_packet_new_side_data(dst, type, size);
if (!dst_data) {
+ av_buffer_unref(&dst->opaque_ref);
av_packet_free_side_data(dst);
return AVERROR(ENOMEM);
}
@@ -599,19 +422,20 @@ FF_ENABLE_DEPRECATION_WARNINGS
void av_packet_unref(AVPacket *pkt)
{
av_packet_free_side_data(pkt);
+ av_buffer_unref(&pkt->opaque_ref);
av_buffer_unref(&pkt->buf);
- av_init_packet(pkt);
- pkt->data = NULL;
- pkt->size = 0;
+ get_packet_defaults(pkt);
}
int av_packet_ref(AVPacket *dst, const AVPacket *src)
{
int ret;
+ dst->buf = NULL;
+
ret = av_packet_copy_props(dst, src);
if (ret < 0)
- return ret;
+ goto fail;
if (!src->buf) {
ret = packet_alloc(&dst->buf, src->size);
@@ -635,7 +459,7 @@ int av_packet_ref(AVPacket *dst, const AVPacket *src)
return 0;
fail:
- av_packet_free_side_data(dst);
+ av_packet_unref(dst);
return ret;
}
@@ -655,9 +479,7 @@ AVPacket *av_packet_clone(const AVPacket *src)
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
{
*dst = *src;
- av_init_packet(src);
- src->data = NULL;
- src->size = 0;
+ get_packet_defaults(src);
}
int av_packet_make_refcounted(AVPacket *pkt)
@@ -709,18 +531,78 @@ void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
pkt->dts = av_rescale_q(pkt->dts, src_tb, dst_tb);
if (pkt->duration > 0)
pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
-#if FF_API_CONVERGENCE_DURATION
-FF_DISABLE_DEPRECATION_WARNINGS
- if (pkt->convergence_duration > 0)
- pkt->convergence_duration = av_rescale_q(pkt->convergence_duration, src_tb, dst_tb);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
+}
+
+int avpriv_packet_list_put(PacketList *packet_buffer,
+ AVPacket *pkt,
+ int (*copy)(AVPacket *dst, const AVPacket *src),
+ int flags)
+{
+ PacketListEntry *pktl = av_malloc(sizeof(*pktl));
+ int ret;
+
+ if (!pktl)
+ return AVERROR(ENOMEM);
+
+ if (copy) {
+ get_packet_defaults(&pktl->pkt);
+ ret = copy(&pktl->pkt, pkt);
+ if (ret < 0) {
+ av_free(pktl);
+ return ret;
+ }
+ } else {
+ ret = av_packet_make_refcounted(pkt);
+ if (ret < 0) {
+ av_free(pktl);
+ return ret;
+ }
+ av_packet_move_ref(&pktl->pkt, pkt);
+ }
+
+ pktl->next = NULL;
+
+ if (packet_buffer->head)
+ packet_buffer->tail->next = pktl;
+ else
+ packet_buffer->head = pktl;
+
+ /* Add the packet in the buffered packet list. */
+ packet_buffer->tail = pktl;
+ return 0;
+}
+
+int avpriv_packet_list_get(PacketList *pkt_buffer,
+ AVPacket *pkt)
+{
+ PacketListEntry *pktl = pkt_buffer->head;
+ if (!pktl)
+ return AVERROR(EAGAIN);
+ *pkt = pktl->pkt;
+ pkt_buffer->head = pktl->next;
+ if (!pkt_buffer->head)
+ pkt_buffer->tail = NULL;
+ av_freep(&pktl);
+ return 0;
+}
+
+void avpriv_packet_list_free(PacketList *pkt_buf)
+{
+ PacketListEntry *tmp = pkt_buf->head;
+
+ while (tmp) {
+ PacketListEntry *pktl = tmp;
+ tmp = pktl->next;
+ av_packet_unref(&pktl->pkt);
+ av_freep(&pktl);
+ }
+ pkt_buf->head = pkt_buf->tail = NULL;
}
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
{
uint8_t *side_data;
- int side_data_size;
+ size_t side_data_size;
int i;
side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, &side_data_size);
@@ -741,3 +623,25 @@ int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, i
return 0;
}
+
+int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp)
+{
+ AVProducerReferenceTime *prft;
+ uint8_t *side_data;
+ size_t side_data_size;
+
+ side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, &side_data_size);
+ if (!side_data) {
+ side_data_size = sizeof(AVProducerReferenceTime);
+ side_data = av_packet_new_side_data(pkt, AV_PKT_DATA_PRFT, side_data_size);
+ }
+
+ if (!side_data || side_data_size < sizeof(AVProducerReferenceTime))
+ return AVERROR(ENOMEM);
+
+ prft = (AVProducerReferenceTime *)side_data;
+ prft->wallclock = timestamp;
+ prft->flags = 0;
+
+ return 0;
+}
diff --git a/media/ffvpx/libavcodec/bitstream.c b/media/ffvpx/libavcodec/bitstream.c
index 53a2db7451..3606575055 100644
--- a/media/ffvpx/libavcodec/bitstream.c
+++ b/media/ffvpx/libavcodec/bitstream.c
@@ -28,30 +28,15 @@
* bitstream api.
*/
+#include <stdint.h>
+#include <string.h>
+
+#include "config.h"
#include "libavutil/avassert.h"
-#include "libavutil/qsort.h"
-#include "avcodec.h"
-#include "internal.h"
-#include "mathops.h"
+#include "libavutil/intreadwrite.h"
#include "put_bits.h"
-#include "vlc.h"
-
-const uint8_t ff_log2_run[41]={
- 0, 0, 0, 0, 1, 1, 1, 1,
- 2, 2, 2, 2, 3, 3, 3, 3,
- 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 9,10,11,12,13,14,15,
-16,17,18,19,20,21,22,23,
-24,
-};
-
-void avpriv_align_put_bits(PutBitContext *s)
-{
- put_bits(s, s->bit_left & 7, 0);
-}
-void avpriv_put_string(PutBitContext *pb, const char *string,
- int terminate_string)
+void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
{
while (*string) {
put_bits(pb, 8, *string);
@@ -61,7 +46,7 @@ void avpriv_put_string(PutBitContext *pb, const char *string,
put_bits(pb, 8, 0);
}
-void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
+void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
{
int words = length >> 4;
int bits = length & 15;
@@ -85,278 +70,3 @@ void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
put_bits(pb, bits, AV_RB16(src + 2 * words) >> (16 - bits));
}
-
-/* VLC decoding */
-
-#define GET_DATA(v, table, i, wrap, size) \
-{ \
- const uint8_t *ptr = (const uint8_t *)table + i * wrap; \
- switch(size) { \
- case 1: \
- v = *(const uint8_t *)ptr; \
- break; \
- case 2: \
- v = *(const uint16_t *)ptr; \
- break; \
- case 4: \
- v = *(const uint32_t *)ptr; \
- break; \
- default: \
- av_assert1(0); \
- } \
-}
-
-
-static int alloc_table(VLC *vlc, int size, int use_static)
-{
- int index = vlc->table_size;
-
- vlc->table_size += size;
- if (vlc->table_size > vlc->table_allocated) {
- if (use_static)
- abort(); // cannot do anything, init_vlc() is used with too little memory
- vlc->table_allocated += (1 << vlc->bits);
- vlc->table = av_realloc_f(vlc->table, vlc->table_allocated, sizeof(VLC_TYPE) * 2);
- if (!vlc->table) {
- vlc->table_allocated = 0;
- vlc->table_size = 0;
- return AVERROR(ENOMEM);
- }
- memset(vlc->table + vlc->table_allocated - (1 << vlc->bits), 0, sizeof(VLC_TYPE) * 2 << vlc->bits);
- }
- return index;
-}
-
-typedef struct VLCcode {
- uint8_t bits;
- uint16_t symbol;
- /** codeword, with the first bit-to-be-read in the msb
- * (even if intended for a little-endian bitstream reader) */
- uint32_t code;
-} VLCcode;
-
-static int compare_vlcspec(const void *a, const void *b)
-{
- const VLCcode *sa = a, *sb = b;
- return (sa->code >> 1) - (sb->code >> 1);
-}
-/**
- * Build VLC decoding tables suitable for use with get_vlc().
- *
- * @param vlc the context to be initialized
- *
- * @param table_nb_bits max length of vlc codes to store directly in this table
- * (Longer codes are delegated to subtables.)
- *
- * @param nb_codes number of elements in codes[]
- *
- * @param codes descriptions of the vlc codes
- * These must be ordered such that codes going into the same subtable are contiguous.
- * Sorting by VLCcode.code is sufficient, though not necessary.
- */
-static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
- VLCcode *codes, int flags)
-{
- int table_size, table_index, index, code_prefix, symbol, subtable_bits;
- int i, j, k, n, nb, inc;
- uint32_t code;
- volatile VLC_TYPE (* volatile table)[2]; // the double volatile is needed to prevent an internal compiler error in gcc 4.2
-
- if (table_nb_bits > 30)
- return AVERROR(EINVAL);
- table_size = 1 << table_nb_bits;
- table_index = alloc_table(vlc, table_size, flags & INIT_VLC_USE_NEW_STATIC);
- ff_dlog(NULL, "new table index=%d size=%d\n", table_index, table_size);
- if (table_index < 0)
- return table_index;
- table = (volatile VLC_TYPE (*)[2])&vlc->table[table_index];
-
- /* first pass: map codes and compute auxiliary table sizes */
- for (i = 0; i < nb_codes; i++) {
- n = codes[i].bits;
- code = codes[i].code;
- symbol = codes[i].symbol;
- ff_dlog(NULL, "i=%d n=%d code=0x%"PRIx32"\n", i, n, code);
- if (n <= table_nb_bits) {
- /* no need to add another table */
- j = code >> (32 - table_nb_bits);
- nb = 1 << (table_nb_bits - n);
- inc = 1;
- if (flags & INIT_VLC_LE) {
- j = bitswap_32(code);
- inc = 1 << n;
- }
- for (k = 0; k < nb; k++) {
- int bits = table[j][1];
- int oldsym = table[j][0];
- ff_dlog(NULL, "%4x: code=%d n=%d\n", j, i, n);
- if ((bits || oldsym) && (bits != n || oldsym != symbol)) {
- av_log(NULL, AV_LOG_ERROR, "incorrect codes\n");
- return AVERROR_INVALIDDATA;
- }
- table[j][1] = n; //bits
- table[j][0] = symbol;
- j += inc;
- }
- } else {
- /* fill auxiliary table recursively */
- n -= table_nb_bits;
- code_prefix = code >> (32 - table_nb_bits);
- subtable_bits = n;
- codes[i].bits = n;
- codes[i].code = code << table_nb_bits;
- for (k = i+1; k < nb_codes; k++) {
- n = codes[k].bits - table_nb_bits;
- if (n <= 0)
- break;
- code = codes[k].code;
- if (code >> (32 - table_nb_bits) != code_prefix)
- break;
- codes[k].bits = n;
- codes[k].code = code << table_nb_bits;
- subtable_bits = FFMAX(subtable_bits, n);
- }
- subtable_bits = FFMIN(subtable_bits, table_nb_bits);
- j = (flags & INIT_VLC_LE) ? bitswap_32(code_prefix) >> (32 - table_nb_bits) : code_prefix;
- table[j][1] = -subtable_bits;
- ff_dlog(NULL, "%4x: n=%d (subtable)\n",
- j, codes[i].bits + table_nb_bits);
- index = build_table(vlc, subtable_bits, k-i, codes+i, flags);
- if (index < 0)
- return index;
- /* note: realloc has been done, so reload tables */
- table = (volatile VLC_TYPE (*)[2])&vlc->table[table_index];
- table[j][0] = index; //code
- if (table[j][0] != index) {
- avpriv_request_sample(NULL, "strange codes");
- return AVERROR_PATCHWELCOME;
- }
- i = k-1;
- }
- }
-
- for (i = 0; i < table_size; i++) {
- if (table[i][1] == 0) //bits
- table[i][0] = -1; //codes
- }
-
- return table_index;
-}
-
-
-/* Build VLC decoding tables suitable for use with get_vlc().
-
- 'nb_bits' sets the decoding table size (2^nb_bits) entries. The
- bigger it is, the faster is the decoding. But it should not be too
- big to save memory and L1 cache. '9' is a good compromise.
-
- 'nb_codes' : number of vlcs codes
-
- 'bits' : table which gives the size (in bits) of each vlc code.
-
- 'codes' : table which gives the bit pattern of each vlc code.
-
- 'symbols' : table which gives the values to be returned from get_vlc().
-
- 'xxx_wrap' : give the number of bytes between each entry of the
- 'bits' or 'codes' tables.
-
- 'xxx_size' : gives the number of bytes of each entry of the 'bits'
- or 'codes' tables. Currently 1,2 and 4 are supported.
-
- 'wrap' and 'size' make it possible to use any memory configuration and types
- (byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.
-
- 'use_static' should be set to 1 for tables, which should be freed
- with av_free_static(), 0 if ff_free_vlc() will be used.
-*/
-int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes,
- const void *bits, int bits_wrap, int bits_size,
- const void *codes, int codes_wrap, int codes_size,
- const void *symbols, int symbols_wrap, int symbols_size,
- int flags)
-{
- VLCcode *buf;
- int i, j, ret;
- VLCcode localbuf[1500]; // the maximum currently needed is 1296 by rv34
- VLC localvlc, *vlc;
-
- vlc = vlc_arg;
- vlc->bits = nb_bits;
- if (flags & INIT_VLC_USE_NEW_STATIC) {
- av_assert0(nb_codes + 1 <= FF_ARRAY_ELEMS(localbuf));
- buf = localbuf;
- localvlc = *vlc_arg;
- vlc = &localvlc;
- vlc->table_size = 0;
- } else {
- vlc->table = NULL;
- vlc->table_allocated = 0;
- vlc->table_size = 0;
-
- buf = av_malloc_array((nb_codes + 1), sizeof(VLCcode));
- if (!buf)
- return AVERROR(ENOMEM);
- }
-
-
- av_assert0(symbols_size <= 2 || !symbols);
- j = 0;
-#define COPY(condition)\
- for (i = 0; i < nb_codes; i++) { \
- GET_DATA(buf[j].bits, bits, i, bits_wrap, bits_size); \
- if (!(condition)) \
- continue; \
- if (buf[j].bits > 3*nb_bits || buf[j].bits>32) { \
- av_log(NULL, AV_LOG_ERROR, "Too long VLC (%d) in init_vlc\n", buf[j].bits);\
- if (!(flags & INIT_VLC_USE_NEW_STATIC)) \
- av_free(buf); \
- return AVERROR(EINVAL); \
- } \
- GET_DATA(buf[j].code, codes, i, codes_wrap, codes_size); \
- if (buf[j].code >= (1LL<<buf[j].bits)) { \
- av_log(NULL, AV_LOG_ERROR, "Invalid code %"PRIx32" for %d in " \
- "init_vlc\n", buf[j].code, i); \
- if (!(flags & INIT_VLC_USE_NEW_STATIC)) \
- av_free(buf); \
- return AVERROR(EINVAL); \
- } \
- if (flags & INIT_VLC_LE) \
- buf[j].code = bitswap_32(buf[j].code); \
- else \
- buf[j].code <<= 32 - buf[j].bits; \
- if (symbols) \
- GET_DATA(buf[j].symbol, symbols, i, symbols_wrap, symbols_size) \
- else \
- buf[j].symbol = i; \
- j++; \
- }
- COPY(buf[j].bits > nb_bits);
- // qsort is the slowest part of init_vlc, and could probably be improved or avoided
- AV_QSORT(buf, j, struct VLCcode, compare_vlcspec);
- COPY(buf[j].bits && buf[j].bits <= nb_bits);
- nb_codes = j;
-
- ret = build_table(vlc, nb_bits, nb_codes, buf, flags);
-
- if (flags & INIT_VLC_USE_NEW_STATIC) {
- if(vlc->table_size != vlc->table_allocated)
- av_log(NULL, AV_LOG_ERROR, "needed %d had %d\n", vlc->table_size, vlc->table_allocated);
-
- av_assert0(ret >= 0);
- *vlc_arg = *vlc;
- } else {
- av_free(buf);
- if (ret < 0) {
- av_freep(&vlc->table);
- return ret;
- }
- }
- return 0;
-}
-
-
-void ff_free_vlc(VLC *vlc)
-{
- av_freep(&vlc->table);
-}
diff --git a/media/ffvpx/libavcodec/bitstream_filters.c b/media/ffvpx/libavcodec/bitstream_filters.c
index 463003966a..e8216819ca 100644
--- a/media/ffvpx/libavcodec/bitstream_filters.c
+++ b/media/ffvpx/libavcodec/bitstream_filters.c
@@ -16,66 +16,69 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "config.h"
+#include <stdint.h>
+#include <string.h>
-#include "libavutil/common.h"
#include "libavutil/log.h"
-#include "avcodec.h"
#include "bsf.h"
-
-extern const AVBitStreamFilter ff_aac_adtstoasc_bsf;
-extern const AVBitStreamFilter ff_av1_frame_split_bsf;
-extern const AVBitStreamFilter ff_av1_metadata_bsf;
-extern const AVBitStreamFilter ff_chomp_bsf;
-extern const AVBitStreamFilter ff_dump_extradata_bsf;
-extern const AVBitStreamFilter ff_dca_core_bsf;
-extern const AVBitStreamFilter ff_eac3_core_bsf;
-extern const AVBitStreamFilter ff_extract_extradata_bsf;
-extern const AVBitStreamFilter ff_filter_units_bsf;
-extern const AVBitStreamFilter ff_h264_metadata_bsf;
-extern const AVBitStreamFilter ff_h264_mp4toannexb_bsf;
-extern const AVBitStreamFilter ff_h264_redundant_pps_bsf;
-extern const AVBitStreamFilter ff_hapqa_extract_bsf;
-extern const AVBitStreamFilter ff_hevc_metadata_bsf;
-extern const AVBitStreamFilter ff_hevc_mp4toannexb_bsf;
-extern const AVBitStreamFilter ff_imx_dump_header_bsf;
-extern const AVBitStreamFilter ff_mjpeg2jpeg_bsf;
-extern const AVBitStreamFilter ff_mjpega_dump_header_bsf;
-extern const AVBitStreamFilter ff_mp3_header_decompress_bsf;
-extern const AVBitStreamFilter ff_mpeg2_metadata_bsf;
-extern const AVBitStreamFilter ff_mpeg4_unpack_bframes_bsf;
-extern const AVBitStreamFilter ff_mov2textsub_bsf;
-extern const AVBitStreamFilter ff_noise_bsf;
-extern const AVBitStreamFilter ff_null_bsf;
-extern const AVBitStreamFilter ff_prores_metadata_bsf;
-extern const AVBitStreamFilter ff_remove_extradata_bsf;
-extern const AVBitStreamFilter ff_text2movsub_bsf;
-extern const AVBitStreamFilter ff_trace_headers_bsf;
-extern const AVBitStreamFilter ff_truehd_core_bsf;
-extern const AVBitStreamFilter ff_vp9_metadata_bsf;
-extern const AVBitStreamFilter ff_vp9_raw_reorder_bsf;
-extern const AVBitStreamFilter ff_vp9_superframe_bsf;
-extern const AVBitStreamFilter ff_vp9_superframe_split_bsf;
+#include "bsf_internal.h"
+
+extern const FFBitStreamFilter ff_aac_adtstoasc_bsf;
+extern const FFBitStreamFilter ff_av1_frame_merge_bsf;
+extern const FFBitStreamFilter ff_av1_frame_split_bsf;
+extern const FFBitStreamFilter ff_av1_metadata_bsf;
+extern const FFBitStreamFilter ff_chomp_bsf;
+extern const FFBitStreamFilter ff_dump_extradata_bsf;
+extern const FFBitStreamFilter ff_dca_core_bsf;
+extern const FFBitStreamFilter ff_dts2pts_bsf;
+extern const FFBitStreamFilter ff_dv_error_marker_bsf;
+extern const FFBitStreamFilter ff_eac3_core_bsf;
+extern const FFBitStreamFilter ff_extract_extradata_bsf;
+extern const FFBitStreamFilter ff_filter_units_bsf;
+extern const FFBitStreamFilter ff_h264_metadata_bsf;
+extern const FFBitStreamFilter ff_h264_mp4toannexb_bsf;
+extern const FFBitStreamFilter ff_h264_redundant_pps_bsf;
+extern const FFBitStreamFilter ff_hapqa_extract_bsf;
+extern const FFBitStreamFilter ff_hevc_metadata_bsf;
+extern const FFBitStreamFilter ff_hevc_mp4toannexb_bsf;
+extern const FFBitStreamFilter ff_imx_dump_header_bsf;
+extern const FFBitStreamFilter ff_media100_to_mjpegb_bsf;
+extern const FFBitStreamFilter ff_mjpeg2jpeg_bsf;
+extern const FFBitStreamFilter ff_mjpega_dump_header_bsf;
+extern const FFBitStreamFilter ff_mp3_header_decompress_bsf;
+extern const FFBitStreamFilter ff_mpeg2_metadata_bsf;
+extern const FFBitStreamFilter ff_mpeg4_unpack_bframes_bsf;
+extern const FFBitStreamFilter ff_mov2textsub_bsf;
+extern const FFBitStreamFilter ff_noise_bsf;
+extern const FFBitStreamFilter ff_null_bsf;
+extern const FFBitStreamFilter ff_opus_metadata_bsf;
+extern const FFBitStreamFilter ff_pcm_rechunk_bsf;
+extern const FFBitStreamFilter ff_pgs_frame_merge_bsf;
+extern const FFBitStreamFilter ff_prores_metadata_bsf;
+extern const FFBitStreamFilter ff_remove_extradata_bsf;
+extern const FFBitStreamFilter ff_setts_bsf;
+extern const FFBitStreamFilter ff_text2movsub_bsf;
+extern const FFBitStreamFilter ff_trace_headers_bsf;
+extern const FFBitStreamFilter ff_truehd_core_bsf;
+extern const FFBitStreamFilter ff_vp9_metadata_bsf;
+extern const FFBitStreamFilter ff_vp9_raw_reorder_bsf;
+extern const FFBitStreamFilter ff_vp9_superframe_bsf;
+extern const FFBitStreamFilter ff_vp9_superframe_split_bsf;
#include "libavcodec/bsf_list.c"
const AVBitStreamFilter *av_bsf_iterate(void **opaque)
{
uintptr_t i = (uintptr_t)*opaque;
- const AVBitStreamFilter *f = bitstream_filters[i];
+ const FFBitStreamFilter *f = bitstream_filters[i];
- if (f)
+ if (f) {
*opaque = (void*)(i + 1);
-
- return f;
-}
-
-#if FF_API_NEXT
-const AVBitStreamFilter *av_bsf_next(void **opaque) {
- return av_bsf_iterate(opaque);
+ return &f->p;
+ }
+ return NULL;
}
-#endif
const AVBitStreamFilter *av_bsf_get_by_name(const char *name)
{
@@ -93,20 +96,12 @@ const AVBitStreamFilter *av_bsf_get_by_name(const char *name)
return NULL;
}
-const AVClass *ff_bsf_child_class_next(const AVClass *prev)
+const AVClass *ff_bsf_child_class_iterate(void **opaque)
{
- const AVBitStreamFilter *f = NULL;
- void *i = 0;
-
- /* find the filter that corresponds to prev */
- while (prev && (f = av_bsf_iterate(&i))) {
- if (f->priv_class == prev) {
- break;
- }
- }
+ const AVBitStreamFilter *f;
/* find next filter with priv options */
- while ((f = av_bsf_iterate(&i))) {
+ while ((f = av_bsf_iterate(opaque))) {
if (f->priv_class)
return f->priv_class;
}
diff --git a/media/ffvpx/libavcodec/blockdsp.h b/media/ffvpx/libavcodec/blockdsp.h
index 26fc2ea13b..d853adada2 100644
--- a/media/ffvpx/libavcodec/blockdsp.h
+++ b/media/ffvpx/libavcodec/blockdsp.h
@@ -22,9 +22,6 @@
#include <stddef.h>
#include <stdint.h>
-#include "avcodec.h"
-#include "version.h"
-
/* add and put pixel (decoding)
* Block sizes for op_pixels_func are 8x4,8x8 16x8 16x16.
* h for op_pixels_func is limited to { width / 2, width },
@@ -39,12 +36,12 @@ typedef struct BlockDSPContext {
op_fill_func fill_block_tab[2];
} BlockDSPContext;
-void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx);
+void ff_blockdsp_init(BlockDSPContext *c);
void ff_blockdsp_init_alpha(BlockDSPContext *c);
void ff_blockdsp_init_arm(BlockDSPContext *c);
void ff_blockdsp_init_ppc(BlockDSPContext *c);
-void ff_blockdsp_init_x86(BlockDSPContext *c, AVCodecContext *avctx);
+void ff_blockdsp_init_x86(BlockDSPContext *c);
void ff_blockdsp_init_mips(BlockDSPContext *c);
#endif /* AVCODEC_BLOCKDSP_H */
diff --git a/media/ffvpx/libavcodec/bsf.c b/media/ffvpx/libavcodec/bsf.c
index e17dc854f5..42cc1b5ab0 100644
--- a/media/ffvpx/libavcodec/bsf.c
+++ b/media/ffvpx/libavcodec/bsf.c
@@ -18,39 +18,56 @@
#include <string.h>
+#include "config_components.h"
+
+#include "libavutil/avassert.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
-#include "avcodec.h"
#include "bsf.h"
+#include "bsf_internal.h"
+#include "codec_desc.h"
+#include "codec_par.h"
+
+#define IS_EMPTY(pkt) (!(pkt)->data && !(pkt)->side_data_elems)
+
+static av_always_inline const FFBitStreamFilter *ff_bsf(const AVBitStreamFilter *bsf)
+{
+ return (const FFBitStreamFilter*)bsf;
+}
-struct AVBSFInternal {
+typedef struct FFBSFContext {
+ AVBSFContext pub;
AVPacket *buffer_pkt;
int eof;
-};
+} FFBSFContext;
+
+static av_always_inline FFBSFContext *ffbsfcontext(AVBSFContext *ctx)
+{
+ return (FFBSFContext *)ctx;
+}
void av_bsf_free(AVBSFContext **pctx)
{
AVBSFContext *ctx;
+ FFBSFContext *bsfi;
if (!pctx || !*pctx)
return;
- ctx = *pctx;
-
- if (ctx->filter->close)
- ctx->filter->close(ctx);
- if (ctx->filter->priv_class && ctx->priv_data)
- av_opt_free(ctx->priv_data);
-
- av_opt_free(ctx);
-
- if (ctx->internal)
- av_packet_free(&ctx->internal->buffer_pkt);
- av_freep(&ctx->internal);
- av_freep(&ctx->priv_data);
+ ctx = *pctx;
+ bsfi = ffbsfcontext(ctx);
+
+ if (ctx->priv_data) {
+ if (ff_bsf(ctx->filter)->close)
+ ff_bsf(ctx->filter)->close(ctx);
+ if (ctx->filter->priv_class)
+ av_opt_free(ctx->priv_data);
+ av_freep(&ctx->priv_data);
+ }
+ av_packet_free(&bsfi->buffer_pkt);
avcodec_parameters_free(&ctx->par_in);
avcodec_parameters_free(&ctx->par_out);
@@ -66,12 +83,18 @@ static void *bsf_child_next(void *obj, void *prev)
return NULL;
}
+static const char *bsf_to_name(void *bsf)
+{
+ return ((AVBSFContext *)bsf)->filter->name;
+}
+
static const AVClass bsf_class = {
.class_name = "AVBSFContext",
- .item_name = av_default_item_name,
+ .item_name = bsf_to_name,
.version = LIBAVUTIL_VERSION_INT,
.child_next = bsf_child_next,
- .child_class_next = ff_bsf_child_class_next,
+ .child_class_iterate = ff_bsf_child_class_iterate,
+ .category = AV_CLASS_CATEGORY_BITSTREAM_FILTER,
};
const AVClass *av_bsf_get_class(void)
@@ -82,11 +105,13 @@ const AVClass *av_bsf_get_class(void)
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
{
AVBSFContext *ctx;
+ FFBSFContext *bsfi;
int ret;
- ctx = av_mallocz(sizeof(*ctx));
- if (!ctx)
+ bsfi = av_mallocz(sizeof(*bsfi));
+ if (!bsfi)
return AVERROR(ENOMEM);
+ ctx = &bsfi->pub;
ctx->av_class = &bsf_class;
ctx->filter = filter;
@@ -97,24 +122,9 @@ int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
ret = AVERROR(ENOMEM);
goto fail;
}
-
- ctx->internal = av_mallocz(sizeof(*ctx->internal));
- if (!ctx->internal) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
-
- ctx->internal->buffer_pkt = av_packet_alloc();
- if (!ctx->internal->buffer_pkt) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
-
- av_opt_set_defaults(ctx);
-
/* allocate priv data and init private options */
- if (filter->priv_data_size) {
- ctx->priv_data = av_mallocz(filter->priv_data_size);
+ if (ff_bsf(filter)->priv_data_size) {
+ ctx->priv_data = av_mallocz(ff_bsf(filter)->priv_data_size);
if (!ctx->priv_data) {
ret = AVERROR(ENOMEM);
goto fail;
@@ -124,6 +134,11 @@ int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
av_opt_set_defaults(ctx->priv_data);
}
}
+ bsfi->buffer_pkt = av_packet_alloc();
+ if (!bsfi->buffer_pkt) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
*pctx = ctx;
return 0;
@@ -147,9 +162,9 @@ int av_bsf_init(AVBSFContext *ctx)
"bitstream filter '%s'. Supported codecs are: ",
desc ? desc->name : "unknown", ctx->par_in->codec_id, ctx->filter->name);
for (i = 0; ctx->filter->codec_ids[i] != AV_CODEC_ID_NONE; i++) {
- desc = avcodec_descriptor_get(ctx->filter->codec_ids[i]);
+ enum AVCodecID codec_id = ctx->filter->codec_ids[i];
av_log(ctx, AV_LOG_ERROR, "%s (%d) ",
- desc ? desc->name : "unknown", ctx->filter->codec_ids[i]);
+ avcodec_get_name(codec_id), codec_id);
}
av_log(ctx, AV_LOG_ERROR, "\n");
return AVERROR(EINVAL);
@@ -164,8 +179,8 @@ int av_bsf_init(AVBSFContext *ctx)
ctx->time_base_out = ctx->time_base_in;
- if (ctx->filter->init) {
- ret = ctx->filter->init(ctx);
+ if (ff_bsf(ctx->filter)->init) {
+ ret = ff_bsf(ctx->filter)->init(ctx);
if (ret < 0)
return ret;
}
@@ -175,79 +190,81 @@ int av_bsf_init(AVBSFContext *ctx)
void av_bsf_flush(AVBSFContext *ctx)
{
- ctx->internal->eof = 0;
+ FFBSFContext *const bsfi = ffbsfcontext(ctx);
- av_packet_unref(ctx->internal->buffer_pkt);
+ bsfi->eof = 0;
- if (ctx->filter->flush)
- ctx->filter->flush(ctx);
+ av_packet_unref(bsfi->buffer_pkt);
+
+ if (ff_bsf(ctx->filter)->flush)
+ ff_bsf(ctx->filter)->flush(ctx);
}
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
{
+ FFBSFContext *const bsfi = ffbsfcontext(ctx);
int ret;
- if (!pkt || (!pkt->data && !pkt->side_data_elems)) {
- ctx->internal->eof = 1;
+ if (!pkt || IS_EMPTY(pkt)) {
+ if (pkt)
+ av_packet_unref(pkt);
+ bsfi->eof = 1;
return 0;
}
- if (ctx->internal->eof) {
+ if (bsfi->eof) {
av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n");
return AVERROR(EINVAL);
}
- if (ctx->internal->buffer_pkt->data ||
- ctx->internal->buffer_pkt->side_data_elems)
+ if (!IS_EMPTY(bsfi->buffer_pkt))
return AVERROR(EAGAIN);
ret = av_packet_make_refcounted(pkt);
if (ret < 0)
return ret;
- av_packet_move_ref(ctx->internal->buffer_pkt, pkt);
+ av_packet_move_ref(bsfi->buffer_pkt, pkt);
return 0;
}
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
{
- return ctx->filter->filter(ctx, pkt);
+ return ff_bsf(ctx->filter)->filter(ctx, pkt);
}
int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
{
- AVBSFInternal *in = ctx->internal;
+ FFBSFContext *const bsfi = ffbsfcontext(ctx);
AVPacket *tmp_pkt;
- if (in->eof)
+ if (bsfi->eof)
return AVERROR_EOF;
- if (!ctx->internal->buffer_pkt->data &&
- !ctx->internal->buffer_pkt->side_data_elems)
+ if (IS_EMPTY(bsfi->buffer_pkt))
return AVERROR(EAGAIN);
tmp_pkt = av_packet_alloc();
if (!tmp_pkt)
return AVERROR(ENOMEM);
- *pkt = ctx->internal->buffer_pkt;
- ctx->internal->buffer_pkt = tmp_pkt;
+ *pkt = bsfi->buffer_pkt;
+ bsfi->buffer_pkt = tmp_pkt;
return 0;
}
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
{
- AVBSFInternal *in = ctx->internal;
+ FFBSFContext *const bsfi = ffbsfcontext(ctx);
- if (in->eof)
+ if (bsfi->eof)
return AVERROR_EOF;
- if (!ctx->internal->buffer_pkt->data &&
- !ctx->internal->buffer_pkt->side_data_elems)
+ if (IS_EMPTY(bsfi->buffer_pkt))
return AVERROR(EAGAIN);
- av_packet_move_ref(pkt, ctx->internal->buffer_pkt);
+ av_packet_move_ref(pkt, bsfi->buffer_pkt);
return 0;
}
@@ -259,7 +276,6 @@ typedef struct BSFListContext {
int nb_bsfs;
unsigned idx; // index of currently processed BSF
- unsigned flushed_idx; // index of BSF being flushed
char * item_name;
} BSFListContext;
@@ -297,58 +313,43 @@ fail:
static int bsf_list_filter(AVBSFContext *bsf, AVPacket *out)
{
BSFListContext *lst = bsf->priv_data;
- int ret;
+ int ret, eof = 0;
if (!lst->nb_bsfs)
return ff_bsf_get_packet_ref(bsf, out);
while (1) {
- if (lst->idx > lst->flushed_idx) {
+ /* get a packet from the previous filter up the chain */
+ if (lst->idx)
ret = av_bsf_receive_packet(lst->bsfs[lst->idx-1], out);
- if (ret == AVERROR(EAGAIN)) {
- /* no more packets from idx-1, try with previous */
- ret = 0;
- lst->idx--;
- continue;
- } else if (ret == AVERROR_EOF) {
- /* filter idx-1 is done, continue with idx...nb_bsfs */
- lst->flushed_idx = lst->idx;
- continue;
- }else if (ret < 0) {
- /* filtering error */
- break;
- }
- } else {
+ else
ret = ff_bsf_get_packet_ref(bsf, out);
- if (ret == AVERROR_EOF) {
- lst->idx = lst->flushed_idx;
- } else if (ret < 0)
- break;
- }
+ if (ret == AVERROR(EAGAIN)) {
+ if (!lst->idx)
+ return ret;
+ lst->idx--;
+ continue;
+ } else if (ret == AVERROR_EOF) {
+ eof = 1;
+ } else if (ret < 0)
+ return ret;
+ /* send it to the next filter down the chain */
if (lst->idx < lst->nb_bsfs) {
- AVPacket *pkt;
- if (ret == AVERROR_EOF && lst->idx == lst->flushed_idx) {
- /* ff_bsf_get_packet_ref returned EOF and idx is first
- * filter of yet not flushed filter chain */
- pkt = NULL;
- } else {
- pkt = out;
+ ret = av_bsf_send_packet(lst->bsfs[lst->idx], eof ? NULL : out);
+ av_assert1(ret != AVERROR(EAGAIN));
+ if (ret < 0) {
+ av_packet_unref(out);
+ return ret;
}
- ret = av_bsf_send_packet(lst->bsfs[lst->idx], pkt);
- if (ret < 0)
- break;
lst->idx++;
+ eof = 0;
+ } else if (eof) {
+ return ret;
} else {
- /* The end of filter chain, break to return result */
- break;
+ return 0;
}
}
-
- if (ret < 0)
- av_packet_unref(out);
-
- return ret;
}
static void bsf_list_flush(AVBSFContext *bsf)
@@ -357,7 +358,7 @@ static void bsf_list_flush(AVBSFContext *bsf)
for (int i = 0; i < lst->nb_bsfs; i++)
av_bsf_flush(lst->bsfs[i]);
- lst->idx = lst->flushed_idx = 0;
+ lst->idx = 0;
}
static void bsf_list_close(AVBSFContext *bsf)
@@ -402,10 +403,10 @@ static const AVClass bsf_list_class = {
.version = LIBAVUTIL_VERSION_INT,
};
-const AVBitStreamFilter ff_list_bsf = {
- .name = "bsf_list",
+static const FFBitStreamFilter list_bsf = {
+ .p.name = "bsf_list",
+ .p.priv_class = &bsf_list_class,
.priv_data_size = sizeof(BSFListContext),
- .priv_class = &bsf_list_class,
.init = bsf_list_init,
.filter = bsf_list_filter,
.flush = bsf_list_flush,
@@ -440,7 +441,7 @@ int av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf)
return av_dynarray_add_nofree(&lst->bsfs, &lst->nb_bsfs, bsf);
}
-int av_bsf_list_append2(AVBSFList *lst, const char *bsf_name, AVDictionary ** options)
+static int bsf_list_append_internal(AVBSFList *lst, const char *bsf_name, const char *options, AVDictionary ** options_dict)
{
int ret;
const AVBitStreamFilter *filter;
@@ -454,8 +455,20 @@ int av_bsf_list_append2(AVBSFList *lst, const char *bsf_name, AVDictionary ** op
if (ret < 0)
return ret;
- if (options) {
- ret = av_opt_set_dict2(bsf, options, AV_OPT_SEARCH_CHILDREN);
+ if (options && filter->priv_class) {
+ const AVOption *opt = av_opt_next(bsf->priv_data, NULL);
+ const char * shorthand[2] = {NULL};
+
+ if (opt)
+ shorthand[0] = opt->name;
+
+ ret = av_opt_set_from_string(bsf->priv_data, options, shorthand, "=", ":");
+ if (ret < 0)
+ goto end;
+ }
+
+ if (options_dict) {
+ ret = av_opt_set_dict2(bsf, options_dict, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
goto end;
}
@@ -469,6 +482,11 @@ end:
return ret;
}
+int av_bsf_list_append2(AVBSFList *lst, const char *bsf_name, AVDictionary ** options)
+{
+ return bsf_list_append_internal(lst, bsf_name, NULL, options);
+}
+
int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf)
{
int ret = 0;
@@ -481,7 +499,7 @@ int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf)
goto end;
}
- ret = av_bsf_alloc(&ff_list_bsf, bsf);
+ ret = av_bsf_alloc(&list_bsf.p, bsf);
if (ret < 0)
return ret;
@@ -495,39 +513,20 @@ end:
return ret;
}
-static int bsf_parse_single(const char *str, AVBSFList *bsf_lst)
+static int bsf_parse_single(char *str, AVBSFList *bsf_lst)
{
- char *bsf_name, *bsf_options_str, *buf;
- AVDictionary *bsf_options = NULL;
- int ret = 0;
-
- if (!(buf = av_strdup(str)))
- return AVERROR(ENOMEM);
+ char *bsf_name, *bsf_options_str;
- bsf_name = av_strtok(buf, "=", &bsf_options_str);
- if (!bsf_name) {
- ret = AVERROR(EINVAL);
- goto end;
- }
-
- if (bsf_options_str) {
- ret = av_dict_parse_string(&bsf_options, bsf_options_str, "=", ":", 0);
- if (ret < 0)
- goto end;
- }
-
- ret = av_bsf_list_append2(bsf_lst, bsf_name, &bsf_options);
+ bsf_name = av_strtok(str, "=", &bsf_options_str);
+ if (!bsf_name)
+ return AVERROR(EINVAL);
- av_dict_free(&bsf_options);
-end:
- av_free(buf);
- return ret;
+ return bsf_list_append_internal(bsf_lst, bsf_name, bsf_options_str, NULL);
}
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
{
AVBSFList *lst;
- char *bsf_str, *buf, *dup, *saveptr;
int ret;
if (!str)
@@ -537,32 +536,27 @@ int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
if (!lst)
return AVERROR(ENOMEM);
- if (!(dup = buf = av_strdup(str))) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- while (1) {
- bsf_str = av_strtok(buf, ",", &saveptr);
- if (!bsf_str)
- break;
-
+ do {
+ char *bsf_str = av_get_token(&str, ",");
ret = bsf_parse_single(bsf_str, lst);
+ av_free(bsf_str);
if (ret < 0)
goto end;
-
- buf = NULL;
- }
+ } while (*str && *++str);
ret = av_bsf_list_finalize(&lst, bsf_lst);
end:
if (ret < 0)
av_bsf_list_free(&lst);
- av_free(dup);
return ret;
}
int av_bsf_get_null_filter(AVBSFContext **bsf)
{
- return av_bsf_alloc(&ff_list_bsf, bsf);
+#if CONFIG_NULL_BSF
+ extern const FFBitStreamFilter ff_null_bsf;
+ return av_bsf_alloc(&ff_null_bsf.p, bsf);
+#else
+ return av_bsf_alloc(&list_bsf.p, bsf);
+#endif
}
diff --git a/media/ffvpx/libavcodec/bsf.h b/media/ffvpx/libavcodec/bsf.h
index af035eee44..a09c69f242 100644
--- a/media/ffvpx/libavcodec/bsf.h
+++ b/media/ffvpx/libavcodec/bsf.h
@@ -1,4 +1,6 @@
/*
+ * Bitstream filters public API
+ *
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
@@ -19,26 +21,312 @@
#ifndef AVCODEC_BSF_H
#define AVCODEC_BSF_H
-#include "avcodec.h"
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+#include "libavutil/rational.h"
+
+#include "codec_id.h"
+#include "codec_par.h"
+#include "packet.h"
+
+/**
+ * @defgroup lavc_bsf Bitstream filters
+ * @ingroup libavc
+ *
+ * Bitstream filters transform encoded media data without decoding it. This
+ * allows e.g. manipulating various header values. Bitstream filters operate on
+ * @ref AVPacket "AVPackets".
+ *
+ * The bitstream filtering API is centered around two structures:
+ * AVBitStreamFilter and AVBSFContext. The former represents a bitstream filter
+ * in abstract, the latter a specific filtering process. Obtain an
+ * AVBitStreamFilter using av_bsf_get_by_name() or av_bsf_iterate(), then pass
+ * it to av_bsf_alloc() to create an AVBSFContext. Fill in the user-settable
+ * AVBSFContext fields, as described in its documentation, then call
+ * av_bsf_init() to prepare the filter context for use.
+ *
+ * Submit packets for filtering using av_bsf_send_packet(), obtain filtered
+ * results with av_bsf_receive_packet(). When no more input packets will be
+ * sent, submit a NULL AVPacket to signal the end of the stream to the filter.
+ * av_bsf_receive_packet() will then return trailing packets, if any are
+ * produced by the filter.
+ *
+ * Finally, free the filter context with av_bsf_free().
+ * @{
+ */
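As a minimal sketch of the workflow described above (error handling is collapsed and the helper name is an assumption; "null" is used purely as an example filter name):

    #include "libavcodec/bsf.h"
    #include "libavutil/error.h"

    /* Sketch only: look up a filter by name, fill the user-settable fields
     * of the context, then prepare it with av_bsf_init(). */
    static int open_bsf(const char *name, const AVCodecParameters *par,
                        AVRational time_base, AVBSFContext **out)
    {
        const AVBitStreamFilter *f = av_bsf_get_by_name(name); /* e.g. "null" */
        AVBSFContext *ctx = NULL;
        int ret;

        if (!f)
            return AVERROR_BSF_NOT_FOUND;
        if ((ret = av_bsf_alloc(f, &ctx)) < 0)
            return ret;
        if ((ret = avcodec_parameters_copy(ctx->par_in, par)) < 0)
            goto fail;
        ctx->time_base_in = time_base;
        if ((ret = av_bsf_init(ctx)) < 0)
            goto fail;
        *out = ctx;
        return 0;
    fail:
        av_bsf_free(&ctx);
        return ret;
    }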
+
+/**
+ * The bitstream filter state.
+ *
+ * This struct must be allocated with av_bsf_alloc() and freed with
+ * av_bsf_free().
+ *
+ * The fields in the struct will only be changed (by the caller or by the
+ * filter) as described in their documentation, and are to be considered
+ * immutable otherwise.
+ */
+typedef struct AVBSFContext {
+ /**
+ * A class for logging and AVOptions
+ */
+ const AVClass *av_class;
+
+ /**
+ * The bitstream filter this context is an instance of.
+ */
+ const struct AVBitStreamFilter *filter;
+
+ /**
+ * Opaque filter-specific private data. If filter->priv_class is non-NULL,
+ * this is an AVOptions-enabled struct.
+ */
+ void *priv_data;
+
+ /**
+ * Parameters of the input stream. This field is allocated in
+ * av_bsf_alloc(), it needs to be filled by the caller before
+ * av_bsf_init().
+ */
+ AVCodecParameters *par_in;
+
+ /**
+ * Parameters of the output stream. This field is allocated in
+ * av_bsf_alloc(), it is set by the filter in av_bsf_init().
+ */
+ AVCodecParameters *par_out;
+
+ /**
+ * The timebase used for the timestamps of the input packets. Set by the
+ * caller before av_bsf_init().
+ */
+ AVRational time_base_in;
+
+ /**
+ * The timebase used for the timestamps of the output packets. Set by the
+ * filter in av_bsf_init().
+ */
+ AVRational time_base_out;
+} AVBSFContext;
+
+typedef struct AVBitStreamFilter {
+ const char *name;
+
+ /**
+ * A list of codec ids supported by the filter, terminated by
+ * AV_CODEC_ID_NONE.
+ * May be NULL, in that case the bitstream filter works with any codec id.
+ */
+ const enum AVCodecID *codec_ids;
+
+ /**
+ * A class for the private data, used to declare bitstream filter private
+ * AVOptions. This field is NULL for bitstream filters that do not declare
+ * any options.
+ *
+ * If this field is non-NULL, the first member of the filter private data
+ * must be a pointer to AVClass, which will be set by libavcodec generic
+ * code to this class.
+ */
+ const AVClass *priv_class;
+} AVBitStreamFilter;
+
+/**
+ * @return a bitstream filter with the specified name or NULL if no such
+ * bitstream filter exists.
+ */
+const AVBitStreamFilter *av_bsf_get_by_name(const char *name);
+
+/**
+ * Iterate over all registered bitstream filters.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered bitstream filter or NULL when the iteration is
+ * finished
+ */
+const AVBitStreamFilter *av_bsf_iterate(void **opaque);
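A short sketch of the iteration pattern this declares (printing the names is only an example use):

    #include <stdio.h>
    #include "libavcodec/bsf.h"

    /* Sketch: enumerate all registered bitstream filters. */
    static void list_bitstream_filters(void)
    {
        void *it = NULL;               /* iteration state, must start as NULL */
        const AVBitStreamFilter *f;
        while ((f = av_bsf_iterate(&it)))
            printf("%s\n", f->name);
    }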
+
+/**
+ * Allocate a context for a given bitstream filter. The caller must fill in the
+ * context parameters as described in the documentation and then call
+ * av_bsf_init() before sending any data to the filter.
+ *
+ * @param filter the filter for which to allocate an instance.
+ * @param[out] ctx a pointer into which the pointer to the newly-allocated context
+ * will be written. It must be freed with av_bsf_free() after the
+ * filtering is done.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx);
+
+/**
+ * Prepare the filter for use, after all the parameters and options have been
+ * set.
+ *
+ * @param ctx an AVBSFContext previously allocated with av_bsf_alloc()
+ */
+int av_bsf_init(AVBSFContext *ctx);
+
+/**
+ * Submit a packet for filtering.
+ *
+ * After sending each packet, the filter must be completely drained by calling
+ * av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or
+ * AVERROR_EOF.
+ *
+ * @param ctx an initialized AVBSFContext
+ * @param pkt the packet to filter. The bitstream filter will take ownership of
+ * the packet and reset the contents of pkt. pkt is not touched if an error occurs.
+ * If pkt is empty (i.e. NULL, or pkt->data is NULL and pkt->side_data_elems zero),
+ * it signals the end of the stream (i.e. no more non-empty packets will be sent;
+ * sending more empty packets does nothing) and will cause the filter to output
+ * any packets it may have buffered internally.
+ *
+ * @return
+ * - 0 on success.
+ * - AVERROR(EAGAIN) if packets need to be retrieved from the filter (using
+ * av_bsf_receive_packet()) before new input can be consumed.
+ * - Another negative AVERROR value if an error occurs.
+ */
+int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);
+
+/**
+ * Retrieve a filtered packet.
+ *
+ * @param ctx an initialized AVBSFContext
+ * @param[out] pkt this struct will be filled with the contents of the filtered
+ * packet. It is owned by the caller and must be freed using
+ * av_packet_unref() when it is no longer needed.
+ * This parameter should be "clean" (i.e. freshly allocated
+ * with av_packet_alloc() or unreffed with av_packet_unref())
+ * when this function is called. If this function returns
+ * successfully, the contents of pkt will be completely
+ * overwritten by the returned data. On failure, pkt is not
+ * touched.
+ *
+ * @return
+ * - 0 on success.
+ * - AVERROR(EAGAIN) if more packets need to be sent to the filter (using
+ * av_bsf_send_packet()) to get more output.
+ * - AVERROR_EOF if there will be no further output from the filter.
+ * - Another negative AVERROR value if an error occurs.
+ *
+ * @note one input packet may result in several output packets, so after sending
+ * a packet with av_bsf_send_packet(), this function needs to be called
+ * repeatedly until it stops returning 0. It is also possible for a filter to
+ * output fewer packets than were sent to it, so this function may return
+ * AVERROR(EAGAIN) immediately after a successful av_bsf_send_packet() call.
+ */
+int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt);
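A hedged sketch of the send/drain pattern these two functions imply; the helper name and the way the output packet is consumed are assumptions:

    #include "libavcodec/bsf.h"
    #include "libavutil/error.h"

    /* Sketch: feed one packet (or NULL at EOF) and drain all output.
     * AVERROR(EAGAIN) means "send more input"; AVERROR_EOF means the
     * filter is fully drained. */
    static int filter_and_drain(AVBSFContext *bsf, AVPacket *in, AVPacket *out)
    {
        int ret = av_bsf_send_packet(bsf, in);   /* in == NULL signals EOF */
        if (ret < 0)
            return ret;
        while ((ret = av_bsf_receive_packet(bsf, out)) >= 0) {
            /* ... consume the filtered packet here ... */
            av_packet_unref(out);
        }
        return ret == AVERROR(EAGAIN) ? 0 : ret;
    }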
+
+/**
+ * Reset the internal bitstream filter state. Should be called e.g. when seeking.
+ */
+void av_bsf_flush(AVBSFContext *ctx);
+
+/**
+ * Free a bitstream filter context and everything associated with it; write NULL
+ * into the supplied pointer.
+ */
+void av_bsf_free(AVBSFContext **ctx);
+
+/**
+ * Get the AVClass for AVBSFContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *av_bsf_get_class(void);
+
+/**
+ * Structure for a chain/list of bitstream filters.
+ * An empty list can be allocated with av_bsf_list_alloc().
+ */
+typedef struct AVBSFList AVBSFList;
+
+/**
+ * Allocate empty list of bitstream filters.
+ * The list must be later freed by av_bsf_list_free()
+ * or finalized by av_bsf_list_finalize().
+ *
+ * @return Pointer to @ref AVBSFList on success, NULL in case of failure
+ */
+AVBSFList *av_bsf_list_alloc(void);
+
+/**
+ * Free list of bitstream filters.
+ *
+ * @param lst Pointer to pointer returned by av_bsf_list_alloc()
+ */
+void av_bsf_list_free(AVBSFList **lst);
/**
- * Called by the bitstream filters to get the next packet for filtering.
- * The filter is responsible for either freeing the packet or passing it to the
- * caller.
+ * Append bitstream filter to the list of bitstream filters.
+ *
+ * @param lst List to append to
+ * @param bsf Filter context to be appended
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
*/
-int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt);
+int av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf);
/**
- * Called by bitstream filters to get packet for filtering.
- * The reference to packet is moved to provided packet structure.
+ * Construct a new bitstream filter context given its name and options
+ * and append it to the list of bitstream filters.
*
- * @param ctx pointer to AVBSFContext of filter
- * @param pkt pointer to packet to move reference to
+ * @param lst List to append to
+ * @param bsf_name Name of the bitstream filter
+ * @param options Options for the bitstream filter, can be set to NULL
*
- * @return 0>= on success, negative AVERROR in case of failure
+ * @return >=0 on success, negative AVERROR in case of failure
*/
-int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt);
+int av_bsf_list_append2(AVBSFList *lst, const char * bsf_name, AVDictionary **options);
+/**
+ * Finalize list of bitstream filters.
+ *
+ * This function will transform the @ref AVBSFList into a single @ref AVBSFContext,
+ * so the whole chain of bitstream filters can be treated as a single filter
+ * freshly allocated by av_bsf_alloc().
+ * If the call is successful, the @ref AVBSFList structure is freed and lst
+ * will be set to NULL. In case of failure, the caller is responsible for
+ * freeing the structure with av_bsf_list_free().
+ *
+ * @param lst Filter list structure to be transformed
+ * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure
+ * representing the chain of bitstream filters
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf);
-const AVClass *ff_bsf_child_class_next(const AVClass *prev);
+/**
+ * Parse a string describing a list of bitstream filters and create a single
+ * @ref AVBSFContext describing the whole chain of bitstream filters.
+ * The resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext freshly
+ * allocated by av_bsf_alloc().
+ *
+ * @param str String describing chain of bitstream filters in format
+ * `bsf1[=opt1=val1:opt2=val2][,bsf2]`
+ * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure
+ * representing the chain of bitstream filters
+ *
+ * @return >=0 on success, negative AVERROR in case of failure
+ */
+int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf);
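As a sketch of the string form documented above; the two filter names are the ones compiled into this tree (see bsf_list.c below), and the helper is illustrative only:

    #include "libavcodec/bsf.h"

    /* Sketch: build a chain from a string. The resulting context still
     * needs par_in/time_base_in filled and av_bsf_init() called, exactly
     * like a context from av_bsf_alloc(). */
    static int open_chain(AVBSFContext **chain)
    {
        return av_bsf_list_parse_str("vp9_superframe_split,null", chain);
    }

The same chain could be built programmatically with av_bsf_list_alloc(), av_bsf_list_append2() and av_bsf_list_finalize().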
+
+/**
+ * Get null/pass-through bitstream filter.
+ *
+ * @param[out] bsf Pointer to be set to new instance of pass-through bitstream filter
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+int av_bsf_get_null_filter(AVBSFContext **bsf);
+
+/**
+ * @}
+ */
-#endif /* AVCODEC_BSF_H */
+#endif // AVCODEC_BSF_H
diff --git a/media/ffvpx/libavcodec/bsf_internal.h b/media/ffvpx/libavcodec/bsf_internal.h
new file mode 100644
index 0000000000..922b03c01b
--- /dev/null
+++ b/media/ffvpx/libavcodec/bsf_internal.h
@@ -0,0 +1,60 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_BSF_INTERNAL_H
+#define AVCODEC_BSF_INTERNAL_H
+
+#include "libavutil/log.h"
+
+#include "bsf.h"
+#include "packet.h"
+
+typedef struct FFBitStreamFilter {
+ /**
+ * The public AVBitStreamFilter. See bsf.h for it.
+ */
+ AVBitStreamFilter p;
+
+ int priv_data_size;
+ int (*init)(AVBSFContext *ctx);
+ int (*filter)(AVBSFContext *ctx, AVPacket *pkt);
+ void (*close)(AVBSFContext *ctx);
+ void (*flush)(AVBSFContext *ctx);
+} FFBitStreamFilter;
+
+/**
+ * Called by the bitstream filters to get the next packet for filtering.
+ * The filter is responsible for either freeing the packet or passing it to the
+ * caller.
+ */
+int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt);
+
+/**
+ * Called by bitstream filters to get packet for filtering.
+ * The reference to packet is moved to provided packet structure.
+ *
+ * @param ctx pointer to AVBSFContext of filter
+ * @param pkt pointer to packet to move reference to
+ *
+ * @return 0 on success, negative AVERROR in case of failure
+ */
+int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt);
+
+const AVClass *ff_bsf_child_class_iterate(void **opaque);
+
+#endif /* AVCODEC_BSF_INTERNAL_H */
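A hedged sketch of how an implementation fills this struct, modeled on the pass-through null/list filters in this tree; the filter name "example" is hypothetical:

    #include "libavcodec/bsf_internal.h"

    /* Hypothetical pass-through filter: hand each buffered input packet
     * straight back to the caller via ff_bsf_get_packet_ref(). */
    static int example_filter(AVBSFContext *ctx, AVPacket *pkt)
    {
        return ff_bsf_get_packet_ref(ctx, pkt);
    }

    static const FFBitStreamFilter ff_example_bsf = {
        .p.name = "example",
        .filter = example_filter,
    };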
diff --git a/media/ffvpx/libavcodec/bsf_list.c b/media/ffvpx/libavcodec/bsf_list.c
index 92d9948b29..4a687dc07f 100644
--- a/media/ffvpx/libavcodec/bsf_list.c
+++ b/media/ffvpx/libavcodec/bsf_list.c
@@ -1,4 +1,6 @@
-static const AVBitStreamFilter * const bitstream_filters[] = {
+#include "config_common.h"
+
+static const FFBitStreamFilter * const bitstream_filters[] = {
&ff_null_bsf,
#if CONFIG_VP9_SUPERFRAME_SPLIT_BSF
&ff_vp9_superframe_split_bsf,
diff --git a/media/ffvpx/libavcodec/bytestream.h b/media/ffvpx/libavcodec/bytestream.h
index 7be7fc22fc..d0033f14f3 100644
--- a/media/ffvpx/libavcodec/bytestream.h
+++ b/media/ffvpx/libavcodec/bytestream.h
@@ -77,11 +77,15 @@ static av_always_inline type bytestream2_get_ ## name(GetByteContext *g) \
} \
return bytestream2_get_ ## name ## u(g); \
} \
+static av_always_inline type bytestream2_peek_ ## name ## u(GetByteContext *g) \
+{ \
+ return read(g->buffer); \
+} \
static av_always_inline type bytestream2_peek_ ## name(GetByteContext *g) \
{ \
if (g->buffer_end - g->buffer < bytes) \
return 0; \
- return read(g->buffer); \
+ return bytestream2_peek_ ## name ## u(g); \
}
DEF(uint64_t, le64, 8, AV_RL64, AV_WL64)
@@ -151,12 +155,12 @@ static av_always_inline void bytestream2_init_writer(PutByteContext *p,
p->eof = 0;
}
-static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
+static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
{
return g->buffer_end - g->buffer;
}
-static av_always_inline unsigned int bytestream2_get_bytes_left_p(PutByteContext *p)
+static av_always_inline int bytestream2_get_bytes_left_p(PutByteContext *p)
{
return p->buffer_end - p->buffer;
}
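For context, a small sketch of the GetByteContext reader this header provides; the function and its purpose are illustrative only:

    #include "libavcodec/bytestream.h"

    /* Sketch: bytestream2_init() sets up the reader, the checked get
     * variants return 0 once fewer than the requested bytes remain, and
     * bytestream2_get_bytes_left() reports the remaining byte count. */
    static int count_zero_words(const uint8_t *buf, int size)
    {
        GetByteContext gb;
        int n = 0;

        bytestream2_init(&gb, buf, size);
        while (bytestream2_get_bytes_left(&gb) >= 4)
            if (bytestream2_get_le32(&gb) == 0)
                n++;
        return n;
    }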
diff --git a/media/ffvpx/libavcodec/codec.h b/media/ffvpx/libavcodec/codec.h
new file mode 100644
index 0000000000..3b1995bcfe
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec.h
@@ -0,0 +1,375 @@
+/*
+ * AVCodec public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_H
+#define AVCODEC_CODEC_H
+
+#include <stdint.h>
+
+#include "libavutil/avutil.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+#include "libavutil/samplefmt.h"
+
+#include "libavcodec/codec_id.h"
+#include "libavcodec/version_major.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * Decoder can use draw_horiz_band callback.
+ */
+#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
+/**
+ * Codec uses get_buffer() or get_encode_buffer() for allocating buffers and
+ * supports custom allocators.
+ * If not set, it might not use get_buffer() or get_encode_buffer() at all, or
+ * use operations that assume the buffer was allocated by
+ * avcodec_default_get_buffer2 or avcodec_default_get_encode_buffer.
+ */
+#define AV_CODEC_CAP_DR1 (1 << 1)
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed with
+ * NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+#define AV_CODEC_CAP_DELAY (1 << 5)
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
+
+/**
+ * Codec can output multiple frames per AVPacket
+ * Normally demuxers return one frame at a time, demuxers which do not do
+ * are connected to a parser to split what they return into proper frames.
+ * This flag is reserved to the very rare category of codecs which have a
+ * bitstream that cannot be split into frames without timeconsuming
+ * operations like full decoding. Demuxers carrying such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages like
+ * prohibiting stream copy in many cases thus it should only be considered
+ * as a last resort.
+ */
+#define AV_CODEC_CAP_SUBFRAMES (1 << 8)
+/**
+ * Codec is experimental and is thus avoided in favor of non-experimental
+ * encoders.
+ */
+#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
+/**
+ * Codec should fill in the channel configuration and sample rate instead of relying on the container.
+ */
+#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
+/**
+ * Codec supports multithreading through a method other than slice- or
+ * frame-level multithreading. Typically this marks wrappers around
+ * multithreading-capable external libraries.
+ */
+#define AV_CODEC_CAP_OTHER_THREADS (1 << 15)
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
+/**
+ * Decoder is not a preferred choice for probing.
+ * It could, for example, be an expensive-to-spin-up hardware decoder,
+ * or it could simply not provide much useful information about
+ * the stream.
+ * A decoder marked with this flag should only be used as a last-resort
+ * choice for probing.
+ */
+#define AV_CODEC_CAP_AVOID_PROBING (1 << 17)
+
+/**
+ * Codec is backed by a hardware implementation. Typically used to
+ * identify a non-hwaccel hardware decoder. For information about hwaccels, use
+ * avcodec_get_hw_config() instead.
+ */
+#define AV_CODEC_CAP_HARDWARE (1 << 18)
+
+/**
+ * Codec is potentially backed by a hardware implementation, but not
+ * necessarily. This is used instead of AV_CODEC_CAP_HARDWARE, if the
+ * implementation provides some sort of internal fallback.
+ */
+#define AV_CODEC_CAP_HYBRID (1 << 19)
+
+/**
+ * This encoder can reorder user opaque values from input AVFrames and return
+ * them with corresponding output packets.
+ * @see AV_CODEC_FLAG_COPY_OPAQUE
+ */
+#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20)
+
+/**
+ * This encoder can be flushed using avcodec_flush_buffers(). If this flag is
+ * not set, the encoder must be closed and reopened to ensure that no frames
+ * remain pending.
+ */
+#define AV_CODEC_CAP_ENCODER_FLUSH (1 << 21)
+
+/**
+ * The encoder is able to output reconstructed frame data, i.e. raw frames that
+ * would be produced by decoding the encoded bitstream.
+ *
+ * Reconstructed frame output is enabled by the AV_CODEC_FLAG_RECON_FRAME flag.
+ */
+#define AV_CODEC_CAP_ENCODER_RECON_FRAME (1 << 22)
+
+/**
+ * AVProfile.
+ */
+typedef struct AVProfile {
+ int profile;
+ const char *name; ///< short name for the profile
+} AVProfile;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char *name;
+ /**
+ * Descriptive name for the codec, meant to be more human readable than name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char *long_name;
+ enum AVMediaType type;
+ enum AVCodecID id;
+ /**
+ * Codec capabilities.
+ * see AV_CODEC_CAP_*
+ */
+ int capabilities;
+ uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
+ const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+ const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+ const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+ const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * @deprecated use ch_layouts instead
+ */
+ attribute_deprecated
+ const uint64_t *channel_layouts; ///< array of supported channel layouts, or NULL if unknown, array is terminated by 0
+#endif
+ const AVClass *priv_class; ///< AVClass for the private context
+ const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+
+ /**
+ * Group name of the codec implementation.
+ * This is a short symbolic name of the wrapper backing this codec. A
+ * wrapper uses some kind of external implementation for the codec, such
+ * as an external library, or a codec implementation provided by the OS or
+ * the hardware.
+ * If this field is NULL, this is a builtin, libavcodec native codec.
+ * If non-NULL, this will be the suffix in AVCodec.name in most cases
+ * (usually AVCodec.name will be of the form "<codec_name>_<wrapper_name>").
+ */
+ const char *wrapper_name;
+
+ /**
+ * Array of supported channel layouts, terminated with a zeroed layout.
+ */
+ const AVChannelLayout *ch_layouts;
+} AVCodec;
+
+/**
+ * Iterate over all registered codecs.
+ *
+ * @param opaque a pointer where libavcodec will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the next registered codec or NULL when the iteration is
+ * finished
+ */
+const AVCodec *av_codec_iterate(void **opaque);
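A brief sketch of the iteration contract described above (listing decoder names is only an example use):

    #include <stdio.h>
    #include "libavcodec/codec.h"

    /* Sketch: walk the registered codecs and keep only decoders. */
    static void list_decoders(void)
    {
        void *it = NULL;               /* iteration state, must start as NULL */
        const AVCodec *c;
        while ((c = av_codec_iterate(&it)))
            if (av_codec_is_decoder(c))
                printf("%s\n", c->name);
    }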
+
+/**
+ * Find a registered decoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+const AVCodec *avcodec_find_decoder(enum AVCodecID id);
+
+/**
+ * Find a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+const AVCodec *avcodec_find_decoder_by_name(const char *name);
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+const AVCodec *avcodec_find_encoder(enum AVCodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+const AVCodec *avcodec_find_encoder_by_name(const char *name);
+/**
+ * @return a non-zero number if codec is an encoder, zero otherwise
+ */
+int av_codec_is_encoder(const AVCodec *codec);
+
+/**
+ * @return a non-zero number if codec is a decoder, zero otherwise
+ */
+int av_codec_is_decoder(const AVCodec *codec);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char *av_get_profile_name(const AVCodec *codec, int profile);
+
+enum {
+ /**
+ * The codec supports this format via the hw_device_ctx interface.
+ *
+ * When selecting this format, AVCodecContext.hw_device_ctx should
+ * have been set to a device of the specified type before calling
+ * avcodec_open2().
+ */
+ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX = 0x01,
+ /**
+ * The codec supports this format via the hw_frames_ctx interface.
+ *
+ * When selecting this format for a decoder,
+ * AVCodecContext.hw_frames_ctx should be set to a suitable frames
+ * context inside the get_format() callback. The frames context
+ * must have been created on a device of the specified type.
+ *
+ * When selecting this format for an encoder,
+ * AVCodecContext.hw_frames_ctx should be set to the context which
+ * will be used for the input frames before calling avcodec_open2().
+ */
+ AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX = 0x02,
+ /**
+ * The codec supports this format by some internal method.
+ *
+ * This format can be selected without any additional configuration -
+ * no device or frames context is required.
+ */
+ AV_CODEC_HW_CONFIG_METHOD_INTERNAL = 0x04,
+ /**
+ * The codec supports this format by some ad-hoc method.
+ *
+ * Additional settings and/or function calls are required. See the
+ * codec-specific documentation for details. (Methods requiring
+ * this sort of configuration are deprecated and others should be
+ * used in preference.)
+ */
+ AV_CODEC_HW_CONFIG_METHOD_AD_HOC = 0x08,
+};
+
+typedef struct AVCodecHWConfig {
+ /**
+ * For decoders, a hardware pixel format which that decoder may be
+ * able to decode to if suitable hardware is available.
+ *
+ * For encoders, a pixel format which the encoder may be able to
+ * accept. If set to AV_PIX_FMT_NONE, this applies to all pixel
+ * formats supported by the codec.
+ */
+ enum AVPixelFormat pix_fmt;
+ /**
+ * Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible
+ * setup methods which can be used with this configuration.
+ */
+ int methods;
+ /**
+ * The device type associated with the configuration.
+ *
+ * Must be set for AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX and
+ * AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX, otherwise unused.
+ */
+ enum AVHWDeviceType device_type;
+} AVCodecHWConfig;
+
+/**
+ * Retrieve supported hardware configurations for a codec.
+ *
+ * Values of index from zero to some maximum return the indexed configuration
+ * descriptor; all other values return NULL. If the codec does not support
+ * any hardware configurations then it will always return NULL.
+ */
+const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index);
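A hedged sketch of the index-based probing this function allows; the helper name is an assumption:

    #include "libavcodec/codec.h"

    /* Sketch: check whether a decoder advertises hw_device_ctx support for
     * a given device type; NULL marks the end of the configuration list. */
    static int supports_hw_device(const AVCodec *codec, enum AVHWDeviceType type)
    {
        for (int i = 0;; i++) {
            const AVCodecHWConfig *cfg = avcodec_get_hw_config(codec, i);
            if (!cfg)
                return 0;
            if ((cfg->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) &&
                cfg->device_type == type)
                return 1;
        }
    }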
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_CODEC_H */
diff --git a/media/ffvpx/libavcodec/codec_desc.c b/media/ffvpx/libavcodec/codec_desc.c
index 4d033c20ff..199f62df15 100644
--- a/media/ffvpx/libavcodec/codec_desc.c
+++ b/media/ffvpx/libavcodec/codec_desc.c
@@ -19,13 +19,15 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <stdlib.h>
#include <string.h>
-#include "libavutil/common.h"
#include "libavutil/internal.h"
-#include "avcodec.h"
+#include "libavutil/macros.h"
+
+#include "codec_id.h"
+#include "codec_desc.h"
#include "profiles.h"
-#include "version.h"
#define MT(...) (const char *const[]){ __VA_ARGS__, NULL }
@@ -1404,6 +1406,35 @@ static const AVCodecDescriptor codec_descriptors[] = {
.props = AV_CODEC_PROP_LOSSY,
},
{
+ .id = AV_CODEC_ID_PGX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pgx",
+ .long_name = NULL_IF_CONFIG_SMALL("PGX (JPEG2000 Test Format)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_AVS3,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "avs3",
+ .long_name = NULL_IF_CONFIG_SMALL("AVS3-P2/IEEE1857.10"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MSP2,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "msp2",
+ .long_name = NULL_IF_CONFIG_SMALL("Microsoft Paint (MSP) version 2"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_VVC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vvc",
+ .long_name = NULL_IF_CONFIG_SMALL("H.266 / VVC (Versatile Video Coding)"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_vvc_profiles),
+ },
+ {
.id = AV_CODEC_ID_Y41P,
.type = AVMEDIA_TYPE_VIDEO,
.name = "y41p",
@@ -1431,6 +1462,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("Avid Meridien Uncompressed"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
+#if FF_API_AYUV_CODECID
{
.id = AV_CODEC_ID_AYUV,
.type = AVMEDIA_TYPE_VIDEO,
@@ -1438,6 +1470,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed MS 4:4:4:4"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
+#endif
{
.id = AV_CODEC_ID_TARGA_Y216,
.type = AVMEDIA_TYPE_VIDEO,
@@ -1496,6 +1529,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
.id = AV_CODEC_ID_SMVJPEG,
.type = AVMEDIA_TYPE_VIDEO,
.name = "smvjpeg",
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
.long_name = NULL_IF_CONFIG_SMALL("Sigmatel Motion Video"),
},
{
@@ -1517,7 +1551,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
.id = AV_CODEC_ID_CFHD,
.type = AVMEDIA_TYPE_VIDEO,
.name = "cfhd",
- .long_name = NULL_IF_CONFIG_SMALL("Cineform HD"),
+ .long_name = NULL_IF_CONFIG_SMALL("GoPro CineForm HD"),
.props = AV_CODEC_PROP_LOSSY,
},
{
@@ -1726,6 +1760,169 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("On2 VP4"),
.props = AV_CODEC_PROP_LOSSY,
},
+ {
+ .id = AV_CODEC_ID_IMM5,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "imm5",
+ .long_name = NULL_IF_CONFIG_SMALL("Infinity IMM5"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MVDV,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mvdv",
+ .long_name = NULL_IF_CONFIG_SMALL("MidiVid VQ"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MVHA,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mvha",
+ .long_name = NULL_IF_CONFIG_SMALL("MidiVid Archive Codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CDTOONS,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cdtoons",
+ .long_name = NULL_IF_CONFIG_SMALL("CDToons video"),
+ .props = AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MV30,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mv30",
+ .long_name = NULL_IF_CONFIG_SMALL("MidiVid 3.0"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_NOTCHLC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "notchlc",
+ .long_name = NULL_IF_CONFIG_SMALL("NotchLC"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PFM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "pfm",
+ .long_name = NULL_IF_CONFIG_SMALL("PFM (Portable FloatMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MOBICLIP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "mobiclip",
+ .long_name = NULL_IF_CONFIG_SMALL("MobiClip Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PHOTOCD,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "photocd",
+ .long_name = NULL_IF_CONFIG_SMALL("Kodak Photo CD"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_IPU,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "ipu",
+ .long_name = NULL_IF_CONFIG_SMALL("IPU Video"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ARGO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "argo",
+ .long_name = NULL_IF_CONFIG_SMALL("Argonaut Games Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CRI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "cri",
+ .long_name = NULL_IF_CONFIG_SMALL("Cintel RAW"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_SIMBIOSIS_IMX,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "simbiosis_imx",
+ .long_name = NULL_IF_CONFIG_SMALL("Simbiosis Interactive IMX Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SGA_VIDEO,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "sga",
+ .long_name = NULL_IF_CONFIG_SMALL("Digital Pictures SGA Video"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_GEM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "gem",
+ .long_name = NULL_IF_CONFIG_SMALL("GEM Raster image"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VBN,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vbn",
+ .long_name = NULL_IF_CONFIG_SMALL("Vizrt Binary Image"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_JPEGXL,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "jpegxl",
+ .long_name = NULL_IF_CONFIG_SMALL("JPEG XL"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY |
+ AV_CODEC_PROP_LOSSLESS,
+ .mime_types= MT("image/jxl"),
+ },
+ {
+ .id = AV_CODEC_ID_QOI,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "qoi",
+ .long_name = NULL_IF_CONFIG_SMALL("QOI (Quite OK Image)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_PHM,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "phm",
+ .long_name = NULL_IF_CONFIG_SMALL("PHM (Portable HalfFloatMap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_RADIANCE_HDR,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "hdr",
+ .long_name = NULL_IF_CONFIG_SMALL("HDR (Radiance RGBE format) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WBMP,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "wbmp",
+ .long_name = NULL_IF_CONFIG_SMALL("WBMP (Wireless Application Protocol Bitmap) image"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MEDIA100,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "media100",
+ .long_name = NULL_IF_CONFIG_SMALL("Media 100i"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_VQC,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vqc",
+ .long_name = NULL_IF_CONFIG_SMALL("ViewQuest VQC"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
/* various PCM "codecs" */
{
@@ -1733,252 +1930,252 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s16le",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S16BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s16be",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_U16LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_u16le",
.long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 16-bit little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_U16BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_u16be",
.long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 16-bit big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S8,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s8",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_U8,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_u8",
.long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 8-bit"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_MULAW,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_mulaw",
.long_name = NULL_IF_CONFIG_SMALL("PCM mu-law / G.711 mu-law"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_PCM_ALAW,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_alaw",
.long_name = NULL_IF_CONFIG_SMALL("PCM A-law / G.711 A-law"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_PCM_S32LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s32le",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S32BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s32be",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_U32LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_u32le",
.long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 32-bit little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_U32BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_u32be",
.long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 32-bit big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S24LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s24le",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 24-bit little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S24BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s24be",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 24-bit big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_U24LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_u24le",
.long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 24-bit little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_U24BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_u24be",
.long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 24-bit big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S24DAUD,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s24daud",
.long_name = NULL_IF_CONFIG_SMALL("PCM D-Cinema audio signed 24-bit"),
- .props = AV_CODEC_PROP_LOSSLESS,
- },
- {
- .id = AV_CODEC_ID_PCM_ZORK,
- .type = AVMEDIA_TYPE_AUDIO,
- .name = "pcm_zork",
- .long_name = NULL_IF_CONFIG_SMALL("PCM Zork"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S16LE_PLANAR,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s16le_planar",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit little-endian planar"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_DVD,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_dvd",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 20|24-bit big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_F32BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_f32be",
.long_name = NULL_IF_CONFIG_SMALL("PCM 32-bit floating point big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_F32LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_f32le",
.long_name = NULL_IF_CONFIG_SMALL("PCM 32-bit floating point little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_F64BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_f64be",
.long_name = NULL_IF_CONFIG_SMALL("PCM 64-bit floating point big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_F64LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_f64le",
.long_name = NULL_IF_CONFIG_SMALL("PCM 64-bit floating point little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_BLURAY,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_bluray",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 16|20|24-bit big-endian for Blu-ray media"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_LXF,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_lxf",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 20-bit little-endian planar"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_S302M,
.type = AVMEDIA_TYPE_AUDIO,
.name = "s302m",
.long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S8_PLANAR,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s8_planar",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S24LE_PLANAR,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s24le_planar",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 24-bit little-endian planar"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S32LE_PLANAR,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s32le_planar",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit little-endian planar"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S16BE_PLANAR,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s16be_planar",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit big-endian planar"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S64LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s64le",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 64-bit little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_S64BE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_s64be",
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 64-bit big-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_F16LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_f16le",
.long_name = NULL_IF_CONFIG_SMALL("PCM 16.8 floating point little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_F24LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_f24le",
.long_name = NULL_IF_CONFIG_SMALL("PCM 24.0 floating point little-endian"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_PCM_VIDC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "pcm_vidc",
.long_name = NULL_IF_CONFIG_SMALL("PCM Archimedes VIDC"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_PCM_SGA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "pcm_sga",
+ .long_name = NULL_IF_CONFIG_SMALL("PCM SGA"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
/* various ADPCM codecs */
@@ -1987,294 +2184,364 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_qt",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA QuickTime"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_WAV,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_wav",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA WAV"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_DK3,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_dk3",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Duck DK3"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_DK4,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_dk4",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Duck DK4"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_WS,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_ws",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Westwood"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_SMJPEG,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_smjpeg",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Loki SDL MJPEG"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_MS,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ms",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Microsoft"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_4XM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_4xm",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM 4X Movie"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_XA,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_xa",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM CDROM XA"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_ADX,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_adx",
.long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_EA,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ea",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_G726,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_g726",
.long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_CT,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ct",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Creative Technology"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_SWF,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_swf",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Shockwave Flash"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_YAMAHA,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_yamaha",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Yamaha"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_SBPRO_4,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_sbpro_4",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Sound Blaster Pro 4-bit"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_SBPRO_3,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_sbpro_3",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Sound Blaster Pro 2.6-bit"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_SBPRO_2,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_sbpro_2",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Sound Blaster Pro 2-bit"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_THP,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_thp",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo THP"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_AMV,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_amv",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA AMV"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_EA_R1,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ea_r1",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts R1"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_EA_R3,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ea_r3",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts R3"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_EA_R2,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ea_r2",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts R2"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_ea_sead",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Electronic Arts SEAD"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_EA_EACS,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_ea_eacs",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Electronic Arts EACS"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_EA_XAS,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ea_xas",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts XAS"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ea_maxis_xa",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts Maxis CDROM XA"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_ISS,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_iss",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Funcom ISS"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_G722,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_g722",
.long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_APC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_apc",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA CRYO APC"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_VIMA,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_vima",
.long_name = NULL_IF_CONFIG_SMALL("LucasArts VIMA audio"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_AFC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_afc",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo Gamecube AFC"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_OKI,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_oki",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Dialogic OKI"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_DTK,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_dtk",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo Gamecube DTK"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_RAD,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_rad",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Radical"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_G726LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_g726le",
.long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM little-endian"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_THP_LE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_thp_le",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo THP (Little-Endian)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_PSX,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_psx",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Playstation"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_AICA,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_aica",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM Yamaha AICA"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_IMA_DAT4,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_ima_dat4",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Eurocom DAT4"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_MTAF,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_mtaf",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM MTAF"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ADPCM_AGM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "adpcm_agm",
.long_name = NULL_IF_CONFIG_SMALL("ADPCM AmuseGraphics Movie AGM"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_ARGO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_argo",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Argonaut Games"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_SSI,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_ssi",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Simon & Schuster Interactive"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_ZORK,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_zork",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Zork"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_APM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_apm",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Ubisoft APM"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_ALP,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_alp",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA High Voltage Software ALP"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_MTF,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_mtf",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Capcom's MT Framework"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_CUNNING,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_cunning",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Cunning Developments"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_MOFLEX,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_moflex",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA MobiClip MOFLEX"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_IMA_ACORN,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_ima_acorn",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Acorn Replay"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ADPCM_XMD,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "adpcm_xmd",
+ .long_name = NULL_IF_CONFIG_SMALL("ADPCM Konami XMD"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
/* AMR */
@@ -2283,14 +2550,14 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "amr_nb",
.long_name = NULL_IF_CONFIG_SMALL("AMR-NB (Adaptive Multi-Rate NarrowBand)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_AMR_WB,
.type = AVMEDIA_TYPE_AUDIO,
.name = "amr_wb",
.long_name = NULL_IF_CONFIG_SMALL("AMR-WB (Adaptive Multi-Rate WideBand)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
/* RealAudio codecs*/
@@ -2299,14 +2566,14 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "ra_144",
.long_name = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_RA_288,
.type = AVMEDIA_TYPE_AUDIO,
.name = "ra_288",
.long_name = NULL_IF_CONFIG_SMALL("RealAudio 2.0 (28.8K)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
/* various DPCM codecs */
@@ -2315,42 +2582,63 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "roq_dpcm",
.long_name = NULL_IF_CONFIG_SMALL("DPCM id RoQ"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_INTERPLAY_DPCM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "interplay_dpcm",
.long_name = NULL_IF_CONFIG_SMALL("DPCM Interplay"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_XAN_DPCM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "xan_dpcm",
.long_name = NULL_IF_CONFIG_SMALL("DPCM Xan"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_SOL_DPCM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "sol_dpcm",
.long_name = NULL_IF_CONFIG_SMALL("DPCM Sol"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_SDX2_DPCM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "sdx2_dpcm",
.long_name = NULL_IF_CONFIG_SMALL("DPCM Squareroot-Delta-Exact"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_GREMLIN_DPCM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "gremlin_dpcm",
.long_name = NULL_IF_CONFIG_SMALL("DPCM Gremlin"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DERF_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "derf_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Xilam DERF"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WADY_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wady_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Marble WADY"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_CBD2_DPCM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "cbd2_dpcm",
+ .long_name = NULL_IF_CONFIG_SMALL("DPCM Cuberoot-Delta-Exact"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
/* audio codecs */
@@ -2359,21 +2647,21 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "mp2",
.long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MP3,
.type = AVMEDIA_TYPE_AUDIO,
.name = "mp3",
.long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_AAC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "aac",
.long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
.profiles = NULL_IF_CONFIG_SMALL(ff_aac_profiles),
},
{
@@ -2381,14 +2669,14 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "ac3",
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_DTS,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dts",
.long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"),
- .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
.profiles = NULL_IF_CONFIG_SMALL(ff_dca_profiles),
},
{
@@ -2396,49 +2684,49 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "vorbis",
.long_name = NULL_IF_CONFIG_SMALL("Vorbis"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_DVAUDIO,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dvaudio",
.long_name = NULL_IF_CONFIG_SMALL("DV audio"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_WMAV1,
.type = AVMEDIA_TYPE_AUDIO,
.name = "wmav1",
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_WMAV2,
.type = AVMEDIA_TYPE_AUDIO,
.name = "wmav2",
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MACE3,
.type = AVMEDIA_TYPE_AUDIO,
.name = "mace3",
.long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MACE6,
.type = AVMEDIA_TYPE_AUDIO,
.name = "mace6",
.long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 6:1"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_VMDAUDIO,
.type = AVMEDIA_TYPE_AUDIO,
.name = "vmdaudio",
.long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_FLAC,
@@ -2452,21 +2740,21 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "mp3adu",
.long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MP3ON4,
.type = AVMEDIA_TYPE_AUDIO,
.name = "mp3on4",
.long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_SHORTEN,
.type = AVMEDIA_TYPE_AUDIO,
.name = "shorten",
.long_name = NULL_IF_CONFIG_SMALL("Shorten"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_ALAC,
@@ -2480,35 +2768,35 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "westwood_snd1",
.long_name = NULL_IF_CONFIG_SMALL("Westwood Audio (SND1)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_GSM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "gsm",
.long_name = NULL_IF_CONFIG_SMALL("GSM"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_QDM2,
.type = AVMEDIA_TYPE_AUDIO,
.name = "qdm2",
.long_name = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_COOK,
.type = AVMEDIA_TYPE_AUDIO,
.name = "cook",
.long_name = NULL_IF_CONFIG_SMALL("Cook / Cooker / Gecko (RealAudio G2)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_TRUESPEECH,
.type = AVMEDIA_TYPE_AUDIO,
.name = "truespeech",
.long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_TTA,
@@ -2522,14 +2810,14 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "smackaudio",
.long_name = NULL_IF_CONFIG_SMALL("Smacker audio"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_QCELP,
.type = AVMEDIA_TYPE_AUDIO,
.name = "qcelp",
.long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_WAVPACK,
@@ -2544,21 +2832,21 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "dsicinaudio",
.long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN audio"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_IMC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "imc",
.long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MUSEPACK7,
.type = AVMEDIA_TYPE_AUDIO,
.name = "musepack7",
.long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MLP,
@@ -2572,98 +2860,98 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "gsm_ms",
.long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ATRAC3,
.type = AVMEDIA_TYPE_AUDIO,
.name = "atrac3",
.long_name = NULL_IF_CONFIG_SMALL("ATRAC3 (Adaptive TRansform Acoustic Coding 3)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_APE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "ape",
.long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_NELLYMOSER,
.type = AVMEDIA_TYPE_AUDIO,
.name = "nellymoser",
.long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MUSEPACK8,
.type = AVMEDIA_TYPE_AUDIO,
.name = "musepack8",
.long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_SPEEX,
.type = AVMEDIA_TYPE_AUDIO,
.name = "speex",
.long_name = NULL_IF_CONFIG_SMALL("Speex"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_WMAVOICE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "wmavoice",
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_WMAPRO,
.type = AVMEDIA_TYPE_AUDIO,
.name = "wmapro",
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_WMALOSSLESS,
.type = AVMEDIA_TYPE_AUDIO,
.name = "wmalossless",
.long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Lossless"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_ATRAC3P,
.type = AVMEDIA_TYPE_AUDIO,
.name = "atrac3p",
.long_name = NULL_IF_CONFIG_SMALL("ATRAC3+ (Adaptive TRansform Acoustic Coding 3+)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_EAC3,
.type = AVMEDIA_TYPE_AUDIO,
.name = "eac3",
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_SIPR,
.type = AVMEDIA_TYPE_AUDIO,
.name = "sipr",
.long_name = NULL_IF_CONFIG_SMALL("RealAudio SIPR / ACELP.NET"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MP1,
.type = AVMEDIA_TYPE_AUDIO,
.name = "mp1",
.long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_TWINVQ,
.type = AVMEDIA_TYPE_AUDIO,
.name = "twinvq",
.long_name = NULL_IF_CONFIG_SMALL("VQF TwinVQ"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_TRUEHD,
@@ -2677,35 +2965,35 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "mp4als",
.long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_ATRAC1,
.type = AVMEDIA_TYPE_AUDIO,
.name = "atrac1",
.long_name = NULL_IF_CONFIG_SMALL("ATRAC1 (Adaptive TRansform Acoustic Coding)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_BINKAUDIO_RDFT,
.type = AVMEDIA_TYPE_AUDIO,
.name = "binkaudio_rdft",
.long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_BINKAUDIO_DCT,
.type = AVMEDIA_TYPE_AUDIO,
.name = "binkaudio_dct",
.long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_AAC_LATM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "aac_latm",
.long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
.profiles = NULL_IF_CONFIG_SMALL(ff_aac_profiles),
},
{
@@ -2713,278 +3001,372 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_AUDIO,
.name = "qdmc",
.long_name = NULL_IF_CONFIG_SMALL("QDesign Music"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_CELT,
.type = AVMEDIA_TYPE_AUDIO,
.name = "celt",
.long_name = NULL_IF_CONFIG_SMALL("Constrained Energy Lapped Transform (CELT)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_G723_1,
.type = AVMEDIA_TYPE_AUDIO,
.name = "g723_1",
.long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_G729,
.type = AVMEDIA_TYPE_AUDIO,
.name = "g729",
.long_name = NULL_IF_CONFIG_SMALL("G.729"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_8SVX_EXP,
.type = AVMEDIA_TYPE_AUDIO,
.name = "8svx_exp",
.long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_8SVX_FIB,
.type = AVMEDIA_TYPE_AUDIO,
.name = "8svx_fib",
.long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_BMV_AUDIO,
.type = AVMEDIA_TYPE_AUDIO,
.name = "bmv_audio",
.long_name = NULL_IF_CONFIG_SMALL("Discworld II BMV audio"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_RALF,
.type = AVMEDIA_TYPE_AUDIO,
.name = "ralf",
.long_name = NULL_IF_CONFIG_SMALL("RealAudio Lossless"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_IAC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "iac",
.long_name = NULL_IF_CONFIG_SMALL("IAC (Indeo Audio Coder)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ILBC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "ilbc",
.long_name = NULL_IF_CONFIG_SMALL("iLBC (Internet Low Bitrate Codec)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_OPUS,
.type = AVMEDIA_TYPE_AUDIO,
.name = "opus",
.long_name = NULL_IF_CONFIG_SMALL("Opus (Opus Interactive Audio Codec)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_COMFORT_NOISE,
.type = AVMEDIA_TYPE_AUDIO,
.name = "comfortnoise",
.long_name = NULL_IF_CONFIG_SMALL("RFC 3389 Comfort Noise"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_TAK,
.type = AVMEDIA_TYPE_AUDIO,
.name = "tak",
.long_name = NULL_IF_CONFIG_SMALL("TAK (Tom's lossless Audio Kompressor)"),
- .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_METASOUND,
.type = AVMEDIA_TYPE_AUDIO,
.name = "metasound",
.long_name = NULL_IF_CONFIG_SMALL("Voxware MetaSound"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_PAF_AUDIO,
.type = AVMEDIA_TYPE_AUDIO,
.name = "paf_audio",
.long_name = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Audio"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ON2AVC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "avc",
.long_name = NULL_IF_CONFIG_SMALL("On2 Audio for Video Codec"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_DSS_SP,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dss_sp",
.long_name = NULL_IF_CONFIG_SMALL("Digital Speech Standard - Standard Play mode (DSS SP)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_CODEC2,
.type = AVMEDIA_TYPE_AUDIO,
.name = "codec2",
.long_name = NULL_IF_CONFIG_SMALL("codec2 (very low bitrate speech codec)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_FFWAVESYNTH,
.type = AVMEDIA_TYPE_AUDIO,
.name = "wavesynth",
.long_name = NULL_IF_CONFIG_SMALL("Wave synthesis pseudo-codec"),
+ .props = AV_CODEC_PROP_INTRA_ONLY,
},
{
.id = AV_CODEC_ID_SONIC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "sonic",
.long_name = NULL_IF_CONFIG_SMALL("Sonic"),
+ .props = AV_CODEC_PROP_INTRA_ONLY,
},
{
.id = AV_CODEC_ID_SONIC_LS,
.type = AVMEDIA_TYPE_AUDIO,
.name = "sonicls",
.long_name = NULL_IF_CONFIG_SMALL("Sonic lossless"),
+ .props = AV_CODEC_PROP_INTRA_ONLY,
},
{
.id = AV_CODEC_ID_EVRC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "evrc",
.long_name = NULL_IF_CONFIG_SMALL("EVRC (Enhanced Variable Rate Codec)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_SMV,
.type = AVMEDIA_TYPE_AUDIO,
.name = "smv",
.long_name = NULL_IF_CONFIG_SMALL("SMV (Selectable Mode Vocoder)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_DSD_LSBF,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dsd_lsbf",
.long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), least significant bit first"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_DSD_MSBF,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dsd_msbf",
.long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), most significant bit first"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_DSD_LSBF_PLANAR,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dsd_lsbf_planar",
.long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), least significant bit first, planar"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_DSD_MSBF_PLANAR,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dsd_msbf_planar",
.long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), most significant bit first, planar"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_4GV,
.type = AVMEDIA_TYPE_AUDIO,
.name = "4gv",
.long_name = NULL_IF_CONFIG_SMALL("4GV (Fourth Generation Vocoder)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_INTERPLAY_ACM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "interplayacm",
.long_name = NULL_IF_CONFIG_SMALL("Interplay ACM"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_XMA1,
.type = AVMEDIA_TYPE_AUDIO,
.name = "xma1",
.long_name = NULL_IF_CONFIG_SMALL("Xbox Media Audio 1"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_XMA2,
.type = AVMEDIA_TYPE_AUDIO,
.name = "xma2",
.long_name = NULL_IF_CONFIG_SMALL("Xbox Media Audio 2"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_DST,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dst",
.long_name = NULL_IF_CONFIG_SMALL("DST (Direct Stream Transfer)"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_ATRAC3AL,
.type = AVMEDIA_TYPE_AUDIO,
.name = "atrac3al",
.long_name = NULL_IF_CONFIG_SMALL("ATRAC3 AL (Adaptive TRansform Acoustic Coding 3 Advanced Lossless)"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_ATRAC3PAL,
.type = AVMEDIA_TYPE_AUDIO,
.name = "atrac3pal",
.long_name = NULL_IF_CONFIG_SMALL("ATRAC3+ AL (Adaptive TRansform Acoustic Coding 3+ Advanced Lossless)"),
- .props = AV_CODEC_PROP_LOSSLESS,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
},
{
.id = AV_CODEC_ID_DOLBY_E,
.type = AVMEDIA_TYPE_AUDIO,
.name = "dolby_e",
.long_name = NULL_IF_CONFIG_SMALL("Dolby E"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_APTX,
.type = AVMEDIA_TYPE_AUDIO,
.name = "aptx",
.long_name = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_APTX_HD,
.type = AVMEDIA_TYPE_AUDIO,
.name = "aptx_hd",
.long_name = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_SBC,
.type = AVMEDIA_TYPE_AUDIO,
.name = "sbc",
.long_name = NULL_IF_CONFIG_SMALL("SBC (low-complexity subband codec)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_ATRAC9,
.type = AVMEDIA_TYPE_AUDIO,
.name = "atrac9",
.long_name = NULL_IF_CONFIG_SMALL("ATRAC9 (Adaptive TRansform Acoustic Coding 9)"),
- .props = AV_CODEC_PROP_LOSSY,
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_HCOM,
.type = AVMEDIA_TYPE_AUDIO,
.name = "hcom",
.long_name = NULL_IF_CONFIG_SMALL("HCOM Audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_ACELP_KELVIN,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "acelp.kelvin",
+ .long_name = NULL_IF_CONFIG_SMALL("Sipro ACELP.KELVIN"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MPEGH_3D_AUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "mpegh_3d_audio",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-H 3D Audio"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_SIREN,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "siren",
+ .long_name = NULL_IF_CONFIG_SMALL("Siren"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_HCA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "hca",
+ .long_name = NULL_IF_CONFIG_SMALL("CRI HCA"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_FASTAUDIO,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "fastaudio",
+ .long_name = NULL_IF_CONFIG_SMALL("MobiClip FastAudio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_MSNSIREN,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "msnsiren",
+ .long_name = NULL_IF_CONFIG_SMALL("MSN Siren"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_DFPWM,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "dfpwm",
+ .long_name = NULL_IF_CONFIG_SMALL("DFPWM (Dynamic Filter Pulse Width Modulation)"),
.props = AV_CODEC_PROP_LOSSY,
},
+ {
+ .id = AV_CODEC_ID_BONK,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "bonk",
+ .long_name = NULL_IF_CONFIG_SMALL("Bonk audio"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_MISC4,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "misc4",
+ .long_name = NULL_IF_CONFIG_SMALL("Micronas SC-4 Audio"),
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_INTRA_ONLY,
+ },
+ {
+ .id = AV_CODEC_ID_APAC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "apac",
+ .long_name = NULL_IF_CONFIG_SMALL("Marian's A-pac audio"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_FTR,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "ftr",
+ .long_name = NULL_IF_CONFIG_SMALL("FTR Voice"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+ },
+ {
+ .id = AV_CODEC_ID_WAVARC,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "wavarc",
+ .long_name = NULL_IF_CONFIG_SMALL("Waveform Archiver"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
+ },
+ {
+ .id = AV_CODEC_ID_RKA,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "rka",
+ .long_name = NULL_IF_CONFIG_SMALL("RKA (RK Audio)"),
+ .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS,
+ },
/* subtitle codecs */
{
@@ -3185,6 +3567,12 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("SCTE 35 Message Queue"),
},
{
+ .id = AV_CODEC_ID_EPG,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "epg",
+ .long_name = NULL_IF_CONFIG_SMALL("Electronic Program Guide"),
+ },
+ {
.id = AV_CODEC_ID_BINTEXT,
.type = AVMEDIA_TYPE_VIDEO,
.name = "bintext",
@@ -3238,12 +3626,31 @@ static const AVCodecDescriptor codec_descriptors[] = {
.mime_types= MT("application/octet-stream"),
},
{
+ .id = AV_CODEC_ID_MPEG2TS,
+ .type = AVMEDIA_TYPE_DATA,
+ .name = "mpegts",
+ .long_name = NULL_IF_CONFIG_SMALL("raw MPEG-TS stream"),
+ .mime_types= MT("application/MP2T"),
+ },
+ {
.id = AV_CODEC_ID_WRAPPED_AVFRAME,
.type = AVMEDIA_TYPE_VIDEO,
.name = "wrapped_avframe",
.long_name = NULL_IF_CONFIG_SMALL("AVFrame to AVPacket passthrough"),
.props = AV_CODEC_PROP_LOSSLESS,
},
+ {
+ .id = AV_CODEC_ID_VNULL,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "vnull",
+ .long_name = NULL_IF_CONFIG_SMALL("Null video codec"),
+ },
+ {
+ .id = AV_CODEC_ID_ANULL,
+ .type = AVMEDIA_TYPE_AUDIO,
+ .name = "anull",
+ .long_name = NULL_IF_CONFIG_SMALL("Null audio codec"),
+ },
};
static int descriptor_compare(const void *key, const void *member)
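The body of descriptor_compare() and of avcodec_descriptor_get() lies outside this hunk, so only the helper's signature is visible as context above. As a hedged sketch only (an assumption, not something this hunk shows), such a comparison helper is typically paired with the id-ordered codec_descriptors[] table via bsearch(), roughly like this:

    #include <stdlib.h>            /* bsearch() */
    #include "libavutil/macros.h"  /* FF_ARRAY_ELEMS() */

    /* Sketch only: compare the searched AVCodecID against one table entry. */
    static int descriptor_compare_sketch(const void *key, const void *member)
    {
        enum AVCodecID id = *(const enum AVCodecID *) key;
        const AVCodecDescriptor *desc = member;
        return (int) id - (int) desc->id;
    }

    /* Sketch only: binary-search the table, which must stay sorted by id. */
    static const AVCodecDescriptor *descriptor_get_sketch(enum AVCodecID id)
    {
        return bsearch(&id, codec_descriptors, FF_ARRAY_ELEMS(codec_descriptors),
                       sizeof(codec_descriptors[0]), descriptor_compare_sketch);
    }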
diff --git a/media/ffvpx/libavcodec/codec_desc.h b/media/ffvpx/libavcodec/codec_desc.h
new file mode 100644
index 0000000000..126b52df47
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_desc.h
@@ -0,0 +1,128 @@
+/*
+ * Codec descriptors public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_DESC_H
+#define AVCODEC_CODEC_DESC_H
+
+#include "libavutil/avutil.h"
+
+#include "codec_id.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * This struct describes the properties of a single codec described by an
+ * AVCodecID.
+ * @see avcodec_descriptor_get()
+ */
+typedef struct AVCodecDescriptor {
+ enum AVCodecID id;
+ enum AVMediaType type;
+ /**
+ * Name of the codec described by this descriptor. It is non-empty and
+ * unique for each codec descriptor. It should contain alphanumeric
+ * characters and '_' only.
+ */
+ const char *name;
+ /**
+ * A more descriptive name for this codec. May be NULL.
+ */
+ const char *long_name;
+ /**
+ * Codec properties, a combination of AV_CODEC_PROP_* flags.
+ */
+ int props;
+ /**
+ * MIME type(s) associated with the codec.
+ * May be NULL; if not, a NULL-terminated array of MIME types.
+ * The first item is always non-NULL and is the preferred MIME type.
+ */
+ const char *const *mime_types;
+ /**
+ * If non-NULL, an array of profiles recognized for this codec.
+ * Terminated with FF_PROFILE_UNKNOWN.
+ */
+ const struct AVProfile *profiles;
+} AVCodecDescriptor;
+
+/**
+ * Codec uses only intra compression.
+ * Video and audio codecs only.
+ */
+#define AV_CODEC_PROP_INTRA_ONLY (1 << 0)
+/**
+ * Codec supports lossy compression. Audio and video codecs only.
+ * @note a codec may support both lossy and lossless
+ * compression modes
+ */
+#define AV_CODEC_PROP_LOSSY (1 << 1)
+/**
+ * Codec supports lossless compression. Audio and video codecs only.
+ */
+#define AV_CODEC_PROP_LOSSLESS (1 << 2)
+/**
+ * Codec supports frame reordering. That is, the coded order (the order in which
+ * the encoded packets are output by the encoders / stored / input to the
+ * decoders) may be different from the presentation order of the corresponding
+ * frames.
+ *
+ * For codecs that do not have this property set, PTS and DTS should always be
+ * equal.
+ */
+#define AV_CODEC_PROP_REORDER (1 << 3)
+/**
+ * Subtitle codec is bitmap based
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.
+ */
+#define AV_CODEC_PROP_BITMAP_SUB (1 << 16)
+/**
+ * Subtitle codec is text based.
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field.
+ */
+#define AV_CODEC_PROP_TEXT_SUB (1 << 17)
+
+/**
+ * @return descriptor for given codec ID or NULL if no descriptor exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);
+
+/**
+ * Iterate over all codec descriptors known to libavcodec.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);
+
+/**
+ * @return codec descriptor with the given name or NULL if no such descriptor
+ * exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_DESC_H
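Since the bulk of the codec_desc.c hunks above toggle AV_CODEC_PROP_INTRA_ONLY on audio entries, a consumer-side sketch of the public API declared in this new header may help show how that flag change surfaces. This is an illustrative standalone program, not part of the patch, and it assumes a normal libavcodec development setup to build against:

    /* probe_props.c -- report descriptor properties for a few codec IDs.
     * Build (assumed): cc probe_props.c $(pkg-config --cflags --libs libavcodec)
     */
    #include <stdio.h>
    #include <libavcodec/avcodec.h>   /* pulls in codec_desc.h and codec_id.h */

    static void report(enum AVCodecID id)
    {
        const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
        if (!desc) {
            printf("no descriptor for id %d\n", (int) id);
            return;
        }
        printf("%-10s intra_only=%d lossy=%d lossless=%d\n", desc->name,
               !!(desc->props & AV_CODEC_PROP_INTRA_ONLY),
               !!(desc->props & AV_CODEC_PROP_LOSSY),
               !!(desc->props & AV_CODEC_PROP_LOSSLESS));
    }

    int main(void)
    {
        int n = 0;
        report(AV_CODEC_ID_OPUS);  /* flagged INTRA_ONLY | LOSSY by this patch */
        report(AV_CODEC_ID_DTS);   /* INTRA_ONLY | LOSSY | LOSSLESS */

        /* Walk every known descriptor and count intra-only audio codecs. */
        for (const AVCodecDescriptor *d = avcodec_descriptor_next(NULL); d;
             d = avcodec_descriptor_next(d))
            if (d->type == AVMEDIA_TYPE_AUDIO && (d->props & AV_CODEC_PROP_INTRA_ONLY))
                n++;
        printf("intra-only audio descriptors: %d\n", n);
        return 0;
    }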
diff --git a/media/ffvpx/libavcodec/codec_id.h b/media/ffvpx/libavcodec/codec_id.h
new file mode 100644
index 0000000000..89a4a0cb89
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_id.h
@@ -0,0 +1,661 @@
+/*
+ * Codec IDs
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_ID_H
+#define AVCODEC_CODEC_ID_H
+
+#include "libavutil/avutil.h"
+#include "libavutil/samplefmt.h"
+
+#include "version_major.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of an existing codec ID changes (that would break ABI),
+ * 2. it is as close as possible to similar codecs
+ *
+ * After adding new codec IDs, do not forget to add an entry to the codec
+ * descriptor list and bump libavcodec minor version.
+ */
+enum AVCodecID {
+ AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+ AV_CODEC_ID_H261,
+ AV_CODEC_ID_H263,
+ AV_CODEC_ID_RV10,
+ AV_CODEC_ID_RV20,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_MJPEGB,
+ AV_CODEC_ID_LJPEG,
+ AV_CODEC_ID_SP5X,
+ AV_CODEC_ID_JPEGLS,
+ AV_CODEC_ID_MPEG4,
+ AV_CODEC_ID_RAWVIDEO,
+ AV_CODEC_ID_MSMPEG4V1,
+ AV_CODEC_ID_MSMPEG4V2,
+ AV_CODEC_ID_MSMPEG4V3,
+ AV_CODEC_ID_WMV1,
+ AV_CODEC_ID_WMV2,
+ AV_CODEC_ID_H263P,
+ AV_CODEC_ID_H263I,
+ AV_CODEC_ID_FLV1,
+ AV_CODEC_ID_SVQ1,
+ AV_CODEC_ID_SVQ3,
+ AV_CODEC_ID_DVVIDEO,
+ AV_CODEC_ID_HUFFYUV,
+ AV_CODEC_ID_CYUV,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_INDEO3,
+ AV_CODEC_ID_VP3,
+ AV_CODEC_ID_THEORA,
+ AV_CODEC_ID_ASV1,
+ AV_CODEC_ID_ASV2,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_4XM,
+ AV_CODEC_ID_VCR1,
+ AV_CODEC_ID_CLJR,
+ AV_CODEC_ID_MDEC,
+ AV_CODEC_ID_ROQ,
+ AV_CODEC_ID_INTERPLAY_VIDEO,
+ AV_CODEC_ID_XAN_WC3,
+ AV_CODEC_ID_XAN_WC4,
+ AV_CODEC_ID_RPZA,
+ AV_CODEC_ID_CINEPAK,
+ AV_CODEC_ID_WS_VQA,
+ AV_CODEC_ID_MSRLE,
+ AV_CODEC_ID_MSVIDEO1,
+ AV_CODEC_ID_IDCIN,
+ AV_CODEC_ID_8BPS,
+ AV_CODEC_ID_SMC,
+ AV_CODEC_ID_FLIC,
+ AV_CODEC_ID_TRUEMOTION1,
+ AV_CODEC_ID_VMDVIDEO,
+ AV_CODEC_ID_MSZH,
+ AV_CODEC_ID_ZLIB,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_TSCC,
+ AV_CODEC_ID_ULTI,
+ AV_CODEC_ID_QDRAW,
+ AV_CODEC_ID_VIXL,
+ AV_CODEC_ID_QPEG,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_PPM,
+ AV_CODEC_ID_PBM,
+ AV_CODEC_ID_PGM,
+ AV_CODEC_ID_PGMYUV,
+ AV_CODEC_ID_PAM,
+ AV_CODEC_ID_FFVHUFF,
+ AV_CODEC_ID_RV30,
+ AV_CODEC_ID_RV40,
+ AV_CODEC_ID_VC1,
+ AV_CODEC_ID_WMV3,
+ AV_CODEC_ID_LOCO,
+ AV_CODEC_ID_WNV1,
+ AV_CODEC_ID_AASC,
+ AV_CODEC_ID_INDEO2,
+ AV_CODEC_ID_FRAPS,
+ AV_CODEC_ID_TRUEMOTION2,
+ AV_CODEC_ID_BMP,
+ AV_CODEC_ID_CSCD,
+ AV_CODEC_ID_MMVIDEO,
+ AV_CODEC_ID_ZMBV,
+ AV_CODEC_ID_AVS,
+ AV_CODEC_ID_SMACKVIDEO,
+ AV_CODEC_ID_NUV,
+ AV_CODEC_ID_KMVC,
+ AV_CODEC_ID_FLASHSV,
+ AV_CODEC_ID_CAVS,
+ AV_CODEC_ID_JPEG2000,
+ AV_CODEC_ID_VMNC,
+ AV_CODEC_ID_VP5,
+ AV_CODEC_ID_VP6,
+ AV_CODEC_ID_VP6F,
+ AV_CODEC_ID_TARGA,
+ AV_CODEC_ID_DSICINVIDEO,
+ AV_CODEC_ID_TIERTEXSEQVIDEO,
+ AV_CODEC_ID_TIFF,
+ AV_CODEC_ID_GIF,
+ AV_CODEC_ID_DXA,
+ AV_CODEC_ID_DNXHD,
+ AV_CODEC_ID_THP,
+ AV_CODEC_ID_SGI,
+ AV_CODEC_ID_C93,
+ AV_CODEC_ID_BETHSOFTVID,
+ AV_CODEC_ID_PTX,
+ AV_CODEC_ID_TXD,
+ AV_CODEC_ID_VP6A,
+ AV_CODEC_ID_AMV,
+ AV_CODEC_ID_VB,
+ AV_CODEC_ID_PCX,
+ AV_CODEC_ID_SUNRAST,
+ AV_CODEC_ID_INDEO4,
+ AV_CODEC_ID_INDEO5,
+ AV_CODEC_ID_MIMIC,
+ AV_CODEC_ID_RL2,
+ AV_CODEC_ID_ESCAPE124,
+ AV_CODEC_ID_DIRAC,
+ AV_CODEC_ID_BFI,
+ AV_CODEC_ID_CMV,
+ AV_CODEC_ID_MOTIONPIXELS,
+ AV_CODEC_ID_TGV,
+ AV_CODEC_ID_TGQ,
+ AV_CODEC_ID_TQI,
+ AV_CODEC_ID_AURA,
+ AV_CODEC_ID_AURA2,
+ AV_CODEC_ID_V210X,
+ AV_CODEC_ID_TMV,
+ AV_CODEC_ID_V210,
+ AV_CODEC_ID_DPX,
+ AV_CODEC_ID_MAD,
+ AV_CODEC_ID_FRWU,
+ AV_CODEC_ID_FLASHSV2,
+ AV_CODEC_ID_CDGRAPHICS,
+ AV_CODEC_ID_R210,
+ AV_CODEC_ID_ANM,
+ AV_CODEC_ID_BINKVIDEO,
+ AV_CODEC_ID_IFF_ILBM,
+#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM
+ AV_CODEC_ID_KGV1,
+ AV_CODEC_ID_YOP,
+ AV_CODEC_ID_VP8,
+ AV_CODEC_ID_PICTOR,
+ AV_CODEC_ID_ANSI,
+ AV_CODEC_ID_A64_MULTI,
+ AV_CODEC_ID_A64_MULTI5,
+ AV_CODEC_ID_R10K,
+ AV_CODEC_ID_MXPEG,
+ AV_CODEC_ID_LAGARITH,
+ AV_CODEC_ID_PRORES,
+ AV_CODEC_ID_JV,
+ AV_CODEC_ID_DFA,
+ AV_CODEC_ID_WMV3IMAGE,
+ AV_CODEC_ID_VC1IMAGE,
+ AV_CODEC_ID_UTVIDEO,
+ AV_CODEC_ID_BMV_VIDEO,
+ AV_CODEC_ID_VBLE,
+ AV_CODEC_ID_DXTORY,
+ AV_CODEC_ID_V410,
+ AV_CODEC_ID_XWD,
+ AV_CODEC_ID_CDXL,
+ AV_CODEC_ID_XBM,
+ AV_CODEC_ID_ZEROCODEC,
+ AV_CODEC_ID_MSS1,
+ AV_CODEC_ID_MSA1,
+ AV_CODEC_ID_TSCC2,
+ AV_CODEC_ID_MTS2,
+ AV_CODEC_ID_CLLC,
+ AV_CODEC_ID_MSS2,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_AIC,
+ AV_CODEC_ID_ESCAPE130,
+ AV_CODEC_ID_G2M,
+ AV_CODEC_ID_WEBP,
+ AV_CODEC_ID_HNM4_VIDEO,
+ AV_CODEC_ID_HEVC,
+#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
+ AV_CODEC_ID_FIC,
+ AV_CODEC_ID_ALIAS_PIX,
+ AV_CODEC_ID_BRENDER_PIX,
+ AV_CODEC_ID_PAF_VIDEO,
+ AV_CODEC_ID_EXR,
+ AV_CODEC_ID_VP7,
+ AV_CODEC_ID_SANM,
+ AV_CODEC_ID_SGIRLE,
+ AV_CODEC_ID_MVC1,
+ AV_CODEC_ID_MVC2,
+ AV_CODEC_ID_HQX,
+ AV_CODEC_ID_TDSC,
+ AV_CODEC_ID_HQ_HQA,
+ AV_CODEC_ID_HAP,
+ AV_CODEC_ID_DDS,
+ AV_CODEC_ID_DXV,
+ AV_CODEC_ID_SCREENPRESSO,
+ AV_CODEC_ID_RSCC,
+ AV_CODEC_ID_AVS2,
+ AV_CODEC_ID_PGX,
+ AV_CODEC_ID_AVS3,
+ AV_CODEC_ID_MSP2,
+ AV_CODEC_ID_VVC,
+#define AV_CODEC_ID_H266 AV_CODEC_ID_VVC
+ AV_CODEC_ID_Y41P,
+ AV_CODEC_ID_AVRP,
+ AV_CODEC_ID_012V,
+ AV_CODEC_ID_AVUI,
+#if FF_API_AYUV_CODECID
+ AV_CODEC_ID_AYUV,
+#endif
+ AV_CODEC_ID_TARGA_Y216,
+ AV_CODEC_ID_V308,
+ AV_CODEC_ID_V408,
+ AV_CODEC_ID_YUV4,
+ AV_CODEC_ID_AVRN,
+ AV_CODEC_ID_CPIA,
+ AV_CODEC_ID_XFACE,
+ AV_CODEC_ID_SNOW,
+ AV_CODEC_ID_SMVJPEG,
+ AV_CODEC_ID_APNG,
+ AV_CODEC_ID_DAALA,
+ AV_CODEC_ID_CFHD,
+ AV_CODEC_ID_TRUEMOTION2RT,
+ AV_CODEC_ID_M101,
+ AV_CODEC_ID_MAGICYUV,
+ AV_CODEC_ID_SHEERVIDEO,
+ AV_CODEC_ID_YLC,
+ AV_CODEC_ID_PSD,
+ AV_CODEC_ID_PIXLET,
+ AV_CODEC_ID_SPEEDHQ,
+ AV_CODEC_ID_FMVC,
+ AV_CODEC_ID_SCPR,
+ AV_CODEC_ID_CLEARVIDEO,
+ AV_CODEC_ID_XPM,
+ AV_CODEC_ID_AV1,
+ AV_CODEC_ID_BITPACKED,
+ AV_CODEC_ID_MSCC,
+ AV_CODEC_ID_SRGC,
+ AV_CODEC_ID_SVG,
+ AV_CODEC_ID_GDV,
+ AV_CODEC_ID_FITS,
+ AV_CODEC_ID_IMM4,
+ AV_CODEC_ID_PROSUMER,
+ AV_CODEC_ID_MWSC,
+ AV_CODEC_ID_WCMV,
+ AV_CODEC_ID_RASC,
+ AV_CODEC_ID_HYMT,
+ AV_CODEC_ID_ARBC,
+ AV_CODEC_ID_AGM,
+ AV_CODEC_ID_LSCR,
+ AV_CODEC_ID_VP4,
+ AV_CODEC_ID_IMM5,
+ AV_CODEC_ID_MVDV,
+ AV_CODEC_ID_MVHA,
+ AV_CODEC_ID_CDTOONS,
+ AV_CODEC_ID_MV30,
+ AV_CODEC_ID_NOTCHLC,
+ AV_CODEC_ID_PFM,
+ AV_CODEC_ID_MOBICLIP,
+ AV_CODEC_ID_PHOTOCD,
+ AV_CODEC_ID_IPU,
+ AV_CODEC_ID_ARGO,
+ AV_CODEC_ID_CRI,
+ AV_CODEC_ID_SIMBIOSIS_IMX,
+ AV_CODEC_ID_SGA_VIDEO,
+ AV_CODEC_ID_GEM,
+ AV_CODEC_ID_VBN,
+ AV_CODEC_ID_JPEGXL,
+ AV_CODEC_ID_QOI,
+ AV_CODEC_ID_PHM,
+ AV_CODEC_ID_RADIANCE_HDR,
+ AV_CODEC_ID_WBMP,
+ AV_CODEC_ID_MEDIA100,
+ AV_CODEC_ID_VQC,
+
+ /* various PCM "codecs" */
+ AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+ AV_CODEC_ID_PCM_S16LE = 0x10000,
+ AV_CODEC_ID_PCM_S16BE,
+ AV_CODEC_ID_PCM_U16LE,
+ AV_CODEC_ID_PCM_U16BE,
+ AV_CODEC_ID_PCM_S8,
+ AV_CODEC_ID_PCM_U8,
+ AV_CODEC_ID_PCM_MULAW,
+ AV_CODEC_ID_PCM_ALAW,
+ AV_CODEC_ID_PCM_S32LE,
+ AV_CODEC_ID_PCM_S32BE,
+ AV_CODEC_ID_PCM_U32LE,
+ AV_CODEC_ID_PCM_U32BE,
+ AV_CODEC_ID_PCM_S24LE,
+ AV_CODEC_ID_PCM_S24BE,
+ AV_CODEC_ID_PCM_U24LE,
+ AV_CODEC_ID_PCM_U24BE,
+ AV_CODEC_ID_PCM_S24DAUD,
+ AV_CODEC_ID_PCM_ZORK,
+ AV_CODEC_ID_PCM_S16LE_PLANAR,
+ AV_CODEC_ID_PCM_DVD,
+ AV_CODEC_ID_PCM_F32BE,
+ AV_CODEC_ID_PCM_F32LE,
+ AV_CODEC_ID_PCM_F64BE,
+ AV_CODEC_ID_PCM_F64LE,
+ AV_CODEC_ID_PCM_BLURAY,
+ AV_CODEC_ID_PCM_LXF,
+ AV_CODEC_ID_S302M,
+ AV_CODEC_ID_PCM_S8_PLANAR,
+ AV_CODEC_ID_PCM_S24LE_PLANAR,
+ AV_CODEC_ID_PCM_S32LE_PLANAR,
+ AV_CODEC_ID_PCM_S16BE_PLANAR,
+ AV_CODEC_ID_PCM_S64LE,
+ AV_CODEC_ID_PCM_S64BE,
+ AV_CODEC_ID_PCM_F16LE,
+ AV_CODEC_ID_PCM_F24LE,
+ AV_CODEC_ID_PCM_VIDC,
+ AV_CODEC_ID_PCM_SGA,
+
+ /* various ADPCM codecs */
+ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ AV_CODEC_ID_ADPCM_IMA_WAV,
+ AV_CODEC_ID_ADPCM_IMA_DK3,
+ AV_CODEC_ID_ADPCM_IMA_DK4,
+ AV_CODEC_ID_ADPCM_IMA_WS,
+ AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+ AV_CODEC_ID_ADPCM_MS,
+ AV_CODEC_ID_ADPCM_4XM,
+ AV_CODEC_ID_ADPCM_XA,
+ AV_CODEC_ID_ADPCM_ADX,
+ AV_CODEC_ID_ADPCM_EA,
+ AV_CODEC_ID_ADPCM_G726,
+ AV_CODEC_ID_ADPCM_CT,
+ AV_CODEC_ID_ADPCM_SWF,
+ AV_CODEC_ID_ADPCM_YAMAHA,
+ AV_CODEC_ID_ADPCM_SBPRO_4,
+ AV_CODEC_ID_ADPCM_SBPRO_3,
+ AV_CODEC_ID_ADPCM_SBPRO_2,
+ AV_CODEC_ID_ADPCM_THP,
+ AV_CODEC_ID_ADPCM_IMA_AMV,
+ AV_CODEC_ID_ADPCM_EA_R1,
+ AV_CODEC_ID_ADPCM_EA_R3,
+ AV_CODEC_ID_ADPCM_EA_R2,
+ AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+ AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+ AV_CODEC_ID_ADPCM_EA_XAS,
+ AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+ AV_CODEC_ID_ADPCM_IMA_ISS,
+ AV_CODEC_ID_ADPCM_G722,
+ AV_CODEC_ID_ADPCM_IMA_APC,
+ AV_CODEC_ID_ADPCM_VIMA,
+ AV_CODEC_ID_ADPCM_AFC,
+ AV_CODEC_ID_ADPCM_IMA_OKI,
+ AV_CODEC_ID_ADPCM_DTK,
+ AV_CODEC_ID_ADPCM_IMA_RAD,
+ AV_CODEC_ID_ADPCM_G726LE,
+ AV_CODEC_ID_ADPCM_THP_LE,
+ AV_CODEC_ID_ADPCM_PSX,
+ AV_CODEC_ID_ADPCM_AICA,
+ AV_CODEC_ID_ADPCM_IMA_DAT4,
+ AV_CODEC_ID_ADPCM_MTAF,
+ AV_CODEC_ID_ADPCM_AGM,
+ AV_CODEC_ID_ADPCM_ARGO,
+ AV_CODEC_ID_ADPCM_IMA_SSI,
+ AV_CODEC_ID_ADPCM_ZORK,
+ AV_CODEC_ID_ADPCM_IMA_APM,
+ AV_CODEC_ID_ADPCM_IMA_ALP,
+ AV_CODEC_ID_ADPCM_IMA_MTF,
+ AV_CODEC_ID_ADPCM_IMA_CUNNING,
+ AV_CODEC_ID_ADPCM_IMA_MOFLEX,
+ AV_CODEC_ID_ADPCM_IMA_ACORN,
+ AV_CODEC_ID_ADPCM_XMD,
+
+ /* AMR */
+ AV_CODEC_ID_AMR_NB = 0x12000,
+ AV_CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ AV_CODEC_ID_RA_144 = 0x13000,
+ AV_CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ AV_CODEC_ID_ROQ_DPCM = 0x14000,
+ AV_CODEC_ID_INTERPLAY_DPCM,
+ AV_CODEC_ID_XAN_DPCM,
+ AV_CODEC_ID_SOL_DPCM,
+ AV_CODEC_ID_SDX2_DPCM,
+ AV_CODEC_ID_GREMLIN_DPCM,
+ AV_CODEC_ID_DERF_DPCM,
+ AV_CODEC_ID_WADY_DPCM,
+ AV_CODEC_ID_CBD2_DPCM,
+
+ /* audio codecs */
+ AV_CODEC_ID_MP2 = 0x15000,
+ AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_VORBIS,
+ AV_CODEC_ID_DVAUDIO,
+ AV_CODEC_ID_WMAV1,
+ AV_CODEC_ID_WMAV2,
+ AV_CODEC_ID_MACE3,
+ AV_CODEC_ID_MACE6,
+ AV_CODEC_ID_VMDAUDIO,
+ AV_CODEC_ID_FLAC,
+ AV_CODEC_ID_MP3ADU,
+ AV_CODEC_ID_MP3ON4,
+ AV_CODEC_ID_SHORTEN,
+ AV_CODEC_ID_ALAC,
+ AV_CODEC_ID_WESTWOOD_SND1,
+ AV_CODEC_ID_GSM, ///< as in Berlin toast format
+ AV_CODEC_ID_QDM2,
+ AV_CODEC_ID_COOK,
+ AV_CODEC_ID_TRUESPEECH,
+ AV_CODEC_ID_TTA,
+ AV_CODEC_ID_SMACKAUDIO,
+ AV_CODEC_ID_QCELP,
+ AV_CODEC_ID_WAVPACK,
+ AV_CODEC_ID_DSICINAUDIO,
+ AV_CODEC_ID_IMC,
+ AV_CODEC_ID_MUSEPACK7,
+ AV_CODEC_ID_MLP,
+ AV_CODEC_ID_GSM_MS, /* as found in WAV */
+ AV_CODEC_ID_ATRAC3,
+ AV_CODEC_ID_APE,
+ AV_CODEC_ID_NELLYMOSER,
+ AV_CODEC_ID_MUSEPACK8,
+ AV_CODEC_ID_SPEEX,
+ AV_CODEC_ID_WMAVOICE,
+ AV_CODEC_ID_WMAPRO,
+ AV_CODEC_ID_WMALOSSLESS,
+ AV_CODEC_ID_ATRAC3P,
+ AV_CODEC_ID_EAC3,
+ AV_CODEC_ID_SIPR,
+ AV_CODEC_ID_MP1,
+ AV_CODEC_ID_TWINVQ,
+ AV_CODEC_ID_TRUEHD,
+ AV_CODEC_ID_MP4ALS,
+ AV_CODEC_ID_ATRAC1,
+ AV_CODEC_ID_BINKAUDIO_RDFT,
+ AV_CODEC_ID_BINKAUDIO_DCT,
+ AV_CODEC_ID_AAC_LATM,
+ AV_CODEC_ID_QDMC,
+ AV_CODEC_ID_CELT,
+ AV_CODEC_ID_G723_1,
+ AV_CODEC_ID_G729,
+ AV_CODEC_ID_8SVX_EXP,
+ AV_CODEC_ID_8SVX_FIB,
+ AV_CODEC_ID_BMV_AUDIO,
+ AV_CODEC_ID_RALF,
+ AV_CODEC_ID_IAC,
+ AV_CODEC_ID_ILBC,
+ AV_CODEC_ID_OPUS,
+ AV_CODEC_ID_COMFORT_NOISE,
+ AV_CODEC_ID_TAK,
+ AV_CODEC_ID_METASOUND,
+ AV_CODEC_ID_PAF_AUDIO,
+ AV_CODEC_ID_ON2AVC,
+ AV_CODEC_ID_DSS_SP,
+ AV_CODEC_ID_CODEC2,
+ AV_CODEC_ID_FFWAVESYNTH,
+ AV_CODEC_ID_SONIC,
+ AV_CODEC_ID_SONIC_LS,
+ AV_CODEC_ID_EVRC,
+ AV_CODEC_ID_SMV,
+ AV_CODEC_ID_DSD_LSBF,
+ AV_CODEC_ID_DSD_MSBF,
+ AV_CODEC_ID_DSD_LSBF_PLANAR,
+ AV_CODEC_ID_DSD_MSBF_PLANAR,
+ AV_CODEC_ID_4GV,
+ AV_CODEC_ID_INTERPLAY_ACM,
+ AV_CODEC_ID_XMA1,
+ AV_CODEC_ID_XMA2,
+ AV_CODEC_ID_DST,
+ AV_CODEC_ID_ATRAC3AL,
+ AV_CODEC_ID_ATRAC3PAL,
+ AV_CODEC_ID_DOLBY_E,
+ AV_CODEC_ID_APTX,
+ AV_CODEC_ID_APTX_HD,
+ AV_CODEC_ID_SBC,
+ AV_CODEC_ID_ATRAC9,
+ AV_CODEC_ID_HCOM,
+ AV_CODEC_ID_ACELP_KELVIN,
+ AV_CODEC_ID_MPEGH_3D_AUDIO,
+ AV_CODEC_ID_SIREN,
+ AV_CODEC_ID_HCA,
+ AV_CODEC_ID_FASTAUDIO,
+ AV_CODEC_ID_MSNSIREN,
+ AV_CODEC_ID_DFPWM,
+ AV_CODEC_ID_BONK,
+ AV_CODEC_ID_MISC4,
+ AV_CODEC_ID_APAC,
+ AV_CODEC_ID_FTR,
+ AV_CODEC_ID_WAVARC,
+ AV_CODEC_ID_RKA,
+
+ /* subtitle codecs */
+ AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
+ AV_CODEC_ID_DVB_SUBTITLE,
+ AV_CODEC_ID_TEXT, ///< raw UTF-8 text
+ AV_CODEC_ID_XSUB,
+ AV_CODEC_ID_SSA,
+ AV_CODEC_ID_MOV_TEXT,
+ AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+ AV_CODEC_ID_DVB_TELETEXT,
+ AV_CODEC_ID_SRT,
+ AV_CODEC_ID_MICRODVD,
+ AV_CODEC_ID_EIA_608,
+ AV_CODEC_ID_JACOSUB,
+ AV_CODEC_ID_SAMI,
+ AV_CODEC_ID_REALTEXT,
+ AV_CODEC_ID_STL,
+ AV_CODEC_ID_SUBVIEWER1,
+ AV_CODEC_ID_SUBVIEWER,
+ AV_CODEC_ID_SUBRIP,
+ AV_CODEC_ID_WEBVTT,
+ AV_CODEC_ID_MPL2,
+ AV_CODEC_ID_VPLAYER,
+ AV_CODEC_ID_PJS,
+ AV_CODEC_ID_ASS,
+ AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
+ AV_CODEC_ID_TTML,
+ AV_CODEC_ID_ARIB_CAPTION,
+
+ /* other specific kind of codecs (generally used for attachments) */
+ AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ AV_CODEC_ID_TTF = 0x18000,
+
+ AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream.
+ AV_CODEC_ID_EPG,
+ AV_CODEC_ID_BINTEXT,
+ AV_CODEC_ID_XBIN,
+ AV_CODEC_ID_IDF,
+ AV_CODEC_ID_OTF,
+ AV_CODEC_ID_SMPTE_KLV,
+ AV_CODEC_ID_DVD_NAV,
+ AV_CODEC_ID_TIMED_ID3,
+ AV_CODEC_ID_BIN_DATA,
+
+
+ AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
+
+ AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+ AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket
+ /**
+ * Dummy null video codec, useful mainly for development and debugging.
+ * Null encoder/decoder discard all input and never return any output.
+ */
+ AV_CODEC_ID_VNULL,
+ /**
+ * Dummy null audio codec, useful mainly for development and debugging.
+ * Null encoder/decoder discard all input and never return any output.
+ */
+ AV_CODEC_ID_ANULL,
+};
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
+
+/**
+ * Get the name of a codec.
+ * @return a static string identifying the codec; never NULL
+ */
+const char *avcodec_get_name(enum AVCodecID id);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec_id the ID of the codec to which the requested profile belongs
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ *
+ * @note unlike av_get_profile_name(), which searches a list of profiles
+ * supported by a specific decoder or encoder implementation, this
+ * function searches the list of profiles from the AVCodecDescriptor
+ */
+const char *avcodec_profile_name(enum AVCodecID codec_id, int profile);
+
+/**
+ * Return the PCM codec associated with a sample format.
+ * @param be endianness, 0 for little, 1 for big,
+ * -1 (or anything else) for native
+ * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
+ */
+enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_ID_H
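The lookup helpers declared above are public libavcodec API, so a short usage sketch may help; the describe_codec_id() wrapper and the system include path are illustrative assumptions, not part of this patch.

#include <stdio.h>
#include <libavcodec/avcodec.h>   /* pulls in codec_id.h and the declarations above */

/* Print a few of the codec_id.h lookups for one codec ID. */
static void describe_codec_id(enum AVCodecID id, int profile)
{
    const char *profile_name = avcodec_profile_name(id, profile);

    /* avcodec_get_name() never returns NULL, so it is safe to print directly. */
    printf("%s: type=%d, bits/sample=%d, profile=%s\n",
           avcodec_get_name(id),
           (int)avcodec_get_type(id),
           av_get_bits_per_sample(id),
           profile_name ? profile_name : "unknown");
}

int main(void)
{
    describe_codec_id(AV_CODEC_ID_FLAC, FF_PROFILE_UNKNOWN);

    /* Map a sample format back to its PCM codec ID (0 = little-endian). */
    printf("s16le -> %s\n",
           avcodec_get_name(av_get_pcm_codec(AV_SAMPLE_FMT_S16, 0)));
    return 0;
}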
diff --git a/media/ffvpx/libavcodec/codec_internal.h b/media/ffvpx/libavcodec/codec_internal.h
new file mode 100644
index 0000000000..130a7dc3cd
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_internal.h
@@ -0,0 +1,330 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_INTERNAL_H
+#define AVCODEC_CODEC_INTERNAL_H
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "codec.h"
+#include "config.h"
+
+/**
+ * The codec is not known to be init-threadsafe (i.e. it might be unsafe
+ * to initialize this codec and another codec concurrently, typically because
+ * the codec calls external APIs that are not known to be thread-safe).
+ * Therefore calling the codec's init function needs to be guarded with a lock.
+ */
+#define FF_CODEC_CAP_NOT_INIT_THREADSAFE (1 << 0)
+/**
+ * The codec allows calling the close function for deallocation even if
+ * the init function returned a failure. Without this capability flag, a
+ * codec does such cleanup internally when returning failures from the
+ * init function and does not expect the close function to be called at
+ * all.
+ */
+#define FF_CODEC_CAP_INIT_CLEANUP (1 << 1)
+/**
+ * Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set
+ * AVFrame.pkt_dts manually. If the flag is set, decode.c won't overwrite
+ * this field. If it's unset, decode.c tries to guess the pkt_dts field
+ * from the input AVPacket.
+ */
+#define FF_CODEC_CAP_SETS_PKT_DTS (1 << 2)
+/**
+ * The decoder extracts and fills its parameters even if the frame is
+ * skipped due to the skip_frame setting.
+ */
+#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM (1 << 3)
+/**
+ * The decoder sets the cropping fields in the output frames manually.
+ * If this cap is set, the generic code will initialize output frame
+ * dimensions to coded rather than display values.
+ */
+#define FF_CODEC_CAP_EXPORTS_CROPPING (1 << 4)
+/**
+ * Codec initializes slice-based threading with a main function
+ */
+#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF (1 << 5)
+/**
+ * The codec supports frame threading and has inter-frame dependencies, so it
+ * uses ff_thread_report/await_progress().
+ */
+#define FF_CODEC_CAP_ALLOCATE_PROGRESS (1 << 6)
+/**
+ * Codec handles avctx->thread_count == 0 (auto) internally.
+ */
+#define FF_CODEC_CAP_AUTO_THREADS (1 << 7)
+/**
+ * Codec handles output frame properties internally instead of letting the
+ * internal logic derive them from AVCodecInternal.last_pkt_props.
+ */
+#define FF_CODEC_CAP_SETS_FRAME_PROPS (1 << 8)
+/**
+ * Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
+ */
+#define FF_CODEC_CAP_ICC_PROFILES (1 << 9)
+/**
+ * The encoder has AV_CODEC_CAP_DELAY set, but does not actually have delay - it
+ * only wants to be flushed at the end to update some context variables (e.g.
+ * 2pass stats) or produce a trailing packet. Besides that it immediately
+ * produces exactly one output packet for each input frame, just as no-delay
+ * encoders do.
+ */
+#define FF_CODEC_CAP_EOF_FLUSH (1 << 10)
+
+/**
+ * FFCodec.codec_tags termination value
+ */
+#define FF_CODEC_TAGS_END -1
+
+typedef struct FFCodecDefault {
+ const char *key;
+ const char *value;
+} FFCodecDefault;
+
+struct AVCodecContext;
+struct AVSubtitle;
+struct AVPacket;
+
+enum FFCodecType {
+ /* The codec is a decoder using the decode callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_DECODE,
+ /* The codec is a decoder using the decode_sub callback;
+ * subtitle codecs only. */
+ FF_CODEC_CB_TYPE_DECODE_SUB,
+ /* The codec is a decoder using the receive_frame callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_RECEIVE_FRAME,
+ /* The codec is an encoder using the encode callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_ENCODE,
+ /* The codec is an encoder using the encode_sub callback;
+ * subtitle codecs only. */
+ FF_CODEC_CB_TYPE_ENCODE_SUB,
+ /* The codec is an encoder using the receive_packet callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_RECEIVE_PACKET,
+};
+
+typedef struct FFCodec {
+ /**
+ * The public AVCodec. See codec.h for it.
+ */
+ AVCodec p;
+
+ /**
+ * Internal codec capabilities FF_CODEC_CAP_*.
+ */
+ unsigned caps_internal:29;
+
+ /**
+ * This field determines the type of the codec (decoder/encoder)
+ * and also the exact callback cb implemented by the codec.
+ * cb_type uses enum FFCodecType values.
+ */
+ unsigned cb_type:3;
+
+ int priv_data_size;
+ /**
+ * @name Frame-level threading support functions
+ * @{
+ */
+ /**
+ * Copy necessary context variables from a previous thread context to the current one.
+ * If not defined, the next thread will start automatically; otherwise, the codec
+ * must call ff_thread_finish_setup().
+ *
+ * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
+ */
+ int (*update_thread_context)(struct AVCodecContext *dst, const struct AVCodecContext *src);
+
+ /**
+ * Copy variables back to the user-facing context
+ */
+ int (*update_thread_context_for_user)(struct AVCodecContext *dst, const struct AVCodecContext *src);
+ /** @} */
+
+ /**
+ * Private codec-specific defaults.
+ */
+ const FFCodecDefault *defaults;
+
+ /**
+ * Initialize codec static data, called from av_codec_iterate().
+ *
+ * This is not intended for time-consuming operations, as it is
+ * run for every codec regardless of whether that codec is ever used.
+ */
+ void (*init_static_data)(struct FFCodec *codec);
+
+ int (*init)(struct AVCodecContext *);
+
+ union {
+ /**
+ * Decode to an AVFrame.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE.
+ *
+ * @param avctx codec context
+ * @param[out] frame AVFrame for output
+ * @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that
+ * a non-empty frame was returned in frame.
+ * @param[in] avpkt AVPacket containing the data to be decoded
+ * @return amount of bytes read from the packet on success,
+ * negative error code on failure
+ */
+ int (*decode)(struct AVCodecContext *avctx, struct AVFrame *frame,
+ int *got_frame_ptr, struct AVPacket *avpkt);
+ /**
+ * Decode subtitle data to an AVSubtitle.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE_SUB.
+ *
+ * Apart from that this is like the decode callback.
+ */
+ int (*decode_sub)(struct AVCodecContext *avctx, struct AVSubtitle *sub,
+ int *got_frame_ptr, const struct AVPacket *avpkt);
+ /**
+ * Decode API with decoupled packet/frame dataflow.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_FRAME.
+ *
+ * This function is called to get one output frame. It should call
+ * ff_decode_get_packet() to obtain input data.
+ */
+ int (*receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame);
+ /**
+ * Encode data to an AVPacket.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE
+ *
+ * @param avctx codec context
+ * @param[out] avpkt output AVPacket
+ * @param[in] frame AVFrame containing the input to be encoded
+ * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+ * non-empty packet was returned in avpkt.
+ * @return 0 on success, negative error code on failure
+ */
+ int (*encode)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
+ const struct AVFrame *frame, int *got_packet_ptr);
+ /**
+ * Encode subtitles to a raw buffer.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE_SUB.
+ */
+ int (*encode_sub)(struct AVCodecContext *avctx, uint8_t *buf,
+ int buf_size, const struct AVSubtitle *sub);
+ /**
+ * Encode API with decoupled frame/packet dataflow.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_PACKET.
+ *
+ * This function is called to get one output packet.
+ * It should call ff_encode_get_frame() to obtain input data.
+ */
+ int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
+ } cb;
+
+ int (*close)(struct AVCodecContext *);
+
+ /**
+ * Flush buffers.
+ * Will be called when seeking
+ */
+ void (*flush)(struct AVCodecContext *);
+
+ /**
+ * Decoding only, a comma-separated list of bitstream filters to apply to
+ * packets before decoding.
+ */
+ const char *bsfs;
+
+ /**
+ * Array of pointers to hardware configurations supported by the codec,
+ * or NULL if no hardware is supported. The array is terminated by a NULL
+ * pointer.
+ *
+ * The user can only access this field via avcodec_get_hw_config().
+ */
+ const struct AVCodecHWConfigInternal *const *hw_configs;
+
+ /**
+ * List of supported codec_tags, terminated by FF_CODEC_TAGS_END.
+ */
+ const uint32_t *codec_tags;
+} FFCodec;
+
+#if CONFIG_SMALL
+#define CODEC_LONG_NAME(str) .p.long_name = NULL
+#else
+#define CODEC_LONG_NAME(str) .p.long_name = str
+#endif
+
+#if HAVE_THREADS
+#define UPDATE_THREAD_CONTEXT(func) \
+ .update_thread_context = (func)
+#define UPDATE_THREAD_CONTEXT_FOR_USER(func) \
+ .update_thread_context_for_user = (func)
+#else
+#define UPDATE_THREAD_CONTEXT(func) \
+ .update_thread_context = NULL
+#define UPDATE_THREAD_CONTEXT_FOR_USER(func) \
+ .update_thread_context_for_user = NULL
+#endif
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+#define CODEC_OLD_CHANNEL_LAYOUTS(...) CODEC_OLD_CHANNEL_LAYOUTS_ARRAY(((const uint64_t[]) { __VA_ARGS__, 0 }))
+#if defined(__clang__)
+#define CODEC_OLD_CHANNEL_LAYOUTS_ARRAY(array) \
+ FF_DISABLE_DEPRECATION_WARNINGS \
+ .p.channel_layouts = (array), \
+ FF_ENABLE_DEPRECATION_WARNINGS
+#else
+#define CODEC_OLD_CHANNEL_LAYOUTS_ARRAY(array) .p.channel_layouts = (array),
+#endif
+#else
+/* This is only provided to allow testing with FF_API_OLD_CHANNEL_LAYOUT disabled
+ * without removing all the FF_API_OLD_CHANNEL_LAYOUT code blocks.
+ * It is of course still expected to be removed once FF_API_OLD_CHANNEL_LAYOUT
+ * is finally removed (along with all usages of these macros). */
+#define CODEC_OLD_CHANNEL_LAYOUTS(...)
+#define CODEC_OLD_CHANNEL_LAYOUTS_ARRAY(array)
+#endif
+
+#define FF_CODEC_DECODE_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_DECODE, \
+ .cb.decode = (func)
+#define FF_CODEC_DECODE_SUB_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_DECODE_SUB, \
+ .cb.decode_sub = (func)
+#define FF_CODEC_RECEIVE_FRAME_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_RECEIVE_FRAME, \
+ .cb.receive_frame = (func)
+#define FF_CODEC_ENCODE_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_ENCODE, \
+ .cb.encode = (func)
+#define FF_CODEC_ENCODE_SUB_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_ENCODE_SUB, \
+ .cb.encode_sub = (func)
+#define FF_CODEC_RECEIVE_PACKET_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_RECEIVE_PACKET, \
+ .cb.receive_packet = (func)
+
+static av_always_inline const FFCodec *ffcodec(const AVCodec *codec)
+{
+ return (const FFCodec*)codec;
+}
+
+#endif /* AVCODEC_CODEC_INTERNAL_H */
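To show how the pieces above fit together, here is a hedged sketch of a decoder registration; everything named mydec is hypothetical and the callback bodies are stubs, so this illustrates the FFCodec/macro pattern rather than any real codec in this tree.

#include "avcodec.h"
#include "codec_internal.h"

static int mydec_init(AVCodecContext *avctx)
{
    return 0;                 /* nothing to set up in this stub */
}

static int mydec_close(AVCodecContext *avctx)
{
    return 0;                 /* FF_CODEC_CAP_INIT_CLEANUP: safe even if init failed */
}

static int mydec_decode(AVCodecContext *avctx, AVFrame *frame,
                        int *got_frame_ptr, AVPacket *avpkt)
{
    *got_frame_ptr = 0;       /* no frame produced by this stub */
    return avpkt->size;       /* report the whole packet as consumed */
}

const FFCodec ff_mydec_decoder = {
    .p.name         = "mydec",
    CODEC_LONG_NAME("Hypothetical example decoder"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_NONE,     /* placeholder ID for illustration */
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .init           = mydec_init,
    .close          = mydec_close,
    FF_CODEC_DECODE_CB(mydec_decode),
};

Generic code then recovers the private struct from the public pointer with ffcodec(&ff_mydec_decoder.p).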
diff --git a/media/ffvpx/libavcodec/codec_list.c b/media/ffvpx/libavcodec/codec_list.c
index 063f8ff78d..73cf1fd296 100644
--- a/media/ffvpx/libavcodec/codec_list.c
+++ b/media/ffvpx/libavcodec/codec_list.c
@@ -1,4 +1,4 @@
-static const AVCodec * const codec_list[] = {
+static const FFCodec * const codec_list[] = {
#if CONFIG_VP8_DECODER
&ff_vp8_decoder,
#endif
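For context, the list above is consumed roughly as in the sketch below (illustrative only, not the actual allcodecs.c code); it assumes the complete codec_list[] is NULL-terminated, as in the full file, and that codec_internal.h and stdint.h are in scope.

/* Walk the internal list but hand out only the embedded public AVCodec,
 * mirroring what av_codec_iterate() does with this array. */
static const AVCodec *codec_list_next(void **opaque)
{
    uintptr_t i = (uintptr_t)*opaque;
    const FFCodec *c = codec_list[i];

    if (!c)
        return NULL;
    *opaque = (void *)(i + 1);
    return &c->p;             /* callers never see the FFCodec wrapper */
}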
diff --git a/media/ffvpx/libavcodec/codec_par.c b/media/ffvpx/libavcodec/codec_par.c
new file mode 100644
index 0000000000..abda649aa8
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_par.c
@@ -0,0 +1,263 @@
+/*
+ * AVCodecParameters functions for libavcodec
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AVCodecParameters functions for libavcodec.
+ */
+
+#include <string.h>
+#include "libavutil/mem.h"
+#include "avcodec.h"
+#include "codec_par.h"
+
+static void codec_parameters_reset(AVCodecParameters *par)
+{
+ av_freep(&par->extradata);
+ av_channel_layout_uninit(&par->ch_layout);
+
+ memset(par, 0, sizeof(*par));
+
+ par->codec_type = AVMEDIA_TYPE_UNKNOWN;
+ par->codec_id = AV_CODEC_ID_NONE;
+ par->format = -1;
+ par->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ par->field_order = AV_FIELD_UNKNOWN;
+ par->color_range = AVCOL_RANGE_UNSPECIFIED;
+ par->color_primaries = AVCOL_PRI_UNSPECIFIED;
+ par->color_trc = AVCOL_TRC_UNSPECIFIED;
+ par->color_space = AVCOL_SPC_UNSPECIFIED;
+ par->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
+ par->sample_aspect_ratio = (AVRational){ 0, 1 };
+ par->profile = FF_PROFILE_UNKNOWN;
+ par->level = FF_LEVEL_UNKNOWN;
+}
+
+AVCodecParameters *avcodec_parameters_alloc(void)
+{
+ AVCodecParameters *par = av_mallocz(sizeof(*par));
+
+ if (!par)
+ return NULL;
+ codec_parameters_reset(par);
+ return par;
+}
+
+void avcodec_parameters_free(AVCodecParameters **ppar)
+{
+ AVCodecParameters *par = *ppar;
+
+ if (!par)
+ return;
+ codec_parameters_reset(par);
+
+ av_freep(ppar);
+}
+
+int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
+{
+ int ret;
+
+ codec_parameters_reset(dst);
+ memcpy(dst, src, sizeof(*dst));
+
+ dst->ch_layout = (AVChannelLayout){0};
+ dst->extradata = NULL;
+ dst->extradata_size = 0;
+ if (src->extradata) {
+ dst->extradata = av_mallocz(src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!dst->extradata)
+ return AVERROR(ENOMEM);
+ memcpy(dst->extradata, src->extradata, src->extradata_size);
+ dst->extradata_size = src->extradata_size;
+ }
+
+ ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int avcodec_parameters_from_context(AVCodecParameters *par,
+ const AVCodecContext *codec)
+{
+ int ret;
+
+ codec_parameters_reset(par);
+
+ par->codec_type = codec->codec_type;
+ par->codec_id = codec->codec_id;
+ par->codec_tag = codec->codec_tag;
+
+ par->bit_rate = codec->bit_rate;
+ par->bits_per_coded_sample = codec->bits_per_coded_sample;
+ par->bits_per_raw_sample = codec->bits_per_raw_sample;
+ par->profile = codec->profile;
+ par->level = codec->level;
+
+ switch (par->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ par->format = codec->pix_fmt;
+ par->width = codec->width;
+ par->height = codec->height;
+ par->field_order = codec->field_order;
+ par->color_range = codec->color_range;
+ par->color_primaries = codec->color_primaries;
+ par->color_trc = codec->color_trc;
+ par->color_space = codec->colorspace;
+ par->chroma_location = codec->chroma_sample_location;
+ par->sample_aspect_ratio = codec->sample_aspect_ratio;
+ par->video_delay = codec->has_b_frames;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ par->format = codec->sample_fmt;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ // if the old/new fields are set inconsistently, prefer the old ones
+ if ((codec->channels && codec->channels != codec->ch_layout.nb_channels) ||
+ (codec->channel_layout && (codec->ch_layout.order != AV_CHANNEL_ORDER_NATIVE ||
+ codec->ch_layout.u.mask != codec->channel_layout))) {
+ if (codec->channel_layout)
+ av_channel_layout_from_mask(&par->ch_layout, codec->channel_layout);
+ else {
+ par->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ par->ch_layout.nb_channels = codec->channels;
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+ } else {
+#endif
+ ret = av_channel_layout_copy(&par->ch_layout, &codec->ch_layout);
+ if (ret < 0)
+ return ret;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ }
+ par->channel_layout = par->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ par->ch_layout.u.mask : 0;
+ par->channels = par->ch_layout.nb_channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ par->sample_rate = codec->sample_rate;
+ par->block_align = codec->block_align;
+ par->frame_size = codec->frame_size;
+ par->initial_padding = codec->initial_padding;
+ par->trailing_padding = codec->trailing_padding;
+ par->seek_preroll = codec->seek_preroll;
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ par->width = codec->width;
+ par->height = codec->height;
+ break;
+ }
+
+ if (codec->extradata) {
+ par->extradata = av_mallocz(codec->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!par->extradata)
+ return AVERROR(ENOMEM);
+ memcpy(par->extradata, codec->extradata, codec->extradata_size);
+ par->extradata_size = codec->extradata_size;
+ }
+
+ return 0;
+}
+
+int avcodec_parameters_to_context(AVCodecContext *codec,
+ const AVCodecParameters *par)
+{
+ int ret;
+
+ codec->codec_type = par->codec_type;
+ codec->codec_id = par->codec_id;
+ codec->codec_tag = par->codec_tag;
+
+ codec->bit_rate = par->bit_rate;
+ codec->bits_per_coded_sample = par->bits_per_coded_sample;
+ codec->bits_per_raw_sample = par->bits_per_raw_sample;
+ codec->profile = par->profile;
+ codec->level = par->level;
+
+ switch (par->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ codec->pix_fmt = par->format;
+ codec->width = par->width;
+ codec->height = par->height;
+ codec->field_order = par->field_order;
+ codec->color_range = par->color_range;
+ codec->color_primaries = par->color_primaries;
+ codec->color_trc = par->color_trc;
+ codec->colorspace = par->color_space;
+ codec->chroma_sample_location = par->chroma_location;
+ codec->sample_aspect_ratio = par->sample_aspect_ratio;
+ codec->has_b_frames = par->video_delay;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ codec->sample_fmt = par->format;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ // if the old/new fields are set inconsistently, prefer the old ones
+ if ((par->channels && par->channels != par->ch_layout.nb_channels) ||
+ (par->channel_layout && (par->ch_layout.order != AV_CHANNEL_ORDER_NATIVE ||
+ par->ch_layout.u.mask != par->channel_layout))) {
+ if (par->channel_layout)
+ av_channel_layout_from_mask(&codec->ch_layout, par->channel_layout);
+ else {
+ codec->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ codec->ch_layout.nb_channels = par->channels;
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+ } else {
+#endif
+ ret = av_channel_layout_copy(&codec->ch_layout, &par->ch_layout);
+ if (ret < 0)
+ return ret;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ }
+ codec->channel_layout = codec->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ codec->ch_layout.u.mask : 0;
+ codec->channels = codec->ch_layout.nb_channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ codec->sample_rate = par->sample_rate;
+ codec->block_align = par->block_align;
+ codec->frame_size = par->frame_size;
+ codec->delay =
+ codec->initial_padding = par->initial_padding;
+ codec->trailing_padding = par->trailing_padding;
+ codec->seek_preroll = par->seek_preroll;
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ codec->width = par->width;
+ codec->height = par->height;
+ break;
+ }
+
+ if (par->extradata) {
+ av_freep(&codec->extradata);
+ codec->extradata = av_mallocz(par->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!codec->extradata)
+ return AVERROR(ENOMEM);
+ memcpy(codec->extradata, par->extradata, par->extradata_size);
+ codec->extradata_size = par->extradata_size;
+ }
+
+ return 0;
+}
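Since these are the public entry points, a brief sketch of the usual round trip may be useful; clone_codec_properties() is a made-up helper name and the system include path is assumed.

#include <libavcodec/avcodec.h>

/* Propagate stream properties from one codec context into another via
 * AVCodecParameters, the way demuxer/muxer glue code typically does. */
static int clone_codec_properties(AVCodecContext *dst, const AVCodecContext *src)
{
    AVCodecParameters *par = avcodec_parameters_alloc();
    int ret;

    if (!par)
        return AVERROR(ENOMEM);

    ret = avcodec_parameters_from_context(par, src);   /* deep-copies extradata */
    if (ret >= 0)
        ret = avcodec_parameters_to_context(dst, par);

    avcodec_parameters_free(&par);                     /* frees extradata/ch_layout, NULLs par */
    return ret;
}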
diff --git a/media/ffvpx/libavcodec/codec_par.h b/media/ffvpx/libavcodec/codec_par.h
new file mode 100644
index 0000000000..f51d27c590
--- /dev/null
+++ b/media/ffvpx/libavcodec/codec_par.h
@@ -0,0 +1,247 @@
+/*
+ * Codec parameters public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_CODEC_PAR_H
+#define AVCODEC_CODEC_PAR_H
+
+#include <stdint.h>
+
+#include "libavutil/avutil.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/rational.h"
+#include "libavutil/pixfmt.h"
+
+#include "codec_id.h"
+
+/**
+ * @addtogroup lavc_core
+ * @{
+ */
+
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+ AV_FIELD_TT, ///< Top coded first, top displayed first
+ AV_FIELD_BB, ///< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, ///< Top coded first, bottom displayed first
+ AV_FIELD_BT, ///< Bottom coded first, top displayed first
+};
+
+/**
+ * This struct describes the properties of an encoded stream.
+ *
+ * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must
+ * be allocated with avcodec_parameters_alloc() and freed with
+ * avcodec_parameters_free().
+ */
+typedef struct AVCodecParameters {
+ /**
+ * General type of the encoded data.
+ */
+ enum AVMediaType codec_type;
+ /**
+ * Specific type of the encoded data (the codec used).
+ */
+ enum AVCodecID codec_id;
+ /**
+ * Additional information about the codec (corresponds to the AVI FOURCC).
+ */
+ uint32_t codec_tag;
+
+ /**
+ * Extra binary data needed for initializing the decoder, codec-dependent.
+ *
+ * Must be allocated with av_malloc() and will be freed by
+ * avcodec_parameters_free(). The allocated size of extradata must be at
+ * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding
+ * bytes zeroed.
+ */
+ uint8_t *extradata;
+ /**
+ * Size of the extradata content in bytes.
+ */
+ int extradata_size;
+
+ /**
+ * - video: the pixel format, the value corresponds to enum AVPixelFormat.
+ * - audio: the sample format, the value corresponds to enum AVSampleFormat.
+ */
+ int format;
+
+ /**
+ * The average bitrate of the encoded data (in bits per second).
+ */
+ int64_t bit_rate;
+
+ /**
+ * The number of bits per sample in the codewords.
+ *
+ * This is basically the bitrate per sample. It is mandatory for many
+ * formats to actually decode them. It is the number of bits for one sample
+ * in the actual coded bitstream.
+ *
+ * This could be, for example, 4 for ADPCM.
+ * For PCM formats this matches bits_per_raw_sample.
+ * Can be 0.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * This is the number of valid bits in each output sample. If the
+ * sample format has more bits, the least significant bits are additional
+ * padding bits, which are always 0. Use right shifts to reduce the sample
+ * to its actual size. For example, audio formats with 24 bit samples will
+ * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32.
+ * To get the original sample use "(int32_t)sample >> 8".
+ *
+ * For ADPCM this might be 12, 16, or similar.
+ * Can be 0.
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * Codec-specific bitstream restrictions that the stream conforms to.
+ */
+ int profile;
+ int level;
+
+ /**
+ * Video only. The dimensions of the video frame in pixels.
+ */
+ int width;
+ int height;
+
+ /**
+ * Video only. The aspect ratio (width / height) which a single pixel
+ * should have when displayed.
+ *
+ * When the aspect ratio is unknown / undefined, the numerator should be
+ * set to 0 (the denominator may have any value).
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Video only. The order of the fields in interlaced video.
+ */
+ enum AVFieldOrder field_order;
+
+ /**
+ * Video only. Additional colorspace characteristics.
+ */
+ enum AVColorRange color_range;
+ enum AVColorPrimaries color_primaries;
+ enum AVColorTransferCharacteristic color_trc;
+ enum AVColorSpace color_space;
+ enum AVChromaLocation chroma_location;
+
+ /**
+ * Video only. Number of delayed frames.
+ */
+ int video_delay;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * Audio only. The channel layout bitmask. May be 0 if the channel layout is
+ * unknown or unspecified, otherwise the number of bits set must be equal to
+ * the channels field.
+ * @deprecated use ch_layout
+ */
+ attribute_deprecated
+ uint64_t channel_layout;
+ /**
+ * Audio only. The number of audio channels.
+ * @deprecated use ch_layout.nb_channels
+ */
+ attribute_deprecated
+ int channels;
+#endif
+ /**
+ * Audio only. The number of audio samples per second.
+ */
+ int sample_rate;
+ /**
+ * Audio only. The number of bytes per coded audio frame, required by some
+ * formats.
+ *
+ * Corresponds to nBlockAlign in WAVEFORMATEX.
+ */
+ int block_align;
+ /**
+ * Audio only. Audio frame size, if known. Required by some formats to be static.
+ */
+ int frame_size;
+
+ /**
+ * Audio only. The amount of padding (in samples) inserted by the encoder at
+ * the beginning of the audio. I.e. this number of leading decoded samples
+ * must be discarded by the caller to get the original audio without leading
+ * padding.
+ */
+ int initial_padding;
+ /**
+ * Audio only. The amount of padding (in samples) appended by the encoder to
+ * the end of the audio. I.e. this number of decoded samples must be
+ * discarded by the caller from the end of the stream to get the original
+ * audio without any trailing padding.
+ */
+ int trailing_padding;
+ /**
+ * Audio only. Number of samples to skip after a discontinuity.
+ */
+ int seek_preroll;
+
+ /**
+ * Audio only. The channel layout and number of channels.
+ */
+ AVChannelLayout ch_layout;
+} AVCodecParameters;
+
+/**
+ * Allocate a new AVCodecParameters and set its fields to default values
+ * (unknown/invalid/0). The returned struct must be freed with
+ * avcodec_parameters_free().
+ */
+AVCodecParameters *avcodec_parameters_alloc(void);
+
+/**
+ * Free an AVCodecParameters instance and everything associated with it and
+ * write NULL to the supplied pointer.
+ */
+void avcodec_parameters_free(AVCodecParameters **par);
+
+/**
+ * Copy the contents of src to dst. Any allocated fields in dst are freed and
+ * replaced with newly allocated duplicates of the corresponding fields in src.
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure.
+ */
+int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src);
+
+/**
+ * This function is the same as av_get_audio_frame_duration(), except it works
+ * with AVCodecParameters instead of an AVCodecContext.
+ */
+int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_CODEC_PAR_H
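As a minimal sketch of filling the struct by hand (for example when describing a raw stream) and duplicating it with avcodec_parameters_copy(); make_pcm_params() is a hypothetical helper and the include paths assume a regular FFmpeg install.

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>

/* Describe a raw 48 kHz stereo s16le PCM stream and hand back a deep copy. */
static int make_pcm_params(AVCodecParameters **out)
{
    AVCodecParameters *par  = avcodec_parameters_alloc();
    AVCodecParameters *copy = avcodec_parameters_alloc();
    int ret = AVERROR(ENOMEM);

    if (!par || !copy)
        goto fail;

    par->codec_type  = AVMEDIA_TYPE_AUDIO;
    par->codec_id    = av_get_pcm_codec(AV_SAMPLE_FMT_S16, 0);  /* little-endian */
    par->format      = AV_SAMPLE_FMT_S16;
    par->sample_rate = 48000;
    av_channel_layout_default(&par->ch_layout, 2);              /* stereo */

    ret = avcodec_parameters_copy(copy, par);   /* deep-copies ch_layout/extradata */
    if (ret < 0)
        goto fail;

    avcodec_parameters_free(&par);
    *out = copy;
    return 0;

fail:
    avcodec_parameters_free(&par);
    avcodec_parameters_free(&copy);
    return ret;
}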
diff --git a/media/ffvpx/libavcodec/decode.c b/media/ffvpx/libavcodec/decode.c
index 9a6c57b7b8..be2be81089 100644
--- a/media/ffvpx/libavcodec/decode.c
+++ b/media/ffvpx/libavcodec/decode.c
@@ -30,7 +30,9 @@
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
+#include "libavutil/fifo.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
#include "libavutil/imgutils.h"
@@ -40,14 +42,17 @@
#include "avcodec.h"
#include "bytestream.h"
+#include "bsf.h"
+#include "codec_internal.h"
#include "decode.h"
-#include "hwaccel.h"
+#include "hwconfig.h"
#include "internal.h"
#include "thread.h"
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
{
- int size = 0, ret;
+ int ret;
+ size_t size;
const uint8_t *data;
uint32_t flags;
int64_t val;
@@ -69,6 +74,8 @@ static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
flags = bytestream_get_le32(&data);
size -= 4;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
if (size < 4)
goto fail;
@@ -87,6 +94,8 @@ static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
avctx->channel_layout = bytestream_get_le64(&data);
size -= 8;
}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
if (size < 4)
goto fail;
@@ -131,201 +140,46 @@ static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
if (pkt) {
ret = av_packet_copy_props(avci->last_pkt_props, pkt);
if (!ret)
- avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_decode_frame_props().
+ avci->last_pkt_props->opaque = (void *)(intptr_t)pkt->size; // Needed for ff_decode_frame_props().
}
return ret;
}
-static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
-{
- int ret;
-
- /* move the original frame to our backup */
- av_frame_unref(avci->to_free);
- av_frame_move_ref(avci->to_free, frame);
-
- /* now copy everything except the AVBufferRefs back
- * note that we make a COPY of the side data, so calling av_frame_free() on
- * the caller's frame will work properly */
- ret = av_frame_copy_props(frame, avci->to_free);
- if (ret < 0)
- return ret;
-
- memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
- memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
- if (avci->to_free->extended_data != avci->to_free->data) {
- int planes = avci->to_free->channels;
- int size = planes * sizeof(*frame->extended_data);
-
- if (!size) {
- av_frame_unref(frame);
- return AVERROR_BUG;
- }
-
- frame->extended_data = av_malloc(size);
- if (!frame->extended_data) {
- av_frame_unref(frame);
- return AVERROR(ENOMEM);
- }
- memcpy(frame->extended_data, avci->to_free->extended_data,
- size);
- } else
- frame->extended_data = frame->data;
-
- frame->format = avci->to_free->format;
- frame->width = avci->to_free->width;
- frame->height = avci->to_free->height;
- frame->channel_layout = avci->to_free->channel_layout;
- frame->nb_samples = avci->to_free->nb_samples;
- frame->channels = avci->to_free->channels;
-
- return 0;
-}
-
-int ff_decode_bsfs_init(AVCodecContext *avctx)
+static int decode_bsfs_init(AVCodecContext *avctx)
{
AVCodecInternal *avci = avctx->internal;
- DecodeFilterContext *s = &avci->filter;
- const char *bsfs_str;
+ const FFCodec *const codec = ffcodec(avctx->codec);
int ret;
- if (s->nb_bsfs)
+ if (avci->bsf)
return 0;
- bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
- while (bsfs_str && *bsfs_str) {
- AVBSFContext **tmp;
- const AVBitStreamFilter *filter;
- char *bsf, *bsf_options_str, *bsf_name;
-
- bsf = av_get_token(&bsfs_str, ",");
- if (!bsf) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
- bsf_name = av_strtok(bsf, "=", &bsf_options_str);
- if (!bsf_name) {
- av_freep(&bsf);
- ret = AVERROR(ENOMEM);
- goto fail;
- }
-
- filter = av_bsf_get_by_name(bsf_name);
- if (!filter) {
- av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
- "requested by a decoder. This is a bug, please report it.\n",
- bsf_name);
- av_freep(&bsf);
+ ret = av_bsf_list_parse_str(codec->bsfs, &avci->bsf);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", codec->bsfs, av_err2str(ret));
+ if (ret != AVERROR(ENOMEM))
ret = AVERROR_BUG;
- goto fail;
- }
-
- tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
- if (!tmp) {
- av_freep(&bsf);
- ret = AVERROR(ENOMEM);
- goto fail;
- }
- s->bsfs = tmp;
- s->nb_bsfs++;
-
- ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
- if (ret < 0) {
- av_freep(&bsf);
- goto fail;
- }
-
- if (s->nb_bsfs == 1) {
- /* We do not currently have an API for passing the input timebase into decoders,
- * but no filters used here should actually need it.
- * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
- s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
- ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
- avctx);
- } else {
- s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
- ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
- s->bsfs[s->nb_bsfs - 2]->par_out);
- }
- if (ret < 0) {
- av_freep(&bsf);
- goto fail;
- }
-
- if (bsf_options_str && filter->priv_class) {
- const AVOption *opt = av_opt_next(s->bsfs[s->nb_bsfs - 1]->priv_data, NULL);
- const char * shorthand[2] = {NULL};
-
- if (opt)
- shorthand[0] = opt->name;
-
- ret = av_opt_set_from_string(s->bsfs[s->nb_bsfs - 1]->priv_data, bsf_options_str, shorthand, "=", ":");
- if (ret < 0) {
- if (ret != AVERROR(ENOMEM)) {
- av_log(avctx, AV_LOG_ERROR, "Invalid options for bitstream filter %s "
- "requested by the decoder. This is a bug, please report it.\n",
- bsf_name);
- ret = AVERROR_BUG;
- }
- av_freep(&bsf);
- goto fail;
- }
- }
- av_freep(&bsf);
+ goto fail;
+ }
- ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
- if (ret < 0)
- goto fail;
+ /* We do not currently have an API for passing the input timebase into decoders,
+ * but no filters used here should actually need it.
+ * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
+ avci->bsf->time_base_in = (AVRational){ 1, 90000 };
+ ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx);
+ if (ret < 0)
+ goto fail;
- if (*bsfs_str)
- bsfs_str++;
- }
+ ret = av_bsf_init(avci->bsf);
+ if (ret < 0)
+ goto fail;
return 0;
fail:
- ff_decode_bsfs_uninit(avctx);
+ av_bsf_free(&avci->bsf);
return ret;
}
-/* try to get one output packet from the filter chain */
-static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
-{
- DecodeFilterContext *s = &avctx->internal->filter;
- int idx, ret;
-
- /* start with the last filter in the chain */
- idx = s->nb_bsfs - 1;
- while (idx >= 0) {
- /* request a packet from the currently selected filter */
- ret = av_bsf_receive_packet(s->bsfs[idx], pkt);
- if (ret == AVERROR(EAGAIN)) {
- /* no packets available, try the next filter up the chain */
- ret = 0;
- idx--;
- continue;
- } else if (ret < 0 && ret != AVERROR_EOF) {
- return ret;
- }
-
- /* got a packet or EOF -- pass it to the caller or to the next filter
- * down the chain */
- if (idx == s->nb_bsfs - 1) {
- return ret;
- } else {
- idx++;
- ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? NULL : pkt);
- if (ret < 0) {
- av_log(avctx, AV_LOG_ERROR,
- "Error pre-processing a packet before decoding\n");
- av_packet_unref(pkt);
- return ret;
- }
- }
- }
-
- return AVERROR(EAGAIN);
-}
-
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
{
AVCodecInternal *avci = avctx->internal;
@@ -334,23 +188,22 @@ int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
if (avci->draining)
return AVERROR_EOF;
- ret = bsfs_poll(avctx, pkt);
+ ret = av_bsf_receive_packet(avci->bsf, pkt);
if (ret == AVERROR_EOF)
avci->draining = 1;
if (ret < 0)
return ret;
- ret = extract_packet_props(avctx->internal, pkt);
- if (ret < 0)
- goto finish;
+ if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
+ ret = extract_packet_props(avctx->internal, pkt);
+ if (ret < 0)
+ goto finish;
+ }
ret = apply_param_change(avctx, pkt);
if (ret < 0)
goto finish;
- if (avctx->codec->receive_frame)
- avci->compat_decode_consumed += pkt->size;
-
return 0;
finish:
av_packet_unref(pkt);
@@ -399,12 +252,11 @@ static int64_t guess_correct_pts(AVCodecContext *ctx,
* returning any output, so this function needs to be called in a loop until it
* returns EAGAIN.
**/
-static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
+static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
{
AVCodecInternal *avci = avctx->internal;
- DecodeSimpleContext *ds = &avci->ds;
- AVPacket *pkt = ds->in_pkt;
- // copy to ensure we do not change pkt
+ AVPacket *const pkt = avci->in_pkt;
+ const FFCodec *const codec = ffcodec(avctx->codec);
int got_frame, actual_got_frame;
int ret;
@@ -430,9 +282,9 @@ static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
} else {
- ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
+ ret = codec->cb.decode(avctx, frame, &got_frame, pkt);
- if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
+ if (!(codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
frame->pkt_dts = pkt->dts;
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
if(!avctx->has_b_frames)
@@ -453,93 +305,97 @@ static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
if (frame->flags & AV_FRAME_FLAG_DISCARD)
got_frame = 0;
- if (got_frame)
- frame->best_effort_timestamp = guess_correct_pts(avctx,
- frame->pts,
- frame->pkt_dts);
} else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
uint8_t *side;
- int side_size;
+ size_t side_size;
uint32_t discard_padding = 0;
uint8_t skip_reason = 0;
uint8_t discard_reason = 0;
if (ret >= 0 && got_frame) {
- frame->best_effort_timestamp = guess_correct_pts(avctx,
- frame->pts,
- frame->pkt_dts);
if (frame->format == AV_SAMPLE_FMT_NONE)
frame->format = avctx->sample_fmt;
+ if (!frame->ch_layout.nb_channels) {
+ int ret2 = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
+ if (ret2 < 0) {
+ ret = ret2;
+ got_frame = 0;
+ }
+ }
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
if (!frame->channel_layout)
- frame->channel_layout = avctx->channel_layout;
+ frame->channel_layout = avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ avctx->ch_layout.u.mask : 0;
if (!frame->channels)
- frame->channels = avctx->channels;
+ frame->channels = avctx->ch_layout.nb_channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
if (!frame->sample_rate)
frame->sample_rate = avctx->sample_rate;
}
side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
if(side && side_size>=10) {
- avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
+ avci->skip_samples = AV_RL32(side);
+ avci->skip_samples = FFMAX(0, avci->skip_samples);
discard_padding = AV_RL32(side + 4);
av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
- avctx->internal->skip_samples, (int)discard_padding);
+ avci->skip_samples, (int)discard_padding);
skip_reason = AV_RL8(side + 8);
discard_reason = AV_RL8(side + 9);
}
if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
- avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
+ avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
got_frame = 0;
+ *discarded_samples += frame->nb_samples;
}
- if (avctx->internal->skip_samples > 0 && got_frame &&
+ if (avci->skip_samples > 0 && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
- if(frame->nb_samples <= avctx->internal->skip_samples){
+ if(frame->nb_samples <= avci->skip_samples){
got_frame = 0;
- avctx->internal->skip_samples -= frame->nb_samples;
+ *discarded_samples += frame->nb_samples;
+ avci->skip_samples -= frame->nb_samples;
av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
- avctx->internal->skip_samples);
+ avci->skip_samples);
} else {
- av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
- frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
+ av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
+ frame->nb_samples - avci->skip_samples, avctx->ch_layout.nb_channels, frame->format);
if(avctx->pkt_timebase.num && avctx->sample_rate) {
- int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
+ int64_t diff_ts = av_rescale_q(avci->skip_samples,
(AVRational){1, avctx->sample_rate},
avctx->pkt_timebase);
if(frame->pts!=AV_NOPTS_VALUE)
frame->pts += diff_ts;
-#if FF_API_PKT_PTS
-FF_DISABLE_DEPRECATION_WARNINGS
- if(frame->pkt_pts!=AV_NOPTS_VALUE)
- frame->pkt_pts += diff_ts;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
if(frame->pkt_dts!=AV_NOPTS_VALUE)
frame->pkt_dts += diff_ts;
- if (frame->pkt_duration >= diff_ts)
- frame->pkt_duration -= diff_ts;
+ if (frame->duration >= diff_ts)
+ frame->duration -= diff_ts;
} else {
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
}
av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
- avctx->internal->skip_samples, frame->nb_samples);
- frame->nb_samples -= avctx->internal->skip_samples;
- avctx->internal->skip_samples = 0;
+ avci->skip_samples, frame->nb_samples);
+ *discarded_samples += avci->skip_samples;
+ frame->nb_samples -= avci->skip_samples;
+ avci->skip_samples = 0;
}
}
if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
if (discard_padding == frame->nb_samples) {
+ *discarded_samples += frame->nb_samples;
got_frame = 0;
} else {
if(avctx->pkt_timebase.num && avctx->sample_rate) {
int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
(AVRational){1, avctx->sample_rate},
avctx->pkt_timebase);
- frame->pkt_duration = diff_ts;
+ frame->duration = diff_ts;
} else {
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
}
@@ -552,11 +408,11 @@ FF_ENABLE_DEPRECATION_WARNINGS
if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
if (fside) {
- AV_WL32(fside->data, avctx->internal->skip_samples);
+ AV_WL32(fside->data, avci->skip_samples);
AV_WL32(fside->data + 4, discard_padding);
AV_WL8(fside->data + 8, skip_reason);
AV_WL8(fside->data + 9, discard_reason);
- avctx->internal->skip_samples = 0;
+ avci->skip_samples = 0;
}
}
}
@@ -571,17 +427,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (!got_frame)
av_frame_unref(frame);
- if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
+ if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO)
ret = pkt->size;
-#if FF_API_AVCTX_TIMEBASE
- if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
- avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
-#endif
-
/* do not stop draining when actual_got_frame != 0 or ret < 0 */
/* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
- if (avctx->internal->draining && !actual_got_frame) {
+ if (avci->draining && !actual_got_frame) {
if (ret < 0) {
/* prevent infinite loop if a decoder wrongly always return error on draining */
/* reasonable nb_errors_max = maximum b frames + thread count */
@@ -599,8 +450,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
- avci->compat_decode_consumed += ret;
-
if (ret >= pkt->size || ret < 0) {
av_packet_unref(pkt);
} else {
@@ -608,11 +457,14 @@ FF_ENABLE_DEPRECATION_WARNINGS
pkt->data += consumed;
pkt->size -= consumed;
- avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
pkt->pts = AV_NOPTS_VALUE;
pkt->dts = AV_NOPTS_VALUE;
- avci->last_pkt_props->pts = AV_NOPTS_VALUE;
- avci->last_pkt_props->dts = AV_NOPTS_VALUE;
+ if (!(codec->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
+ // See extract_packet_props() comment.
+ avci->last_pkt_props->opaque = (void *)((intptr_t)avci->last_pkt_props->opaque - consumed);
+ avci->last_pkt_props->pts = AV_NOPTS_VALUE;
+ avci->last_pkt_props->dts = AV_NOPTS_VALUE;
+ }
}
if (got_frame)
@@ -621,12 +473,63 @@ FF_ENABLE_DEPRECATION_WARNINGS
return ret < 0 ? ret : 0;
}
+#if CONFIG_LCMS2
+static int detect_colorspace(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ enum AVColorTransferCharacteristic trc;
+ AVColorPrimariesDesc coeffs;
+ enum AVColorPrimaries prim;
+ cmsHPROFILE profile;
+ AVFrameSideData *sd;
+ int ret;
+ if (!(avctx->flags2 & AV_CODEC_FLAG2_ICC_PROFILES))
+ return 0;
+
+ sd = av_frame_get_side_data(frame, AV_FRAME_DATA_ICC_PROFILE);
+ if (!sd || !sd->size)
+ return 0;
+
+ if (!avci->icc.avctx) {
+ ret = ff_icc_context_init(&avci->icc, avctx);
+ if (ret < 0)
+ return ret;
+ }
+
+ profile = cmsOpenProfileFromMemTHR(avci->icc.ctx, sd->data, sd->size);
+ if (!profile)
+ return AVERROR_INVALIDDATA;
+
+ ret = ff_icc_profile_read_primaries(&avci->icc, profile, &coeffs);
+ if (!ret)
+ ret = ff_icc_profile_detect_transfer(&avci->icc, profile, &trc);
+ cmsCloseProfile(profile);
+ if (ret < 0)
+ return ret;
+
+ prim = av_csp_primaries_id_from_desc(&coeffs);
+ if (prim != AVCOL_PRI_UNSPECIFIED)
+ frame->color_primaries = prim;
+ if (trc != AVCOL_TRC_UNSPECIFIED)
+ frame->color_trc = trc;
+ return 0;
+}
+#else /* !CONFIG_LCMS2 */
+static int detect_colorspace(av_unused AVCodecContext *c, av_unused AVFrame *f)
+{
+ return 0;
+}
+#endif
+
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
int ret;
+ int64_t discarded_samples = 0;
while (!frame->buf[0]) {
- ret = decode_simple_internal(avctx, frame);
+ if (discarded_samples > avctx->max_samples)
+ return AVERROR(EAGAIN);
+ ret = decode_simple_internal(avctx, frame, &discarded_samples);
if (ret < 0)
return ret;
}
@@ -637,19 +540,37 @@ static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
- int ret;
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ int ret, ok;
av_assert0(!frame->buf[0]);
- if (avctx->codec->receive_frame)
- ret = avctx->codec->receive_frame(avctx, frame);
- else
+ if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) {
+ ret = codec->cb.receive_frame(avctx, frame);
+ } else
ret = decode_simple_receive_frame(avctx, frame);
if (ret == AVERROR_EOF)
avci->draining_done = 1;
+ /* preserve ret */
+ ok = detect_colorspace(avctx, frame);
+ if (ok < 0) {
+ av_frame_unref(frame);
+ return ok;
+ }
+
if (!ret) {
+ frame->best_effort_timestamp = guess_correct_pts(avctx,
+ frame->pts,
+ frame->pkt_dts);
+
+#if FF_API_PKT_DURATION
+FF_DISABLE_DEPRECATION_WARNINGS
+ frame->pkt_duration = frame->duration;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
/* the only case where decode data is not set should be decoders
* that do not call ff_get_buffer() */
av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
@@ -695,7 +616,7 @@ int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacke
return ret;
}
- ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
+ ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
if (ret < 0) {
av_packet_unref(avci->buffer_pkt);
return ret;
@@ -737,13 +658,38 @@ static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
AV_FRAME_CROP_UNALIGNED : 0);
}
-int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+// make sure frames returned to the caller are valid
+static int frame_validate(AVCodecContext *avctx, AVFrame *frame)
+{
+ if (!frame->buf[0] || frame->format < 0)
+ goto fail;
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ if (frame->width <= 0 || frame->height <= 0)
+ goto fail;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ if (!av_channel_layout_check(&frame->ch_layout) ||
+ frame->sample_rate <= 0)
+ goto fail;
+
+ break;
+ default: av_assert0(0);
+ }
+
+ return 0;
+fail:
+ av_log(avctx, AV_LOG_ERROR, "An invalid frame was output by a decoder. "
+ "This is a bug, please report it.\n");
+ return AVERROR_BUG;
+}
+
+int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
int ret, changed;
- av_frame_unref(frame);
-
if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
return AVERROR(EINVAL);
@@ -755,19 +701,26 @@ int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *fr
return ret;
}
+ ret = frame_validate(avctx, frame);
+ if (ret < 0)
+ goto fail;
+
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
ret = apply_cropping(avctx, frame);
- if (ret < 0) {
- av_frame_unref(frame);
- return ret;
- }
+ if (ret < 0)
+ goto fail;
}
- avctx->frame_number++;
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
- if (avctx->frame_number == 1) {
+ if (avctx->frame_num == 1) {
avci->initial_format = frame->format;
switch(avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
@@ -777,16 +730,17 @@ int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *fr
case AVMEDIA_TYPE_AUDIO:
avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
avctx->sample_rate;
- avci->initial_channels = frame->channels;
- avci->initial_channel_layout = frame->channel_layout;
+ ret = av_channel_layout_copy(&avci->initial_ch_layout, &frame->ch_layout);
+ if (ret < 0)
+ goto fail;
break;
}
}
- if (avctx->frame_number > 1) {
+ if (avctx->frame_num > 1) {
changed = avci->initial_format != frame->format;
- switch(avctx->codec_type) {
+ switch(avctx->codec_type) {
case AVMEDIA_TYPE_VIDEO:
changed |= avci->initial_width != frame->width ||
avci->initial_height != frame->height;
@@ -794,122 +748,27 @@ int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *fr
case AVMEDIA_TYPE_AUDIO:
changed |= avci->initial_sample_rate != frame->sample_rate ||
avci->initial_sample_rate != avctx->sample_rate ||
- avci->initial_channels != frame->channels ||
- avci->initial_channel_layout != frame->channel_layout;
+ av_channel_layout_compare(&avci->initial_ch_layout, &frame->ch_layout);
break;
}
if (changed) {
avci->changed_frames_dropped++;
- av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
+ av_log(avctx, AV_LOG_INFO, "dropped changed frame #%"PRId64" pts %"PRId64
" drop count: %d \n",
- avctx->frame_number, frame->pts,
+ avctx->frame_num, frame->pts,
avci->changed_frames_dropped);
- av_frame_unref(frame);
- return AVERROR_INPUT_CHANGED;
+ ret = AVERROR_INPUT_CHANGED;
+ goto fail;
}
}
}
return 0;
-}
-
-static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
- int *got_frame, const AVPacket *pkt)
-{
- AVCodecInternal *avci = avctx->internal;
- int ret = 0;
-
- av_assert0(avci->compat_decode_consumed == 0);
-
- if (avci->draining_done && pkt && pkt->size != 0) {
- av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
- avcodec_flush_buffers(avctx);
- }
-
- *got_frame = 0;
- avci->compat_decode = 1;
-
- if (avci->compat_decode_partial_size > 0 &&
- avci->compat_decode_partial_size != pkt->size) {
- av_log(avctx, AV_LOG_ERROR,
- "Got unexpected packet size after a partial decode\n");
- ret = AVERROR(EINVAL);
- goto finish;
- }
-
- if (!avci->compat_decode_partial_size) {
- ret = avcodec_send_packet(avctx, pkt);
- if (ret == AVERROR_EOF)
- ret = 0;
- else if (ret == AVERROR(EAGAIN)) {
- /* we fully drain all the output in each decode call, so this should not
- * ever happen */
- ret = AVERROR_BUG;
- goto finish;
- } else if (ret < 0)
- goto finish;
- }
-
- while (ret >= 0) {
- ret = avcodec_receive_frame(avctx, frame);
- if (ret < 0) {
- if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
- ret = 0;
- goto finish;
- }
-
- if (frame != avci->compat_decode_frame) {
- if (!avctx->refcounted_frames) {
- ret = unrefcount_frame(avci, frame);
- if (ret < 0)
- goto finish;
- }
-
- *got_frame = 1;
- frame = avci->compat_decode_frame;
- } else {
- if (!avci->compat_decode_warned) {
- av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
- "API cannot return all the frames for this decoder. "
- "Some frames will be dropped. Update your code to the "
- "new decoding API to fix this.\n");
- avci->compat_decode_warned = 1;
- }
- }
-
- if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
- break;
- }
-
-finish:
- if (ret == 0) {
- /* if there are any bsfs then assume full packet is always consumed */
- if (avctx->codec->bsfs)
- ret = pkt->size;
- else
- ret = FFMIN(avci->compat_decode_consumed, pkt->size);
- }
- avci->compat_decode_consumed = 0;
- avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
-
+fail:
+ av_frame_unref(frame);
return ret;
}
-int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
- int *got_picture_ptr,
- const AVPacket *avpkt)
-{
- return compat_decode(avctx, picture, got_picture_ptr, avpkt);
-}
-
-int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
- AVFrame *frame,
- int *got_frame_ptr,
- const AVPacket *avpkt)
-{
- return compat_decode(avctx, frame, got_frame_ptr, avpkt);
-}
-
static void get_subtitle_defaults(AVSubtitle *sub)
{
memset(sub, 0, sizeof(*sub));
@@ -917,55 +776,58 @@ static void get_subtitle_defaults(AVSubtitle *sub)
}
#define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
-static int recode_subtitle(AVCodecContext *avctx,
- AVPacket *outpkt, const AVPacket *inpkt)
+static int recode_subtitle(AVCodecContext *avctx, const AVPacket **outpkt,
+ const AVPacket *inpkt, AVPacket *buf_pkt)
{
#if CONFIG_ICONV
iconv_t cd = (iconv_t)-1;
int ret = 0;
char *inb, *outb;
size_t inl, outl;
- AVPacket tmp;
#endif
- if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
+ if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0) {
+ *outpkt = inpkt;
return 0;
+ }
#if CONFIG_ICONV
- cd = iconv_open("UTF-8", avctx->sub_charenc);
- av_assert0(cd != (iconv_t)-1);
-
inb = inpkt->data;
inl = inpkt->size;
if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
- ret = AVERROR(ENOMEM);
- goto end;
+ return AVERROR(ERANGE);
}
- ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
+ cd = iconv_open("UTF-8", avctx->sub_charenc);
+ av_assert0(cd != (iconv_t)-1);
+
+ ret = av_new_packet(buf_pkt, inl * UTF8_MAX_BYTES);
if (ret < 0)
goto end;
- outpkt->buf = tmp.buf;
- outpkt->data = tmp.data;
- outpkt->size = tmp.size;
- outb = outpkt->data;
- outl = outpkt->size;
+ ret = av_packet_copy_props(buf_pkt, inpkt);
+ if (ret < 0)
+ goto end;
+ outb = buf_pkt->data;
+ outl = buf_pkt->size;
if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
- outl >= outpkt->size || inl != 0) {
+ outl >= buf_pkt->size || inl != 0) {
ret = FFMIN(AVERROR(errno), -1);
av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
"from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
- av_packet_unref(&tmp);
goto end;
}
- outpkt->size -= outl;
- memset(outpkt->data + outpkt->size, 0, outl);
+ buf_pkt->size -= outl;
+ memset(buf_pkt->data + buf_pkt->size, 0, outl);
+ *outpkt = buf_pkt;
+ ret = 0;
end:
+ if (ret < 0)
+ av_packet_unref(buf_pkt);
if (cd != (iconv_t)-1)
iconv_close(cd);
return ret;
@@ -994,84 +856,10 @@ static int utf8_check(const uint8_t *str)
return 1;
}
-#if FF_API_ASS_TIMING
-static void insert_ts(AVBPrint *buf, int ts)
-{
- if (ts == -1) {
- av_bprintf(buf, "9:59:59.99,");
- } else {
- int h, m, s;
-
- h = ts/360000; ts -= 360000*h;
- m = ts/ 6000; ts -= 6000*m;
- s = ts/ 100; ts -= 100*s;
- av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
- }
-}
-
-static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
-{
- int i;
- AVBPrint buf;
-
- av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
-
- for (i = 0; i < sub->num_rects; i++) {
- char *final_dialog;
- const char *dialog;
- AVSubtitleRect *rect = sub->rects[i];
- int ts_start, ts_duration = -1;
- long int layer;
-
- if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
- continue;
-
- av_bprint_clear(&buf);
-
- /* skip ReadOrder */
- dialog = strchr(rect->ass, ',');
- if (!dialog)
- continue;
- dialog++;
-
- /* extract Layer or Marked */
- layer = strtol(dialog, (char**)&dialog, 10);
- if (*dialog != ',')
- continue;
- dialog++;
-
- /* rescale timing to ASS time base (ms) */
- ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
- if (pkt->duration != -1)
- ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
- sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
-
- /* construct ASS (standalone file form with timestamps) string */
- av_bprintf(&buf, "Dialogue: %ld,", layer);
- insert_ts(&buf, ts_start);
- insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
- av_bprintf(&buf, "%s\r\n", dialog);
-
- final_dialog = av_strdup(buf.str);
- if (!av_bprint_is_complete(&buf) || !final_dialog) {
- av_freep(&final_dialog);
- av_bprint_finalize(&buf, NULL);
- return AVERROR(ENOMEM);
- }
- av_freep(&rect->ass);
- rect->ass = final_dialog;
- }
-
- av_bprint_finalize(&buf, NULL);
- return 0;
-}
-#endif
-
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
- int *got_sub_ptr,
- AVPacket *avpkt)
+ int *got_sub_ptr, const AVPacket *avpkt)
{
- int i, ret = 0;
+ int ret = 0;
if (!avpkt->data && avpkt->size) {
av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
@@ -1088,69 +876,57 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
get_subtitle_defaults(sub);
if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
- AVPacket pkt_recoded = *avpkt;
+ AVCodecInternal *avci = avctx->internal;
+ const AVPacket *pkt;
- ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
+ ret = recode_subtitle(avctx, &pkt, avpkt, avci->buffer_pkt);
+ if (ret < 0)
+ return ret;
+
+ if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
+ sub->pts = av_rescale_q(avpkt->pts,
+ avctx->pkt_timebase, AV_TIME_BASE_Q);
+ ret = ffcodec(avctx->codec)->cb.decode_sub(avctx, sub, got_sub_ptr, pkt);
+ if (pkt == avci->buffer_pkt) // did we recode?
+ av_packet_unref(avci->buffer_pkt);
if (ret < 0) {
*got_sub_ptr = 0;
- } else {
- ret = extract_packet_props(avctx->internal, &pkt_recoded);
- if (ret < 0)
- return ret;
-
- if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
- sub->pts = av_rescale_q(avpkt->pts,
- avctx->pkt_timebase, AV_TIME_BASE_Q);
- ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
- av_assert1((ret >= 0) >= !!*got_sub_ptr &&
- !!*got_sub_ptr >= !!sub->num_rects);
-
-#if FF_API_ASS_TIMING
- if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
- && *got_sub_ptr && sub->num_rects) {
- const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
- : avctx->time_base;
- int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
- if (err < 0)
- ret = err;
- }
-#endif
-
- if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
- avctx->pkt_timebase.num) {
- AVRational ms = { 1, 1000 };
- sub->end_display_time = av_rescale_q(avpkt->duration,
- avctx->pkt_timebase, ms);
- }
+ avsubtitle_free(sub);
+ return ret;
+ }
+ av_assert1(!sub->num_rects || *got_sub_ptr);
- if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
- sub->format = 0;
- else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
- sub->format = 1;
-
- for (i = 0; i < sub->num_rects; i++) {
- if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
- sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
- av_log(avctx, AV_LOG_ERROR,
- "Invalid UTF-8 in decoded subtitles text; "
- "maybe missing -sub_charenc option\n");
- avsubtitle_free(sub);
- ret = AVERROR_INVALIDDATA;
- break;
- }
- }
+ if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
+ avctx->pkt_timebase.num) {
+ AVRational ms = { 1, 1000 };
+ sub->end_display_time = av_rescale_q(avpkt->duration,
+ avctx->pkt_timebase, ms);
+ }
- if (avpkt->data != pkt_recoded.data) { // did we recode?
- /* prevent from destroying side data from original packet */
- pkt_recoded.side_data = NULL;
- pkt_recoded.side_data_elems = 0;
+ if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
+ sub->format = 0;
+ else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
+ sub->format = 1;
- av_packet_unref(&pkt_recoded);
+ for (unsigned i = 0; i < sub->num_rects; i++) {
+ if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
+ sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid UTF-8 in decoded subtitles text; "
+ "maybe missing -sub_charenc option\n");
+ avsubtitle_free(sub);
+ *got_sub_ptr = 0;
+ return AVERROR_INVALIDDATA;
}
}
if (*got_sub_ptr)
- avctx->frame_number++;
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
}
return ret;
@@ -1165,11 +941,11 @@ enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
// If a device was supplied when the codec was opened, assume that the
// user wants to use it.
- if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
+ if (avctx->hw_device_ctx && ffcodec(avctx->codec)->hw_configs) {
AVHWDeviceContext *device_ctx =
(AVHWDeviceContext*)avctx->hw_device_ctx->data;
for (i = 0;; i++) {
- config = &avctx->codec->hw_configs[i]->public;
+ config = &ffcodec(avctx->codec)->hw_configs[i]->public;
if (!config)
break;
if (!(config->methods &
@@ -1281,7 +1057,7 @@ int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
int i, ret;
for (i = 0;; i++) {
- hw_config = avctx->codec->hw_configs[i];
+ hw_config = ffcodec(avctx->codec)->hw_configs[i];
if (!hw_config)
return AVERROR(ENOENT);
if (hw_config->public.pix_fmt == hw_pix_fmt)
@@ -1389,12 +1165,10 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
avctx->sw_pix_fmt = fmt[n - 1];
}
- choices = av_malloc_array(n + 1, sizeof(*choices));
+ choices = av_memdup(fmt, (n + 1) * sizeof(*choices));
if (!choices)
return AV_PIX_FMT_NONE;
- memcpy(choices, fmt, (n + 1) * sizeof(*choices));
-
for (;;) {
// Remove the previous hwaccel, if there was one.
hwaccel_uninit(avctx);
@@ -1427,9 +1201,9 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
break;
}
- if (avctx->codec->hw_configs) {
+ if (ffcodec(avctx->codec)->hw_configs) {
for (i = 0;; i++) {
- hw_config = avctx->codec->hw_configs[i];
+ hw_config = ffcodec(avctx->codec)->hw_configs[i];
if (!hw_config)
break;
if (hw_config->public.pix_fmt == user_choice)
@@ -1505,227 +1279,9 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
return ret;
}
-static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
-{
- FramePool *pool = avctx->internal->pool;
- int i, ret;
-
- switch (avctx->codec_type) {
- case AVMEDIA_TYPE_VIDEO: {
- uint8_t *data[4];
- int linesize[4];
- int size[4] = { 0 };
- int w = frame->width;
- int h = frame->height;
- int tmpsize, unaligned;
-
- if (pool->format == frame->format &&
- pool->width == frame->width && pool->height == frame->height)
- return 0;
-
- avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
-
- do {
- // NOTE: do not align linesizes individually, this breaks e.g. assumptions
- // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
- ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
- if (ret < 0)
- return ret;
- // increase alignment of w for next try (rhs gives the lowest bit set in w)
- w += w & ~(w - 1);
-
- unaligned = 0;
- for (i = 0; i < 4; i++)
- unaligned |= linesize[i] % pool->stride_align[i];
- } while (unaligned);
-
- tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
- NULL, linesize);
- if (tmpsize < 0)
- return tmpsize;
-
- for (i = 0; i < 3 && data[i + 1]; i++)
- size[i] = data[i + 1] - data[i];
- size[i] = tmpsize - (data[i] - data[0]);
-
- for (i = 0; i < 4; i++) {
- av_buffer_pool_uninit(&pool->pools[i]);
- pool->linesize[i] = linesize[i];
- if (size[i]) {
- pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
- CONFIG_MEMORY_POISONING ?
- NULL :
- av_buffer_allocz);
- if (!pool->pools[i]) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
- }
- }
- pool->format = frame->format;
- pool->width = frame->width;
- pool->height = frame->height;
-
- break;
- }
- case AVMEDIA_TYPE_AUDIO: {
- int ch = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout);
- int planar = av_sample_fmt_is_planar(frame->format);
- int planes = planar ? ch : 1;
-
- if (pool->format == frame->format && pool->planes == planes &&
- pool->channels == ch && frame->nb_samples == pool->samples)
- return 0;
-
- av_buffer_pool_uninit(&pool->pools[0]);
- ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
- frame->nb_samples, frame->format, 0);
- if (ret < 0)
- goto fail;
-
- pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
- if (!pool->pools[0]) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
-
- pool->format = frame->format;
- pool->planes = planes;
- pool->channels = ch;
- pool->samples = frame->nb_samples;
- break;
- }
- default: av_assert0(0);
- }
- return 0;
-fail:
- for (i = 0; i < 4; i++)
- av_buffer_pool_uninit(&pool->pools[i]);
- pool->format = -1;
- pool->planes = pool->channels = pool->samples = 0;
- pool->width = pool->height = 0;
- return ret;
-}
-
-static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
-{
- FramePool *pool = avctx->internal->pool;
- int planes = pool->planes;
- int i;
-
- frame->linesize[0] = pool->linesize[0];
-
- if (planes > AV_NUM_DATA_POINTERS) {
- frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
- frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
- frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
- sizeof(*frame->extended_buf));
- if (!frame->extended_data || !frame->extended_buf) {
- av_freep(&frame->extended_data);
- av_freep(&frame->extended_buf);
- return AVERROR(ENOMEM);
- }
- } else {
- frame->extended_data = frame->data;
- av_assert0(frame->nb_extended_buf == 0);
- }
-
- for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
- frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
- if (!frame->buf[i])
- goto fail;
- frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
- }
- for (i = 0; i < frame->nb_extended_buf; i++) {
- frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
- if (!frame->extended_buf[i])
- goto fail;
- frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
- }
-
- if (avctx->debug & FF_DEBUG_BUFFERS)
- av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
-
- return 0;
-fail:
- av_frame_unref(frame);
- return AVERROR(ENOMEM);
-}
-
-static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
-{
- FramePool *pool = s->internal->pool;
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
- int i;
-
- if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
- av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
- return -1;
- }
-
- if (!desc) {
- av_log(s, AV_LOG_ERROR,
- "Unable to get pixel format descriptor for format %s\n",
- av_get_pix_fmt_name(pic->format));
- return AVERROR(EINVAL);
- }
-
- memset(pic->data, 0, sizeof(pic->data));
- pic->extended_data = pic->data;
-
- for (i = 0; i < 4 && pool->pools[i]; i++) {
- pic->linesize[i] = pool->linesize[i];
-
- pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
- if (!pic->buf[i])
- goto fail;
-
- pic->data[i] = pic->buf[i]->data;
- }
- for (; i < AV_NUM_DATA_POINTERS; i++) {
- pic->data[i] = NULL;
- pic->linesize[i] = 0;
- }
- if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
- ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
- avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
-
- if (s->debug & FF_DEBUG_BUFFERS)
- av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
-
- return 0;
-fail:
- av_frame_unref(pic);
- return AVERROR(ENOMEM);
-}
-
-int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
-{
- int ret;
-
- if (avctx->hw_frames_ctx) {
- ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
- frame->width = avctx->coded_width;
- frame->height = avctx->coded_height;
- return ret;
- }
-
- if ((ret = update_frame_pool(avctx, frame)) < 0)
- return ret;
-
- switch (avctx->codec_type) {
- case AVMEDIA_TYPE_VIDEO:
- return video_get_buffer(avctx, frame);
- case AVMEDIA_TYPE_AUDIO:
- return audio_get_buffer(avctx, frame);
- default:
- return -1;
- }
-}
-
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
{
- int size;
+ size_t size;
const uint8_t *side_metadata;
AVDictionary **frame_md = &frame->metadata;
@@ -1735,10 +1291,9 @@ static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
return av_packet_unpack_dictionary(side_metadata, size, frame_md);
}
-int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
+int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
+ AVFrame *frame, const AVPacket *pkt)
{
- const AVPacket *pkt = avctx->internal->last_pkt_props;
- int i;
static const struct {
enum AVPacketSideDataType packet;
enum AVFrameSideDataType frame;
@@ -1751,41 +1306,62 @@ int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
{ AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
{ AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
{ AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
+ { AV_PKT_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE },
+ { AV_PKT_DATA_S12M_TIMECODE, AV_FRAME_DATA_S12M_TIMECODE },
+ { AV_PKT_DATA_DYNAMIC_HDR10_PLUS, AV_FRAME_DATA_DYNAMIC_HDR_PLUS },
};
- if (pkt) {
- frame->pts = pkt->pts;
-#if FF_API_PKT_PTS
-FF_DISABLE_DEPRECATION_WARNINGS
- frame->pkt_pts = pkt->pts;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
- frame->pkt_pos = pkt->pos;
- frame->pkt_duration = pkt->duration;
- frame->pkt_size = pkt->size;
-
- for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
- int size;
- uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
- if (packet_sd) {
- AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
- sd[i].frame,
- size);
- if (!frame_sd)
- return AVERROR(ENOMEM);
-
- memcpy(frame_sd->data, packet_sd, size);
- }
+ frame->pts = pkt->pts;
+ frame->pkt_pos = pkt->pos;
+ frame->duration = pkt->duration;
+ frame->pkt_size = pkt->size;
+
+ for (int i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
+ size_t size;
+ uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
+ if (packet_sd) {
+ AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
+ sd[i].frame,
+ size);
+ if (!frame_sd)
+ return AVERROR(ENOMEM);
+
+ memcpy(frame_sd->data, packet_sd, size);
}
- add_metadata_from_side_data(pkt, frame);
+ }
+ add_metadata_from_side_data(pkt, frame);
- if (pkt->flags & AV_PKT_FLAG_DISCARD) {
- frame->flags |= AV_FRAME_FLAG_DISCARD;
- } else {
- frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
- }
+ if (pkt->flags & AV_PKT_FLAG_DISCARD) {
+ frame->flags |= AV_FRAME_FLAG_DISCARD;
+ } else {
+ frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
+ }
+
+ if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
+ int ret = av_buffer_replace(&frame->opaque_ref, pkt->opaque_ref);
+ if (ret < 0)
+ return ret;
+ frame->opaque = pkt->opaque;
+ }
+
+ return 0;
+}
+
+int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
+{
+ const AVPacket *pkt = avctx->internal->last_pkt_props;
+
+ if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
+ int ret = ff_decode_frame_props_from_pkt(avctx, frame, pkt);
+ if (ret < 0)
+ return ret;
+ frame->pkt_size = (int)(intptr_t)pkt->opaque;
}
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
frame->reordered_opaque = avctx->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
frame->color_primaries = avctx->color_primaries;
@@ -1819,25 +1395,18 @@ FF_ENABLE_DEPRECATION_WARNINGS
frame->sample_rate = avctx->sample_rate;
if (frame->format < 0)
frame->format = avctx->sample_fmt;
- if (!frame->channel_layout) {
- if (avctx->channel_layout) {
- if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
- avctx->channels) {
- av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
- "configuration.\n");
- return AVERROR(EINVAL);
- }
-
- frame->channel_layout = avctx->channel_layout;
- } else {
- if (avctx->channels > FF_SANE_NB_CHANNELS) {
- av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
- avctx->channels);
- return AVERROR(ENOSYS);
- }
- }
+ if (!frame->ch_layout.nb_channels) {
+ int ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
+ if (ret < 0)
+ return ret;
}
- frame->channels = avctx->channels;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ frame->channels = frame->ch_layout.nb_channels;
+ frame->channel_layout = frame->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ frame->ch_layout.u.mask : 0;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
break;
}
return 0;
@@ -1852,8 +1421,6 @@ static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
int flags = desc ? desc->flags : 0;
if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
num_planes = 2;
- if ((flags & FF_PSEUDOPAL) && frame->data[1])
- num_planes = 2;
for (i = 0; i < num_planes; i++) {
av_assert0(frame->data[i]);
}
@@ -1903,17 +1470,20 @@ int ff_attach_decode_data(AVFrame *frame)
return 0;
}
-static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
const AVHWAccel *hwaccel = avctx->hwaccel;
int override_dimensions = 1;
int ret;
+ av_assert0(av_codec_is_decoder(avctx->codec));
+
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
if ((unsigned)avctx->width > INT_MAX - STRIDE_ALIGN ||
(ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
- return AVERROR(EINVAL);
+ ret = AVERROR(EINVAL);
+ goto fail;
}
if (frame->width <= 0 || frame->height <= 0) {
@@ -1924,12 +1494,28 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
- return AVERROR(EINVAL);
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ /* compat layer for old-style get_buffer() implementations */
+ avctx->channels = avctx->ch_layout.nb_channels;
+ avctx->channel_layout = (avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE) ?
+ avctx->ch_layout.u.mask : 0;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (frame->nb_samples * (int64_t)avctx->ch_layout.nb_channels > avctx->max_samples) {
+ av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
+ ret = AVERROR(EINVAL);
+ goto fail;
}
}
ret = ff_decode_frame_props(avctx, frame);
if (ret < 0)
- return ret;
+ goto fail;
if (hwaccel) {
if (hwaccel->alloc_frame) {
@@ -1941,38 +1527,31 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
ret = avctx->get_buffer2(avctx, frame, flags);
if (ret < 0)
- goto end;
+ goto fail;
validate_avframe_allocation(avctx, frame);
ret = ff_attach_decode_data(frame);
if (ret < 0)
- goto end;
+ goto fail;
end:
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
- !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
+ !(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
frame->width = avctx->width;
frame->height = avctx->height;
}
- if (ret < 0)
- av_frame_unref(frame);
-
- return ret;
-}
-
-int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
-{
- int ret = get_buffer_internal(avctx, frame, flags);
+fail:
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- frame->width = frame->height = 0;
+ av_frame_unref(frame);
}
+
return ret;
}
-static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
+static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
{
AVFrame *tmp;
int ret;
@@ -1988,7 +1567,7 @@ static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
if (!frame->data[0])
return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
- if (av_frame_is_writable(frame))
+ if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
return ff_decode_frame_props(avctx, frame);
tmp = av_frame_alloc();
@@ -2009,55 +1588,100 @@ static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
return 0;
}
-int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
+int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
- int ret = reget_buffer_internal(avctx, frame);
+ int ret = reget_buffer_internal(avctx, frame, flags);
if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
-static void bsfs_flush(AVCodecContext *avctx)
+int ff_decode_preinit(AVCodecContext *avctx)
{
- DecodeFilterContext *s = &avctx->internal->filter;
+ AVCodecInternal *avci = avctx->internal;
+ int ret = 0;
- for (int i = 0; i < s->nb_bsfs; i++)
- av_bsf_flush(s->bsfs[i]);
-}
+ /* if the decoder init function was already called previously,
+ * free the already allocated subtitle_header before overwriting it */
+ av_freep(&avctx->subtitle_header);
-void avcodec_flush_buffers(AVCodecContext *avctx)
-{
- avctx->internal->draining = 0;
- avctx->internal->draining_done = 0;
- avctx->internal->nb_draining_errors = 0;
- av_frame_unref(avctx->internal->buffer_frame);
- av_frame_unref(avctx->internal->compat_decode_frame);
- av_packet_unref(avctx->internal->buffer_pkt);
- avctx->internal->buffer_pkt_valid = 0;
-
- av_packet_unref(avctx->internal->ds.in_pkt);
+ if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) {
+ av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
+ avctx->codec->max_lowres);
+ avctx->lowres = avctx->codec->max_lowres;
+ }
+ if (avctx->sub_charenc) {
+ if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) {
+ av_log(avctx, AV_LOG_ERROR, "Character encoding is only "
+ "supported with subtitles codecs\n");
+ return AVERROR(EINVAL);
+ } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) {
+ av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, "
+ "subtitles character encoding will be ignored\n",
+ avctx->codec_descriptor->name);
+ avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING;
+ } else {
+ /* input character encoding is set for a text based subtitle
+ * codec at this point */
+ if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC)
+ avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER;
- if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
- ff_thread_flush(avctx);
- else if (avctx->codec->flush)
- avctx->codec->flush(avctx);
+ if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) {
+#if CONFIG_ICONV
+ iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc);
+ if (cd == (iconv_t)-1) {
+ ret = AVERROR(errno);
+ av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context "
+ "with input character encoding \"%s\"\n", avctx->sub_charenc);
+ return ret;
+ }
+ iconv_close(cd);
+#else
+ av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles "
+ "conversion needs a libavcodec built with iconv support "
+ "for this codec\n");
+ return AVERROR(ENOSYS);
+#endif
+ }
+ }
+ }
+ avctx->pts_correction_num_faulty_pts =
+ avctx->pts_correction_num_faulty_dts = 0;
avctx->pts_correction_last_pts =
avctx->pts_correction_last_dts = INT64_MIN;
- bsfs_flush(avctx);
+ if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY
+ && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO)
+ av_log(avctx, AV_LOG_WARNING,
+ "gray decoding requested but not enabled at configuration time\n");
+ if (avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) {
+ avctx->export_side_data |= AV_CODEC_EXPORT_DATA_MVS;
+ }
+
+ avci->in_pkt = av_packet_alloc();
+ avci->last_pkt_props = av_packet_alloc();
+ if (!avci->in_pkt || !avci->last_pkt_props)
+ return AVERROR(ENOMEM);
- if (!avctx->refcounted_frames)
- av_frame_unref(avctx->internal->to_free);
+ ret = decode_bsfs_init(avctx);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
-void ff_decode_bsfs_uninit(AVCodecContext *avctx)
+int ff_copy_palette(void *dst, const AVPacket *src, void *logctx)
{
- DecodeFilterContext *s = &avctx->internal->filter;
- int i;
+ size_t size;
+ const void *pal = av_packet_get_side_data(src, AV_PKT_DATA_PALETTE, &size);
- for (i = 0; i < s->nb_bsfs; i++)
- av_bsf_free(&s->bsfs[i]);
- av_freep(&s->bsfs);
- s->nb_bsfs = 0;
+ if (pal && size == AVPALETTE_SIZE) {
+ memcpy(dst, pal, AVPALETTE_SIZE);
+ return 1;
+ } else if (pal) {
+ av_log(logctx, AV_LOG_ERROR,
+ "Palette size %"SIZE_SPECIFIER" is wrong\n", size);
+ }
+ return 0;
}
diff --git a/media/ffvpx/libavcodec/decode.h b/media/ffvpx/libavcodec/decode.h
index c3e0e82f4c..8430ffbd66 100644
--- a/media/ffvpx/libavcodec/decode.h
+++ b/media/ffvpx/libavcodec/decode.h
@@ -54,6 +54,11 @@ typedef struct FrameDecodeData {
} FrameDecodeData;
/**
+ * avcodec_receive_frame() implementation for decoders.
+ */
+int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame);
+
+/**
* Called by decoders to get the next packet for decoding.
*
* @param pkt An empty packet to be filled with data.
@@ -64,9 +69,16 @@ typedef struct FrameDecodeData {
*/
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt);
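A minimal sketch of the calling pattern for a receive_frame-style decoder; MyDecContext and its persistent pkt field are hypothetical, only ff_decode_get_packet() and its return conventions come from the documentation above:

    static int sketch_receive_frame(AVCodecContext *avctx, AVFrame *frame)
    {
        MyDecContext *s = avctx->priv_data;   /* hypothetical private context */
        int ret;

        if (!s->pkt->size) {
            ret = ff_decode_get_packet(avctx, s->pkt);
            if (ret < 0)
                return ret;                   /* EAGAIN, EOF (drain) or a real error */
        }
        /* ... decode s->pkt into frame, av_packet_unref(s->pkt) once consumed ... */
        return 0;
    }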
-int ff_decode_bsfs_init(AVCodecContext *avctx);
+/**
+ * Set various frame properties from the provided packet.
+ */
+int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
+ AVFrame *frame, const AVPacket *pkt);
-void ff_decode_bsfs_uninit(AVCodecContext *avctx);
+/**
+ * Set various frame properties from the codec context / packet data.
+ */
+int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame);
/**
* Make sure avctx.hw_frames_ctx is set. If it's not set, the function will
@@ -78,4 +90,64 @@ int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
int ff_attach_decode_data(AVFrame *frame);
+/**
+ * Check whether the side-data of src contains a palette of
+ * size AVPALETTE_SIZE; if so, copy it to dst and return 1;
+ * else return 0.
+ * Also emit an error message upon encountering a palette
+ * with invalid size.
+ */
+int ff_copy_palette(void *dst, const AVPacket *src, void *logctx);
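A short sketch of the intended use in a paletted video decoder; the pal array kept in the (hypothetical) private context is an assumption, the return convention is per the comment above:

    /* in the decoder's private context (hypothetical): */
    uint32_t pal[AVPALETTE_COUNT];

    /* when decoding a packet: */
    if (ff_copy_palette(s->pal, avpkt, avctx))
        frame->palette_has_changed = 1;   /* a new palette arrived as packet side data */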
+
+/**
+ * Perform decoder initialization and validation.
+ * Called when opening the decoder, before the FFCodec.init() call.
+ */
+int ff_decode_preinit(AVCodecContext *avctx);
+
+/**
+ * Check that the provided frame dimensions are valid and set them on the codec
+ * context.
+ */
+int ff_set_dimensions(AVCodecContext *s, int width, int height);
+
+/**
+ * Check that the provided sample aspect ratio is valid and set it on the codec
+ * context.
+ */
+int ff_set_sar(AVCodecContext *avctx, AVRational sar);
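A hedged sketch of typical use after parsing a sequence or frame header; hdr_width, hdr_height, sar_num and sar_den are placeholders for values read from the bitstream:

    ret = ff_set_dimensions(avctx, hdr_width, hdr_height);
    if (ret < 0)
        return ret;
    ret = ff_set_sar(avctx, (AVRational){ sar_num, sar_den });
    if (ret < 0)
        return ret;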
+
+/**
+ * Select the (possibly hardware accelerated) pixel format.
+ * This is a wrapper around AVCodecContext.get_format() and should be used
+ * instead of calling get_format() directly.
+ *
+ * The list of pixel formats must contain at least one valid entry, and is
+ * terminated with AV_PIX_FMT_NONE. If it is possible to decode to software,
+ * the last entry in the list must be the most accurate software format.
+ * If it is not possible to decode to software, AVCodecContext.sw_pix_fmt
+ * must be set before calling this function.
+ */
+int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt);
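A minimal sketch assuming a decoder that can output either VAAPI surfaces or plain yuv420p; per the comment above the list ends with AV_PIX_FMT_NONE and the last real entry is the software fallback:

    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_VAAPI,      /* hardware format, needs a matching hwaccel */
        AV_PIX_FMT_YUV420P,    /* most accurate software format */
        AV_PIX_FMT_NONE
    };
    ret = ff_get_format(avctx, pix_fmts);
    if (ret < 0)
        return ret;
    avctx->pix_fmt = ret;      /* the negotiated format */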
+
+/**
+ * Get a buffer for a frame. This is a wrapper around
+ * AVCodecContext.get_buffer() and should be used instead of calling get_buffer()
+ * directly.
+ */
+int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
+
+#define FF_REGET_BUFFER_FLAG_READONLY 1 ///< the returned buffer does not need to be writable
+/**
+ * Identical in function to ff_get_buffer(), except it reuses the existing buffer
+ * if available.
+ */
+int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
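A sketch contrasting the two calls; s->last_frame is a hypothetical frame the decoder keeps across calls:

    /* fresh refcounted buffer for every output picture */
    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;

    /* reuse the previous picture and only make it writable if needed;
       pass FF_REGET_BUFFER_FLAG_READONLY when the data is not modified */
    ret = ff_reget_buffer(avctx, s->last_frame, 0);
    if (ret < 0)
        return ret;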
+
+/**
+ * Add or update AV_FRAME_DATA_MATRIXENCODING side data.
+ */
+int ff_side_data_update_matrix_encoding(AVFrame *frame,
+ enum AVMatrixEncoding matrix_encoding);
+
#endif /* AVCODEC_DECODE_H */
diff --git a/media/ffvpx/libavcodec/defs.h b/media/ffvpx/libavcodec/defs.h
new file mode 100644
index 0000000000..fbe3254db2
--- /dev/null
+++ b/media/ffvpx/libavcodec/defs.h
@@ -0,0 +1,192 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DEFS_H
+#define AVCODEC_DEFS_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Misc types and constants that do not belong anywhere else.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/**
+ * @ingroup lavc_decoding
+ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
+ * This is mainly needed because some optimized bitstream readers read
+ * 32 or 64 bit at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define AV_INPUT_BUFFER_PADDING_SIZE 64
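A minimal allocation sketch; src and size stand for the caller's raw input:

    uint8_t *buf = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return AVERROR(ENOMEM);
    memcpy(buf, src, size);
    memset(buf + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);  /* keep the padding zeroed */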
+
+/**
+ * Verify checksums embedded in the bitstream (could be of either encoded or
+ * decoded data, depending on the format) and print an error message on mismatch.
+ * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
+ * decoder/demuxer returning an error.
+ */
+#define AV_EF_CRCCHECK (1<<0)
+#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations
+#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length
+#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection
+
+#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue
+#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
+#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non-compliances as errors
+#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder/muxer should not do as an error
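For example, an application that wants checksum mismatches to abort decoding would combine these flags in AVCodecContext.err_recognition before opening the codec:

    avctx->err_recognition = AV_EF_CRCCHECK | AV_EF_EXPLODE;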
+
+#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
+#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
+
+/**
+ * @ingroup lavc_decoding
+ */
+enum AVDiscard{
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE =-16, ///< discard nothing
+ AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non reference
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONINTRA= 24, ///< discard all non intra frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
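For example, a player that only needs reference frames while seeking might set, on the decoder context:

    avctx->skip_frame = AVDISCARD_NONREF;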
+
+enum AVAudioServiceType {
+ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
+ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
+ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
+ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
+ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
+ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
+ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
+ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
+ AV_AUDIO_SERVICE_TYPE_NB, ///< Not part of ABI
+};
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan {
+ /**
+ * id
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+} AVPanScan;
+
+/**
+ * This structure describes the bitrate properties of an encoded bitstream. It
+ * roughly corresponds to a subset of the VBV parameters for MPEG-2 or HRD
+ * parameters for H.264/HEVC.
+ */
+typedef struct AVCPBProperties {
+ /**
+ * Maximum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t max_bitrate;
+ /**
+ * Minimum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t min_bitrate;
+ /**
+ * Average bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int64_t avg_bitrate;
+
+ /**
+ * The size of the buffer to which the ratecontrol is applied, in bits.
+ * Zero if unknown or unspecified.
+ */
+ int64_t buffer_size;
+
+ /**
+ * The delay between the time the packet this structure is associated with
+ * is received and the time when it should be decoded, in periods of a 27MHz
+ * clock.
+ *
+ * UINT64_MAX when unknown or unspecified.
+ */
+ uint64_t vbv_delay;
+} AVCPBProperties;
+
+/**
+ * Allocate a CPB properties structure and initialize its fields to default
+ * values.
+ *
+ * @param size if non-NULL, the size of the allocated struct will be written
+ * here. This is useful for embedding it in side data.
+ *
+ * @return the newly allocated struct or NULL on failure
+ */
+AVCPBProperties *av_cpb_properties_alloc(size_t *size);
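A hedged sketch of embedding the struct as packet side data; the bitrate and buffer values are illustrative only:

    size_t size;
    AVCPBProperties *props = av_cpb_properties_alloc(&size);
    if (!props)
        return AVERROR(ENOMEM);
    props->max_bitrate  = 15000000;
    props->buffer_size  = 9000000;
    ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
                                  (uint8_t *)props, size);
    if (ret < 0)
        av_freep(&props);       /* ownership passes to pkt only on success */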
+
+/**
+ * This structure supplies correlation between a packet timestamp and a wall clock
+ * production time. The definition follows the Producer Reference Time ('prft')
+ * as defined in ISO/IEC 14496-12.
+ */
+typedef struct AVProducerReferenceTime {
+ /**
+ * A UTC timestamp, in microseconds, since Unix epoch (e.g., av_gettime()).
+ */
+ int64_t wallclock;
+ int flags;
+} AVProducerReferenceTime;
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
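A small sketch of the Xiph extradata convention this helper serves; the header buffers and sizes are assumptions:

    uint8_t *p = extradata;
    *p++ = 2;                               /* number of laced headers minus one */
    p += av_xiphlacing(p, header1_size);    /* size of the identification header */
    p += av_xiphlacing(p, header2_size);    /* size of the comment header */
    /* the three header payloads are then appended after the lacing */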
+
+#endif // AVCODEC_DEFS_H
diff --git a/media/ffvpx/libavcodec/dummy_funcs.c b/media/ffvpx/libavcodec/dummy_funcs.c
index c6e4f9fe78..c224dc199f 100644
--- a/media/ffvpx/libavcodec/dummy_funcs.c
+++ b/media/ffvpx/libavcodec/dummy_funcs.c
@@ -4,6 +4,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "avcodec.h"
+#include "bsf.h"
+#include "bsf_internal.h"
typedef struct FFTContext FFTContext;
typedef struct H264PredContext H264PredContext;
@@ -823,20 +825,20 @@ AVCodecParser ff_vp3_parser;
AVCodecParser ff_sipr_parser;
AVCodecParser ff_xma_parser;
-AVBitStreamFilter ff_aac_adtstoasc_bsf;
-AVBitStreamFilter ff_chomp_bsf;
-AVBitStreamFilter ff_dump_extradata_bsf;
-AVBitStreamFilter ff_h264_mp4toannexb_bsf;
-AVBitStreamFilter ff_hevc_mp4toannexb_bsf;
-AVBitStreamFilter ff_imx_dump_header_bsf;
-AVBitStreamFilter ff_mjpeg2jpeg_bsf;
-AVBitStreamFilter ff_mjpega_dump_header_bsf;
-AVBitStreamFilter ff_mp3_header_decompress_bsf;
-AVBitStreamFilter ff_mpeg4_unpack_bframes_bsf;
-AVBitStreamFilter ff_mov2textsub_bsf;
-AVBitStreamFilter ff_noise_bsf;
-AVBitStreamFilter ff_remove_extradata_bsf;
-AVBitStreamFilter ff_text2movsub_bsf;
+FFBitStreamFilter ff_aac_adtstoasc_bsf;
+FFBitStreamFilter ff_chomp_bsf;
+FFBitStreamFilter ff_dump_extradata_bsf;
+FFBitStreamFilter ff_h264_mp4toannexb_bsf;
+FFBitStreamFilter ff_hevc_mp4toannexb_bsf;
+FFBitStreamFilter ff_imx_dump_header_bsf;
+FFBitStreamFilter ff_mjpeg2jpeg_bsf;
+FFBitStreamFilter ff_mjpega_dump_header_bsf;
+FFBitStreamFilter ff_mp3_header_decompress_bsf;
+FFBitStreamFilter ff_mpeg4_unpack_bframes_bsf;
+FFBitStreamFilter ff_mov2textsub_bsf;
+FFBitStreamFilter ff_noise_bsf;
+FFBitStreamFilter ff_remove_extradata_bsf;
+FFBitStreamFilter ff_text2movsub_bsf;
void ff_fft_init_aarch64(FFTContext *s) {}
void ff_fft_init_arm(FFTContext *s) {}
diff --git a/media/ffvpx/libavcodec/encode.c b/media/ffvpx/libavcodec/encode.c
new file mode 100644
index 0000000000..041fc7670e
--- /dev/null
+++ b/media/ffvpx/libavcodec/encode.c
@@ -0,0 +1,774 @@
+/*
+ * generic encoding-related code
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/frame.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/samplefmt.h"
+
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "encode.h"
+#include "frame_thread_encoder.h"
+#include "internal.h"
+
+int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
+{
+ if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
+ size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
+ return AVERROR(EINVAL);
+ }
+
+ av_assert0(!avpkt->data);
+
+ av_fast_padded_malloc(&avctx->internal->byte_buffer,
+ &avctx->internal->byte_buffer_size, size);
+ avpkt->data = avctx->internal->byte_buffer;
+ if (!avpkt->data) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
+ return AVERROR(ENOMEM);
+ }
+ avpkt->size = size;
+
+ return 0;
+}
+
+int avcodec_default_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int flags)
+{
+ int ret;
+
+ if (avpkt->size < 0 || avpkt->size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR(EINVAL);
+
+ if (avpkt->data || avpkt->buf) {
+ av_log(avctx, AV_LOG_ERROR, "avpkt->{data,buf} != NULL in avcodec_default_get_encode_buffer()\n");
+ return AVERROR(EINVAL);
+ }
+
+ ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %d\n", avpkt->size);
+ return ret;
+ }
+ avpkt->data = avpkt->buf->data;
+
+ return 0;
+}
+
+int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
+{
+ int ret;
+
+ if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR(EINVAL);
+
+ av_assert0(!avpkt->data && !avpkt->buf);
+
+ avpkt->size = size;
+ ret = avctx->get_encode_buffer(avctx, avpkt, flags);
+ if (ret < 0)
+ goto fail;
+
+ if (!avpkt->data || !avpkt->buf) {
+ av_log(avctx, AV_LOG_ERROR, "No buffer returned by get_encode_buffer()\n");
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ ret = 0;
+fail:
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_encode_buffer() failed\n");
+ av_packet_unref(avpkt);
+ }
+
+ return ret;
+}
+
+static int encode_make_refcounted(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ uint8_t *data = avpkt->data;
+ int ret;
+
+ if (avpkt->buf)
+ return 0;
+
+ avpkt->data = NULL;
+ ret = ff_get_encode_buffer(avctx, avpkt, avpkt->size, 0);
+ if (ret < 0)
+ return ret;
+ memcpy(avpkt->data, data, avpkt->size);
+
+ return 0;
+}
+
+/**
+ * Pad last frame with silence.
+ */
+static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src, int out_samples)
+{
+ int ret;
+
+ frame->format = src->format;
+ frame->nb_samples = out_samples;
+ ret = av_channel_layout_copy(&frame->ch_layout, &s->ch_layout);
+ if (ret < 0)
+ goto fail;
+ ret = av_frame_get_buffer(frame, 0);
+ if (ret < 0)
+ goto fail;
+
+ ret = av_frame_copy_props(frame, src);
+ if (ret < 0)
+ goto fail;
+
+ if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
+ src->nb_samples, s->ch_layout.nb_channels,
+ s->sample_fmt)) < 0)
+ goto fail;
+ if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
+ frame->nb_samples - src->nb_samples,
+ s->ch_layout.nb_channels, s->sample_fmt)) < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ av_frame_unref(frame);
+ s->internal->last_audio_frame = 0;
+ return ret;
+}
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub)
+{
+ int ret;
+ if (sub->start_display_time) {
+ av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
+ return -1;
+ }
+
+ ret = ffcodec(avctx->codec)->cb.encode_sub(avctx, buf, buf_size, sub);
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ return ret;
+}
+
+int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+
+ if (avci->draining)
+ return AVERROR_EOF;
+
+ if (!avci->buffer_frame->buf[0])
+ return AVERROR(EAGAIN);
+
+ av_frame_move_ref(frame, avci->buffer_frame);
+
+ return 0;
+}
+
+int ff_encode_reordered_opaque(AVCodecContext *avctx,
+ AVPacket *pkt, const AVFrame *frame)
+{
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->reordered_opaque = frame->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
+ int ret = av_buffer_replace(&pkt->opaque_ref, frame->opaque_ref);
+ if (ret < 0)
+ return ret;
+ pkt->opaque = frame->opaque;
+ }
+
+ return 0;
+}
+
+int ff_encode_encode_cb(AVCodecContext *avctx, AVPacket *avpkt,
+ AVFrame *frame, int *got_packet)
+{
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ int ret;
+
+ ret = codec->cb.encode(avctx, avpkt, frame, got_packet);
+ emms_c();
+ av_assert0(ret <= 0);
+
+ if (!ret && *got_packet) {
+ if (avpkt->data) {
+ ret = encode_make_refcounted(avctx, avpkt);
+ if (ret < 0)
+ goto unref;
+ // Data returned by encoders must always be ref-counted
+ av_assert0(avpkt->buf);
+ }
+
+ // set the timestamps for the simple no-delay case
+ // encoders with delay have to set the timestamps themselves
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) ||
+ (frame && (codec->caps_internal & FF_CODEC_CAP_EOF_FLUSH))) {
+ if (avpkt->pts == AV_NOPTS_VALUE)
+ avpkt->pts = frame->pts;
+
+ if (!avpkt->duration) {
+ if (frame->duration)
+ avpkt->duration = frame->duration;
+ else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+ avpkt->duration = ff_samples_to_time_base(avctx,
+ frame->nb_samples);
+ }
+ }
+
+ ret = ff_encode_reordered_opaque(avctx, avpkt, frame);
+ if (ret < 0)
+ goto unref;
+ }
+
+ // dts equals pts unless there is reordering
+ // there can be no reordering if there is no encoder delay
+ if (!(avctx->codec_descriptor->props & AV_CODEC_PROP_REORDER) ||
+ !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) ||
+ (codec->caps_internal & FF_CODEC_CAP_EOF_FLUSH))
+ avpkt->dts = avpkt->pts;
+ } else {
+unref:
+ av_packet_unref(avpkt);
+ }
+
+ if (frame)
+ av_frame_unref(frame);
+
+ return ret;
+}
+
+static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ AVFrame *frame = avci->in_frame;
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ int got_packet;
+ int ret;
+
+ if (avci->draining_done)
+ return AVERROR_EOF;
+
+ if (!frame->buf[0] && !avci->draining) {
+ av_frame_unref(frame);
+ ret = ff_encode_get_frame(avctx, frame);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+ }
+
+ if (!frame->buf[0]) {
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
+ avci->frame_thread_encoder))
+ return AVERROR_EOF;
+
+ // Flushing is signaled with a NULL frame
+ frame = NULL;
+ }
+
+ got_packet = 0;
+
+ av_assert0(codec->cb_type == FF_CODEC_CB_TYPE_ENCODE);
+
+ if (CONFIG_FRAME_THREAD_ENCODER && avci->frame_thread_encoder)
+ /* This will unref frame. */
+ ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
+ else {
+ ret = ff_encode_encode_cb(avctx, avpkt, frame, &got_packet);
+ }
+
+ if (avci->draining && !got_packet)
+ avci->draining_done = 1;
+
+ return ret;
+}
+
+static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ int ret;
+
+ while (!avpkt->data && !avpkt->side_data) {
+ ret = encode_simple_internal(avctx, avpkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ if (avci->draining_done)
+ return AVERROR_EOF;
+
+ av_assert0(!avpkt->data && !avpkt->side_data);
+
+ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+ if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
+ avctx->stats_out[0] = '\0';
+ if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
+ return AVERROR(EINVAL);
+ }
+
+ if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_RECEIVE_PACKET) {
+ ret = ffcodec(avctx->codec)->cb.receive_packet(avctx, avpkt);
+ if (ret < 0)
+ av_packet_unref(avpkt);
+ else
+ // Encoders must always return ref-counted buffers.
+ // Side-data-only packets have no data and need not be ref-counted.
+ av_assert0(!avpkt->data || avpkt->buf);
+ } else
+ ret = encode_simple_receive_packet(avctx, avpkt);
+ if (ret >= 0)
+ avpkt->flags |= avci->intra_only_flag;
+
+ if (ret == AVERROR_EOF)
+ avci->draining_done = 1;
+
+ return ret;
+}
+
+#if CONFIG_LCMS2
+static int encode_generate_icc_profile(AVCodecContext *avctx, AVFrame *frame)
+{
+ enum AVColorTransferCharacteristic trc = frame->color_trc;
+ enum AVColorPrimaries prim = frame->color_primaries;
+ const FFCodec *const codec = ffcodec(avctx->codec);
+ AVCodecInternal *avci = avctx->internal;
+ cmsHPROFILE profile;
+ int ret;
+
+ /* don't generate ICC profiles if disabled or unsupported */
+ if (!(avctx->flags2 & AV_CODEC_FLAG2_ICC_PROFILES))
+ return 0;
+ if (!(codec->caps_internal & FF_CODEC_CAP_ICC_PROFILES))
+ return 0;
+
+ if (trc == AVCOL_TRC_UNSPECIFIED)
+ trc = avctx->color_trc;
+ if (prim == AVCOL_PRI_UNSPECIFIED)
+ prim = avctx->color_primaries;
+ if (trc == AVCOL_TRC_UNSPECIFIED || prim == AVCOL_PRI_UNSPECIFIED)
+ return 0; /* can't generate ICC profile with missing csp tags */
+
+ if (av_frame_get_side_data(frame, AV_FRAME_DATA_ICC_PROFILE))
+ return 0; /* don't overwrite existing ICC profile */
+
+ if (!avci->icc.avctx) {
+ ret = ff_icc_context_init(&avci->icc, avctx);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = ff_icc_profile_generate(&avci->icc, prim, trc, &profile);
+ if (ret < 0)
+ return ret;
+
+ ret = ff_icc_profile_attach(&avci->icc, profile, frame);
+ cmsCloseProfile(profile);
+ return ret;
+}
+#else /* !CONFIG_LCMS2 */
+static int encode_generate_icc_profile(av_unused AVCodecContext *c, av_unused AVFrame *f)
+{
+ return 0;
+}
+#endif
+
+static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
+{
+ AVCodecInternal *avci = avctx->internal;
+ AVFrame *dst = avci->buffer_frame;
+ int ret;
+
+ if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+ /* extract audio service type metadata */
+ AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
+ if (sd && sd->size >= sizeof(enum AVAudioServiceType))
+ avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
+
+ /* check for valid frame size */
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
+ /* if we already got an undersized frame, that must have been the last */
+ if (avctx->internal->last_audio_frame) {
+ av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
+ return AVERROR(EINVAL);
+ }
+ if (src->nb_samples > avctx->frame_size) {
+ av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) > frame_size (%d)\n", src->nb_samples, avctx->frame_size);
+ return AVERROR(EINVAL);
+ }
+ if (src->nb_samples < avctx->frame_size) {
+ avctx->internal->last_audio_frame = 1;
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME)) {
+ int pad_samples = avci->pad_samples ? avci->pad_samples : avctx->frame_size;
+ int out_samples = (src->nb_samples + pad_samples - 1) / pad_samples * pad_samples;
+
+ if (out_samples != src->nb_samples) {
+ ret = pad_last_frame(avctx, dst, src, out_samples);
+ if (ret < 0)
+ return ret;
+ goto finish;
+ }
+ }
+ }
+ }
+ }
+
+ ret = av_frame_ref(dst, src);
+ if (ret < 0)
+ return ret;
+
+finish:
+
+#if FF_API_PKT_DURATION
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (dst->pkt_duration && dst->pkt_duration != dst->duration)
+ dst->duration = dst->pkt_duration;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+ ret = encode_generate_icc_profile(avctx, dst);
+ if (ret < 0)
+ return ret;
+ }
+
+ // unset frame duration unless AV_CODEC_FLAG_FRAME_DURATION is set,
+ // since otherwise we cannot be sure that whatever value it has is in the
+ // right timebase, so we would produce an incorrect value, which is worse
+ // than none at all
+ if (!(avctx->flags & AV_CODEC_FLAG_FRAME_DURATION))
+ dst->duration = 0;
+
+ return 0;
+}
+
+int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avci->draining)
+ return AVERROR_EOF;
+
+ if (avci->buffer_frame->buf[0])
+ return AVERROR(EAGAIN);
+
+ if (!frame) {
+ avci->draining = 1;
+ } else {
+ ret = encode_send_frame_internal(avctx, frame);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
+ ret = encode_receive_packet_internal(avctx, avci->buffer_pkt);
+ if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+ return ret;
+ }
+
+ avctx->frame_num++;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->frame_number = avctx->frame_num;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ return 0;
+}
+
+int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ av_packet_unref(avpkt);
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avci->buffer_pkt->data || avci->buffer_pkt->side_data) {
+ av_packet_move_ref(avpkt, avci->buffer_pkt);
+ } else {
+ ret = encode_receive_packet_internal(avctx, avpkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int encode_preinit_video(AVCodecContext *avctx)
+{
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
+ int i;
+
+ if (avctx->codec->pix_fmts) {
+ for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
+ if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
+ break;
+ if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
+ av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
+ (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
+ return AVERROR(EINVAL);
+ }
+ if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
+ avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
+ avctx->color_range = AVCOL_RANGE_JPEG;
+ }
+
+ if ( avctx->bits_per_raw_sample < 0
+ || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
+ av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
+ avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
+ avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
+ }
+ if (avctx->width <= 0 || avctx->height <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->ticks_per_frame && avctx->time_base.num &&
+ avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
+ av_log(avctx, AV_LOG_ERROR,
+ "ticks_per_frame %d too large for the timebase %d/%d.",
+ avctx->ticks_per_frame,
+ avctx->time_base.num,
+ avctx->time_base.den);
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ if (frames_ctx->format != avctx->pix_fmt) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
+ return AVERROR(EINVAL);
+ }
+ if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE &&
+ avctx->sw_pix_fmt != frames_ctx->sw_format) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Mismatching AVCodecContext.sw_pix_fmt (%s) "
+ "and AVHWFramesContext.sw_format (%s)\n",
+ av_get_pix_fmt_name(avctx->sw_pix_fmt),
+ av_get_pix_fmt_name(frames_ctx->sw_format));
+ return AVERROR(EINVAL);
+ }
+ avctx->sw_pix_fmt = frames_ctx->sw_format;
+ }
+
+ return 0;
+}
+
+static int encode_preinit_audio(AVCodecContext *avctx)
+{
+ int i;
+
+ if (avctx->codec->sample_fmts) {
+ for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
+ if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
+ break;
+ if (avctx->ch_layout.nb_channels == 1 &&
+ av_get_planar_sample_fmt(avctx->sample_fmt) ==
+ av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
+ avctx->sample_fmt = avctx->codec->sample_fmts[i];
+ break;
+ }
+ }
+ if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
+ av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
+ (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
+ return AVERROR(EINVAL);
+ }
+ }
+ if (avctx->codec->supported_samplerates) {
+ for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
+ if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
+ break;
+ if (avctx->codec->supported_samplerates[i] == 0) {
+ av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
+ avctx->sample_rate);
+ return AVERROR(EINVAL);
+ }
+ }
+ if (avctx->sample_rate < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
+ avctx->sample_rate);
+ return AVERROR(EINVAL);
+ }
+ if (avctx->codec->ch_layouts) {
+ for (i = 0; avctx->codec->ch_layouts[i].nb_channels; i++) {
+ if (!av_channel_layout_compare(&avctx->ch_layout, &avctx->codec->ch_layouts[i]))
+ break;
+ }
+ if (!avctx->codec->ch_layouts[i].nb_channels) {
+ char buf[512];
+ int ret = av_channel_layout_describe(&avctx->ch_layout, buf, sizeof(buf));
+ if (ret > 0)
+ av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
+ return AVERROR(EINVAL);
+ }
+ }
+
+ if (!avctx->bits_per_raw_sample)
+ avctx->bits_per_raw_sample = 8 * av_get_bytes_per_sample(avctx->sample_fmt);
+
+ return 0;
+}
+
+int ff_encode_preinit(AVCodecContext *avctx)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret = 0;
+
+ if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE &&
+ !(avctx->codec->capabilities & AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE)) {
+ av_log(avctx, AV_LOG_ERROR, "The copy_opaque flag is set, but the "
+ "encoder does not support it.\n");
+ return AVERROR(EINVAL);
+ }
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO: ret = encode_preinit_video(avctx); break;
+ case AVMEDIA_TYPE_AUDIO: ret = encode_preinit_audio(avctx); break;
+ }
+ if (ret < 0)
+ return ret;
+
+ if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
+ && avctx->bit_rate>0 && avctx->bit_rate<1000) {
+ av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
+ }
+
+ if (!avctx->rc_initial_buffer_occupancy)
+ avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;
+
+ if (avctx->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY)
+ avctx->internal->intra_only_flag = AV_PKT_FLAG_KEY;
+
+ if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_ENCODE) {
+ avci->in_frame = av_frame_alloc();
+ if (!avci->in_frame)
+ return AVERROR(ENOMEM);
+ }
+
+ if ((avctx->flags & AV_CODEC_FLAG_RECON_FRAME)) {
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_ENCODER_RECON_FRAME)) {
+ av_log(avctx, AV_LOG_ERROR, "Reconstructed frame output requested "
+ "from an encoder not supporting it\n");
+ return AVERROR(ENOSYS);
+ }
+
+ avci->recon_frame = av_frame_alloc();
+ if (!avci->recon_frame)
+ return AVERROR(ENOMEM);
+ }
+
+ if (CONFIG_FRAME_THREAD_ENCODER) {
+ ret = ff_frame_thread_encoder_init(avctx);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ int ret;
+
+ switch (avctx->codec->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ frame->format = avctx->pix_fmt;
+ if (frame->width <= 0 || frame->height <= 0) {
+ frame->width = FFMAX(avctx->width, avctx->coded_width);
+ frame->height = FFMAX(avctx->height, avctx->coded_height);
+ }
+
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ frame->sample_rate = avctx->sample_rate;
+ frame->format = avctx->sample_fmt;
+ if (!frame->ch_layout.nb_channels) {
+ ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ }
+
+ ret = avcodec_default_get_buffer2(avctx, frame, 0);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ av_frame_unref(frame);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ff_encode_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+
+ if (!avci->recon_frame)
+ return AVERROR(EINVAL);
+ if (!avci->recon_frame->buf[0])
+ return avci->draining_done ? AVERROR_EOF : AVERROR(EAGAIN);
+
+ av_frame_move_ref(frame, avci->recon_frame);
+ return 0;
+}
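
The sample-format loop in encode_preinit_audio() above will accept a requested format that is not in the encoder's list when the stream is mono and the planar variants match, since packed and planar layouts describe the same data for a single channel. A minimal standalone sketch of that equivalence check; the main() wrapper and the chosen formats are illustrative, only av_get_planar_sample_fmt() and av_get_sample_fmt_name() come from libavutil:

/* Why AV_SAMPLE_FMT_S16 is accepted for a mono stream when the encoder
 * only lists AV_SAMPLE_FMT_S16P, mirroring the test in encode_preinit_audio(). */
#include <stdio.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    enum AVSampleFormat requested = AV_SAMPLE_FMT_S16;   /* what the caller set       */
    enum AVSampleFormat supported = AV_SAMPLE_FMT_S16P;  /* what the encoder declares */
    int nb_channels = 1;                                  /* mono */

    /* With one channel, packed and planar are identical, so comparing
     * the planar variants is sufficient. */
    if (nb_channels == 1 &&
        av_get_planar_sample_fmt(requested) == av_get_planar_sample_fmt(supported))
        printf("%s accepted in place of %s\n",
               av_get_sample_fmt_name(requested),
               av_get_sample_fmt_name(supported));
    return 0;
}
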
diff --git a/media/ffvpx/libavcodec/encode.h b/media/ffvpx/libavcodec/encode.h
new file mode 100644
index 0000000000..26a3304045
--- /dev/null
+++ b/media/ffvpx/libavcodec/encode.h
@@ -0,0 +1,99 @@
+/*
+ * generic encoding-related code
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ENCODE_H
+#define AVCODEC_ENCODE_H
+
+#include "libavutil/frame.h"
+
+#include "avcodec.h"
+#include "packet.h"
+
+/**
+ * avcodec_receive_frame() implementation for encoders.
+ */
+int ff_encode_receive_frame(AVCodecContext *avctx, AVFrame *frame);
+
+/**
+ * Called by encoders to get the next frame for encoding.
+ *
+ * @param frame An empty frame to be filled with data.
+ * @return 0 if a new reference has been successfully written to frame
+ * AVERROR(EAGAIN) if no data is currently available
+ * AVERROR_EOF if end of stream has been reached, so no more data
+ * will be available
+ */
+int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame);
+
+/**
+ * Get a buffer for a packet. This is a wrapper around
+ * AVCodecContext.get_encode_buffer() and should be used instead calling get_encode_buffer()
+ * directly.
+ */
+int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags);
+
+/**
+ * Allocate buffers for a frame. Encoder equivalent to ff_get_buffer().
+ */
+int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame);
+
+/**
+ * Check AVPacket size and allocate data.
+ *
+ * Encoders of type FF_CODEC_CB_TYPE_ENCODE can use this as a convenience to
+ * obtain a big enough buffer for the encoded bitstream.
+ *
+ * @param avctx the AVCodecContext of the encoder
+ * @param avpkt The AVPacket: on success, avpkt->data will point to a buffer
+ * of size at least `size`; the packet will not be refcounted.
+ * This packet must be initially blank.
+ * @param size an upper bound of the size of the packet to encode
+ * @return non negative on success, negative error code on failure
+ */
+int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size);
+
+/**
+ * Propagate user opaque values from the frame to avctx/pkt as needed.
+ */
+int ff_encode_reordered_opaque(AVCodecContext *avctx,
+ AVPacket *pkt, const AVFrame *frame);
+
+/*
+ * Perform encoder initialization and validation.
+ * Called when opening the encoder, before the FFCodec.init() call.
+ */
+int ff_encode_preinit(AVCodecContext *avctx);
+
+int ff_encode_encode_cb(AVCodecContext *avctx, AVPacket *avpkt,
+ AVFrame *frame, int *got_packet);
+
+/**
+ * Rescale from sample rate to AVCodecContext.time_base.
+ */
+static av_always_inline int64_t ff_samples_to_time_base(const AVCodecContext *avctx,
+ int64_t samples)
+{
+ if (samples == AV_NOPTS_VALUE)
+ return AV_NOPTS_VALUE;
+ return av_rescale_q(samples, (AVRational){ 1, avctx->sample_rate },
+ avctx->time_base);
+}
+
+#endif /* AVCODEC_ENCODE_H */
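
ff_samples_to_time_base() above is a direct av_rescale_q() from the 1/sample_rate time base into the encoder time base. A worked example with illustrative numbers (a 1024-sample frame at 48 kHz, a 1/90000 time base):

/* 1024 * (1/48000) expressed in 1/90000 units: 1024 * 90000 / 48000 = 1920. */
#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    int64_t samples      = 1024;            /* one audio frame                 */
    AVRational sample_tb = { 1, 48000 };    /* 1 / AVCodecContext.sample_rate  */
    AVRational time_base = { 1, 90000 };    /* illustrative encoder time base  */

    int64_t pts = av_rescale_q(samples, sample_tb, time_base);
    printf("%" PRId64 " samples -> pts %" PRId64 "\n", samples, pts);  /* 1920 */
    return 0;
}
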
diff --git a/media/ffvpx/libavcodec/error_resilience.h b/media/ffvpx/libavcodec/error_resilience.h
index 664a765659..47cc8a4fc6 100644
--- a/media/ffvpx/libavcodec/error_resilience.h
+++ b/media/ffvpx/libavcodec/error_resilience.h
@@ -24,7 +24,7 @@
#include "avcodec.h"
#include "me_cmp.h"
-#include "thread.h"
+#include "threadframe.h"
///< current MB is the first after a resync marker
#define VP_START 1
@@ -52,7 +52,8 @@ typedef struct ERPicture {
typedef struct ERContext {
AVCodecContext *avctx;
- MECmpContext mecc;
+
+ me_cmp_func sad;
int mecc_inited;
int *mb_index2xy;
@@ -74,14 +75,13 @@ typedef struct ERContext {
ERPicture last_pic;
ERPicture next_pic;
- AVBufferRef *ref_index_buf[2];
- AVBufferRef *motion_val_buf[2];
+ int8_t *ref_index[2];
+ int16_t (*motion_val_base[2])[2];
uint16_t pp_time;
uint16_t pb_time;
int quarter_sample;
int partitioned_frame;
- int ref_count;
void (*decode_mb)(void *opaque, int ref, int mv_dir, int mv_type,
int (*mv)[2][4][2],
diff --git a/media/ffvpx/libavcodec/fft-internal.h b/media/ffvpx/libavcodec/fft-internal.h
index 0a8f7d05cf..d89a3e38ca 100644
--- a/media/ffvpx/libavcodec/fft-internal.h
+++ b/media/ffvpx/libavcodec/fft-internal.h
@@ -19,6 +19,9 @@
#ifndef AVCODEC_FFT_INTERNAL_H
#define AVCODEC_FFT_INTERNAL_H
+#include "libavutil/mathematics.h"
+#include "fft.h"
+
#if FFT_FLOAT
#define FIX15(v) (v)
@@ -34,11 +37,7 @@
(dim) = (are) * (bim) + (aim) * (bre); \
} while (0)
-#else
-
-#define SCALE_FLOAT(a, bits) lrint((a) * (double)(1 << (bits)))
-
-#if FFT_FIXED_32
+#else /* FFT_FLOAT */
#define CMUL(dre, dim, are, aim, bre, bim) do { \
int64_t accu; \
@@ -50,37 +49,6 @@
(dim) = (int)(((accu) + 0x40000000) >> 31); \
} while (0)
-#define FIX15(a) av_clip(SCALE_FLOAT(a, 31), -2147483647, 2147483647)
-
-#else /* FFT_FIXED_32 */
-
-#include "fft.h"
-#include "mathops.h"
-
-void ff_mdct_calcw_c(FFTContext *s, FFTDouble *output, const FFTSample *input);
-
-#define FIX15(a) av_clip(SCALE_FLOAT(a, 15), -32767, 32767)
-
-#define sqrthalf ((int16_t)((1<<15)*M_SQRT1_2))
-
-#define BF(x, y, a, b) do { \
- x = (a - b) >> 1; \
- y = (a + b) >> 1; \
- } while (0)
-
-#define CMULS(dre, dim, are, aim, bre, bim, sh) do { \
- (dre) = (MUL16(are, bre) - MUL16(aim, bim)) >> sh; \
- (dim) = (MUL16(are, bim) + MUL16(aim, bre)) >> sh; \
- } while (0)
-
-#define CMUL(dre, dim, are, aim, bre, bim) \
- CMULS(dre, dim, are, aim, bre, bim, 15)
-
-#define CMULL(dre, dim, are, aim, bre, bim) \
- CMULS(dre, dim, are, aim, bre, bim, 0)
-
-#endif /* FFT_FIXED_32 */
-
#endif /* FFT_FLOAT */
#define ff_imdct_calc_c FFT_NAME(ff_imdct_calc_c)
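
With the 16-bit fixed-point path removed, the remaining !FFT_FLOAT CMUL above keeps operands in Q31 and folds each 64-bit accumulator back to Q31 with round-to-nearest via (accu + 0x40000000) >> 31. A standalone sketch of just that arithmetic; Q31() is the macro from the fft.h hunk below, while q31_mul_rounded() is an illustrative helper, not part of the header:

#include <stdio.h>
#include <stdint.h>

#define Q31(x) (int)((x)*2147483648.0 + 0.5)   /* as defined in fft.h */

static int q31_mul_rounded(int a, int b)
{
    int64_t accu = (int64_t)a * b;              /* Q62 intermediate              */
    return (int)((accu + 0x40000000) >> 31);    /* back to Q31, round to nearest */
}

int main(void)
{
    int half = Q31(0.5);
    /* 0.5 * 0.5 == 0.25: both sides print 536870912 */
    printf("%d == %d\n", q31_mul_rounded(half, half), Q31(0.25));
    return 0;
}
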
diff --git a/media/ffvpx/libavcodec/fft.h b/media/ffvpx/libavcodec/fft.h
index c858570a21..d46e5a3f0b 100644
--- a/media/ffvpx/libavcodec/fft.h
+++ b/media/ffvpx/libavcodec/fft.h
@@ -26,13 +26,11 @@
#define FFT_FLOAT 1
#endif
-#ifndef FFT_FIXED_32
-#define FFT_FIXED_32 0
-#endif
-
#include <stdint.h>
#include "config.h"
-#include "libavutil/mem.h"
+
+#include "libavutil/attributes_internal.h"
+#include "libavutil/mem_internal.h"
#if FFT_FLOAT
@@ -44,21 +42,11 @@ typedef float FFTDouble;
#else
-#if FFT_FIXED_32
-
#define Q31(x) (int)((x)*2147483648.0 + 0.5)
#define FFT_NAME(x) x ## _fixed_32
typedef int32_t FFTSample;
-#else /* FFT_FIXED_32 */
-
-#define FFT_NAME(x) x ## _fixed
-
-typedef int16_t FFTSample;
-
-#endif /* FFT_FIXED_32 */
-
typedef struct FFTComplex {
FFTSample re, im;
} FFTComplex;
@@ -107,7 +95,6 @@ struct FFTContext {
void (*imdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
void (*imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
void (*mdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
- void (*mdct_calcw)(struct FFTContext *s, FFTDouble *output, const FFTSample *input);
enum fft_permutation_type fft_permutation;
enum mdct_permutation_type mdct_permutation;
uint32_t *revtab32;
@@ -115,12 +102,20 @@ struct FFTContext {
#if CONFIG_HARDCODED_TABLES
#define COSTABLE_CONST const
+#define ff_init_ff_cos_tabs(index)
#else
#define COSTABLE_CONST
+#define ff_init_ff_cos_tabs FFT_NAME(ff_init_ff_cos_tabs)
+
+/**
+ * Initialize the cosine table in ff_cos_tabs[index]
+ * @param index index in ff_cos_tabs array of the table to initialize
+ */
+void ff_init_ff_cos_tabs(int index);
#endif
#define COSTABLE(size) \
- COSTABLE_CONST DECLARE_ALIGNED(32, FFTSample, FFT_NAME(ff_cos_##size))[size/2]
+ COSTABLE_CONST attribute_visibility_hidden DECLARE_ALIGNED(32, FFTSample, FFT_NAME(ff_cos_##size))[size/2]
extern COSTABLE(16);
extern COSTABLE(32);
@@ -138,14 +133,6 @@ extern COSTABLE(65536);
extern COSTABLE(131072);
extern COSTABLE_CONST FFTSample* const FFT_NAME(ff_cos_tabs)[18];
-#define ff_init_ff_cos_tabs FFT_NAME(ff_init_ff_cos_tabs)
-
-/**
- * Initialize the cosine table in ff_cos_tabs[index]
- * @param index index in ff_cos_tabs array of the table to initialize
- */
-void ff_init_ff_cos_tabs(int index);
-
#define ff_fft_init FFT_NAME(ff_fft_init)
#define ff_fft_end FFT_NAME(ff_fft_end)
@@ -162,8 +149,6 @@ void ff_fft_init_arm(FFTContext *s);
void ff_fft_init_mips(FFTContext *s);
void ff_fft_init_ppc(FFTContext *s);
-void ff_fft_fixed_init_arm(FFTContext *s);
-
void ff_fft_end(FFTContext *s);
#define ff_mdct_init FFT_NAME(ff_mdct_init)
diff --git a/media/ffvpx/libavcodec/fft_float.c b/media/ffvpx/libavcodec/fft_float.c
index 73cc98d0d4..a9fd01978d 100644
--- a/media/ffvpx/libavcodec/fft_float.c
+++ b/media/ffvpx/libavcodec/fft_float.c
@@ -17,5 +17,4 @@
*/
#define FFT_FLOAT 1
-#define FFT_FIXED_32 0
#include "fft_template.c"
diff --git a/media/ffvpx/libavcodec/fft_template.c b/media/ffvpx/libavcodec/fft_template.c
index 20a62e4290..f2742a3ae8 100644
--- a/media/ffvpx/libavcodec/fft_template.c
+++ b/media/ffvpx/libavcodec/fft_template.c
@@ -33,16 +33,9 @@
#include "fft.h"
#include "fft-internal.h"
-#if FFT_FIXED_32
+#if !FFT_FLOAT
#include "fft_table.h"
-
-static void av_cold fft_lut_init(void)
-{
- int n = 0;
- ff_fft_lut_init(ff_fft_offsets_lut, 0, 1 << 17, &n);
-}
-
-#else /* FFT_FIXED_32 */
+#else /* !FFT_FLOAT */
/* cos(2*pi*x/n) for 0<=x<=n/4, followed by its reverse */
#if !CONFIG_HARDCODED_TABLES
@@ -120,6 +113,10 @@ static CosTabsInitOnce cos_tabs_init_once[] = {
{ init_ff_cos_tabs_131072, AV_ONCE_INIT },
};
+av_cold void ff_init_ff_cos_tabs(int index)
+{
+ ff_thread_once(&cos_tabs_init_once[index].control, cos_tabs_init_once[index].func);
+}
#endif
COSTABLE_CONST FFTSample * const FFT_NAME(ff_cos_tabs)[] = {
NULL, NULL, NULL, NULL,
@@ -139,7 +136,7 @@ COSTABLE_CONST FFTSample * const FFT_NAME(ff_cos_tabs)[] = {
FFT_NAME(ff_cos_131072),
};
-#endif /* FFT_FIXED_32 */
+#endif /* FFT_FLOAT */
static void fft_permute_c(FFTContext *s, FFTComplex *z);
static void fft_calc_c(FFTContext *s, FFTComplex *z);
@@ -155,12 +152,6 @@ static int split_radix_permutation(int i, int n, int inverse)
else return split_radix_permutation(i, m, inverse)*4 - 1;
}
-av_cold void ff_init_ff_cos_tabs(int index)
-{
-#if (!CONFIG_HARDCODED_TABLES) && (!FFT_FIXED_32)
- ff_thread_once(&cos_tabs_init_once[index].control, cos_tabs_init_once[index].func);
-#endif
-}
static const int avx_tab[] = {
0, 4, 1, 5, 8, 12, 9, 13, 2, 6, 3, 7, 10, 14, 11, 15
@@ -235,30 +226,28 @@ av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
s->mdct_calc = ff_mdct_calc_c;
#endif
-#if FFT_FIXED_32
- {
- static AVOnce control = AV_ONCE_INIT;
- ff_thread_once(&control, fft_lut_init);
- }
-#else /* FFT_FIXED_32 */
#if FFT_FLOAT
- if (ARCH_AARCH64) ff_fft_init_aarch64(s);
- if (ARCH_ARM) ff_fft_init_arm(s);
- if (ARCH_PPC) ff_fft_init_ppc(s);
- if (ARCH_X86) ff_fft_init_x86(s);
- if (CONFIG_MDCT) s->mdct_calcw = s->mdct_calc;
- if (HAVE_MIPSFPU) ff_fft_init_mips(s);
-#else
- if (CONFIG_MDCT) s->mdct_calcw = ff_mdct_calcw_c;
- if (ARCH_ARM) ff_fft_fixed_init_arm(s);
+#if ARCH_AARCH64
+ ff_fft_init_aarch64(s);
+#elif ARCH_ARM
+ ff_fft_init_arm(s);
+#elif ARCH_PPC
+ ff_fft_init_ppc(s);
+#elif ARCH_X86
+ ff_fft_init_x86(s);
+#endif
+#if HAVE_MIPSFPU
+ ff_fft_init_mips(s);
#endif
for(j=4; j<=nbits; j++) {
ff_init_ff_cos_tabs(j);
}
-#endif /* FFT_FIXED_32 */
+#else /* FFT_FLOAT */
+ ff_fft_lut_init();
+#endif
- if (s->fft_permutation == FF_FFT_PERM_AVX) {
+ if (ARCH_X86 && FFT_FLOAT && s->fft_permutation == FF_FFT_PERM_AVX) {
fft_perm_avx(s);
} else {
#define PROCESS_FFT_PERM_SWAP_LSBS(num) do {\
@@ -328,7 +317,7 @@ av_cold void ff_fft_end(FFTContext *s)
av_freep(&s->tmp_buf);
}
-#if FFT_FIXED_32
+#if !FFT_FLOAT
static void fft_calc_c(FFTContext *s, FFTComplex *z) {
@@ -486,7 +475,7 @@ static void fft_calc_c(FFTContext *s, FFTComplex *z) {
}
}
-#else /* FFT_FIXED_32 */
+#else /* !FFT_FLOAT */
#define BUTTERFLIES(a0,a1,a2,a3) {\
BF(t3, t5, t5, t1);\
@@ -636,4 +625,4 @@ static void fft_calc_c(FFTContext *s, FFTComplex *z)
{
fft_dispatch[s->nbits-2](z);
}
-#endif /* FFT_FIXED_32 */
+#endif /* !FFT_FLOAT */
diff --git a/media/ffvpx/libavcodec/flac.c b/media/ffvpx/libavcodec/flac.c
index 5ffbf93190..174b4801be 100644
--- a/media/ffvpx/libavcodec/flac.c
+++ b/media/ffvpx/libavcodec/flac.c
@@ -26,18 +26,19 @@
#include "get_bits.h"
#include "flac.h"
#include "flacdata.h"
-
-static const int8_t sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 0 };
-
-static const uint64_t flac_channel_layouts[8] = {
- AV_CH_LAYOUT_MONO,
- AV_CH_LAYOUT_STEREO,
- AV_CH_LAYOUT_SURROUND,
- AV_CH_LAYOUT_QUAD,
- AV_CH_LAYOUT_5POINT0,
- AV_CH_LAYOUT_5POINT1,
- AV_CH_LAYOUT_6POINT1,
- AV_CH_LAYOUT_7POINT1
+#include "flac_parse.h"
+
+static const int8_t sample_size_table[] = { 0, 8, 12, 0, 16, 20, 24, 32 };
+
+static const AVChannelLayout flac_channel_layouts[8] = {
+ AV_CHANNEL_LAYOUT_MONO,
+ AV_CHANNEL_LAYOUT_STEREO,
+ AV_CHANNEL_LAYOUT_SURROUND,
+ AV_CHANNEL_LAYOUT_QUAD,
+ AV_CHANNEL_LAYOUT_5POINT0,
+ AV_CHANNEL_LAYOUT_5POINT1,
+ AV_CHANNEL_LAYOUT_6POINT1,
+ AV_CHANNEL_LAYOUT_7POINT1
};
static int64_t get_utf8(GetBitContext *gb)
@@ -81,7 +82,7 @@ int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
/* bits per sample */
bps_code = get_bits(gb, 3);
- if (bps_code == 3 || bps_code == 7) {
+ if (bps_code == 3) {
av_log(avctx, AV_LOG_ERROR + log_level_offset,
"invalid sample size code (%d)\n",
bps_code);
@@ -145,29 +146,7 @@ int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
return 0;
}
-int ff_flac_get_max_frame_size(int blocksize, int ch, int bps)
-{
- /* Technically, there is no limit to FLAC frame size, but an encoder
- should not write a frame that is larger than if verbatim encoding mode
- were to be used. */
-
- int count;
-
- count = 16; /* frame header */
- count += ch * ((7+bps+7)/8); /* subframe headers */
- if (ch == 2) {
- /* for stereo, need to account for using decorrelation */
- count += (( 2*bps+1) * blocksize + 7) / 8;
- } else {
- count += ( ch*bps * blocksize + 7) / 8;
- }
- count += 2; /* frame footer */
-
- return count;
-}
-
int ff_flac_is_extradata_valid(AVCodecContext *avctx,
- enum FLACExtradataFormat *format,
uint8_t **streaminfo_start)
{
if (!avctx->extradata || avctx->extradata_size < FLAC_STREAMINFO_SIZE) {
@@ -180,25 +159,29 @@ int ff_flac_is_extradata_valid(AVCodecContext *avctx,
av_log(avctx, AV_LOG_WARNING, "extradata contains %d bytes too many.\n",
FLAC_STREAMINFO_SIZE-avctx->extradata_size);
}
- *format = FLAC_EXTRADATA_FORMAT_STREAMINFO;
*streaminfo_start = avctx->extradata;
} else {
if (avctx->extradata_size < 8+FLAC_STREAMINFO_SIZE) {
av_log(avctx, AV_LOG_ERROR, "extradata too small.\n");
return 0;
}
- *format = FLAC_EXTRADATA_FORMAT_FULL_HEADER;
*streaminfo_start = &avctx->extradata[8];
}
return 1;
}
-void ff_flac_set_channel_layout(AVCodecContext *avctx)
+void ff_flac_set_channel_layout(AVCodecContext *avctx, int channels)
{
- if (avctx->channels <= FF_ARRAY_ELEMS(flac_channel_layouts))
- avctx->channel_layout = flac_channel_layouts[avctx->channels - 1];
+ if (channels == avctx->ch_layout.nb_channels &&
+ avctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC)
+ return;
+
+ av_channel_layout_uninit(&avctx->ch_layout);
+ if (channels <= FF_ARRAY_ELEMS(flac_channel_layouts))
+ avctx->ch_layout = flac_channel_layouts[channels - 1];
else
- avctx->channel_layout = 0;
+ avctx->ch_layout = (AVChannelLayout){ .order = AV_CHANNEL_ORDER_UNSPEC,
+ .nb_channels = channels };
}
int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
@@ -217,9 +200,9 @@ int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
}
skip_bits(&gb, 24); /* skip min frame size */
- s->max_framesize = get_bits_long(&gb, 24);
+ s->max_framesize = get_bits(&gb, 24);
- s->samplerate = get_bits_long(&gb, 20);
+ s->samplerate = get_bits(&gb, 20);
s->channels = get_bits(&gb, 3) + 1;
s->bps = get_bits(&gb, 5) + 1;
@@ -229,13 +212,9 @@ int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
return AVERROR_INVALIDDATA;
}
- avctx->channels = s->channels;
avctx->sample_rate = s->samplerate;
avctx->bits_per_raw_sample = s->bps;
-
- if (!avctx->channel_layout ||
- av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels)
- ff_flac_set_channel_layout(avctx);
+ ff_flac_set_channel_layout(avctx, s->channels);
s->samples = get_bits64(&gb, 36);
diff --git a/media/ffvpx/libavcodec/flac.h b/media/ffvpx/libavcodec/flac.h
index 991ab43f3c..00e631ed20 100644
--- a/media/ffvpx/libavcodec/flac.h
+++ b/media/ffvpx/libavcodec/flac.h
@@ -1,5 +1,5 @@
/*
- * FLAC (Free Lossless Audio Codec) decoder/demuxer common functions
+ * FLAC (Free Lossless Audio Codec) common stuff
* Copyright (c) 2008 Justin Ruggles
*
* This file is part of FFmpeg.
@@ -21,21 +21,19 @@
/**
* @file
- * FLAC (Free Lossless Audio Codec) decoder/demuxer common functions
+ * FLAC (Free Lossless Audio Codec) common stuff
*/
#ifndef AVCODEC_FLAC_H
#define AVCODEC_FLAC_H
-#include "avcodec.h"
-#include "bytestream.h"
-#include "get_bits.h"
+#include "libavutil/intreadwrite.h"
#define FLAC_STREAMINFO_SIZE 34
#define FLAC_MAX_CHANNELS 8
#define FLAC_MIN_BLOCKSIZE 16
#define FLAC_MAX_BLOCKSIZE 65535
-#define FLAC_MIN_FRAME_SIZE 11
+#define FLAC_MIN_FRAME_SIZE 10
enum {
FLAC_CHMODE_INDEPENDENT = 0,
@@ -55,84 +53,6 @@ enum {
FLAC_METADATA_TYPE_INVALID = 127
};
-enum FLACExtradataFormat {
- FLAC_EXTRADATA_FORMAT_STREAMINFO = 0,
- FLAC_EXTRADATA_FORMAT_FULL_HEADER = 1
-};
-
-#define FLACCOMMONINFO \
- int samplerate; /**< sample rate */\
- int channels; /**< number of channels */\
- int bps; /**< bits-per-sample */\
-
-/**
- * Data needed from the Streaminfo header for use by the raw FLAC demuxer
- * and/or the FLAC decoder.
- */
-#define FLACSTREAMINFO \
- FLACCOMMONINFO \
- int max_blocksize; /**< maximum block size, in samples */\
- int max_framesize; /**< maximum frame size, in bytes */\
- int64_t samples; /**< total number of samples */\
-
-typedef struct FLACStreaminfo {
- FLACSTREAMINFO
-} FLACStreaminfo;
-
-typedef struct FLACFrameInfo {
- FLACCOMMONINFO
- int blocksize; /**< block size of the frame */
- int ch_mode; /**< channel decorrelation mode */
- int64_t frame_or_sample_num; /**< frame number or sample number */
- int is_var_size; /**< specifies if the stream uses variable
- block sizes or a fixed block size;
- also determines the meaning of
- frame_or_sample_num */
-} FLACFrameInfo;
-
-/**
- * Parse the Streaminfo metadata block
- * @param[out] avctx codec context to set basic stream parameters
- * @param[out] s where parsed information is stored
- * @param[in] buffer pointer to start of 34-byte streaminfo data
- *
- * @return negative error code on faiure or >= 0 on success
- */
-int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
- const uint8_t *buffer);
-
-/**
- * Validate the FLAC extradata.
- * @param[in] avctx codec context containing the extradata.
- * @param[out] format extradata format.
- * @param[out] streaminfo_start pointer to start of 34-byte STREAMINFO data.
- * @return 1 if valid, 0 if not valid.
- */
-int ff_flac_is_extradata_valid(AVCodecContext *avctx,
- enum FLACExtradataFormat *format,
- uint8_t **streaminfo_start);
-
-/**
- * Calculate an estimate for the maximum frame size based on verbatim mode.
- * @param blocksize block size, in samples
- * @param ch number of channels
- * @param bps bits-per-sample
- */
-int ff_flac_get_max_frame_size(int blocksize, int ch, int bps);
-
-/**
- * Validate and decode a frame header.
- * @param avctx AVCodecContext to use as av_log() context
- * @param gb GetBitContext from which to read frame header
- * @param[out] fi frame information
- * @param log_level_offset log level offset. can be used to silence error messages.
- * @return non-zero on error, 0 if ok
- */
-int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
- FLACFrameInfo *fi, int log_level_offset);
-
-void ff_flac_set_channel_layout(AVCodecContext *avctx);
-
/**
* Parse the metadata block parameters from the header.
* @param[in] block_header header data, at least 4 bytes
@@ -143,13 +63,13 @@ void ff_flac_set_channel_layout(AVCodecContext *avctx);
static av_always_inline void flac_parse_block_header(const uint8_t *block_header,
int *last, int *type, int *size)
{
- int tmp = bytestream_get_byte(&block_header);
+ int tmp = *block_header;
if (last)
*last = tmp & 0x80;
if (type)
*type = tmp & 0x7F;
if (size)
- *size = bytestream_get_be24(&block_header);
+ *size = AV_RB24(block_header + 1);
}
#endif /* AVCODEC_FLAC_H */
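
The slimmed-down flac_parse_block_header() above reads the four header bytes directly: byte 0 carries the last-metadata-block flag and the block type, bytes 1-3 the block size as a big-endian 24-bit value (what AV_RB24() computes). A worked example with a made-up header:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint8_t hdr[4] = { 0x84, 0x00, 0x00, 0x28 };

    int last = hdr[0] & 0x80;                            /* last-metadata-block flag    */
    int type = hdr[0] & 0x7F;                            /* 4 = VORBIS_COMMENT          */
    int size = (hdr[1] << 16) | (hdr[2] << 8) | hdr[3];  /* same value AV_RB24() yields */

    printf("last=%d type=%d size=%d\n", !!last, type, size);  /* last=1 type=4 size=40 */
    return 0;
}
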
diff --git a/media/ffvpx/libavcodec/flac_parse.h b/media/ffvpx/libavcodec/flac_parse.h
new file mode 100644
index 0000000000..67a7320bea
--- /dev/null
+++ b/media/ffvpx/libavcodec/flac_parse.h
@@ -0,0 +1,89 @@
+/*
+ * FLAC (Free Lossless Audio Codec) decoder/parser common functions
+ * Copyright (c) 2008 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FLAC (Free Lossless Audio Codec) decoder/parser common functions
+ */
+
+#ifndef AVCODEC_FLAC_PARSE_H
+#define AVCODEC_FLAC_PARSE_H
+
+#include "avcodec.h"
+#include "get_bits.h"
+
+typedef struct FLACStreaminfo {
+ int samplerate; /**< sample rate */
+ int channels; /**< number of channels */
+ int bps; /**< bits-per-sample */
+ int max_blocksize; /**< maximum block size, in samples */
+ int max_framesize; /**< maximum frame size, in bytes */
+ int64_t samples; /**< total number of samples */
+} FLACStreaminfo;
+
+typedef struct FLACFrameInfo {
+ int samplerate; /**< sample rate */
+ int channels; /**< number of channels */
+ int bps; /**< bits-per-sample */
+ int blocksize; /**< block size of the frame */
+ int ch_mode; /**< channel decorrelation mode */
+ int64_t frame_or_sample_num; /**< frame number or sample number */
+ int is_var_size; /**< specifies if the stream uses variable
+ block sizes or a fixed block size;
+ also determines the meaning of
+ frame_or_sample_num */
+} FLACFrameInfo;
+
+/**
+ * Parse the Streaminfo metadata block
+ * @param[out] avctx codec context to set basic stream parameters
+ * @param[out] s where parsed information is stored
+ * @param[in] buffer pointer to start of 34-byte streaminfo data
+ *
+ * @return negative error code on failure or >= 0 on success
+ */
+int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
+ const uint8_t *buffer);
+
+/**
+ * Validate the FLAC extradata.
+ * @param[in] avctx codec context containing the extradata.
+ * @param[out] streaminfo_start pointer to start of 34-byte STREAMINFO data.
+ * @return 1 if valid, 0 if not valid.
+ */
+int ff_flac_is_extradata_valid(AVCodecContext *avctx,
+ uint8_t **streaminfo_start);
+
+/**
+ * Validate and decode a frame header.
+ * @param avctx AVCodecContext to use as av_log() context
+ * @param gb GetBitContext from which to read frame header
+ * @param[out] fi frame information
+ * @param log_level_offset log level offset. can be used to silence error messages.
+ * @return non-zero on error, 0 if ok
+ */
+int ff_flac_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
+ FLACFrameInfo *fi, int log_level_offset);
+
+void ff_flac_set_channel_layout(AVCodecContext *avctx, int channels);
+
+#endif /* AVCODEC_FLAC_PARSE_H */
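
For reference, the fields that ff_flac_parse_streaminfo() stores in FLACStreaminfo sit at fixed bit offsets inside the 34-byte STREAMINFO block. A simplified standalone reading of the same fields with plain shifts instead of GetBitContext; MiniStreaminfo and parse_streaminfo34() are illustrative names, and the trailing MD5 signature is ignored:

#include <stdint.h>

typedef struct MiniStreaminfo {
    int     max_blocksize;  /* bits  16..31,  16 bits     */
    int     max_framesize;  /* bits  56..79,  24 bits     */
    int     samplerate;     /* bits  80..99,  20 bits     */
    int     channels;       /* bits 100..102,  3 bits, +1 */
    int     bps;            /* bits 103..107,  5 bits, +1 */
    int64_t samples;        /* bits 108..143, 36 bits     */
} MiniStreaminfo;

void parse_streaminfo34(const uint8_t *b, MiniStreaminfo *si)
{
    si->max_blocksize = (b[2]  << 8)  |  b[3];
    si->max_framesize = (b[7]  << 16) | (b[8]  << 8) | b[9];
    si->samplerate    = (b[10] << 12) | (b[11] << 4) | (b[12] >> 4);
    si->channels      = ((b[12] >> 1) & 0x7) + 1;
    si->bps           = (((b[12] & 0x1) << 4) | (b[13] >> 4)) + 1;
    si->samples       = ((int64_t)(b[13] & 0xF) << 32) | ((int64_t)b[14] << 24) |
                        (b[15] << 16) | (b[16] << 8) | b[17];
}
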
diff --git a/media/ffvpx/libavcodec/flac_parser.c b/media/ffvpx/libavcodec/flac_parser.c
index db6765f34c..bd91cc1a05 100644
--- a/media/ffvpx/libavcodec/flac_parser.c
+++ b/media/ffvpx/libavcodec/flac_parser.c
@@ -34,10 +34,9 @@
#include "libavutil/attributes.h"
#include "libavutil/crc.h"
-#include "libavutil/fifo.h"
#include "bytestream.h"
#include "parser.h"
-#include "flac.h"
+#include "flac_parse.h"
/** maximum number of adjacent headers that compare CRCs against each other */
#define FLAC_MAX_SEQUENTIAL_HEADERS 4
@@ -55,12 +54,21 @@
/** largest possible size of flac header */
#define MAX_FRAME_HEADER_SIZE 16
-#define MAX_FRAME_VERIFY_SIZE (MAX_FRAME_HEADER_SIZE)
+#define MAX_FRAME_VERIFY_SIZE (MAX_FRAME_HEADER_SIZE + 1)
+
+typedef struct FifoBuffer {
+ uint8_t *buffer;
+ uint8_t *end;
+ uint8_t *rptr;
+ uint8_t *wptr;
+ int empty;
+} FifoBuffer;
typedef struct FLACHeaderMarker {
int offset; /**< byte offset from start of FLACParseContext->buffer */
- int *link_penalty; /**< pointer to array of local scores between this header
- and the one at a distance equal array position */
+ int link_penalty[FLAC_MAX_SEQUENTIAL_HEADERS]; /**< array of local scores
+ between this header and the one at a distance equal
+ array position */
int max_score; /**< maximum score found after checking each child that
has a valid CRC */
FLACFrameInfo fi; /**< decoded frame header info */
@@ -83,7 +91,7 @@ typedef struct FLACParseContext {
int nb_headers_buffered; /**< number of headers that are buffered */
int best_header_valid; /**< flag set when the parser returns junk;
if set return best_header next time */
- AVFifoBuffer *fifo_buf; /**< buffer to store all data until headers
+ FifoBuffer fifo_buf; /**< buffer to store all data until headers
can be verified */
int end_padded; /**< specifies if fifo_buf's end is padded */
uint8_t *wrap_buf; /**< general fifo read buffer when wrapped */
@@ -96,8 +104,46 @@ static int frame_header_is_valid(AVCodecContext *avctx, const uint8_t *buf,
FLACFrameInfo *fi)
{
GetBitContext gb;
- init_get_bits(&gb, buf, MAX_FRAME_HEADER_SIZE * 8);
- return !ff_flac_decode_frame_header(avctx, &gb, fi, 127);
+ uint8_t subframe_type;
+
+ // header plus one byte from first subframe
+ init_get_bits(&gb, buf, MAX_FRAME_VERIFY_SIZE * 8);
+ if (ff_flac_decode_frame_header(avctx, &gb, fi, 127)) {
+ return 0;
+ }
+ // subframe zero bit
+ if (get_bits1(&gb) != 0) {
+ return 0;
+ }
+ // subframe type
+ // 000000 : SUBFRAME_CONSTANT
+ // 000001 : SUBFRAME_VERBATIM
+ // 00001x : reserved
+ // 0001xx : reserved
+ // 001xxx : if(xxx <= 4) SUBFRAME_FIXED, xxx=order ; else reserved
+ // 01xxxx : reserved
+ // 1xxxxx : SUBFRAME_LPC, xxxxx=order-1
+ subframe_type = get_bits(&gb, 6);
+ if (!(subframe_type == 0 ||
+ subframe_type == 1 ||
+ ((subframe_type >= 8) && (subframe_type <= 12)) ||
+ (subframe_type >= 32))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static size_t flac_fifo_size(const FifoBuffer *f)
+{
+ if (f->wptr <= f->rptr && !f->empty)
+ return (f->wptr - f->buffer) + (f->end - f->rptr);
+ return f->wptr - f->rptr;
+}
+
+static size_t flac_fifo_space(const FifoBuffer *f)
+{
+ return f->end - f->buffer - flac_fifo_size(f);
}
/**
@@ -113,10 +159,10 @@ static int frame_header_is_valid(AVCodecContext *avctx, const uint8_t *buf,
* This function is based on av_fifo_generic_read, which is why there is a comment
* about a memory barrier for SMP.
*/
-static uint8_t* flac_fifo_read_wrap(FLACParseContext *fpc, int offset, int len,
- uint8_t** wrap_buf, int* allocated_size)
+static uint8_t *flac_fifo_read_wrap(FLACParseContext *fpc, int offset, int len,
+ uint8_t **wrap_buf, int *allocated_size)
{
- AVFifoBuffer *f = fpc->fifo_buf;
+ FifoBuffer *f = &fpc->fifo_buf;
uint8_t *start = f->rptr + offset;
uint8_t *tmp_buf;
@@ -153,9 +199,8 @@ static uint8_t* flac_fifo_read_wrap(FLACParseContext *fpc, int offset, int len,
* A second call to flac_fifo_read (with new offset and len) should be called
* to get the post-wrap buf if the returned len is less than the requested.
**/
-static uint8_t* flac_fifo_read(FLACParseContext *fpc, int offset, int *len)
+static uint8_t *flac_fifo_read(FifoBuffer *f, int offset, int *len)
{
- AVFifoBuffer *f = fpc->fifo_buf;
uint8_t *start = f->rptr + offset;
if (start >= f->end)
@@ -164,6 +209,108 @@ static uint8_t* flac_fifo_read(FLACParseContext *fpc, int offset, int *len)
return start;
}
+static int flac_fifo_grow(FifoBuffer *f, size_t inc)
+{
+ size_t size_old = f->end - f->buffer;
+ size_t offset_r = f->rptr - f->buffer;
+ size_t offset_w = f->wptr - f->buffer;
+ size_t size_new;
+
+ uint8_t *tmp;
+
+ if (size_old > SIZE_MAX - inc)
+ return AVERROR(EINVAL);
+ size_new = size_old + inc;
+
+ tmp = av_realloc(f->buffer, size_new);
+ if (!tmp)
+ return AVERROR(ENOMEM);
+
+ // move the data from the beginning of the ring buffer
+ // to the newly allocated space
+ if (offset_w <= offset_r && !f->empty) {
+ const size_t copy = FFMIN(inc, offset_w);
+ memcpy(tmp + size_old, tmp, copy);
+ if (copy < offset_w) {
+ memmove(tmp, tmp + copy, offset_w - copy);
+ offset_w -= copy;
+ } else
+ offset_w = size_old + copy;
+ }
+
+ f->buffer = tmp;
+ f->end = f->buffer + size_new;
+ f->rptr = f->buffer + offset_r;
+ f->wptr = f->buffer + offset_w;
+
+ return 0;
+}
+
+static int flac_fifo_write(FifoBuffer *f, const uint8_t *src, size_t size)
+{
+ uint8_t *wptr;
+
+ if (flac_fifo_space(f) < size) {
+ int ret = flac_fifo_grow(f, FFMAX(flac_fifo_size(f), size));
+ if (ret < 0)
+ return ret;
+ }
+
+ if (size)
+ f->empty = 0;
+
+ wptr = f->wptr;
+ do {
+ size_t len = FFMIN(f->end - wptr, size);
+ memcpy(wptr, src, len);
+ src += len;
+ wptr += len;
+ if (wptr >= f->end)
+ wptr = f->buffer;
+ size -= len;
+ } while (size > 0);
+
+ f->wptr = wptr;
+
+ return 0;
+}
+
+static void flac_fifo_drain(FifoBuffer *f, size_t size)
+{
+ size_t size_cur = flac_fifo_size(f);
+
+ av_assert0(size_cur >= size);
+ if (size_cur == size)
+ f->empty = 1;
+
+ f->rptr += size;
+ if (f->rptr >= f->end)
+ f->rptr -= f->end - f->buffer;
+}
+
+static int flac_fifo_alloc(FifoBuffer *f, size_t size)
+{
+ memset(f, 0, sizeof(*f));
+
+ f->buffer = av_realloc(NULL, size);
+ if (!f->buffer)
+ return AVERROR(ENOMEM);
+
+ f->wptr = f->buffer;
+ f->rptr = f->buffer;
+ f->end = f->buffer + size;
+
+ f->empty = 1;
+
+ return 0;
+}
+
+static void flac_fifo_free(FifoBuffer *f)
+{
+ av_freep(&f->buffer);
+ memset(f, 0, sizeof(*f));
+}
+
static int find_headers_search_validate(FLACParseContext *fpc, int offset)
{
FLACFrameInfo fi;
@@ -189,16 +336,8 @@ static int find_headers_search_validate(FLACParseContext *fpc, int offset)
"couldn't allocate FLACHeaderMarker\n");
return AVERROR(ENOMEM);
}
- (*end_handle)->fi = fi;
- (*end_handle)->offset = offset;
- (*end_handle)->link_penalty = av_malloc(sizeof(int) *
- FLAC_MAX_SEQUENTIAL_HEADERS);
- if (!(*end_handle)->link_penalty) {
- av_freep(end_handle);
- av_log(fpc->avctx, AV_LOG_ERROR,
- "couldn't allocate link_penalty\n");
- return AVERROR(ENOMEM);
- }
+ (*end_handle)->fi = fi;
+ (*end_handle)->offset = offset;
for (i = 0; i < FLAC_MAX_SEQUENTIAL_HEADERS; i++)
(*end_handle)->link_penalty[i] = FLAC_HEADER_NOT_PENALIZED_YET;
@@ -209,9 +348,8 @@ static int find_headers_search_validate(FLACParseContext *fpc, int offset)
return size;
}
-static int find_headers_search(FLACParseContext *fpc, uint8_t *buf, int buf_size,
- int search_start)
-
+static int find_headers_search(FLACParseContext *fpc, uint8_t *buf,
+ int buf_size, int search_start)
{
int size = 0, mod_offset = (buf_size - 1) % 4, i, j;
uint32_t x;
@@ -224,7 +362,7 @@ static int find_headers_search(FLACParseContext *fpc, uint8_t *buf, int buf_size
}
for (; i < buf_size - 1; i += 4) {
- x = AV_RB32(buf + i);
+ x = AV_RN32(buf + i);
if (((x & ~(x + 0x01010101)) & 0x80808080)) {
for (j = 0; j < 4; j++) {
if ((AV_RB16(buf + i + j) & 0xFFFE) == 0xFFF8) {
@@ -245,9 +383,9 @@ static int find_new_headers(FLACParseContext *fpc, int search_start)
fpc->nb_headers_found = 0;
/* Search for a new header of at most 16 bytes. */
- search_end = av_fifo_size(fpc->fifo_buf) - (MAX_FRAME_HEADER_SIZE - 1);
+ search_end = flac_fifo_size(&fpc->fifo_buf) - (MAX_FRAME_HEADER_SIZE - 1);
read_len = search_end - search_start + 1;
- buf = flac_fifo_read(fpc, search_start, &read_len);
+ buf = flac_fifo_read(&fpc->fifo_buf, search_start, &read_len);
size = find_headers_search(fpc, buf, read_len, search_start);
search_start += read_len - 1;
@@ -256,10 +394,10 @@ static int find_new_headers(FLACParseContext *fpc, int search_start)
uint8_t wrap[2];
wrap[0] = buf[read_len - 1];
- read_len = search_end - search_start + 1;
-
/* search_start + 1 is the post-wrap offset in the fifo. */
- buf = flac_fifo_read(fpc, search_start + 1, &read_len);
+ read_len = search_end - (search_start + 1) + 1;
+
+ buf = flac_fifo_read(&fpc->fifo_buf, search_start + 1, &read_len);
wrap[1] = buf[0];
if ((AV_RB16(wrap) & 0xFFFE) == 0xFFF8) {
@@ -317,7 +455,7 @@ static int check_header_mismatch(FLACParseContext *fpc,
int log_level_offset)
{
FLACFrameInfo *header_fi = &header->fi, *child_fi = &child->fi;
- int deduction, deduction_expected = 0, i;
+ int check_crc, deduction, deduction_expected = 0, i;
deduction = check_header_fi_mismatch(fpc, header_fi, child_fi,
log_level_offset);
/* Check sample and frame numbers. */
@@ -326,7 +464,7 @@ static int check_header_mismatch(FLACParseContext *fpc,
(child_fi->frame_or_sample_num
!= header_fi->frame_or_sample_num + 1)) {
FLACHeaderMarker *curr;
- int expected_frame_num, expected_sample_num;
+ int64_t expected_frame_num, expected_sample_num;
/* If there are frames in the middle we expect this deduction,
as they are probably valid and this one follows it */
@@ -353,8 +491,22 @@ static int check_header_mismatch(FLACParseContext *fpc,
"sample/frame number mismatch in adjacent frames\n");
}
+ if (fpc->last_fi.is_var_size == header_fi->is_var_size) {
+ if (fpc->last_fi.is_var_size &&
+ fpc->last_fi.frame_or_sample_num + fpc->last_fi.blocksize == header_fi->frame_or_sample_num) {
+ check_crc = 0;
+ } else if (!fpc->last_fi.is_var_size &&
+ fpc->last_fi.frame_or_sample_num + 1 == header_fi->frame_or_sample_num) {
+ check_crc = 0;
+ } else {
+ check_crc = !deduction && !deduction_expected;
+ }
+ } else {
+ check_crc = !deduction && !deduction_expected;
+ }
+
/* If we have suspicious headers, check the CRC between them */
- if (deduction && !deduction_expected) {
+ if (check_crc || (deduction && !deduction_expected)) {
FLACHeaderMarker *curr;
int read_len;
uint8_t *buf;
@@ -388,12 +540,12 @@ static int check_header_mismatch(FLACParseContext *fpc,
}
read_len = end->offset - start->offset;
- buf = flac_fifo_read(fpc, start->offset, &read_len);
+ buf = flac_fifo_read(&fpc->fifo_buf, start->offset, &read_len);
crc = av_crc(av_crc_get_table(AV_CRC_16_ANSI), 0, buf, read_len);
read_len = (end->offset - start->offset) - read_len;
if (read_len) {
- buf = flac_fifo_read(fpc, end->offset - read_len, &read_len);
+ buf = flac_fifo_read(&fpc->fifo_buf, end->offset - read_len, &read_len);
crc = av_crc(av_crc_get_table(AV_CRC_16_ANSI), crc, buf, read_len);
}
}
@@ -462,7 +614,7 @@ static int score_header(FLACParseContext *fpc, FLACHeaderMarker *header)
static void score_sequences(FLACParseContext *fpc)
{
FLACHeaderMarker *curr;
- int best_score = 0;//FLAC_HEADER_NOT_SCORED_YET;
+ int best_score = FLAC_HEADER_NOT_SCORED_YET;
/* First pass to clear all old scores. */
for (curr = fpc->headers; curr; curr = curr->next)
curr->max_score = FLAC_HEADER_NOT_SCORED_YET;
@@ -476,13 +628,13 @@ static void score_sequences(FLACParseContext *fpc)
}
}
-static int get_best_header(FLACParseContext* fpc, const uint8_t **poutbuf,
+static int get_best_header(FLACParseContext *fpc, const uint8_t **poutbuf,
int *poutbuf_size)
{
FLACHeaderMarker *header = fpc->best_header;
FLACHeaderMarker *child = header->best_child;
if (!child) {
- *poutbuf_size = av_fifo_size(fpc->fifo_buf) - header->offset;
+ *poutbuf_size = flac_fifo_size(&fpc->fifo_buf) - header->offset;
} else {
*poutbuf_size = child->offset - header->offset;
@@ -490,19 +642,15 @@ static int get_best_header(FLACParseContext* fpc, const uint8_t **poutbuf,
check_header_mismatch(fpc, header, child, 0);
}
- if (header->fi.channels != fpc->avctx->channels ||
- !fpc->avctx->channel_layout) {
- fpc->avctx->channels = header->fi.channels;
- ff_flac_set_channel_layout(fpc->avctx);
- }
+ ff_flac_set_channel_layout(fpc->avctx, header->fi.channels);
+
fpc->avctx->sample_rate = header->fi.samplerate;
fpc->pc->duration = header->fi.blocksize;
*poutbuf = flac_fifo_read_wrap(fpc, header->offset, *poutbuf_size,
&fpc->wrap_buf,
&fpc->wrap_buf_allocated_size);
-
- if (fpc->pc->flags & PARSER_FLAG_USE_CODEC_TS){
+ if (fpc->pc->flags & PARSER_FLAG_USE_CODEC_TS) {
if (header->fi.is_var_size)
fpc->pc->pts = header->fi.frame_or_sample_num;
else if (header->best_child)
@@ -515,8 +663,11 @@ static int get_best_header(FLACParseContext* fpc, const uint8_t **poutbuf,
/* Return the negative overread index so the client can compute pos.
This should be the amount overread to the beginning of the child */
- if (child)
- return child->offset - av_fifo_size(fpc->fifo_buf);
+ if (child) {
+ int64_t offset = child->offset - flac_fifo_size(&fpc->fifo_buf);
+ if (offset > -(1 << 28))
+ return offset;
+ }
return 0;
}
@@ -536,7 +687,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
s->duration = fi.blocksize;
if (!avctx->sample_rate)
avctx->sample_rate = fi.samplerate;
- if (fpc->pc->flags & PARSER_FLAG_USE_CODEC_TS){
+ if (fpc->pc->flags & PARSER_FLAG_USE_CODEC_TS) {
fpc->pc->pts = fi.frame_or_sample_num;
if (!fi.is_var_size)
fpc->pc->pts *= fi.blocksize;
@@ -548,7 +699,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
}
fpc->avctx = avctx;
- if (fpc->best_header_valid)
+ if (fpc->best_header_valid && fpc->nb_headers_buffered >= FLAC_MIN_HEADERS)
return get_best_header(fpc, poutbuf, poutbuf_size);
/* If a best_header was found last call remove it with the buffer data. */
@@ -564,18 +715,16 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
curr->max_score, curr->offset, curr->next->offset);
}
temp = curr->next;
- av_freep(&curr->link_penalty);
av_free(curr);
fpc->nb_headers_buffered--;
}
/* Release returned data from ring buffer. */
- av_fifo_drain(fpc->fifo_buf, best_child->offset);
+ flac_fifo_drain(&fpc->fifo_buf, best_child->offset);
/* Fix the offset for the headers remaining to match the new buffer. */
for (curr = best_child->next; curr; curr = curr->next)
curr->offset -= best_child->offset;
- fpc->nb_headers_buffered--;
best_child->offset = 0;
fpc->headers = best_child;
if (fpc->nb_headers_buffered >= FLAC_MIN_HEADERS) {
@@ -589,30 +738,26 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
for (curr = fpc->headers; curr != fpc->best_header; curr = temp) {
temp = curr->next;
- av_freep(&curr->link_penalty);
av_free(curr);
fpc->nb_headers_buffered--;
}
fpc->headers = fpc->best_header->next;
- av_freep(&fpc->best_header->link_penalty);
av_freep(&fpc->best_header);
fpc->nb_headers_buffered--;
}
/* Find and score new headers. */
- /* buf_size is to zero when padding, so check for this since we do */
+ /* buf_size is zero when flushing, so check for this since we do */
/* not want to try to read more input once we have found the end. */
- /* Note that as (non-modified) parameters, buf can be non-NULL, */
- /* while buf_size is 0. */
- while ((buf && buf_size && read_end < buf + buf_size &&
+ /* Also note that buf can't be NULL. */
+ while ((buf_size && read_end < buf + buf_size &&
fpc->nb_headers_buffered < FLAC_MIN_HEADERS)
- || ((!buf || !buf_size) && !fpc->end_padded)) {
- int start_offset;
+ || (!buf_size && !fpc->end_padded)) {
+ int start_offset, ret;
/* Pad the end once if EOF, to check the final region for headers. */
- if (!buf || !buf_size) {
- fpc->end_padded = 1;
- buf_size = MAX_FRAME_HEADER_SIZE;
+ if (!buf_size) {
+ fpc->end_padded = 1;
read_end = read_start + MAX_FRAME_HEADER_SIZE;
} else {
/* The maximum read size is the upper-bound of what the parser
@@ -622,8 +767,8 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
nb_desired * FLAC_AVG_FRAME_SIZE);
}
- if (!av_fifo_space(fpc->fifo_buf) &&
- av_fifo_size(fpc->fifo_buf) / FLAC_AVG_FRAME_SIZE >
+ if (!flac_fifo_space(&fpc->fifo_buf) &&
+ flac_fifo_size(&fpc->fifo_buf) / FLAC_AVG_FRAME_SIZE >
fpc->nb_headers_buffered * 20) {
/* There is less than one valid flac header buffered for 20 headers
* buffered. Therefore the fifo is most likely filled with invalid
@@ -632,24 +777,20 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
}
/* Fill the buffer. */
- if ( av_fifo_space(fpc->fifo_buf) < read_end - read_start
- && av_fifo_realloc2(fpc->fifo_buf, (read_end - read_start) + 2*av_fifo_size(fpc->fifo_buf)) < 0) {
- av_log(avctx, AV_LOG_ERROR,
- "couldn't reallocate buffer of size %"PTRDIFF_SPECIFIER"\n",
- (read_end - read_start) + av_fifo_size(fpc->fifo_buf));
- goto handle_error;
- }
-
- if (buf && buf_size) {
- av_fifo_generic_write(fpc->fifo_buf, (void*) read_start,
- read_end - read_start, NULL);
+ if (buf_size) {
+ ret = flac_fifo_write(&fpc->fifo_buf, read_start,
+ read_end - read_start);
} else {
int8_t pad[MAX_FRAME_HEADER_SIZE] = { 0 };
- av_fifo_generic_write(fpc->fifo_buf, pad, sizeof(pad), NULL);
+ ret = flac_fifo_write(&fpc->fifo_buf, pad, sizeof(pad));
+ }
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error buffering data\n");
+ goto handle_error;
}
/* Tag headers and update sequences. */
- start_offset = av_fifo_size(fpc->fifo_buf) -
+ start_offset = flac_fifo_size(&fpc->fifo_buf) -
((read_end - read_start) + (MAX_FRAME_HEADER_SIZE - 1));
start_offset = FFMAX(0, start_offset);
nb_headers = find_new_headers(fpc, start_offset);
@@ -663,7 +804,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
fpc->nb_headers_buffered = nb_headers;
/* Wait till FLAC_MIN_HEADERS to output a valid frame. */
if (!fpc->end_padded && fpc->nb_headers_buffered < FLAC_MIN_HEADERS) {
- if (buf && read_end < buf + buf_size) {
+ if (read_end < buf + buf_size) {
read_start = read_end;
continue;
} else {
@@ -677,15 +818,15 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
/* restore the state pre-padding */
if (fpc->end_padded) {
- int warp = fpc->fifo_buf->wptr - fpc->fifo_buf->buffer < MAX_FRAME_HEADER_SIZE;
+ int empty = flac_fifo_size(&fpc->fifo_buf) == MAX_FRAME_HEADER_SIZE;
+ int warp = fpc->fifo_buf.wptr - fpc->fifo_buf.buffer < MAX_FRAME_HEADER_SIZE;
/* HACK: drain the tail of the fifo */
- fpc->fifo_buf->wptr -= MAX_FRAME_HEADER_SIZE;
- fpc->fifo_buf->wndx -= MAX_FRAME_HEADER_SIZE;
+ fpc->fifo_buf.wptr -= MAX_FRAME_HEADER_SIZE;
if (warp) {
- fpc->fifo_buf->wptr += fpc->fifo_buf->end -
- fpc->fifo_buf->buffer;
+ fpc->fifo_buf.wptr += fpc->fifo_buf.end -
+ fpc->fifo_buf.buffer;
}
- buf_size = 0;
+ fpc->fifo_buf.empty = empty;
read_start = read_end = NULL;
}
}
@@ -698,7 +839,7 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
if (fpc->best_header && fpc->best_header->max_score <= 0) {
// Only accept a bad header if there is no other option to continue
- if (!buf_size || !buf || read_end != buf || fpc->nb_headers_buffered < FLAC_MIN_HEADERS)
+ if (!buf_size || read_end != buf || fpc->nb_headers_buffered < FLAC_MIN_HEADERS)
fpc->best_header = NULL;
}
@@ -710,13 +851,13 @@ static int flac_parse(AVCodecParserContext *s, AVCodecContext *avctx,
fpc->best_header->offset);
/* Set duration to 0. It is unknown or invalid in a junk frame. */
- s->duration = 0;
- *poutbuf_size = fpc->best_header->offset;
- *poutbuf = flac_fifo_read_wrap(fpc, 0, *poutbuf_size,
- &fpc->wrap_buf,
- &fpc->wrap_buf_allocated_size);
+ s->duration = 0;
+ *poutbuf_size = fpc->best_header->offset;
+ *poutbuf = flac_fifo_read_wrap(fpc, 0, *poutbuf_size,
+ &fpc->wrap_buf,
+ &fpc->wrap_buf_allocated_size);
return buf_size ? (read_end - buf) : (fpc->best_header->offset -
- av_fifo_size(fpc->fifo_buf));
+ flac_fifo_size(&fpc->fifo_buf));
}
if (!buf_size)
return get_best_header(fpc, poutbuf, poutbuf_size);
@@ -731,11 +872,13 @@ handle_error:
static av_cold int flac_parse_init(AVCodecParserContext *c)
{
FLACParseContext *fpc = c->priv_data;
+ int ret;
+
fpc->pc = c;
/* There will generally be FLAC_MIN_HEADERS buffered in the fifo before
it drains. This is allocated early to avoid slow reallocation. */
- fpc->fifo_buf = av_fifo_alloc_array(FLAC_MIN_HEADERS + 3, FLAC_AVG_FRAME_SIZE);
- if (!fpc->fifo_buf) {
+ ret = flac_fifo_alloc(&fpc->fifo_buf, (FLAC_MIN_HEADERS + 3) * FLAC_AVG_FRAME_SIZE);
+ if (ret < 0) {
av_log(fpc->avctx, AV_LOG_ERROR,
"couldn't allocate fifo_buf\n");
return AVERROR(ENOMEM);
@@ -750,15 +893,15 @@ static void flac_parse_close(AVCodecParserContext *c)
while (curr) {
temp = curr->next;
- av_freep(&curr->link_penalty);
av_free(curr);
curr = temp;
}
- av_fifo_freep(&fpc->fifo_buf);
+ fpc->headers = NULL;
+ flac_fifo_free(&fpc->fifo_buf);
av_freep(&fpc->wrap_buf);
}
-AVCodecParser ff_flac_parser = {
+const AVCodecParser ff_flac_parser = {
.codec_ids = { AV_CODEC_ID_FLAC },
.priv_data_size = sizeof(FLACParseContext),
.parser_init = flac_parse_init,
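
The AVFifoBuffer replacement in flac_parser.c has to tell an empty ring apart from a completely full one, since both leave rptr == wptr; that is the job of the explicit empty flag consulted by flac_fifo_size() and flac_fifo_space(). A standalone copy of the size rule, with illustrative MiniFifo names:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef struct MiniFifo {
    uint8_t *buffer, *end, *rptr, *wptr;
    int empty;
} MiniFifo;

static size_t mini_fifo_size(const MiniFifo *f)
{
    if (f->wptr <= f->rptr && !f->empty)
        return (f->wptr - f->buffer) + (f->end - f->rptr);  /* wrapped or full */
    return f->wptr - f->rptr;                               /* linear or empty */
}

int main(void)
{
    uint8_t buf[8];
    MiniFifo f = { buf, buf + 8, buf, buf, 1 };

    printf("empty: %zu\n", mini_fifo_size(&f));  /* rptr == wptr, empty flag set -> 0 */
    f.empty = 0;                                 /* same pointers, but marked as full */
    printf("full : %zu\n", mini_fifo_size(&f));  /* -> 8 */
    return 0;
}
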
diff --git a/media/ffvpx/libavcodec/flacdata.c b/media/ffvpx/libavcodec/flacdata.c
index 1954f32d32..d96e3e0966 100644
--- a/media/ffvpx/libavcodec/flacdata.c
+++ b/media/ffvpx/libavcodec/flacdata.c
@@ -19,7 +19,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "internal.h"
+#include "flacdata.h"
const int ff_flac_sample_rate_table[16] =
{ 0,
diff --git a/media/ffvpx/libavcodec/flacdata.h b/media/ffvpx/libavcodec/flacdata.h
index e2c1e5d7f2..ef21840777 100644
--- a/media/ffvpx/libavcodec/flacdata.h
+++ b/media/ffvpx/libavcodec/flacdata.h
@@ -22,7 +22,7 @@
#ifndef AVCODEC_FLACDATA_H
#define AVCODEC_FLACDATA_H
-#include "internal.h"
+#include <stdint.h>
extern const int ff_flac_sample_rate_table[16];
diff --git a/media/ffvpx/libavcodec/flacdec.c b/media/ffvpx/libavcodec/flacdec.c
index 8de8ebd80e..cc778a8dff 100644
--- a/media/ffvpx/libavcodec/flacdec.c
+++ b/media/ffvpx/libavcodec/flacdec.c
@@ -37,20 +37,21 @@
#include "libavutil/crc.h"
#include "libavutil/opt.h"
#include "avcodec.h"
-#include "internal.h"
+#include "codec_internal.h"
#include "get_bits.h"
#include "bytestream.h"
#include "golomb.h"
#include "flac.h"
#include "flacdata.h"
#include "flacdsp.h"
+#include "flac_parse.h"
#include "thread.h"
#include "unary.h"
typedef struct FLACContext {
AVClass *class;
- struct FLACStreaminfo flac_stream_info;
+ FLACStreaminfo stream_info;
AVCodecContext *avctx; ///< parent AVCodecContext
GetBitContext gb; ///< GetBitContext initialized to start at the current frame
@@ -63,6 +64,9 @@ typedef struct FLACContext {
int32_t *decoded[FLAC_MAX_CHANNELS]; ///< decoded samples
uint8_t *decoded_buffer;
unsigned int decoded_buffer_size;
+ int64_t *decoded_33bps; ///< decoded samples for a 33 bps subframe
+ uint8_t *decoded_buffer_33bps;
+ unsigned int decoded_buffer_size_33bps;
int buggy_lpc; ///< use workaround for old lavc encoded files
FLACDSPContext dsp;
@@ -73,7 +77,7 @@ static int allocate_buffers(FLACContext *s);
static void flac_set_bps(FLACContext *s)
{
enum AVSampleFormat req = s->avctx->request_sample_fmt;
- int need32 = s->flac_stream_info.bps > 16;
+ int need32 = s->stream_info.bps > 16;
int want32 = av_get_bytes_per_sample(req) > 2;
int planar = av_sample_fmt_is_planar(req);
@@ -82,19 +86,18 @@ static void flac_set_bps(FLACContext *s)
s->avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
else
s->avctx->sample_fmt = AV_SAMPLE_FMT_S32;
- s->sample_shift = 32 - s->flac_stream_info.bps;
+ s->sample_shift = 32 - s->stream_info.bps;
} else {
if (planar)
s->avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
else
s->avctx->sample_fmt = AV_SAMPLE_FMT_S16;
- s->sample_shift = 16 - s->flac_stream_info.bps;
+ s->sample_shift = 16 - s->stream_info.bps;
}
}
static av_cold int flac_decode_init(AVCodecContext *avctx)
{
- enum FLACExtradataFormat format;
uint8_t *streaminfo;
int ret;
FLACContext *s = avctx->priv_data;
@@ -105,11 +108,11 @@ static av_cold int flac_decode_init(AVCodecContext *avctx)
if (!avctx->extradata)
return 0;
- if (!ff_flac_is_extradata_valid(avctx, &format, &streaminfo))
+ if (!ff_flac_is_extradata_valid(avctx, &streaminfo))
return AVERROR_INVALIDDATA;
/* initialize based on the demuxer-supplied streamdata header */
- ret = ff_flac_parse_streaminfo(avctx, &s->flac_stream_info, streaminfo);
+ ret = ff_flac_parse_streaminfo(avctx, &s->stream_info, streaminfo);
if (ret < 0)
return ret;
ret = allocate_buffers(s);
@@ -117,7 +120,7 @@ static av_cold int flac_decode_init(AVCodecContext *avctx)
return ret;
flac_set_bps(s);
ff_flacdsp_init(&s->dsp, avctx->sample_fmt,
- s->flac_stream_info.channels, s->flac_stream_info.bps);
+ s->stream_info.channels);
s->got_streaminfo = 1;
return 0;
@@ -137,10 +140,10 @@ static int allocate_buffers(FLACContext *s)
int buf_size;
int ret;
- av_assert0(s->flac_stream_info.max_blocksize);
+ av_assert0(s->stream_info.max_blocksize);
- buf_size = av_samples_get_buffer_size(NULL, s->flac_stream_info.channels,
- s->flac_stream_info.max_blocksize,
+ buf_size = av_samples_get_buffer_size(NULL, s->stream_info.channels,
+ s->stream_info.max_blocksize,
AV_SAMPLE_FMT_S32P, 0);
if (buf_size < 0)
return buf_size;
@@ -151,9 +154,27 @@ static int allocate_buffers(FLACContext *s)
ret = av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
s->decoded_buffer,
- s->flac_stream_info.channels,
- s->flac_stream_info.max_blocksize,
+ s->stream_info.channels,
+ s->stream_info.max_blocksize,
AV_SAMPLE_FMT_S32P, 0);
+ if (ret >= 0 && s->stream_info.bps == 32 && s->stream_info.channels == 2) {
+ buf_size = av_samples_get_buffer_size(NULL, 1,
+ s->stream_info.max_blocksize,
+ AV_SAMPLE_FMT_S64P, 0);
+ if (buf_size < 0)
+ return buf_size;
+
+ av_fast_malloc(&s->decoded_buffer_33bps, &s->decoded_buffer_size_33bps, buf_size);
+ if (!s->decoded_buffer_33bps)
+ return AVERROR(ENOMEM);
+
+ ret = av_samples_fill_arrays((uint8_t **)&s->decoded_33bps, NULL,
+ s->decoded_buffer_33bps,
+ 1,
+ s->stream_info.max_blocksize,
+ AV_SAMPLE_FMT_S64P, 0);
+
+ }
return ret < 0 ? ret : 0;
}
@@ -177,7 +198,7 @@ static int parse_streaminfo(FLACContext *s, const uint8_t *buf, int buf_size)
metadata_size != FLAC_STREAMINFO_SIZE) {
return AVERROR_INVALIDDATA;
}
- ret = ff_flac_parse_streaminfo(s->avctx, &s->flac_stream_info, &buf[8]);
+ ret = ff_flac_parse_streaminfo(s->avctx, &s->stream_info, &buf[8]);
if (ret < 0)
return ret;
ret = allocate_buffers(s);
@@ -185,7 +206,7 @@ static int parse_streaminfo(FLACContext *s, const uint8_t *buf, int buf_size)
return ret;
flac_set_bps(s);
ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt,
- s->flac_stream_info.channels, s->flac_stream_info.bps);
+ s->stream_info.channels);
s->got_streaminfo = 1;
return 0;
@@ -260,7 +281,7 @@ static int decode_residuals(FLACContext *s, int32_t *decoded, int pred_order)
for (; i < samples; i++)
*decoded++ = get_sbits_long(&gb, tmp);
} else {
- int real_limit = tmp ? (INT_MAX >> tmp) + 2 : INT_MAX;
+ int real_limit = (tmp > 1) ? (INT_MAX >> (tmp - 1)) + 2 : INT_MAX;
for (; i < samples; i++) {
int v = get_sr_golomb_flac(&gb, tmp, real_limit, 1);
if (v == 0x80000000){
@@ -331,6 +352,62 @@ static int decode_subframe_fixed(FLACContext *s, int32_t *decoded,
return 0;
}
+#define DECODER_SUBFRAME_FIXED_WIDE(residual) { \
+ const int blocksize = s->blocksize; \
+ int ret; \
+ \
+ if ((ret = decode_residuals(s, residual, pred_order)) < 0) \
+ return ret; \
+ \
+ switch (pred_order) { \
+ case 0: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = residual[i]; \
+ break; \
+ case 1: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + (int64_t)decoded[i-1];\
+ break; \
+ case 2: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + 2*(int64_t)decoded[i-1] - (int64_t)decoded[i-2]; \
+ break; \
+ case 3: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + 3*(int64_t)decoded[i-1] - 3*(int64_t)decoded[i-2] + (int64_t)decoded[i-3]; \
+ break; \
+ case 4: \
+ for (int i = pred_order; i < blocksize; i++) \
+ decoded[i] = (int64_t)residual[i] + 4*(int64_t)decoded[i-1] - 6*(int64_t)decoded[i-2] + 4*(int64_t)decoded[i-3] - (int64_t)decoded[i-4]; \
+ break; \
+ default: \
+ av_log(s->avctx, AV_LOG_ERROR, "illegal pred order %d\n", pred_order); \
+ return AVERROR_INVALIDDATA; \
+ } \
+ return 0; \
+}
+
+static int decode_subframe_fixed_wide(FLACContext *s, int32_t *decoded,
+ int pred_order, int bps)
+{
+ /* warm up samples */
+ for (int i = 0; i < pred_order; i++) {
+ decoded[i] = get_sbits_long(&s->gb, bps);
+ }
+ DECODER_SUBFRAME_FIXED_WIDE(decoded);
+}
+
+
+static int decode_subframe_fixed_33bps(FLACContext *s, int64_t *decoded,
+ int32_t *residual, int pred_order)
+{
+    /* warm up samples */
+    for (int i = 0; i < pred_order; i++) {
+        decoded[i] = get_sbits64(&s->gb, 33);
+    }
+ DECODER_SUBFRAME_FIXED_WIDE(residual);
+}
+
static void lpc_analyze_remodulate(SUINT32 *decoded, const int coeffs[32],
int order, int qlevel, int len, int bps)
{
@@ -389,25 +466,66 @@ static int decode_subframe_lpc(FLACContext *s, int32_t *decoded, int pred_order,
if ((ret = decode_residuals(s, decoded, pred_order)) < 0)
return ret;
- if ( ( s->buggy_lpc && s->flac_stream_info.bps <= 16)
+ if ( ( s->buggy_lpc && s->stream_info.bps <= 16)
|| ( !s->buggy_lpc && bps <= 16
&& bps + coeff_prec + av_log2(pred_order) <= 32)) {
s->dsp.lpc16(decoded, coeffs, pred_order, qlevel, s->blocksize);
} else {
s->dsp.lpc32(decoded, coeffs, pred_order, qlevel, s->blocksize);
- if (s->flac_stream_info.bps <= 16)
+ if (s->stream_info.bps <= 16)
lpc_analyze_remodulate(decoded, coeffs, pred_order, qlevel, s->blocksize, bps);
}
return 0;
}
+static int decode_subframe_lpc_33bps(FLACContext *s, int64_t *decoded,
+ int32_t *residual, int pred_order)
+{
+ int i, j, ret;
+ int coeff_prec, qlevel;
+ int coeffs[32];
+
+ /* warm up samples */
+ for (i = 0; i < pred_order; i++) {
+ decoded[i] = get_sbits64(&s->gb, 33);
+ }
+
+ coeff_prec = get_bits(&s->gb, 4) + 1;
+ if (coeff_prec == 16) {
+ av_log(s->avctx, AV_LOG_ERROR, "invalid coeff precision\n");
+ return AVERROR_INVALIDDATA;
+ }
+ qlevel = get_sbits(&s->gb, 5);
+ if (qlevel < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "qlevel %d not supported, maybe buggy stream\n",
+ qlevel);
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (i = 0; i < pred_order; i++) {
+ coeffs[pred_order - i - 1] = get_sbits(&s->gb, coeff_prec);
+ }
+
+ if ((ret = decode_residuals(s, residual, pred_order)) < 0)
+ return ret;
+
+ for (i = pred_order; i < s->blocksize; i++, decoded++) {
+ int64_t sum = 0;
+ for (j = 0; j < pred_order; j++)
+ sum += (int64_t)coeffs[j] * decoded[j];
+ decoded[j] = residual[i] + (sum >> qlevel);
+ }
+
+ return 0;
+}
+
static inline int decode_subframe(FLACContext *s, int channel)
{
int32_t *decoded = s->decoded[channel];
int type, wasted = 0;
- int bps = s->flac_stream_info.bps;
- int i, tmp, ret;
+ int bps = s->stream_info.bps;
+ int i, ret;
if (channel == 0) {
if (s->ch_mode == FLAC_CHMODE_RIGHT_SIDE)
@@ -427,7 +545,7 @@ static inline int decode_subframe(FLACContext *s, int channel)
int left = get_bits_left(&s->gb);
if ( left <= 0 ||
(left < bps && !show_bits_long(&s->gb, left)) ||
- !show_bits_long(&s->gb, bps)) {
+ !show_bits_long(&s->gb, bps-1)) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid number of wasted bits > available bits (%d) - left=%d\n",
bps, left);
@@ -436,34 +554,63 @@ static inline int decode_subframe(FLACContext *s, int channel)
wasted = 1 + get_unary(&s->gb, 1, get_bits_left(&s->gb));
bps -= wasted;
}
- if (bps > 32) {
- avpriv_report_missing_feature(s->avctx, "Decorrelated bit depth > 32");
- return AVERROR_PATCHWELCOME;
- }
//FIXME use av_log2 for types
if (type == 0) {
- tmp = get_sbits_long(&s->gb, bps);
- for (i = 0; i < s->blocksize; i++)
- decoded[i] = tmp;
+ if (bps < 33) {
+ int32_t tmp = get_sbits_long(&s->gb, bps);
+ for (i = 0; i < s->blocksize; i++)
+ decoded[i] = tmp;
+ } else {
+ int64_t tmp = get_sbits64(&s->gb, 33);
+ for (i = 0; i < s->blocksize; i++)
+ s->decoded_33bps[i] = tmp;
+ }
} else if (type == 1) {
- for (i = 0; i < s->blocksize; i++)
- decoded[i] = get_sbits_long(&s->gb, bps);
+ if (bps < 33) {
+ for (i = 0; i < s->blocksize; i++)
+ decoded[i] = get_sbits_long(&s->gb, bps);
+ } else {
+ for (i = 0; i < s->blocksize; i++)
+ s->decoded_33bps[i] = get_sbits64(&s->gb, 33);
+ }
} else if ((type >= 8) && (type <= 12)) {
- if ((ret = decode_subframe_fixed(s, decoded, type & ~0x8, bps)) < 0)
- return ret;
+ int order = type & ~0x8;
+ if (bps < 33) {
+ if (bps + order <= 32) {
+ if ((ret = decode_subframe_fixed(s, decoded, order, bps)) < 0)
+ return ret;
+ } else {
+ if ((ret = decode_subframe_fixed_wide(s, decoded, order, bps)) < 0)
+ return ret;
+ }
+ } else {
+ if ((ret = decode_subframe_fixed_33bps(s, s->decoded_33bps, decoded, order)) < 0)
+ return ret;
+ }
} else if (type >= 32) {
- if ((ret = decode_subframe_lpc(s, decoded, (type & ~0x20)+1, bps)) < 0)
- return ret;
+ if (bps < 33) {
+ if ((ret = decode_subframe_lpc(s, decoded, (type & ~0x20)+1, bps)) < 0)
+ return ret;
+ } else {
+ if ((ret = decode_subframe_lpc_33bps(s, s->decoded_33bps, decoded, (type & ~0x20)+1)) < 0)
+ return ret;
+ }
} else {
av_log(s->avctx, AV_LOG_ERROR, "invalid coding type\n");
return AVERROR_INVALIDDATA;
}
- if (wasted && wasted < 32) {
- int i;
- for (i = 0; i < s->blocksize; i++)
- decoded[i] = (unsigned)decoded[i] << wasted;
+ if (wasted) {
+ if (wasted+bps == 33) {
+ int i;
+ for (i = 0; i < s->blocksize; i++)
+ s->decoded_33bps[i] = (uint64_t)decoded[i] << wasted;
+ } else if (wasted < 32) {
+ int i;
+ for (i = 0; i < s->blocksize; i++)
+ decoded[i] = (unsigned)decoded[i] << wasted;
+ }
}
return 0;
@@ -480,69 +627,68 @@ static int decode_frame(FLACContext *s)
return ret;
}
- if ( s->flac_stream_info.channels
- && fi.channels != s->flac_stream_info.channels
+ if ( s->stream_info.channels
+ && fi.channels != s->stream_info.channels
&& s->got_streaminfo) {
- s->flac_stream_info.channels = s->avctx->channels = fi.channels;
- ff_flac_set_channel_layout(s->avctx);
+ s->stream_info.channels = fi.channels;
+ ff_flac_set_channel_layout(s->avctx, fi.channels);
ret = allocate_buffers(s);
if (ret < 0)
return ret;
}
- s->flac_stream_info.channels = s->avctx->channels = fi.channels;
- if (!s->avctx->channel_layout)
- ff_flac_set_channel_layout(s->avctx);
+ s->stream_info.channels = fi.channels;
+ ff_flac_set_channel_layout(s->avctx, fi.channels);
s->ch_mode = fi.ch_mode;
- if (!s->flac_stream_info.bps && !fi.bps) {
+ if (!s->stream_info.bps && !fi.bps) {
av_log(s->avctx, AV_LOG_ERROR, "bps not found in STREAMINFO or frame header\n");
return AVERROR_INVALIDDATA;
}
if (!fi.bps) {
- fi.bps = s->flac_stream_info.bps;
- } else if (s->flac_stream_info.bps && fi.bps != s->flac_stream_info.bps) {
+ fi.bps = s->stream_info.bps;
+ } else if (s->stream_info.bps && fi.bps != s->stream_info.bps) {
av_log(s->avctx, AV_LOG_ERROR, "switching bps mid-stream is not "
"supported\n");
return AVERROR_INVALIDDATA;
}
- if (!s->flac_stream_info.bps) {
- s->flac_stream_info.bps = s->avctx->bits_per_raw_sample = fi.bps;
+ if (!s->stream_info.bps) {
+ s->stream_info.bps = s->avctx->bits_per_raw_sample = fi.bps;
flac_set_bps(s);
}
- if (!s->flac_stream_info.max_blocksize)
- s->flac_stream_info.max_blocksize = FLAC_MAX_BLOCKSIZE;
- if (fi.blocksize > s->flac_stream_info.max_blocksize) {
+ if (!s->stream_info.max_blocksize)
+ s->stream_info.max_blocksize = FLAC_MAX_BLOCKSIZE;
+ if (fi.blocksize > s->stream_info.max_blocksize) {
av_log(s->avctx, AV_LOG_ERROR, "blocksize %d > %d\n", fi.blocksize,
- s->flac_stream_info.max_blocksize);
+ s->stream_info.max_blocksize);
return AVERROR_INVALIDDATA;
}
s->blocksize = fi.blocksize;
- if (!s->flac_stream_info.samplerate && !fi.samplerate) {
+ if (!s->stream_info.samplerate && !fi.samplerate) {
av_log(s->avctx, AV_LOG_ERROR, "sample rate not found in STREAMINFO"
" or frame header\n");
return AVERROR_INVALIDDATA;
}
if (fi.samplerate == 0)
- fi.samplerate = s->flac_stream_info.samplerate;
- s->flac_stream_info.samplerate = s->avctx->sample_rate = fi.samplerate;
+ fi.samplerate = s->stream_info.samplerate;
+ s->stream_info.samplerate = s->avctx->sample_rate = fi.samplerate;
if (!s->got_streaminfo) {
ret = allocate_buffers(s);
if (ret < 0)
return ret;
s->got_streaminfo = 1;
- dump_headers(s->avctx, &s->flac_stream_info);
+ dump_headers(s->avctx, &s->stream_info);
}
ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt,
- s->flac_stream_info.channels, s->flac_stream_info.bps);
+ s->stream_info.channels);
-// dump_headers(s->avctx, &s->flac_stream_info);
+// dump_headers(s->avctx, &s->stream_info);
/* subframes */
- for (i = 0; i < s->flac_stream_info.channels; i++) {
+ for (i = 0; i < s->stream_info.channels; i++) {
if ((ret = decode_subframe(s, i)) < 0)
return ret;
}
@@ -555,11 +701,29 @@ static int decode_frame(FLACContext *s)
return 0;
}
-static int flac_decode_frame(AVCodecContext *avctx, void *data,
+static void decorrelate_33bps(int ch_mode, int32_t **decoded, int64_t *decoded_33bps, int len)
+{
+ int i;
+ if (ch_mode == FLAC_CHMODE_LEFT_SIDE ) {
+ for (i = 0; i < len; i++)
+ decoded[1][i] = decoded[0][i] - decoded_33bps[i];
+ } else if (ch_mode == FLAC_CHMODE_RIGHT_SIDE ) {
+ for (i = 0; i < len; i++)
+ decoded[0][i] = decoded[1][i] + decoded_33bps[i];
+ } else if (ch_mode == FLAC_CHMODE_MID_SIDE ) {
+ for (i = 0; i < len; i++) {
+ uint64_t a = decoded[0][i];
+ int64_t b = decoded_33bps[i];
+ a -= b >> 1;
+ decoded[0][i] = (a + b);
+ decoded[1][i] = a;
+ }
+ }
+}
+
+static int flac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
int *got_frame_ptr, AVPacket *avpkt)
{
- AVFrame *frame = data;
- ThreadFrame tframe = { .f = data };
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
FLACContext *s = avctx->priv_data;
@@ -568,12 +732,6 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
*got_frame_ptr = 0;
- if (s->flac_stream_info.max_framesize == 0) {
- s->flac_stream_info.max_framesize =
- ff_flac_get_max_frame_size(s->flac_stream_info.max_blocksize ? s->flac_stream_info.max_blocksize : FLAC_MAX_BLOCKSIZE,
- FLAC_MAX_CHANNELS, 32);
- }
-
if (buf_size > 5 && !memcmp(buf, "\177FLAC", 5)) {
av_log(s->avctx, AV_LOG_DEBUG, "skipping flac header packet 1\n");
return buf_size;
@@ -586,7 +744,7 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
/* check that there is at least the smallest decodable amount of data.
this amount corresponds to the smallest valid FLAC frame possible.
- FF F8 69 02 00 00 9A 00 00 34 46 */
+ FF F8 69 02 00 00 9A 00 00 34 */
if (buf_size < FLAC_MIN_FRAME_SIZE)
return buf_size;
@@ -618,12 +776,18 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */
frame->nb_samples = s->blocksize;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
- s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
- s->flac_stream_info.channels,
- s->blocksize, s->sample_shift);
+ if (s->stream_info.bps == 32 && s->ch_mode > 0) {
+ decorrelate_33bps(s->ch_mode, s->decoded, s->decoded_33bps, s->blocksize);
+ s->dsp.decorrelate[0](frame->data, s->decoded, s->stream_info.channels,
+ s->blocksize, s->sample_shift);
+ } else {
+ s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
+ s->stream_info.channels,
+ s->blocksize, s->sample_shift);
+ }
if (bytes_read > buf_size) {
av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", bytes_read - buf_size);
@@ -639,24 +803,12 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
return bytes_read;
}
-#if HAVE_THREADS
-static int init_thread_copy(AVCodecContext *avctx)
-{
- FLACContext *s = avctx->priv_data;
- s->decoded_buffer = NULL;
- s->decoded_buffer_size = 0;
- s->avctx = avctx;
- if (s->flac_stream_info.max_blocksize)
- return allocate_buffers(s);
- return 0;
-}
-#endif
-
static av_cold int flac_decode_close(AVCodecContext *avctx)
{
FLACContext *s = avctx->priv_data;
av_freep(&s->decoded_buffer);
+ av_freep(&s->decoded_buffer_33bps);
return 0;
}
@@ -667,27 +819,28 @@ static const AVOption options[] = {
};
static const AVClass flac_decoder_class = {
- "FLAC decoder",
- av_default_item_name,
- options,
- LIBAVUTIL_VERSION_INT,
+ .class_name = "FLAC decoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
};
-AVCodec ff_flac_decoder = {
- .name = "flac",
- .long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
- .type = AVMEDIA_TYPE_AUDIO,
- .id = AV_CODEC_ID_FLAC,
+const FFCodec ff_flac_decoder = {
+ .p.name = "flac",
+ CODEC_LONG_NAME("FLAC (Free Lossless Audio Codec)"),
+ .p.type = AVMEDIA_TYPE_AUDIO,
+ .p.id = AV_CODEC_ID_FLAC,
.priv_data_size = sizeof(FLACContext),
.init = flac_decode_init,
.close = flac_decode_close,
- .decode = flac_decode_frame,
- .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
- .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
+ FF_CODEC_DECODE_CB(flac_decode_frame),
+ .p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
+ AV_CODEC_CAP_DR1 |
+ AV_CODEC_CAP_FRAME_THREADS,
+ .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE },
- .priv_class = &flac_decoder_class,
+ .p.priv_class = &flac_decoder_class,
};
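For 32-bit input the side channel of a joint-stereo frame needs 33 bits, which is why decorrelate_33bps() above runs before the normal 32-bit output path whenever bps is 32 and a stereo decorrelation mode is in use. A rough standalone illustration of the mid/side case, under the FLAC convention mid = floor((left+right)/2), side = left - right (names are hypothetical):

    #include <stdint.h>
    #include <stddef.h>

    /* Undo mid/side coupling when the side signal is held in 64-bit storage. */
    static void midside_to_lr_33(int32_t *ch0, int32_t *ch1,
                                 const int64_t *side33, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            uint64_t mid = ch0[i];          /* stored mid sample */
            int64_t  s   = side33[i];       /* up to 33-bit side sample */
            mid -= s >> 1;                  /* now equals the right channel */
            ch0[i] = (int32_t)(mid + s);    /* left  = right + side */
            ch1[i] = (int32_t)mid;          /* right */
        }
    }

The left-side and right-side cases are simpler: the missing channel is just the stored channel minus or plus the 33-bit side value, as in the first two loops of decorrelate_33bps().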
diff --git a/media/ffvpx/libavcodec/flacdsp.c b/media/ffvpx/libavcodec/flacdsp.c
index bc9a5dbed9..42e231db53 100644
--- a/media/ffvpx/libavcodec/flacdsp.c
+++ b/media/ffvpx/libavcodec/flacdsp.c
@@ -19,6 +19,7 @@
*/
#include "libavutil/attributes.h"
+#include "libavutil/internal.h"
#include "libavutil/samplefmt.h"
#include "flacdsp.h"
#include "config.h"
@@ -26,7 +27,6 @@
#define SAMPLE_SIZE 16
#define PLANAR 0
#include "flacdsp_template.c"
-#include "flacdsp_lpc_template.c"
#undef PLANAR
#define PLANAR 1
@@ -37,7 +37,6 @@
#define SAMPLE_SIZE 32
#define PLANAR 0
#include "flacdsp_template.c"
-#include "flacdsp_lpc_template.c"
#undef PLANAR
#define PLANAR 1
@@ -85,13 +84,10 @@ static void flac_lpc_32_c(int32_t *decoded, const int coeffs[32],
}
-av_cold void ff_flacdsp_init(FLACDSPContext *c, enum AVSampleFormat fmt, int channels,
- int bps)
+av_cold void ff_flacdsp_init(FLACDSPContext *c, enum AVSampleFormat fmt, int channels)
{
c->lpc16 = flac_lpc_16_c;
c->lpc32 = flac_lpc_32_c;
- c->lpc16_encode = flac_lpc_encode_c_16;
- c->lpc32_encode = flac_lpc_encode_c_32;
switch (fmt) {
case AV_SAMPLE_FMT_S32:
@@ -123,8 +119,9 @@ av_cold void ff_flacdsp_init(FLACDSPContext *c, enum AVSampleFormat fmt, int cha
break;
}
- if (ARCH_ARM)
- ff_flacdsp_init_arm(c, fmt, channels, bps);
- if (ARCH_X86)
- ff_flacdsp_init_x86(c, fmt, channels, bps);
+#if ARCH_ARM
+ ff_flacdsp_init_arm(c, fmt, channels);
+#elif ARCH_X86
+ ff_flacdsp_init_x86(c, fmt, channels);
+#endif
}
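The tail of ff_flacdsp_init() now selects the per-architecture init with preprocessor guards instead of the old if (ARCH_ARM)/if (ARCH_X86) calls, which relied on the compiler eliminating the untaken branch. The shape of that pattern in isolation (hypothetical names; the ARCH_* macros are assumed to come from the build configuration):

    #include <stdint.h>

    typedef struct ExampleDSPContext {
        void (*scale)(int32_t *buf, int len);
    } ExampleDSPContext;

    void example_dsp_init_arm(ExampleDSPContext *c);   /* defined only in ARM builds */
    void example_dsp_init_x86(ExampleDSPContext *c);   /* defined only in x86 builds */

    static void example_dsp_init(ExampleDSPContext *c)
    {
    #if ARCH_ARM
        example_dsp_init_arm(c);   /* only the branch for the target arch is compiled */
    #elif ARCH_X86
        example_dsp_init_x86(c);
    #endif
    }

With the preprocessor form, a build for one architecture never even references the other architectures' init symbols.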
diff --git a/media/ffvpx/libavcodec/flacdsp.h b/media/ffvpx/libavcodec/flacdsp.h
index 7bb0dd0e9a..9f8ed38b66 100644
--- a/media/ffvpx/libavcodec/flacdsp.h
+++ b/media/ffvpx/libavcodec/flacdsp.h
@@ -20,7 +20,7 @@
#define AVCODEC_FLACDSP_H
#include <stdint.h>
-#include "libavutil/internal.h"
+
#include "libavutil/samplefmt.h"
typedef struct FLACDSPContext {
@@ -36,8 +36,8 @@ typedef struct FLACDSPContext {
const int32_t coefs[32], int shift);
} FLACDSPContext;
-void ff_flacdsp_init(FLACDSPContext *c, enum AVSampleFormat fmt, int channels, int bps);
-void ff_flacdsp_init_arm(FLACDSPContext *c, enum AVSampleFormat fmt, int channels, int bps);
-void ff_flacdsp_init_x86(FLACDSPContext *c, enum AVSampleFormat fmt, int channels, int bps);
+void ff_flacdsp_init(FLACDSPContext *c, enum AVSampleFormat fmt, int channels);
+void ff_flacdsp_init_arm(FLACDSPContext *c, enum AVSampleFormat fmt, int channels);
+void ff_flacdsp_init_x86(FLACDSPContext *c, enum AVSampleFormat fmt, int channels);
#endif /* AVCODEC_FLACDSP_H */
diff --git a/media/ffvpx/libavcodec/flacdsp_lpc_template.c b/media/ffvpx/libavcodec/flacdsp_lpc_template.c
index 5d532e0673..dd847d3b32 100644
--- a/media/ffvpx/libavcodec/flacdsp_lpc_template.c
+++ b/media/ffvpx/libavcodec/flacdsp_lpc_template.c
@@ -17,7 +17,7 @@
*/
#include <stdint.h>
-#include "libavutil/avutil.h"
+#include "libavutil/common.h"
#include "mathops.h"
#undef FUNC
diff --git a/media/ffvpx/libavcodec/flacdsp_template.c b/media/ffvpx/libavcodec/flacdsp_template.c
index 892418cddc..0a6fe59e28 100644
--- a/media/ffvpx/libavcodec/flacdsp_template.c
+++ b/media/ffvpx/libavcodec/flacdsp_template.c
@@ -19,7 +19,7 @@
*/
#include <stdint.h>
-#include "libavutil/avutil.h"
+#include "libavutil/macros.h"
#undef FUNC
#undef FSUF
diff --git a/media/ffvpx/libavcodec/frame_thread_encoder.h b/media/ffvpx/libavcodec/frame_thread_encoder.h
index fc85ba48b8..201cba2a8f 100644
--- a/media/ffvpx/libavcodec/frame_thread_encoder.h
+++ b/media/ffvpx/libavcodec/frame_thread_encoder.h
@@ -27,8 +27,9 @@
* Initialize frame thread encoder.
* @note hardware encoders are not supported
*/
-int ff_frame_thread_encoder_init(AVCodecContext *avctx, AVDictionary *options);
+int ff_frame_thread_encoder_init(AVCodecContext *avctx);
void ff_frame_thread_encoder_free(AVCodecContext *avctx);
-int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet_ptr);
+int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ AVFrame *frame, int *got_packet_ptr);
#endif /* AVCODEC_FRAME_THREAD_ENCODER_H */
diff --git a/media/ffvpx/libavcodec/get_bits.h b/media/ffvpx/libavcodec/get_bits.h
index c4ab607744..65dc080ddb 100644
--- a/media/ffvpx/libavcodec/get_bits.h
+++ b/media/ffvpx/libavcodec/get_bits.h
@@ -1,6 +1,5 @@
/*
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
- * Copyright (c) 2016 Alexandra Hájková
*
* This file is part of FFmpeg.
*
@@ -31,9 +30,9 @@
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
-#include "libavutil/log.h"
#include "libavutil/avassert.h"
-#include "avcodec.h"
+
+#include "defs.h"
#include "mathops.h"
#include "vlc.h"
@@ -58,12 +57,55 @@
#define CACHED_BITSTREAM_READER 0
#endif
-typedef struct GetBitContext {
- const uint8_t *buffer, *buffer_end;
#if CACHED_BITSTREAM_READER
- uint64_t cache;
- unsigned bits_left;
+
+// we always want the LE implementation, to provide get_bits_le()
+#define BITSTREAM_LE
+
+#ifndef BITSTREAM_READER_LE
+# define BITSTREAM_BE
+# define BITSTREAM_DEFAULT_BE
#endif
+
+#include "bitstream.h"
+
+#undef BITSTREAM_LE
+#undef BITSTREAM_BE
+#undef BITSTREAM_DEFAULT_BE
+
+typedef BitstreamContext GetBitContext;
+
+#define get_bits_count bits_tell
+#define get_bits_left bits_left
+#define skip_bits_long bits_skip
+#define skip_bits bits_skip
+#define get_bits bits_read_nz
+#define get_bitsz bits_read
+#define get_bits_long bits_read
+#define get_bits1 bits_read_bit
+#define get_bits64 bits_read_64
+#define get_xbits bits_read_xbits
+#define get_sbits bits_read_signed_nz
+#define get_sbits_long bits_read_signed
+#define show_bits bits_peek
+#define show_bits_long bits_peek
+#define init_get_bits bits_init
+#define init_get_bits8 bits_init8
+#define align_get_bits bits_align
+#define get_vlc2 bits_read_vlc
+
+#define init_get_bits8_le(s, buffer, byte_size) bits_init8_le((BitstreamContextLE*)s, buffer, byte_size)
+#define get_bits_le(s, n) bits_read_le((BitstreamContextLE*)s, n)
+
+#define show_bits1(s) bits_peek(s, 1)
+#define skip_bits1(s) bits_skip(s, 1)
+
+#define skip_1stop_8data_bits bits_skip_1stop_8data
+
+#else // CACHED_BITSTREAM_READER
+
+typedef struct GetBitContext {
+ const uint8_t *buffer, *buffer_end;
int index;
int size_in_bits;
int size_in_bits_plus8;
@@ -120,16 +162,12 @@ static inline unsigned int show_bits(GetBitContext *s, int n);
* For examples see get_bits, show_bits, skip_bits, get_vlc.
*/
-#if CACHED_BITSTREAM_READER
-# define MIN_CACHE_BITS 64
-#elif defined LONG_BITSTREAM_READER
+#if defined LONG_BITSTREAM_READER
# define MIN_CACHE_BITS 32
#else
# define MIN_CACHE_BITS 25
#endif
-#if !CACHED_BITSTREAM_READER
-
#define OPEN_READER_NOSIZE(name, gb) \
unsigned int name ## _index = (gb)->index; \
unsigned int av_unused name ## _cache
@@ -214,73 +252,12 @@ static inline unsigned int show_bits(GetBitContext *s, int n);
#define GET_CACHE(name, gb) ((uint32_t) name ## _cache)
-#endif
static inline int get_bits_count(const GetBitContext *s)
{
-#if CACHED_BITSTREAM_READER
- return s->index - s->bits_left;
-#else
return s->index;
-#endif
-}
-
-#if CACHED_BITSTREAM_READER
-static inline void refill_32(GetBitContext *s, int is_le)
-{
-#if !UNCHECKED_BITSTREAM_READER
- if (s->index >> 3 >= s->buffer_end - s->buffer)
- return;
-#endif
-
- if (is_le)
- s->cache = (uint64_t)AV_RL32(s->buffer + (s->index >> 3)) << s->bits_left | s->cache;
- else
- s->cache = s->cache | (uint64_t)AV_RB32(s->buffer + (s->index >> 3)) << (32 - s->bits_left);
- s->index += 32;
- s->bits_left += 32;
-}
-
-static inline void refill_64(GetBitContext *s, int is_le)
-{
-#if !UNCHECKED_BITSTREAM_READER
- if (s->index >> 3 >= s->buffer_end - s->buffer)
- return;
-#endif
-
- if (is_le)
- s->cache = AV_RL64(s->buffer + (s->index >> 3));
- else
- s->cache = AV_RB64(s->buffer + (s->index >> 3));
- s->index += 64;
- s->bits_left = 64;
-}
-
-static inline uint64_t get_val(GetBitContext *s, unsigned n, int is_le)
-{
- uint64_t ret;
- av_assert2(n>0 && n<=63);
- if (is_le) {
- ret = s->cache & ((UINT64_C(1) << n) - 1);
- s->cache >>= n;
- } else {
- ret = s->cache >> (64 - n);
- s->cache <<= n;
- }
- s->bits_left -= n;
- return ret;
}
-static inline unsigned show_val(const GetBitContext *s, unsigned n)
-{
-#ifdef BITSTREAM_READER_LE
- return s->cache & ((UINT64_C(1) << n) - 1);
-#else
- return s->cache >> (64 - n);
-#endif
-}
-#endif
-
/**
* Skips the specified number of bits.
* @param n the number of bits to skip,
@@ -290,28 +267,12 @@ static inline unsigned show_val(const GetBitContext *s, unsigned n)
*/
static inline void skip_bits_long(GetBitContext *s, int n)
{
-#if CACHED_BITSTREAM_READER
- skip_bits(s, n);
-#else
#if UNCHECKED_BITSTREAM_READER
s->index += n;
#else
s->index += av_clip(n, -s->index, s->size_in_bits_plus8 - s->index);
#endif
-#endif
-}
-
-#if CACHED_BITSTREAM_READER
-static inline void skip_remaining(GetBitContext *s, unsigned n)
-{
-#ifdef BITSTREAM_READER_LE
- s->cache >>= n;
-#else
- s->cache <<= n;
-#endif
- s->bits_left -= n;
}
-#endif
/**
* Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
@@ -320,13 +281,6 @@ static inline void skip_remaining(GetBitContext *s, unsigned n)
*/
static inline int get_xbits(GetBitContext *s, int n)
{
-#if CACHED_BITSTREAM_READER
- int32_t cache = show_bits(s, 32);
- int sign = ~cache >> 31;
- skip_remaining(s, n);
-
- return ((((uint32_t)(sign ^ cache)) >> (32 - n)) ^ sign) - sign;
-#else
register int sign;
register int32_t cache;
OPEN_READER(re, s);
@@ -337,10 +291,8 @@ static inline int get_xbits(GetBitContext *s, int n)
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
return (NEG_USR32(sign ^ cache, n) ^ sign) - sign;
-#endif
}
-#if !CACHED_BITSTREAM_READER
static inline int get_xbits_le(GetBitContext *s, int n)
{
register int sign;
@@ -354,22 +306,16 @@ static inline int get_xbits_le(GetBitContext *s, int n)
CLOSE_READER(re, s);
return (zero_extend(sign ^ cache, n) ^ sign) - sign;
}
-#endif
static inline int get_sbits(GetBitContext *s, int n)
{
register int tmp;
-#if CACHED_BITSTREAM_READER
- av_assert2(n>0 && n<=25);
- tmp = sign_extend(get_bits(s, n), n);
-#else
OPEN_READER(re, s);
av_assert2(n>0 && n<=25);
UPDATE_CACHE(re, s);
tmp = SHOW_SBITS(re, s, n);
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
-#endif
return tmp;
}
@@ -379,32 +325,12 @@ static inline int get_sbits(GetBitContext *s, int n)
static inline unsigned int get_bits(GetBitContext *s, int n)
{
register unsigned int tmp;
-#if CACHED_BITSTREAM_READER
-
- av_assert2(n>0 && n<=32);
- if (n > s->bits_left) {
-#ifdef BITSTREAM_READER_LE
- refill_32(s, 1);
-#else
- refill_32(s, 0);
-#endif
- if (s->bits_left < 32)
- s->bits_left = n;
- }
-
-#ifdef BITSTREAM_READER_LE
- tmp = get_val(s, n, 1);
-#else
- tmp = get_val(s, n, 0);
-#endif
-#else
OPEN_READER(re, s);
av_assert2(n>0 && n<=25);
UPDATE_CACHE(re, s);
tmp = SHOW_UBITS(re, s, n);
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
-#endif
av_assert2(tmp < UINT64_C(1) << n);
return tmp;
}
@@ -419,16 +345,6 @@ static av_always_inline int get_bitsz(GetBitContext *s, int n)
static inline unsigned int get_bits_le(GetBitContext *s, int n)
{
-#if CACHED_BITSTREAM_READER
- av_assert2(n>0 && n<=32);
- if (n > s->bits_left) {
- refill_32(s, 1);
- if (s->bits_left < 32)
- s->bits_left = n;
- }
-
- return get_val(s, n, 1);
-#else
register int tmp;
OPEN_READER(re, s);
av_assert2(n>0 && n<=25);
@@ -437,7 +353,6 @@ static inline unsigned int get_bits_le(GetBitContext *s, int n)
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
return tmp;
-#endif
}
/**
@@ -446,71 +361,22 @@ static inline unsigned int get_bits_le(GetBitContext *s, int n)
static inline unsigned int show_bits(GetBitContext *s, int n)
{
register unsigned int tmp;
-#if CACHED_BITSTREAM_READER
- if (n > s->bits_left)
-#ifdef BITSTREAM_READER_LE
- refill_32(s, 1);
-#else
- refill_32(s, 0);
-#endif
-
- tmp = show_val(s, n);
-#else
OPEN_READER_NOSIZE(re, s);
av_assert2(n>0 && n<=25);
UPDATE_CACHE(re, s);
tmp = SHOW_UBITS(re, s, n);
-#endif
return tmp;
}
static inline void skip_bits(GetBitContext *s, int n)
{
-#if CACHED_BITSTREAM_READER
- if (n < s->bits_left)
- skip_remaining(s, n);
- else {
- n -= s->bits_left;
- s->cache = 0;
- s->bits_left = 0;
-
- if (n >= 64) {
- unsigned skip = (n / 8) * 8;
-
- n -= skip;
- s->index += skip;
- }
-#ifdef BITSTREAM_READER_LE
- refill_64(s, 1);
-#else
- refill_64(s, 0);
-#endif
- if (n)
- skip_remaining(s, n);
- }
-#else
OPEN_READER(re, s);
LAST_SKIP_BITS(re, s, n);
CLOSE_READER(re, s);
-#endif
}
static inline unsigned int get_bits1(GetBitContext *s)
{
-#if CACHED_BITSTREAM_READER
- if (!s->bits_left)
-#ifdef BITSTREAM_READER_LE
- refill_64(s, 1);
-#else
- refill_64(s, 0);
-#endif
-
-#ifdef BITSTREAM_READER_LE
- return get_val(s, 1, 1);
-#else
- return get_val(s, 1, 0);
-#endif
-#else
unsigned int index = s->index;
uint8_t result = s->buffer[index >> 3];
#ifdef BITSTREAM_READER_LE
@@ -527,7 +393,6 @@ static inline unsigned int get_bits1(GetBitContext *s)
s->index = index;
return result;
-#endif
}
static inline unsigned int show_bits1(GetBitContext *s)
@@ -548,10 +413,6 @@ static inline unsigned int get_bits_long(GetBitContext *s, int n)
av_assert2(n>=0 && n<=32);
if (!n) {
return 0;
-#if CACHED_BITSTREAM_READER
- }
- return get_bits(s, n);
-#else
} else if (n <= MIN_CACHE_BITS) {
return get_bits(s, n);
} else {
@@ -563,7 +424,6 @@ static inline unsigned int get_bits_long(GetBitContext *s, int n)
return ret | get_bits(s, n - 16);
#endif
}
-#endif
}
/**
@@ -597,6 +457,18 @@ static inline int get_sbits_long(GetBitContext *s, int n)
}
/**
+ * Read 0-64 bits as a signed integer.
+ */
+static inline int64_t get_sbits64(GetBitContext *s, int n)
+{
+ // sign_extend(x, 0) is undefined
+ if (!n)
+ return 0;
+
+ return sign_extend64(get_bits64(s, n), n);
+}
+
+/**
* Show 0-32 bits.
*/
static inline unsigned int show_bits_long(GetBitContext *s, int n)
@@ -609,18 +481,17 @@ static inline unsigned int show_bits_long(GetBitContext *s, int n)
}
}
-static inline int check_marker(void *logctx, GetBitContext *s, const char *msg)
-{
- int bit = get_bits1(s);
- if (!bit)
- av_log(logctx, AV_LOG_INFO, "Marker bit missing at %d of %d %s\n",
- get_bits_count(s) - 1, s->size_in_bits, msg);
-
- return bit;
-}
-static inline int init_get_bits_xe(GetBitContext *s, const uint8_t *buffer,
- int bit_size, int is_le)
+/**
+ * Initialize GetBitContext.
+ * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
+ * larger than the actual read bits because some optimized bitstream
+ * readers read 32 or 64 bit at once and could read over the end
+ * @param bit_size the size of the buffer in bits
+ * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
+ */
+static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
+ int bit_size)
{
int buffer_size;
int ret = 0;
@@ -639,12 +510,6 @@ static inline int init_get_bits_xe(GetBitContext *s, const uint8_t *buffer,
s->buffer_end = buffer + buffer_size;
s->index = 0;
-#if CACHED_BITSTREAM_READER
- s->cache = 0;
- s->bits_left = 0;
- refill_64(s, is_le);
-#endif
-
return ret;
}
@@ -653,24 +518,6 @@ static inline int init_get_bits_xe(GetBitContext *s, const uint8_t *buffer,
* @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
* larger than the actual read bits because some optimized bitstream
* readers read 32 or 64 bit at once and could read over the end
- * @param bit_size the size of the buffer in bits
- * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
- */
-static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
- int bit_size)
-{
-#ifdef BITSTREAM_READER_LE
- return init_get_bits_xe(s, buffer, bit_size, 1);
-#else
- return init_get_bits_xe(s, buffer, bit_size, 0);
-#endif
-}
-
-/**
- * Initialize GetBitContext.
- * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
- * larger than the actual read bits because some optimized bitstream
- * readers read 32 or 64 bit at once and could read over the end
* @param byte_size the size of the buffer in bytes
* @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
*/
@@ -687,7 +534,7 @@ static inline int init_get_bits8_le(GetBitContext *s, const uint8_t *buffer,
{
if (byte_size > INT_MAX / 8 || byte_size < 0)
byte_size = -1;
- return init_get_bits_xe(s, buffer, byte_size * 8, 1);
+ return init_get_bits(s, buffer, byte_size * 8);
}
static inline const uint8_t *align_get_bits(GetBitContext *s)
@@ -709,8 +556,8 @@ static inline const uint8_t *align_get_bits(GetBitContext *s)
unsigned int index; \
\
index = SHOW_UBITS(name, gb, bits); \
- code = table[index][0]; \
- n = table[index][1]; \
+ code = table[index].sym; \
+ n = table[index].len; \
\
if (max_depth > 1 && n < 0) { \
LAST_SKIP_BITS(name, gb, bits); \
@@ -719,8 +566,8 @@ static inline const uint8_t *align_get_bits(GetBitContext *s)
nb_bits = -n; \
\
index = SHOW_UBITS(name, gb, nb_bits) + code; \
- code = table[index][0]; \
- n = table[index][1]; \
+ code = table[index].sym; \
+ n = table[index].len; \
if (max_depth > 2 && n < 0) { \
LAST_SKIP_BITS(name, gb, nb_bits); \
UPDATE_CACHE(name, gb); \
@@ -728,8 +575,8 @@ static inline const uint8_t *align_get_bits(GetBitContext *s)
nb_bits = -n; \
\
index = SHOW_UBITS(name, gb, nb_bits) + code; \
- code = table[index][0]; \
- n = table[index][1]; \
+ code = table[index].sym; \
+ n = table[index].len; \
} \
} \
SKIP_BITS(name, gb, n); \
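The GET_VLC macro above now indexes an array of structs (.sym and .len) instead of a two-element array per entry; a negative length still flags a jump into a sub-table for codes longer than the first-stage lookup. A rough sketch of that layout and a single lookup step (the struct here is illustrative, the real VLCElem lives in vlc.h):

    #include <stdint.h>

    typedef struct VlcEntrySketch {
        int16_t sym;   /* decoded symbol, or a sub-table offset when len < 0 */
        int16_t len;   /* bits consumed; negative means "descend into sub-table" */
    } VlcEntrySketch;

    /* One first-stage lookup: the caller peeks 'peeked' bits from the stream,
     * fetches the entry, and learns how many bits to skip (or that a second
     * stage is needed when *len_out comes back negative). */
    static int vlc_lookup_once(const VlcEntrySketch *table, unsigned peeked,
                               int *len_out)
    {
        *len_out = table[peeked].len;
        return table[peeked].sym;
    }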
@@ -772,19 +619,6 @@ static inline const uint8_t *align_get_bits(GetBitContext *s)
SKIP_BITS(name, gb, n); \
} while (0)
-/* Return the LUT element for the given bitstream configuration. */
-static inline int set_idx(GetBitContext *s, int code, int *n, int *nb_bits,
- VLC_TYPE (*table)[2])
-{
- unsigned idx;
-
- *nb_bits = -*n;
- idx = show_bits(s, *nb_bits) + code;
- *n = table[idx][1];
-
- return table[idx][0];
-}
-
/**
* Parse a vlc code.
* @param bits is the number of bits which will be read at once, must be
@@ -794,27 +628,9 @@ static inline int set_idx(GetBitContext *s, int code, int *n, int *nb_bits,
* = (max_vlc_length + bits - 1) / bits
* @returns the code parsed or -1 if no vlc matches
*/
-static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE (*table)[2],
+static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table,
int bits, int max_depth)
{
-#if CACHED_BITSTREAM_READER
- int nb_bits;
- unsigned idx = show_bits(s, bits);
- int code = table[idx][0];
- int n = table[idx][1];
-
- if (max_depth > 1 && n < 0) {
- skip_remaining(s, bits);
- code = set_idx(s, code, &n, &nb_bits, table);
- if (max_depth > 2 && n < 0) {
- skip_remaining(s, nb_bits);
- code = set_idx(s, code, &n, &nb_bits, table);
- }
- }
- skip_remaining(s, n);
-
- return code;
-#else
int code;
OPEN_READER(re, s);
@@ -825,7 +641,6 @@ static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE (*table)[2],
CLOSE_READER(re, s);
return code;
-#endif
}
static inline int decode012(GetBitContext *gb)
@@ -865,4 +680,6 @@ static inline int skip_1stop_8data_bits(GetBitContext *gb)
return 0;
}
+#endif // CACHED_BITSTREAM_READER
+
#endif /* AVCODEC_GET_BITS_H */
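get_sbits64() is added above so callers such as the FLAC decoder can read 33-bit signed warm-up samples; the interesting part is sign-extending an n-bit field held in a 64-bit word. A minimal sketch of that operation on its own (the helper name is made up, this is not FFmpeg's sign_extend64, and it assumes arithmetic right shift of signed values as FFmpeg does elsewhere):

    #include <stdint.h>

    /* Sign-extend the low n bits of v (1 <= n <= 64) to a signed 64-bit value. */
    static int64_t sext64(uint64_t v, unsigned n)
    {
        unsigned shift = 64u - n;
        return (int64_t)(v << shift) >> shift;
    }

For example, the 33-bit pattern with only its top bit set sign-extends to -(1LL << 32), the most negative 33-bit sample.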
diff --git a/media/ffvpx/libavcodec/get_buffer.c b/media/ffvpx/libavcodec/get_buffer.c
new file mode 100644
index 0000000000..a04fd878de
--- /dev/null
+++ b/media/ffvpx/libavcodec/get_buffer.c
@@ -0,0 +1,304 @@
+/*
+ * The default get_buffer2() implementation
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/mem.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/version.h"
+
+#include "avcodec.h"
+#include "internal.h"
+
+typedef struct FramePool {
+ /**
+ * Pools for each data plane. For audio all the planes have the same size,
+ * so only pools[0] is used.
+ */
+ AVBufferPool *pools[4];
+
+ /*
+ * Pool parameters
+ */
+ int format;
+ int width, height;
+ int stride_align[AV_NUM_DATA_POINTERS];
+ int linesize[4];
+ int planes;
+ int channels;
+ int samples;
+} FramePool;
+
+static void frame_pool_free(void *opaque, uint8_t *data)
+{
+ FramePool *pool = (FramePool*)data;
+ int i;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
+ av_buffer_pool_uninit(&pool->pools[i]);
+
+ av_freep(&data);
+}
+
+static AVBufferRef *frame_pool_alloc(void)
+{
+ FramePool *pool = av_mallocz(sizeof(*pool));
+ AVBufferRef *buf;
+
+ if (!pool)
+ return NULL;
+
+ buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
+ frame_pool_free, NULL, 0);
+ if (!buf) {
+ av_freep(&pool);
+ return NULL;
+ }
+
+ return buf;
+}
+
+static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
+{
+ FramePool *pool = avctx->internal->pool ?
+ (FramePool*)avctx->internal->pool->data : NULL;
+ AVBufferRef *pool_buf;
+ int i, ret, ch, planes;
+
+ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ int planar = av_sample_fmt_is_planar(frame->format);
+ ch = frame->ch_layout.nb_channels;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!ch)
+ ch = frame->channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ planes = planar ? ch : 1;
+ }
+
+ if (pool && pool->format == frame->format) {
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
+ pool->width == frame->width && pool->height == frame->height)
+ return 0;
+ if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
+ pool->channels == ch && frame->nb_samples == pool->samples)
+ return 0;
+ }
+
+ pool_buf = frame_pool_alloc();
+ if (!pool_buf)
+ return AVERROR(ENOMEM);
+ pool = (FramePool*)pool_buf->data;
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO: {
+ int linesize[4];
+ int w = frame->width;
+ int h = frame->height;
+ int unaligned;
+ ptrdiff_t linesize1[4];
+ size_t size[4];
+
+ avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
+
+ do {
+ // NOTE: do not align linesizes individually, this breaks e.g. assumptions
+ // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
+ ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
+ if (ret < 0)
+ goto fail;
+ // increase alignment of w for next try (rhs gives the lowest bit set in w)
+ w += w & ~(w - 1);
+
+ unaligned = 0;
+ for (i = 0; i < 4; i++)
+ unaligned |= linesize[i] % pool->stride_align[i];
+ } while (unaligned);
+
+ for (i = 0; i < 4; i++)
+ linesize1[i] = linesize[i];
+ ret = av_image_fill_plane_sizes(size, avctx->pix_fmt, h, linesize1);
+ if (ret < 0)
+ goto fail;
+
+ for (i = 0; i < 4; i++) {
+ pool->linesize[i] = linesize[i];
+ if (size[i]) {
+ if (size[i] > INT_MAX - (16 + STRIDE_ALIGN - 1)) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
+ CONFIG_MEMORY_POISONING ?
+ NULL :
+ av_buffer_allocz);
+ if (!pool->pools[i]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+ }
+ pool->format = frame->format;
+ pool->width = frame->width;
+ pool->height = frame->height;
+
+ break;
+ }
+ case AVMEDIA_TYPE_AUDIO: {
+ ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
+ frame->nb_samples, frame->format, 0);
+ if (ret < 0)
+ goto fail;
+
+ pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
+ if (!pool->pools[0]) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ pool->format = frame->format;
+ pool->planes = planes;
+ pool->channels = ch;
+ pool->samples = frame->nb_samples;
+ break;
+ }
+ default: av_assert0(0);
+ }
+
+ av_buffer_unref(&avctx->internal->pool);
+ avctx->internal->pool = pool_buf;
+
+ return 0;
+fail:
+ av_buffer_unref(&pool_buf);
+ return ret;
+}
+
+static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+{
+ FramePool *pool = (FramePool*)avctx->internal->pool->data;
+ int planes = pool->planes;
+ int i;
+
+ frame->linesize[0] = pool->linesize[0];
+
+ if (planes > AV_NUM_DATA_POINTERS) {
+ frame->extended_data = av_calloc(planes, sizeof(*frame->extended_data));
+ frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
+ frame->extended_buf = av_calloc(frame->nb_extended_buf,
+ sizeof(*frame->extended_buf));
+ if (!frame->extended_data || !frame->extended_buf) {
+ av_freep(&frame->extended_data);
+ av_freep(&frame->extended_buf);
+ return AVERROR(ENOMEM);
+ }
+ } else {
+ frame->extended_data = frame->data;
+ av_assert0(frame->nb_extended_buf == 0);
+ }
+
+ for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
+ frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
+ if (!frame->buf[i])
+ goto fail;
+ frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
+ }
+ for (i = 0; i < frame->nb_extended_buf; i++) {
+ frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
+ if (!frame->extended_buf[i])
+ goto fail;
+ frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
+ }
+
+ if (avctx->debug & FF_DEBUG_BUFFERS)
+ av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
+
+ return 0;
+fail:
+ av_frame_unref(frame);
+ return AVERROR(ENOMEM);
+}
+
+static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
+{
+ FramePool *pool = (FramePool*)s->internal->pool->data;
+ int i;
+
+ if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
+ av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
+ return -1;
+ }
+
+ memset(pic->data, 0, sizeof(pic->data));
+ pic->extended_data = pic->data;
+
+ for (i = 0; i < 4 && pool->pools[i]; i++) {
+ pic->linesize[i] = pool->linesize[i];
+
+ pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
+ if (!pic->buf[i])
+ goto fail;
+
+ pic->data[i] = pic->buf[i]->data;
+ }
+ for (; i < AV_NUM_DATA_POINTERS; i++) {
+ pic->data[i] = NULL;
+ pic->linesize[i] = 0;
+ }
+
+ if (s->debug & FF_DEBUG_BUFFERS)
+ av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
+
+ return 0;
+fail:
+ av_frame_unref(pic);
+ return AVERROR(ENOMEM);
+}
+
+int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
+{
+ int ret;
+
+ if (avctx->hw_frames_ctx) {
+ ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
+ frame->width = avctx->coded_width;
+ frame->height = avctx->coded_height;
+ return ret;
+ }
+
+ if ((ret = update_frame_pool(avctx, frame)) < 0)
+ return ret;
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ return video_get_buffer(avctx, frame);
+ case AVMEDIA_TYPE_AUDIO:
+ return audio_get_buffer(avctx, frame);
+ default:
+ return -1;
+ }
+}
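get_buffer.c above carries only the default pooled allocator; applications can still interpose their own AVCodecContext.get_buffer2 callback and fall back to it. A small hedged example of that pattern (the wrapper name is made up):

    #include <libavcodec/avcodec.h>
    #include <libavutil/log.h>

    /* Log every allocation request, then defer to the pooled default allocator. */
    static int logging_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
    {
        av_log(avctx, AV_LOG_DEBUG, "get_buffer2: %dx%d / %d samples\n",
               frame->width, frame->height, frame->nb_samples);
        return avcodec_default_get_buffer2(avctx, frame, flags);
    }

    /* usage: avctx->get_buffer2 = logging_get_buffer2;  (set before avcodec_open2) */

A custom callback must still hand out buffers that satisfy the codec's alignment and padding expectations, which is easiest to guarantee by ending in avcodec_default_get_buffer2() as shown.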
diff --git a/media/ffvpx/libavcodec/golomb.c b/media/ffvpx/libavcodec/golomb.c
index 937ac22ce1..f9ca8149eb 100644
--- a/media/ffvpx/libavcodec/golomb.c
+++ b/media/ffvpx/libavcodec/golomb.c
@@ -26,7 +26,7 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
-#include "libavutil/common.h"
+#include <stdint.h>
const uint8_t ff_golomb_vlc_len[512]={
19,17,15,15,13,13,13,13,11,11,11,11,11,11,11,11,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
diff --git a/media/ffvpx/libavcodec/golomb.h b/media/ffvpx/libavcodec/golomb.h
index 5cdfa0945d..164c2583b6 100644
--- a/media/ffvpx/libavcodec/golomb.h
+++ b/media/ffvpx/libavcodec/golomb.h
@@ -33,14 +33,12 @@
#include <stdint.h>
#include "get_bits.h"
-#include "put_bits.h"
#define INVALID_VLC 0x80000000
extern const uint8_t ff_golomb_vlc_len[512];
extern const uint8_t ff_ue_golomb_vlc_code[512];
extern const int8_t ff_se_golomb_vlc_code[512];
-extern const uint8_t ff_ue_golomb_len[256];
extern const uint8_t ff_interleaved_golomb_vlc_len[256];
extern const uint8_t ff_interleaved_ue_golomb_vlc_code[256];
@@ -66,9 +64,12 @@ static inline int get_ue_golomb(GetBitContext *gb)
return ff_ue_golomb_vlc_code[buf];
} else {
int log = 2 * av_log2(buf) - 31;
+
+ skip_bits_long(gb, 32 - log);
+ if (log < 7)
+ return AVERROR_INVALIDDATA;
buf >>= log;
buf--;
- skip_bits_long(gb, 32 - log);
return buf;
}
@@ -87,10 +88,8 @@ static inline int get_ue_golomb(GetBitContext *gb)
int log = 2 * av_log2(buf) - 31;
LAST_SKIP_BITS(re, gb, 32 - log);
CLOSE_READER(re, gb);
- if (log < 7) {
- av_log(NULL, AV_LOG_ERROR, "Invalid UE golomb code\n");
+ if (log < 7)
return AVERROR_INVALIDDATA;
- }
buf >>= log;
buf--;
@@ -115,7 +114,8 @@ static inline unsigned get_ue_golomb_long(GetBitContext *gb)
/**
 * read unsigned exp golomb code, constrained to a max of 31.
- * the return value is undefined if the stored value exceeds 31.
+ * If the value encountered is not in 0..31, the return value
+ * is outside the range 0..30.
*/
static inline int get_ue_golomb_31(GetBitContext *gb)
{
@@ -313,7 +313,7 @@ static inline int get_interleaved_se_golomb(GetBitContext *gb)
} else {
int log;
skip_bits(gb, 8);
- buf |= 1 | show_bits_long(gb, 24);
+ buf |= 1 | show_bits(gb, 24);
if ((buf & 0xAAAAAAAA) == 0)
return INVALID_VLC;
@@ -613,135 +613,4 @@ static inline int get_te(GetBitContext *s, int r, char *file, const char *func,
#define get_te0_golomb(a, r) get_te(a, r, __FILE__, __func__, __LINE__)
#endif /* TRACE */
-
-/**
- * write unsigned exp golomb code. 2^16 - 2 at most
- */
-static inline void set_ue_golomb(PutBitContext *pb, int i)
-{
- av_assert2(i >= 0);
- av_assert2(i <= 0xFFFE);
-
- if (i < 256)
- put_bits(pb, ff_ue_golomb_len[i], i + 1);
- else {
- int e = av_log2(i + 1);
- put_bits(pb, 2 * e + 1, i + 1);
- }
-}
-
-/**
- * write unsigned exp golomb code. 2^32-2 at most.
- */
-static inline void set_ue_golomb_long(PutBitContext *pb, uint32_t i)
-{
- av_assert2(i <= (UINT32_MAX - 1));
-
- if (i < 256)
- put_bits(pb, ff_ue_golomb_len[i], i + 1);
- else {
- int e = av_log2(i + 1);
- put_bits64(pb, 2 * e + 1, i + 1);
- }
-}
-
-/**
- * write truncated unsigned exp golomb code.
- */
-static inline void set_te_golomb(PutBitContext *pb, int i, int range)
-{
- av_assert2(range >= 1);
- av_assert2(i <= range);
-
- if (range == 2)
- put_bits(pb, 1, i ^ 1);
- else
- set_ue_golomb(pb, i);
-}
-
-/**
- * write signed exp golomb code. 16 bits at most.
- */
-static inline void set_se_golomb(PutBitContext *pb, int i)
-{
- i = 2 * i - 1;
- if (i < 0)
- i ^= -1; //FIXME check if gcc does the right thing
- set_ue_golomb(pb, i);
-}
-
-/**
- * write unsigned golomb rice code (ffv1).
- */
-static inline void set_ur_golomb(PutBitContext *pb, int i, int k, int limit,
- int esc_len)
-{
- int e;
-
- av_assert2(i >= 0);
-
- e = i >> k;
- if (e < limit)
- put_bits(pb, e + k + 1, (1 << k) + av_mod_uintp2(i, k));
- else
- put_bits(pb, limit + esc_len, i - limit + 1);
-}
-
-/**
- * write unsigned golomb rice code (jpegls).
- */
-static inline void set_ur_golomb_jpegls(PutBitContext *pb, int i, int k,
- int limit, int esc_len)
-{
- int e;
-
- av_assert2(i >= 0);
-
- e = (i >> k) + 1;
- if (e < limit) {
- while (e > 31) {
- put_bits(pb, 31, 0);
- e -= 31;
- }
- put_bits(pb, e, 1);
- if (k)
- put_sbits(pb, k, i);
- } else {
- while (limit > 31) {
- put_bits(pb, 31, 0);
- limit -= 31;
- }
- put_bits(pb, limit, 1);
- put_bits(pb, esc_len, i - 1);
- }
-}
-
-/**
- * write signed golomb rice code (ffv1).
- */
-static inline void set_sr_golomb(PutBitContext *pb, int i, int k, int limit,
- int esc_len)
-{
- int v;
-
- v = -2 * i - 1;
- v ^= (v >> 31);
-
- set_ur_golomb(pb, v, k, limit, esc_len);
-}
-
-/**
- * write signed golomb rice code (flac).
- */
-static inline void set_sr_golomb_flac(PutBitContext *pb, int i, int k,
- int limit, int esc_len)
-{
- int v;
-
- v = -2 * i - 1;
- v ^= (v >> 31);
-
- set_ur_golomb_jpegls(pb, v, k, limit, esc_len);
-}
-
#endif /* AVCODEC_GOLOMB_H */
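The encoder-side set_*_golomb() helpers are gone from this header, leaving only the readers; the unsigned code they read is k leading zero bits, a 1 marker, then k further bits, and decodes to the (k+1)-bit marker-plus-suffix value minus one. A tiny self-contained decoder over a 32-bit window, for illustration only (this is not the FFmpeg implementation and it assumes the whole code fits in the window, i.e. the window is nonzero and the code is at most 31 bits long):

    #include <stdint.h>

    /* Decode one unsigned Exp-Golomb code from the top of 'window'.
     * Example: the bits 00101... decode to 4 and consume 5 bits. */
    static uint32_t ue_from_window(uint32_t window, unsigned *consumed)
    {
        unsigned k = 0;
        while (!(window & (0x80000000u >> k)))   /* count leading zero bits */
            k++;
        *consumed = 2 * k + 1;
        return (window >> (32 - *consumed)) - 1; /* marker + k suffix bits, minus 1 */
    }

get_ue_golomb() above does the same arithmetic against the cached bitstream reader, with the log < 7 check rejecting codes too long for its 32-bit fast path.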
diff --git a/media/ffvpx/libavcodec/h264chroma.h b/media/ffvpx/libavcodec/h264chroma.h
index 5c89fd12df..b8f9c8f4fc 100644
--- a/media/ffvpx/libavcodec/h264chroma.h
+++ b/media/ffvpx/libavcodec/h264chroma.h
@@ -22,7 +22,7 @@
#include <stddef.h>
#include <stdint.h>
-typedef void (*h264_chroma_mc_func)(uint8_t *dst /*align 8*/, uint8_t *src /*align 1*/, ptrdiff_t srcStride, int h, int x, int y);
+typedef void (*h264_chroma_mc_func)(uint8_t *dst /*align 8*/, const uint8_t *src /*align 1*/, ptrdiff_t srcStride, int h, int x, int y);
typedef struct H264ChromaContext {
h264_chroma_mc_func put_h264_chroma_pixels_tab[4];
@@ -36,5 +36,6 @@ void ff_h264chroma_init_arm(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_x86(H264ChromaContext *c, int bit_depth);
void ff_h264chroma_init_mips(H264ChromaContext *c, int bit_depth);
+void ff_h264chroma_init_loongarch(H264ChromaContext *c, int bit_depth);
#endif /* AVCODEC_H264CHROMA_H */
diff --git a/media/ffvpx/libavcodec/h264dsp.h b/media/ffvpx/libavcodec/h264dsp.h
index cbea3173c6..e0880c4d88 100644
--- a/media/ffvpx/libavcodec/h264dsp.h
+++ b/media/ffvpx/libavcodec/h264dsp.h
@@ -89,16 +89,16 @@ typedef struct H264DSPContext {
void (*h264_idct_add16)(uint8_t *dst /*align 16*/, const int *blockoffset,
int16_t *block /*align 16*/, int stride,
- const uint8_t nnzc[15 * 8]);
+ const uint8_t nnzc[5 * 8]);
void (*h264_idct8_add4)(uint8_t *dst /*align 16*/, const int *blockoffset,
int16_t *block /*align 16*/, int stride,
- const uint8_t nnzc[15 * 8]);
+ const uint8_t nnzc[5 * 8]);
void (*h264_idct_add8)(uint8_t **dst /*align 16*/, const int *blockoffset,
int16_t *block /*align 16*/, int stride,
const uint8_t nnzc[15 * 8]);
void (*h264_idct_add16intra)(uint8_t *dst /*align 16*/, const int *blockoffset,
int16_t *block /*align 16*/,
- int stride, const uint8_t nnzc[15 * 8]);
+ int stride, const uint8_t nnzc[5 * 8]);
void (*h264_luma_dc_dequant_idct)(int16_t *output,
int16_t *input /*align 16*/, int qmul);
void (*h264_chroma_dc_dequant_idct)(int16_t *block, int qmul);
@@ -129,5 +129,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc);
void ff_h264dsp_init_mips(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc);
+void ff_h264dsp_init_loongarch(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc);
#endif /* AVCODEC_H264DSP_H */
diff --git a/media/ffvpx/libavcodec/h264pred.c b/media/ffvpx/libavcodec/h264pred.c
index f9f3af50e2..da76ade34b 100644
--- a/media/ffvpx/libavcodec/h264pred.c
+++ b/media/ffvpx/libavcodec/h264pred.c
@@ -25,11 +25,13 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
+#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
-#include "avcodec.h"
+#include "codec_id.h"
#include "h264pred.h"
+#include "mathops.h"
#define BIT_DEPTH 8
#include "h264pred_template.c"
@@ -51,6 +53,30 @@
#include "h264pred_template.c"
#undef BIT_DEPTH
+static void pred4x4_127_dc_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ int stride = _stride;
+ const uint32_t a = 0x7F7F7F7FU;
+
+ AV_WN32A(src + 0 * stride, a);
+ AV_WN32A(src + 1 * stride, a);
+ AV_WN32A(src + 2 * stride, a);
+ AV_WN32A(src + 3 * stride, a);
+}
+
+static void pred4x4_129_dc_c(uint8_t *src, const uint8_t *topright,
+ ptrdiff_t _stride)
+{
+ int stride = _stride;
+ const uint32_t a = 0x81818181U;
+
+ AV_WN32A(src + 0 * stride, a);
+ AV_WN32A(src + 1 * stride, a);
+ AV_WN32A(src + 2 * stride, a);
+ AV_WN32A(src + 3 * stride, a);
+}
+
static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright,
ptrdiff_t stride)
{
@@ -419,56 +445,19 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
#define FUNCD(a) a ## _c
#define H264_PRED(depth) \
- if(codec_id != AV_CODEC_ID_RV40){\
- if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
- h->pred4x4[VERT_PRED ]= FUNCD(pred4x4_vertical_vp8);\
- h->pred4x4[HOR_PRED ]= FUNCD(pred4x4_horizontal_vp8);\
- } else {\
- h->pred4x4[VERT_PRED ]= FUNCC(pred4x4_vertical , depth);\
- h->pred4x4[HOR_PRED ]= FUNCC(pred4x4_horizontal , depth);\
- }\
- h->pred4x4[DC_PRED ]= FUNCC(pred4x4_dc , depth);\
- if(codec_id == AV_CODEC_ID_SVQ3)\
- h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCD(pred4x4_down_left_svq3);\
- else\
- h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCC(pred4x4_down_left , depth);\
- h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right , depth);\
- h->pred4x4[VERT_RIGHT_PRED ]= FUNCC(pred4x4_vertical_right , depth);\
- h->pred4x4[HOR_DOWN_PRED ]= FUNCC(pred4x4_horizontal_down , depth);\
- if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
- h->pred4x4[VERT_LEFT_PRED ]= FUNCD(pred4x4_vertical_left_vp8);\
- } else\
- h->pred4x4[VERT_LEFT_PRED ]= FUNCC(pred4x4_vertical_left , depth);\
- h->pred4x4[HOR_UP_PRED ]= FUNCC(pred4x4_horizontal_up , depth);\
- if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
- h->pred4x4[LEFT_DC_PRED ]= FUNCC(pred4x4_left_dc , depth);\
- h->pred4x4[TOP_DC_PRED ]= FUNCC(pred4x4_top_dc , depth);\
- } else {\
- h->pred4x4[TM_VP8_PRED ]= FUNCD(pred4x4_tm_vp8);\
- h->pred4x4[DC_127_PRED ]= FUNCC(pred4x4_127_dc , depth);\
- h->pred4x4[DC_129_PRED ]= FUNCC(pred4x4_129_dc , depth);\
- h->pred4x4[VERT_VP8_PRED ]= FUNCC(pred4x4_vertical , depth);\
- h->pred4x4[HOR_VP8_PRED ]= FUNCC(pred4x4_horizontal , depth);\
- }\
- if (codec_id != AV_CODEC_ID_VP8)\
- h->pred4x4[DC_128_PRED ]= FUNCC(pred4x4_128_dc , depth);\
- }else{\
- h->pred4x4[VERT_PRED ]= FUNCC(pred4x4_vertical , depth);\
- h->pred4x4[HOR_PRED ]= FUNCC(pred4x4_horizontal , depth);\
- h->pred4x4[DC_PRED ]= FUNCC(pred4x4_dc , depth);\
- h->pred4x4[DIAG_DOWN_LEFT_PRED ]= FUNCD(pred4x4_down_left_rv40);\
- h->pred4x4[DIAG_DOWN_RIGHT_PRED]= FUNCC(pred4x4_down_right , depth);\
- h->pred4x4[VERT_RIGHT_PRED ]= FUNCC(pred4x4_vertical_right , depth);\
- h->pred4x4[HOR_DOWN_PRED ]= FUNCC(pred4x4_horizontal_down , depth);\
- h->pred4x4[VERT_LEFT_PRED ]= FUNCD(pred4x4_vertical_left_rv40);\
- h->pred4x4[HOR_UP_PRED ]= FUNCD(pred4x4_horizontal_up_rv40);\
- h->pred4x4[LEFT_DC_PRED ]= FUNCC(pred4x4_left_dc , depth);\
- h->pred4x4[TOP_DC_PRED ]= FUNCC(pred4x4_top_dc , depth);\
- h->pred4x4[DC_128_PRED ]= FUNCC(pred4x4_128_dc , depth);\
- h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_down_left_rv40_nodown);\
- h->pred4x4[HOR_UP_PRED_RV40_NODOWN]= FUNCD(pred4x4_horizontal_up_rv40_nodown);\
- h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN]= FUNCD(pred4x4_vertical_left_rv40_nodown);\
- }\
+ h->pred4x4[VERT_PRED ] = FUNCC(pred4x4_vertical, depth);\
+ h->pred4x4[HOR_PRED ] = FUNCC(pred4x4_horizontal, depth);\
+ h->pred4x4[DC_PRED ] = FUNCC(pred4x4_dc, depth);\
+ h->pred4x4[DIAG_DOWN_LEFT_PRED ] = FUNCC(pred4x4_down_left, depth);\
+ h->pred4x4[DIAG_DOWN_RIGHT_PRED] = FUNCC(pred4x4_down_right, depth);\
+ h->pred4x4[VERT_RIGHT_PRED ] = FUNCC(pred4x4_vertical_right, depth);\
+ h->pred4x4[HOR_DOWN_PRED ] = FUNCC(pred4x4_horizontal_down, depth);\
+ h->pred4x4[VERT_LEFT_PRED ] = FUNCC(pred4x4_vertical_left, depth);\
+ h->pred4x4[HOR_UP_PRED ] = FUNCC(pred4x4_horizontal_up, depth);\
+ h->pred4x4[LEFT_DC_PRED ] = FUNCC(pred4x4_left_dc, depth);\
+ h->pred4x4[TOP_DC_PRED ] = FUNCC(pred4x4_top_dc, depth);\
+ if (depth > 8 || codec_id != AV_CODEC_ID_VP8)\
+ h->pred4x4[DC_128_PRED ] = FUNCC(pred4x4_128_dc, depth);\
\
h->pred8x8l[VERT_PRED ]= FUNCC(pred8x8l_vertical , depth);\
h->pred8x8l[HOR_PRED ]= FUNCC(pred8x8l_horizontal , depth);\
@@ -486,20 +475,15 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
if (chroma_format_idc <= 1) {\
h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x8_vertical , depth);\
h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x8_horizontal , depth);\
+ h->pred8x8[PLANE_PRED8x8] = FUNCC(pred8x8_plane, depth);\
} else {\
h->pred8x8[VERT_PRED8x8 ]= FUNCC(pred8x16_vertical , depth);\
h->pred8x8[HOR_PRED8x8 ]= FUNCC(pred8x16_horizontal , depth);\
+ h->pred8x8[PLANE_PRED8x8] = FUNCC(pred8x16_plane, depth);\
}\
- if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) {\
- if (chroma_format_idc <= 1) {\
- h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x8_plane , depth);\
- } else {\
- h->pred8x8[PLANE_PRED8x8]= FUNCC(pred8x16_plane , depth);\
- }\
- } else\
- h->pred8x8[PLANE_PRED8x8]= FUNCD(pred8x8_tm_vp8);\
- if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 && \
- codec_id != AV_CODEC_ID_VP8) {\
+ if (depth > 8 || (codec_id != AV_CODEC_ID_RV40 && \
+ codec_id != AV_CODEC_ID_VP7 && \
+ codec_id != AV_CODEC_ID_VP8)) { \
if (chroma_format_idc <= 1) {\
h->pred8x8[DC_PRED8x8 ]= FUNCC(pred8x8_dc , depth);\
h->pred8x8[LEFT_DC_PRED8x8]= FUNCC(pred8x8_left_dc , depth);\
@@ -521,10 +505,6 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
h->pred8x8[DC_PRED8x8 ]= FUNCD(pred8x8_dc_rv40);\
h->pred8x8[LEFT_DC_PRED8x8]= FUNCD(pred8x8_left_dc_rv40);\
h->pred8x8[TOP_DC_PRED8x8 ]= FUNCD(pred8x8_top_dc_rv40);\
- if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {\
- h->pred8x8[DC_127_PRED8x8]= FUNCC(pred8x8_127_dc , depth);\
- h->pred8x8[DC_129_PRED8x8]= FUNCC(pred8x8_129_dc , depth);\
- }\
}\
if (chroma_format_idc <= 1) {\
h->pred8x8[DC_128_PRED8x8 ]= FUNCC(pred8x8_128_dc , depth);\
@@ -535,23 +515,7 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
h->pred16x16[DC_PRED8x8 ]= FUNCC(pred16x16_dc , depth);\
h->pred16x16[VERT_PRED8x8 ]= FUNCC(pred16x16_vertical , depth);\
h->pred16x16[HOR_PRED8x8 ]= FUNCC(pred16x16_horizontal , depth);\
- switch(codec_id){\
- case AV_CODEC_ID_SVQ3:\
- h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_plane_svq3);\
- break;\
- case AV_CODEC_ID_RV40:\
- h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_plane_rv40);\
- break;\
- case AV_CODEC_ID_VP7:\
- case AV_CODEC_ID_VP8:\
- h->pred16x16[PLANE_PRED8x8 ]= FUNCD(pred16x16_tm_vp8);\
- h->pred16x16[DC_127_PRED8x8]= FUNCC(pred16x16_127_dc , depth);\
- h->pred16x16[DC_129_PRED8x8]= FUNCC(pred16x16_129_dc , depth);\
- break;\
- default:\
- h->pred16x16[PLANE_PRED8x8 ]= FUNCC(pred16x16_plane , depth);\
- break;\
- }\
+ h->pred16x16[PLANE_PRED8x8 ]= FUNCC(pred16x16_plane , depth);\
h->pred16x16[LEFT_DC_PRED8x8]= FUNCC(pred16x16_left_dc , depth);\
h->pred16x16[TOP_DC_PRED8x8 ]= FUNCC(pred16x16_top_dc , depth);\
h->pred16x16[DC_128_PRED8x8 ]= FUNCC(pred16x16_128_dc , depth);\
@@ -564,8 +528,8 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
h->pred8x8l_filter_add [VERT_PRED ]= FUNCC(pred8x8l_vertical_filter_add , depth);\
h->pred8x8l_filter_add [ HOR_PRED ]= FUNCC(pred8x8l_horizontal_filter_add , depth);\
if (chroma_format_idc <= 1) {\
- h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x8_vertical_add , depth);\
- h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x8_horizontal_add , depth);\
+ h->pred8x8_add[VERT_PRED8x8] = FUNCC(pred8x8_vertical_add, depth);\
+ h->pred8x8_add[ HOR_PRED8x8] = FUNCC(pred8x8_horizontal_add, depth);\
} else {\
h->pred8x8_add [VERT_PRED8x8]= FUNCC(pred8x16_vertical_add , depth);\
h->pred8x8_add [ HOR_PRED8x8]= FUNCC(pred8x16_horizontal_add , depth);\
@@ -589,16 +553,50 @@ av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id,
default:
av_assert0(bit_depth<=8);
H264_PRED(8)
+ switch (codec_id) {
+ case AV_CODEC_ID_SVQ3:
+ h->pred4x4[DIAG_DOWN_LEFT_PRED] = FUNCD(pred4x4_down_left_svq3);
+ h->pred16x16[PLANE_PRED8x8 ] = FUNCD(pred16x16_plane_svq3);
+ break;
+ case AV_CODEC_ID_RV40:
+ h->pred4x4[DIAG_DOWN_LEFT_PRED] = FUNCD(pred4x4_down_left_rv40);
+ h->pred4x4[VERT_LEFT_PRED ] = FUNCD(pred4x4_vertical_left_rv40);
+ h->pred4x4[HOR_UP_PRED ] = FUNCD(pred4x4_horizontal_up_rv40);
+ h->pred4x4[DIAG_DOWN_LEFT_PRED_RV40_NODOWN] = FUNCD(pred4x4_down_left_rv40_nodown);
+ h->pred4x4[HOR_UP_PRED_RV40_NODOWN] = FUNCD(pred4x4_horizontal_up_rv40_nodown);
+ h->pred4x4[VERT_LEFT_PRED_RV40_NODOWN] = FUNCD(pred4x4_vertical_left_rv40_nodown);
+ h->pred16x16[PLANE_PRED8x8 ] = FUNCD(pred16x16_plane_rv40);
+ break;
+ case AV_CODEC_ID_VP7:
+ case AV_CODEC_ID_VP8:
+ h->pred4x4[VERT_PRED ] = FUNCD(pred4x4_vertical_vp8);
+ h->pred4x4[HOR_PRED ] = FUNCD(pred4x4_horizontal_vp8);
+ h->pred4x4[VERT_LEFT_PRED ] = FUNCD(pred4x4_vertical_left_vp8);
+ h->pred4x4[TM_VP8_PRED ] = FUNCD(pred4x4_tm_vp8);
+ h->pred4x4[VERT_VP8_PRED ] = FUNCC(pred4x4_vertical, 8);
+ h->pred4x4[DC_127_PRED ] = FUNCD(pred4x4_127_dc);
+ h->pred4x4[DC_129_PRED ] = FUNCD(pred4x4_129_dc);
+ h->pred4x4[HOR_VP8_PRED ] = FUNCC(pred4x4_horizontal, 8);
+ h->pred8x8[PLANE_PRED8x8 ] = FUNCD(pred8x8_tm_vp8);
+ h->pred8x8[DC_127_PRED8x8 ] = FUNCC(pred8x8_127_dc, 8);
+ h->pred8x8[DC_129_PRED8x8 ] = FUNCC(pred8x8_129_dc, 8);
+ h->pred16x16[PLANE_PRED8x8 ] = FUNCD(pred16x16_tm_vp8);
+ h->pred16x16[DC_127_PRED8x8] = FUNCC(pred16x16_127_dc, 8);
+ h->pred16x16[DC_129_PRED8x8] = FUNCC(pred16x16_129_dc, 8);
+ break;
+ }
break;
}
- #if ARCH_AARCH64 == 1
- ff_h264_pred_init_aarch64(h, codec_id, bit_depth, chroma_format_idc);
- #elif ARCH_ARM == 1
- ff_h264_pred_init_arm(h, codec_id, bit_depth, chroma_format_idc);
- #elif ARCH_X86 == 1
- ff_h264_pred_init_x86(h, codec_id, bit_depth, chroma_format_idc);
- #elif ARCH_MIPS == 1
- ff_h264_pred_init_mips(h, codec_id, bit_depth, chroma_format_idc);
- #endif
+#if ARCH_AARCH64 == 1
+ ff_h264_pred_init_aarch64(h, codec_id, bit_depth, chroma_format_idc);
+#elif ARCH_ARM == 1
+ ff_h264_pred_init_arm(h, codec_id, bit_depth, chroma_format_idc);
+#elif ARCH_X86 == 1
+ ff_h264_pred_init_x86(h, codec_id, bit_depth, chroma_format_idc);
+#elif ARCH_MIPS == 1
+ ff_h264_pred_init_mips(h, codec_id, bit_depth, chroma_format_idc);
+#elif ARCH_LOONGARCH == 1
+ ff_h264_pred_init_loongarch(h, codec_id, bit_depth, chroma_format_idc);
+#endif
}
diff --git a/media/ffvpx/libavcodec/h264pred.h b/media/ffvpx/libavcodec/h264pred.h
index 2863dc9bd1..cb008548fc 100644
--- a/media/ffvpx/libavcodec/h264pred.h
+++ b/media/ffvpx/libavcodec/h264pred.h
@@ -86,6 +86,8 @@
#define DC_129_PRED8x8 8
//@}
+#define PART_NOT_AVAILABLE -2
+
/**
* Context for storing H.264 prediction functions
*/
@@ -122,5 +124,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_mips(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
+void ff_h264_pred_init_loongarch(H264PredContext *h, int codec_id,
+ const int bit_depth, const int chroma_format_idc);
#endif /* AVCODEC_H264PRED_H */
diff --git a/media/ffvpx/libavcodec/h264pred_template.c b/media/ffvpx/libavcodec/h264pred_template.c
index 2b30fff70f..b5bc942a5e 100644
--- a/media/ffvpx/libavcodec/h264pred_template.c
+++ b/media/ffvpx/libavcodec/h264pred_template.c
@@ -111,32 +111,6 @@ static void FUNCC(pred4x4_128_dc)(uint8_t *_src, const uint8_t *topright,
AV_WN4PA(src+3*stride, a);
}
-static void FUNCC(pred4x4_127_dc)(uint8_t *_src, const uint8_t *topright,
- ptrdiff_t _stride)
-{
- pixel *src = (pixel*)_src;
- int stride = _stride>>(sizeof(pixel)-1);
- const pixel4 a = PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))-1);
-
- AV_WN4PA(src+0*stride, a);
- AV_WN4PA(src+1*stride, a);
- AV_WN4PA(src+2*stride, a);
- AV_WN4PA(src+3*stride, a);
-}
-
-static void FUNCC(pred4x4_129_dc)(uint8_t *_src, const uint8_t *topright,
- ptrdiff_t _stride)
-{
- pixel *src = (pixel*)_src;
- int stride = _stride>>(sizeof(pixel)-1);
- const pixel4 a = PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))+1);
-
- AV_WN4PA(src+0*stride, a);
- AV_WN4PA(src+1*stride, a);
- AV_WN4PA(src+2*stride, a);
- AV_WN4PA(src+3*stride, a);
-}
-
#define LOAD_TOP_RIGHT_EDGE\
const unsigned av_unused t4 = topright[0];\
@@ -427,9 +401,11 @@ static void FUNCC(pred16x16_##n##_dc)(uint8_t *_src, ptrdiff_t stride)\
PREDICT_16x16_DC(PIXEL_SPLAT_X4(v));\
}
-PRED16x16_X(127, (1<<(BIT_DEPTH-1))-1)
PRED16x16_X(128, (1<<(BIT_DEPTH-1))+0)
+#if BIT_DEPTH == 8
+PRED16x16_X(127, (1<<(BIT_DEPTH-1))-1)
PRED16x16_X(129, (1<<(BIT_DEPTH-1))+1)
+#endif
static inline void FUNCC(pred16x16_plane_compat)(uint8_t *_src,
ptrdiff_t _stride,
@@ -551,9 +527,11 @@ static void FUNCC(pred8x8_##n##_dc)(uint8_t *_src, ptrdiff_t stride)\
}\
}
-PRED8x8_X(127, (1<<(BIT_DEPTH-1))-1)
PRED8x8_X(128, (1<<(BIT_DEPTH-1))+0)
+#if BIT_DEPTH == 8
+PRED8x8_X(127, (1<<(BIT_DEPTH-1))-1)
PRED8x8_X(129, (1<<(BIT_DEPTH-1))+1)
+#endif
static void FUNCC(pred8x16_128_dc)(uint8_t *_src, ptrdiff_t stride)
{
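
The 127/129 DC variants above are generated only for 8-bit builds because the splat constant (1 << (BIT_DEPTH-1)) ± 1 equals 127/129 only there, and the only users wired up in ff_h264_pred_init() are the 8-bit VP7/VP8 paths. A tiny check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* 8-bit gives 127/129; higher bit depths would give 511/513, 2047/2049, ... */
        for (int bit_depth = 8; bit_depth <= 12; bit_depth += 2)
            printf("BIT_DEPTH=%d: %d / %d\n", bit_depth,
                   (1 << (bit_depth - 1)) - 1, (1 << (bit_depth - 1)) + 1);
        return 0;
    }
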
diff --git a/media/ffvpx/libavcodec/hpeldsp.h b/media/ffvpx/libavcodec/hpeldsp.h
index 768139bfc9..45e81b10a5 100644
--- a/media/ffvpx/libavcodec/hpeldsp.h
+++ b/media/ffvpx/libavcodec/hpeldsp.h
@@ -102,5 +102,6 @@ void ff_hpeldsp_init_arm(HpelDSPContext *c, int flags);
void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags);
void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags);
void ff_hpeldsp_init_mips(HpelDSPContext *c, int flags);
+void ff_hpeldsp_init_loongarch(HpelDSPContext *c, int flags);
#endif /* AVCODEC_HPELDSP_H */
diff --git a/media/ffvpx/libavcodec/hwaccels.h b/media/ffvpx/libavcodec/hwaccels.h
index 7d73da8676..aca55831f3 100644
--- a/media/ffvpx/libavcodec/hwaccels.h
+++ b/media/ffvpx/libavcodec/hwaccels.h
@@ -21,6 +21,12 @@
#include "avcodec.h"
+extern const AVHWAccel ff_av1_d3d11va_hwaccel;
+extern const AVHWAccel ff_av1_d3d11va2_hwaccel;
+extern const AVHWAccel ff_av1_dxva2_hwaccel;
+extern const AVHWAccel ff_av1_nvdec_hwaccel;
+extern const AVHWAccel ff_av1_vaapi_hwaccel;
+extern const AVHWAccel ff_av1_vdpau_hwaccel;
extern const AVHWAccel ff_h263_vaapi_hwaccel;
extern const AVHWAccel ff_h263_videotoolbox_hwaccel;
extern const AVHWAccel ff_h264_d3d11va_hwaccel;
@@ -42,7 +48,6 @@ extern const AVHWAccel ff_mjpeg_vaapi_hwaccel;
extern const AVHWAccel ff_mpeg1_nvdec_hwaccel;
extern const AVHWAccel ff_mpeg1_vdpau_hwaccel;
extern const AVHWAccel ff_mpeg1_videotoolbox_hwaccel;
-extern const AVHWAccel ff_mpeg1_xvmc_hwaccel;
extern const AVHWAccel ff_mpeg2_d3d11va_hwaccel;
extern const AVHWAccel ff_mpeg2_d3d11va2_hwaccel;
extern const AVHWAccel ff_mpeg2_nvdec_hwaccel;
@@ -50,11 +55,11 @@ extern const AVHWAccel ff_mpeg2_dxva2_hwaccel;
extern const AVHWAccel ff_mpeg2_vaapi_hwaccel;
extern const AVHWAccel ff_mpeg2_vdpau_hwaccel;
extern const AVHWAccel ff_mpeg2_videotoolbox_hwaccel;
-extern const AVHWAccel ff_mpeg2_xvmc_hwaccel;
extern const AVHWAccel ff_mpeg4_nvdec_hwaccel;
extern const AVHWAccel ff_mpeg4_vaapi_hwaccel;
extern const AVHWAccel ff_mpeg4_vdpau_hwaccel;
extern const AVHWAccel ff_mpeg4_videotoolbox_hwaccel;
+extern const AVHWAccel ff_prores_videotoolbox_hwaccel;
extern const AVHWAccel ff_vc1_d3d11va_hwaccel;
extern const AVHWAccel ff_vc1_d3d11va2_hwaccel;
extern const AVHWAccel ff_vc1_dxva2_hwaccel;
@@ -68,6 +73,8 @@ extern const AVHWAccel ff_vp9_d3d11va2_hwaccel;
extern const AVHWAccel ff_vp9_dxva2_hwaccel;
extern const AVHWAccel ff_vp9_nvdec_hwaccel;
extern const AVHWAccel ff_vp9_vaapi_hwaccel;
+extern const AVHWAccel ff_vp9_vdpau_hwaccel;
+extern const AVHWAccel ff_vp9_videotoolbox_hwaccel;
extern const AVHWAccel ff_wmv3_d3d11va_hwaccel;
extern const AVHWAccel ff_wmv3_d3d11va2_hwaccel;
extern const AVHWAccel ff_wmv3_dxva2_hwaccel;
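
These AVHWAccel entries are selected at open time when the caller has attached a hardware device or frames context to the decoder. A hedged sketch of the device side of that setup, using only public API (error handling trimmed):

    #include <libavcodec/avcodec.h>
    #include <libavutil/hwcontext.h>

    /* Attach a VAAPI device to a decoder context so that one of the hwaccels
     * listed above can be picked when the codec is opened. */
    static int attach_vaapi_device(AVCodecContext *avctx)
    {
        AVBufferRef *device_ref = NULL;
        int ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                         NULL, NULL, 0);
        if (ret < 0)
            return ret;
        avctx->hw_device_ctx = device_ref;  /* the codec context now owns this ref */
        return 0;
    }
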
diff --git a/media/ffvpx/libavcodec/hwaccel.h b/media/ffvpx/libavcodec/hwconfig.h
index 3aaa92571c..721424912c 100644
--- a/media/ffvpx/libavcodec/hwaccel.h
+++ b/media/ffvpx/libavcodec/hwconfig.h
@@ -16,8 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef AVCODEC_HWACCEL_H
-#define AVCODEC_HWACCEL_H
+#ifndef AVCODEC_HWCONFIG_H
+#define AVCODEC_HWCONFIG_H
#include "avcodec.h"
#include "hwaccels.h"
@@ -78,7 +78,23 @@ typedef struct AVCodecHWConfigInternal {
HW_CONFIG_HWACCEL(1, 1, 1, VIDEOTOOLBOX, VIDEOTOOLBOX, ff_ ## codec ## _videotoolbox_hwaccel)
#define HWACCEL_D3D11VA(codec) \
HW_CONFIG_HWACCEL(0, 0, 1, D3D11VA_VLD, NONE, ff_ ## codec ## _d3d11va_hwaccel)
-#define HWACCEL_XVMC(codec) \
- HW_CONFIG_HWACCEL(0, 0, 1, XVMC, NONE, ff_ ## codec ## _xvmc_hwaccel)
-#endif /* AVCODEC_HWACCEL_H */
+#define HW_CONFIG_ENCODER(device, frames, ad_hoc, format, device_type_) \
+ &(const AVCodecHWConfigInternal) { \
+ .public = { \
+ .pix_fmt = AV_PIX_FMT_ ## format, \
+ .methods = (device ? AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX : 0) | \
+ (frames ? AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX : 0) | \
+ (ad_hoc ? AV_CODEC_HW_CONFIG_METHOD_AD_HOC : 0), \
+ .device_type = AV_HWDEVICE_TYPE_ ## device_type_, \
+ }, \
+ .hwaccel = NULL, \
+ }
+
+#define HW_CONFIG_ENCODER_DEVICE(format, device_type_) \
+ HW_CONFIG_ENCODER(1, 0, 0, format, device_type_)
+
+#define HW_CONFIG_ENCODER_FRAMES(format, device_type_) \
+ HW_CONFIG_ENCODER(0, 1, 0, format, device_type_)
+
+#endif /* AVCODEC_HWCONFIG_H */
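
The new HW_CONFIG_ENCODER* macros build AVCodecHWConfigInternal entries with no hwaccel attached, only a pixel format, a device type and the accepted setup methods. A hedged sketch of how an encoder might publish such a list (the array name is illustrative):

    #include "hwconfig.h"

    /* Hypothetical hw_configs table for a VAAPI-based encoder: accept either an
     * AVHWFramesContext in the VAAPI pixel format or a bare VAAPI device. */
    static const AVCodecHWConfigInternal *const example_encode_hw_configs[] = {
        HW_CONFIG_ENCODER_FRAMES(VAAPI, VAAPI),
        HW_CONFIG_ENCODER_DEVICE(NONE,  VAAPI),
        NULL,
    };
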
diff --git a/media/ffvpx/libavcodec/idctdsp.h b/media/ffvpx/libavcodec/idctdsp.h
index ca21a31a02..7224463349 100644
--- a/media/ffvpx/libavcodec/idctdsp.h
+++ b/media/ffvpx/libavcodec/idctdsp.h
@@ -25,15 +25,6 @@
#include "avcodec.h"
-/**
- * Scantable.
- */
-typedef struct ScanTable {
- const uint8_t *scantable;
- uint8_t permutated[64];
- uint8_t raster_end[64];
-} ScanTable;
-
enum idct_permutation_type {
FF_IDCT_PERM_NONE,
FF_IDCT_PERM_LIBMPEG2,
@@ -43,8 +34,8 @@ enum idct_permutation_type {
FF_IDCT_PERM_SSE2,
};
-void ff_init_scantable(uint8_t *permutation, ScanTable *st,
- const uint8_t *src_scantable);
+void ff_permute_scantable(uint8_t dst[64], const uint8_t src[64],
+ const uint8_t permutation[64]);
void ff_init_scantable_permutation(uint8_t *idct_permutation,
enum idct_permutation_type perm_type);
int ff_init_scantable_permutation_x86(uint8_t *idct_permutation,
@@ -114,9 +105,13 @@ void ff_idctdsp_init_arm(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
void ff_idctdsp_init_ppc(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
+void ff_idctdsp_init_riscv(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
void ff_idctdsp_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
void ff_idctdsp_init_mips(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
+void ff_idctdsp_init_loongarch(IDCTDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
#endif /* AVCODEC_IDCTDSP_H */
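
ff_permute_scantable() replaces the old ScanTable-filling ff_init_scantable(): it writes an IDCT-permuted scan order into a caller-provided 64-entry array. A rough sketch of the intended semantics (an assumption about the body, based on what the removed helper computed):

    #include <stdint.h>

    /* Sketch: remap a scan order through an IDCT coefficient permutation,
     * i.e. dst[i] = permutation[src[i]] for all 64 positions. */
    static void permute_scantable_sketch(uint8_t dst[64], const uint8_t src[64],
                                         const uint8_t permutation[64])
    {
        for (int i = 0; i < 64; i++)
            dst[i] = permutation[src[i]];
    }
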
diff --git a/media/ffvpx/libavcodec/imgconvert.c b/media/ffvpx/libavcodec/imgconvert.c
index 1fd636c83d..96511ac7d6 100644
--- a/media/ffvpx/libavcodec/imgconvert.c
+++ b/media/ffvpx/libavcodec/imgconvert.c
@@ -25,43 +25,8 @@
*/
#include "avcodec.h"
-#include "internal.h"
-#include "mathops.h"
-#include "libavutil/avassert.h"
-#include "libavutil/colorspace.h"
-#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
-#include "libavutil/internal.h"
-#include "libavutil/imgutils.h"
-
-#if FF_API_GETCHROMA
-void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
-{
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
- av_assert0(desc);
- *h_shift = desc->log2_chroma_w;
- *v_shift = desc->log2_chroma_h;
-}
-#endif
-
-int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt,
- enum AVPixelFormat src_pix_fmt,
- int has_alpha)
-{
- return av_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
-}
-
-enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
- enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
-{
- return av_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, has_alpha, loss_ptr);
-}
-
-enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
- enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
-{
- return avcodec_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, has_alpha, loss_ptr);
-}
+#include "libavutil/pixfmt.h"
enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list,
enum AVPixelFormat src_pix_fmt,
@@ -73,7 +38,7 @@ enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *p
for (i=0; pix_fmt_list[i] != AV_PIX_FMT_NONE; i++) {
loss = loss_ptr ? *loss_ptr : 0;
- best = avcodec_find_best_pix_fmt_of_2(best, pix_fmt_list[i], src_pix_fmt, has_alpha, &loss);
+ best = av_find_best_pix_fmt_of_2(best, pix_fmt_list[i], src_pix_fmt, has_alpha, &loss);
}
if (loss_ptr)
@@ -81,152 +46,3 @@ enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *p
return best;
}
-#if FF_API_AVPICTURE
-FF_DISABLE_DEPRECATION_WARNINGS
-/* return true if yuv planar */
-static inline int is_yuv_planar(const AVPixFmtDescriptor *desc)
-{
- int i;
- int planes[4] = { 0 };
-
- if ( desc->flags & AV_PIX_FMT_FLAG_RGB
- || !(desc->flags & AV_PIX_FMT_FLAG_PLANAR))
- return 0;
-
- /* set the used planes */
- for (i = 0; i < desc->nb_components; i++)
- planes[desc->comp[i].plane] = 1;
-
- /* if there is an unused plane, the format is not planar */
- for (i = 0; i < desc->nb_components; i++)
- if (!planes[i])
- return 0;
- return 1;
-}
-
-int av_picture_crop(AVPicture *dst, const AVPicture *src,
- enum AVPixelFormat pix_fmt, int top_band, int left_band)
-{
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
- int y_shift;
- int x_shift;
- int max_step[4];
-
- if (pix_fmt < 0 || pix_fmt >= AV_PIX_FMT_NB)
- return -1;
-
- y_shift = desc->log2_chroma_h;
- x_shift = desc->log2_chroma_w;
- av_image_fill_max_pixsteps(max_step, NULL, desc);
-
- if (is_yuv_planar(desc)) {
- dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
- dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
- dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
- } else{
- if(top_band % (1<<y_shift) || left_band % (1<<x_shift))
- return -1;
- dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + (left_band * max_step[0]);
- }
-
- dst->linesize[0] = src->linesize[0];
- dst->linesize[1] = src->linesize[1];
- dst->linesize[2] = src->linesize[2];
- return 0;
-}
-
-int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
- enum AVPixelFormat pix_fmt, int padtop, int padbottom, int padleft, int padright,
- int *color)
-{
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
- uint8_t *optr;
- int y_shift;
- int x_shift;
- int yheight;
- int i, y;
- int max_step[4];
-
- if (pix_fmt < 0 || pix_fmt >= AV_PIX_FMT_NB)
- return -1;
-
- if (!is_yuv_planar(desc)) {
- if (src)
- return -1; //TODO: Not yet implemented
-
- av_image_fill_max_pixsteps(max_step, NULL, desc);
-
- if (padtop || padleft) {
- memset(dst->data[0], color[0],
- dst->linesize[0] * padtop + (padleft * max_step[0]));
- }
-
- if (padleft || padright) {
- optr = dst->data[0] + dst->linesize[0] * padtop +
- (dst->linesize[0] - (padright * max_step[0]));
- yheight = height - 1 - (padtop + padbottom);
- for (y = 0; y < yheight; y++) {
- memset(optr, color[0], (padleft + padright) * max_step[0]);
- optr += dst->linesize[0];
- }
- }
-
- if (padbottom || padright) {
- optr = dst->data[0] + dst->linesize[0] * (height - padbottom) -
- (padright * max_step[0]);
- memset(optr, color[0], dst->linesize[0] * padbottom +
- (padright * max_step[0]));
- }
-
- return 0;
- }
-
- for (i = 0; i < 3; i++) {
- x_shift = i ? desc->log2_chroma_w : 0;
- y_shift = i ? desc->log2_chroma_h : 0;
-
- if (padtop || padleft) {
- memset(dst->data[i], color[i],
- dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
- }
-
- if (padleft || padright) {
- optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
- (dst->linesize[i] - (padright >> x_shift));
- yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
- for (y = 0; y < yheight; y++) {
- memset(optr, color[i], (padleft + padright) >> x_shift);
- optr += dst->linesize[i];
- }
- }
-
- if (src) { /* first line */
- uint8_t *iptr = src->data[i];
- optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
- (padleft >> x_shift);
- memcpy(optr, iptr, (width - padleft - padright) >> x_shift);
- iptr += src->linesize[i];
- optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
- (dst->linesize[i] - (padright >> x_shift));
- yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
- for (y = 0; y < yheight; y++) {
- memset(optr, color[i], (padleft + padright) >> x_shift);
- memcpy(optr + ((padleft + padright) >> x_shift), iptr,
- (width - padleft - padright) >> x_shift);
- iptr += src->linesize[i];
- optr += dst->linesize[i];
- }
- }
-
- if (padbottom || padright) {
- optr = dst->data[i] + dst->linesize[i] *
- ((height - padbottom) >> y_shift) - (padright >> x_shift);
- memset(optr, color[i],dst->linesize[i] *
- (padbottom >> y_shift) + (padright >> x_shift));
- }
- }
-
- return 0;
-}
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif /* FF_API_AVPICTURE */
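
With the deprecated avcodec_* wrappers removed, format selection goes through avcodec_find_best_pix_fmt_of_list(), which now calls av_find_best_pix_fmt_of_2() directly. A small usage sketch with a hypothetical candidate list:

    #include <libavcodec/avcodec.h>
    #include <libavutil/pixfmt.h>

    /* Pick the best destination format for a YUV420P source from a candidate
     * list; loss_flags reports what (chroma, depth, alpha, ...) would be lost. */
    static enum AVPixelFormat pick_best_format(void)
    {
        static const enum AVPixelFormat candidates[] = {
            AV_PIX_FMT_YUV420P, AV_PIX_FMT_NV12, AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
        };
        int loss_flags = 0;
        return avcodec_find_best_pix_fmt_of_list(candidates, AV_PIX_FMT_YUV420P,
                                                 0 /* has_alpha */, &loss_flags);
    }
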
diff --git a/media/ffvpx/libavcodec/internal.h b/media/ffvpx/libavcodec/internal.h
index 5096ffa1d9..a283c52e01 100644
--- a/media/ffvpx/libavcodec/internal.h
+++ b/media/ffvpx/libavcodec/internal.h
@@ -33,59 +33,11 @@
#include "avcodec.h"
#include "config.h"
-/**
- * The codec does not modify any global variables in the init function,
- * allowing to call the init function without locking any global mutexes.
- */
-#define FF_CODEC_CAP_INIT_THREADSAFE (1 << 0)
-/**
- * The codec allows calling the close function for deallocation even if
- * the init function returned a failure. Without this capability flag, a
- * codec does such cleanup internally when returning failures from the
- * init function and does not expect the close function to be called at
- * all.
- */
-#define FF_CODEC_CAP_INIT_CLEANUP (1 << 1)
-/**
- * Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set
- * AVFrame.pkt_dts manually. If the flag is set, decode.c won't overwrite
- * this field. If it's unset, decode.c tries to guess the pkt_dts field
- * from the input AVPacket.
- */
-#define FF_CODEC_CAP_SETS_PKT_DTS (1 << 2)
-/**
- * The decoder extracts and fills its parameters even if the frame is
- * skipped due to the skip_frame setting.
- */
-#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM (1 << 3)
-/**
- * The decoder sets the cropping fields in the output frames manually.
- * If this cap is set, the generic code will initialize output frame
- * dimensions to coded rather than display values.
- */
-#define FF_CODEC_CAP_EXPORTS_CROPPING (1 << 4)
-/**
- * Codec initializes slice-based threading with a main function
- */
-#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF (1 << 5)
-
-#ifdef TRACE
-# define ff_tlog(ctx, ...) av_log(ctx, AV_LOG_TRACE, __VA_ARGS__)
-#else
-# define ff_tlog(ctx, ...) do { } while(0)
+#if CONFIG_LCMS2
+# include "fflcms2.h"
#endif
-
-#define FF_DEFAULT_QUANT_BIAS 999999
-
-#define FF_QSCALE_TYPE_MPEG1 0
-#define FF_QSCALE_TYPE_MPEG2 1
-#define FF_QSCALE_TYPE_H264 2
-#define FF_QSCALE_TYPE_VP56 3
-
-#define FF_SANE_NB_CHANNELS 256U
-
-#define FF_SIGNBIT(x) ((x) >> CHAR_BIT * sizeof(x) - 1)
+#define FF_SANE_NB_CHANNELS 512U
#if HAVE_SIMD_ALIGN_64
# define STRIDE_ALIGN 64 /* AVX-512 */
@@ -97,73 +49,39 @@
# define STRIDE_ALIGN 8
#endif
-typedef struct FramePool {
- /**
- * Pools for each data plane. For audio all the planes have the same size,
- * so only pools[0] is used.
- */
- AVBufferPool *pools[4];
-
- /*
- * Pool parameters
- */
- int format;
- int width, height;
- int stride_align[AV_NUM_DATA_POINTERS];
- int linesize[4];
- int planes;
- int channels;
- int samples;
-} FramePool;
-
-typedef struct DecodeSimpleContext {
- AVPacket *in_pkt;
- AVFrame *out_frame;
-} DecodeSimpleContext;
-
-typedef struct DecodeFilterContext {
- AVBSFContext **bsfs;
- int nb_bsfs;
-} DecodeFilterContext;
-
typedef struct AVCodecInternal {
/**
- * Whether the parent AVCodecContext is a copy of the context which had
- * init() called on it.
- * This is used by multithreading - shared tables and picture pointers
- * should be freed from the original context only.
+ * When using frame-threaded decoding, this field is set for the first
+ * worker thread (e.g. to decode extradata just once).
*/
int is_copy;
/**
- * Whether to allocate progress for frame threading.
- *
- * The codec must set it to 1 if it uses ff_thread_await/report_progress(),
- * then progress will be allocated in ff_thread_get_buffer(). The frames
- * then MUST be freed with ff_thread_release_buffer().
- *
- * If the codec does not need to call the progress functions (there are no
- * dependencies between the frames), it should leave this at 0. Then it can
- * decode straight to the user-provided frames (which the user will then
- * free with av_frame_unref()), there is no need to call
- * ff_thread_release_buffer().
+ * An audio frame with less than required samples has been submitted (and
+ * potentially padded with silence). Reject all subsequent frames.
*/
- int allocate_progress;
+ int last_audio_frame;
/**
- * An audio frame with less than required samples has been submitted and
- * padded with silence. Reject all subsequent frames.
+ * Audio encoders can set this flag during init to indicate that they
+ * want the small last frame to be padded to a multiple of pad_samples.
*/
- int last_audio_frame;
-
- AVFrame *to_free;
+ int pad_samples;
- FramePool *pool;
+ AVBufferRef *pool;
void *thread_ctx;
- DecodeSimpleContext ds;
- DecodeFilterContext filter;
+ /**
+ * This packet is used to hold the packet given to decoders
+ * implementing the .decode API; it is unused by the generic
+ * code for decoders implementing the .receive_frame API and
+ * may be freely used (but not freed) by them with the caveat
+ * that the packet will be unreferenced generically in
+ * avcodec_flush_buffers().
+ */
+ AVPacket *in_pkt;
+ struct AVBSFContext *bsf;
/**
* Properties (timestamps+side data) extracted from the last packet passed
@@ -177,9 +95,38 @@ typedef struct AVCodecInternal {
uint8_t *byte_buffer;
unsigned int byte_buffer_size;
+ /**
+ * This is set to AV_PKT_FLAG_KEY for encoders that encode intra-only
+ * formats (i.e. whose codec descriptor has AV_CODEC_PROP_INTRA_ONLY set).
+ * This is used to set said flag generically for said encoders.
+ */
+ int intra_only_flag;
+
void *frame_thread_encoder;
/**
+ * The input frame is stored here for encoders implementing the simple
+ * encode API.
+ *
+ * Not allocated in other cases.
+ */
+ AVFrame *in_frame;
+
+ /**
+     * When the AV_CODEC_FLAG_RECON_FRAME flag is used, the encoder should store
+ * here the reconstructed frame corresponding to the last returned packet.
+ *
+ * Not allocated in other cases.
+ */
+ AVFrame *recon_frame;
+
+ /**
+ * If this is set, then FFCodec->close (if existing) needs to be called
+ * for the parent AVCodecContext.
+ */
+ int needs_close;
+
+ /**
* Number of audio samples to skip at the start of the next decoded frame
*/
int skip_samples;
@@ -195,27 +142,14 @@ typedef struct AVCodecInternal {
int draining;
/**
- * buffers for using new encode/decode API through legacy API
+ * Temporary buffers for newly received or not yet output packets/frames.
*/
AVPacket *buffer_pkt;
- int buffer_pkt_valid; // encoding: packet without data can be valid
AVFrame *buffer_frame;
int draining_done;
- /* set to 1 when the caller is using the old decoding API */
- int compat_decode;
- int compat_decode_warned;
- /* this variable is set by the decoder internals to signal to the old
- * API compat wrappers the amount of data consumed from the last packet */
- size_t compat_decode_consumed;
- /* when a partial packet has been consumed, this stores the remaining size
- * of the packet (that should be submitted in the next decode call */
- size_t compat_decode_partial_size;
- AVFrame *compat_decode_frame;
int showed_multi_packet_warning;
- int skip_samples_multiplier;
-
/* to prevent infinite loop on errors when draining */
int nb_draining_errors;
@@ -224,16 +158,12 @@ typedef struct AVCodecInternal {
int initial_format;
int initial_width, initial_height;
int initial_sample_rate;
- int initial_channels;
- uint64_t initial_channel_layout;
-} AVCodecInternal;
-
-struct AVCodecDefault {
- const uint8_t *key;
- const uint8_t *value;
-};
+ AVChannelLayout initial_ch_layout;
-extern const uint8_t ff_log2_run[41];
+#if CONFIG_LCMS2
+ FFIccContext icc; /* used to read and write embedded ICC profiles */
+#endif
+} AVCodecInternal;
/**
* Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
@@ -241,7 +171,7 @@ extern const uint8_t ff_log2_run[41];
*/
int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b);
-unsigned int avpriv_toupper4(unsigned int x);
+unsigned int ff_toupper4(unsigned int x);
void ff_color_frame(AVFrame *frame, const int color[4]);
@@ -253,48 +183,6 @@ void ff_color_frame(AVFrame *frame, const int color[4]);
#define FF_MAX_EXTRADATA_SIZE ((1 << 28) - AV_INPUT_BUFFER_PADDING_SIZE)
/**
- * Check AVPacket size and/or allocate data.
- *
- * Encoders supporting AVCodec.encode2() can use this as a convenience to
- * ensure the output packet data is large enough, whether provided by the user
- * or allocated in this function.
- *
- * @param avctx the AVCodecContext of the encoder
- * @param avpkt the AVPacket
- * If avpkt->data is already set, avpkt->size is checked
- * to ensure it is large enough.
- * If avpkt->data is NULL, a new buffer is allocated.
- * avpkt->size is set to the specified size.
- * All other AVPacket fields will be reset with av_init_packet().
- * @param size the minimum required packet size
- * @param min_size This is a hint to the allocation algorithm, which indicates
- * to what minimal size the caller might later shrink the packet
- * to. Encoders often allocate packets which are larger than the
- * amount of data that is written into them as the exact amount is
- * not known at the time of allocation. min_size represents the
- * size a packet might be shrunk to by the caller. Can be set to
- * 0. setting this roughly correctly allows the allocation code
- * to choose between several allocation strategies to improve
- * speed slightly.
- * @return non negative on success, negative error code on failure
- */
-int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size);
-
-attribute_deprecated int ff_alloc_packet(AVPacket *avpkt, int size);
-
-/**
- * Rescale from sample rate to AVCodecContext.time_base.
- */
-static av_always_inline int64_t ff_samples_to_time_base(AVCodecContext *avctx,
- int64_t samples)
-{
- if(samples == AV_NOPTS_VALUE)
- return AV_NOPTS_VALUE;
- return av_rescale_q(samples, (AVRational){ 1, avctx->sample_rate },
- avctx->time_base);
-}
-
-/**
* 2^(x) for integer x
* @return correctly rounded float
*/
@@ -313,97 +201,29 @@ static av_always_inline float ff_exp2fi(int x) {
return 0;
}
-/**
- * Get a buffer for a frame. This is a wrapper around
- * AVCodecContext.get_buffer() and should be used instead calling get_buffer()
- * directly.
- */
-int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
-
-/**
- * Identical in function to av_frame_make_writable(), except it uses
- * ff_get_buffer() to allocate the buffer when needed.
- */
-int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame);
-
-int ff_thread_can_start_frame(AVCodecContext *avctx);
-
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx);
-/**
- * Call avcodec_open2 recursively by decrementing counter, unlocking mutex,
- * calling the function and then restoring again. Assumes the mutex is
- * already locked
- */
-int ff_codec_open2_recursive(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
-
-/**
- * Finalize buf into extradata and set its size appropriately.
- */
-int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf);
-
-const uint8_t *avpriv_find_start_code(const uint8_t *p,
- const uint8_t *end,
- uint32_t *state);
-
int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec);
/**
- * Check that the provided frame dimensions are valid and set them on the codec
- * context.
- */
-int ff_set_dimensions(AVCodecContext *s, int width, int height);
-
-/**
- * Check that the provided sample aspect ratio is valid and set it on the codec
- * context.
- */
-int ff_set_sar(AVCodecContext *avctx, AVRational sar);
-
-/**
- * Add or update AV_FRAME_DATA_MATRIXENCODING side data.
- */
-int ff_side_data_update_matrix_encoding(AVFrame *frame,
- enum AVMatrixEncoding matrix_encoding);
-
-/**
- * Select the (possibly hardware accelerated) pixel format.
- * This is a wrapper around AVCodecContext.get_format() and should be used
- * instead of calling get_format() directly.
- *
- * The list of pixel formats must contain at least one valid entry, and is
- * terminated with AV_PIX_FMT_NONE. If it is possible to decode to software,
- * the last entry in the list must be the most accurate software format.
- * If it is not possible to decode to software, AVCodecContext.sw_pix_fmt
- * must be set before calling this function.
- */
-int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt);
-
-/**
- * Set various frame properties from the codec context / packet data.
- */
-int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame);
-
-/**
* Add a CPB properties side data to an encoding context.
*/
AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx);
-int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type);
-
/**
- * Check AVFrame for A53 side data and allocate and fill SEI message with A53 info
+ * Check AVFrame for S12M timecode side data and allocate and fill TC SEI message with timecode info
*
- * @param frame Raw frame to get A53 side data from
+ * @param frame Raw frame to get S12M timecode side data from
+ * @param rate The frame rate
* @param prefix_len Number of bytes to allocate before SEI message
* @param data Pointer to a variable to store allocated memory
- * Upon return the variable will hold NULL on error or if frame has no A53 info.
+ * Upon return the variable will hold NULL on error or if frame has no S12M timecode info.
* Otherwise it will point to prefix_len uninitialized bytes followed by
* *sei_size SEI message
* @param sei_size Pointer to a variable to store generated SEI message length
* @return Zero on success, negative error code on failure
*/
-int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len,
+int ff_alloc_timecode_sei(const AVFrame *frame, AVRational rate, size_t prefix_len,
void **data, size_t *sei_size);
/**
@@ -424,10 +244,4 @@ int64_t ff_guess_coded_bitrate(AVCodecContext *avctx);
int ff_int_from_list_or_default(void *ctx, const char * val_name, int val,
const int * array_valid_values, int default_value);
-#if defined(_WIN32) && CONFIG_SHARED && !defined(BUILDING_avcodec)
-# define av_export_avcodec __declspec(dllimport)
-#else
-# define av_export_avcodec
-#endif
-
#endif /* AVCODEC_INTERNAL_H */
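
The removed ff_samples_to_time_base() helper was only a thin wrapper over av_rescale_q(); code that still needs the conversion can inline it. A sketch equivalent to the removed body above:

    #include <libavcodec/avcodec.h>
    #include <libavutil/mathematics.h>

    /* Convert a sample count to the codec context's time_base, as the removed
     * helper did; AV_NOPTS_VALUE passes through unchanged. */
    static int64_t samples_to_time_base(const AVCodecContext *avctx, int64_t samples)
    {
        if (samples == AV_NOPTS_VALUE)
            return AV_NOPTS_VALUE;
        return av_rescale_q(samples, (AVRational){ 1, avctx->sample_rate },
                            avctx->time_base);
    }
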
diff --git a/media/ffvpx/libavcodec/mathops.h b/media/ffvpx/libavcodec/mathops.h
index 1c35664318..a1dc323304 100644
--- a/media/ffvpx/libavcodec/mathops.h
+++ b/media/ffvpx/libavcodec/mathops.h
@@ -24,15 +24,16 @@
#include <stdint.h>
+#include "libavutil/attributes_internal.h"
#include "libavutil/common.h"
-#include "libavutil/reverse.h"
#include "config.h"
#define MAX_NEG_CROP 1024
extern const uint32_t ff_inverse[257];
+extern const uint8_t ff_log2_run[41];
extern const uint8_t ff_sqrt_tab[256];
-extern const uint8_t ff_crop_tab[256 + 2 * MAX_NEG_CROP];
+extern const uint8_t attribute_visibility_hidden ff_crop_tab[256 + 2 * MAX_NEG_CROP];
extern const uint8_t ff_zigzag_direct[64];
extern const uint8_t ff_zigzag_scan[16+1];
@@ -126,6 +127,8 @@ static inline av_const int median4(int a, int b, int c, int d)
}
#endif
+#define FF_SIGNBIT(x) ((x) >> CHAR_BIT * sizeof(x) - 1)
+
#ifndef sign_extend
static inline av_const int sign_extend(int val, unsigned bits)
{
@@ -135,6 +138,15 @@ static inline av_const int sign_extend(int val, unsigned bits)
}
#endif
+#ifndef sign_extend64
+static inline av_const int64_t sign_extend64(int64_t val, unsigned bits)
+{
+ unsigned shift = 8 * sizeof(int64_t) - bits;
+ union { uint64_t u; int64_t s; } v = { (uint64_t) val << shift };
+ return v.s >> shift;
+}
+#endif
+
#ifndef zero_extend
static inline av_const unsigned zero_extend(unsigned val, unsigned bits)
{
@@ -240,12 +252,4 @@ static inline int8_t ff_u8_to_s8(uint8_t a)
return b.s8;
}
-static av_always_inline uint32_t bitswap_32(uint32_t x)
-{
- return (uint32_t)ff_reverse[ x & 0xFF] << 24 |
- (uint32_t)ff_reverse[(x >> 8) & 0xFF] << 16 |
- (uint32_t)ff_reverse[(x >> 16) & 0xFF] << 8 |
- (uint32_t)ff_reverse[ x >> 24];
-}
-
#endif /* AVCODEC_MATHOPS_H */
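
The sign_extend64() added above mirrors the 32-bit sign_extend() for fields wider than 32 bits. A standalone copy of the same trick with a small usage example (names here are illustrative):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same shift-through-a-union trick as the sign_extend64() added above. */
    static int64_t sign_extend64_demo(int64_t val, unsigned bits)
    {
        unsigned shift = 8 * sizeof(int64_t) - bits;
        union { uint64_t u; int64_t s; } v = { (uint64_t)val << shift };
        return v.s >> shift;
    }

    int main(void)
    {
        /* Bit 39 is the sign bit of a 40-bit field, so this prints a negative value. */
        printf("%" PRId64 "\n", sign_extend64_demo(UINT64_C(0x8000000001), 40));
        return 0;
    }
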
diff --git a/media/ffvpx/libavcodec/mathtables.c b/media/ffvpx/libavcodec/mathtables.c
index 81eabc7a65..8b0031eb00 100644
--- a/media/ffvpx/libavcodec/mathtables.c
+++ b/media/ffvpx/libavcodec/mathtables.c
@@ -112,3 +112,12 @@ const uint8_t ff_zigzag_scan[16+1] = {
1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4,
3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
+
+const uint8_t ff_log2_run[41] = {
+ 0, 0, 0, 0, 1, 1, 1, 1,
+ 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+16, 17, 18, 19, 20, 21, 22, 23,
+24,
+};
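
ff_log2_run now lives here and is declared in mathops.h. As a hedged note on usage (based on the run-mode Golomb coding in full FFmpeg, which is not part of this diff), the entry at the current run index gives the log2 of the run step:

    #include <stdint.h>

    extern const uint8_t ff_log2_run[41];

    /* Hedged illustration: a run-mode coder advances its run in steps of
     * (1 << ff_log2_run[run_index]); run_index must stay within 0..40. */
    static unsigned run_step(unsigned run_index)
    {
        return 1u << ff_log2_run[run_index];
    }
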
diff --git a/media/ffvpx/libavcodec/me_cmp.h b/media/ffvpx/libavcodec/me_cmp.h
index 0a589e3c3d..90ea76c891 100644
--- a/media/ffvpx/libavcodec/me_cmp.h
+++ b/media/ffvpx/libavcodec/me_cmp.h
@@ -21,9 +21,11 @@
#include <stdint.h>
+#include "libavutil/attributes_internal.h"
+
#include "avcodec.h"
-extern const uint32_t ff_square_tab[512];
+extern const uint32_t attribute_visibility_hidden ff_square_tab[512];
/* minimum alignment rules ;)
@@ -46,12 +48,12 @@ struct MpegEncContext;
* Although currently h < 4 is not used as functions with
* width < 8 are neither used nor implemented. */
typedef int (*me_cmp_func)(struct MpegEncContext *c,
- uint8_t *blk1 /* align width (8 or 16) */,
- uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
+ const uint8_t *blk1 /* align width (8 or 16) */,
+ const uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
int h);
typedef struct MECmpContext {
- int (*sum_abs_dctelem)(int16_t *block /* align 16 */);
+ int (*sum_abs_dctelem)(const int16_t *block /* align 16 */);
me_cmp_func sad[6]; /* identical to pix_absAxA except additional void * */
me_cmp_func sse[6];
@@ -79,9 +81,8 @@ typedef struct MECmpContext {
me_cmp_func median_sad[6];
} MECmpContext;
-int ff_check_alignment(void);
-
void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx);
+void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_alpha(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx);
void ff_me_cmp_init_ppc(MECmpContext *c, AVCodecContext *avctx);
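
The me_cmp_func comparators above now take const block pointers. A hedged sketch of a plain-C comparator shaped like that typedef (a simple 16-wide SAD, not one of libavcodec's optimized implementations):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct MpegEncContext;  /* opaque; unused in this sketch */

    /* Sum of absolute differences over a 16-pixel-wide block of height h,
     * matching the me_cmp_func signature declared above. */
    static int sad16_c_sketch(struct MpegEncContext *c,
                              const uint8_t *blk1, const uint8_t *blk2,
                              ptrdiff_t stride, int h)
    {
        int sum = 0;
        (void)c;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < 16; x++)
                sum += abs(blk1[x] - blk2[x]);
            blk1 += stride;
            blk2 += stride;
        }
        return sum;
    }
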
diff --git a/media/ffvpx/libavcodec/motion_est.h b/media/ffvpx/libavcodec/motion_est.h
index 3b3a8d7341..f6a563b08c 100644
--- a/media/ffvpx/libavcodec/motion_est.h
+++ b/media/ffvpx/libavcodec/motion_est.h
@@ -51,10 +51,7 @@ typedef struct MotionEstContext {
int direct_basis_mv[4][2];
uint8_t *scratchpad; /**< data area for the ME algo, so that
* the ME does not need to malloc/free. */
- uint8_t *best_mb;
- uint8_t *temp_mb[2];
uint8_t *temp;
- int best_bits;
uint32_t *map; ///< map to avoid duplicate evaluations
uint32_t *score_map; ///< map to store the scores
unsigned map_generation;
@@ -77,8 +74,8 @@ typedef struct MotionEstContext {
int ymax;
int pred_x;
int pred_y;
- uint8_t *src[4][4];
- uint8_t *ref[4][4];
+ const uint8_t *src[4][4];
+ const uint8_t *ref[4][4];
int stride;
int uvstride;
/* temp variables for picture complexity calculation */
@@ -90,8 +87,8 @@ typedef struct MotionEstContext {
op_pixels_func(*hpel_avg)[4];
qpel_mc_func(*qpel_put)[16];
qpel_mc_func(*qpel_avg)[16];
- uint8_t (*mv_penalty)[MAX_DMV * 2 + 1]; ///< bit amount needed to encode a MV
- uint8_t *current_mv_penalty;
+ const uint8_t (*mv_penalty)[MAX_DMV * 2 + 1]; ///< bit amount needed to encode a MV
+ const uint8_t *current_mv_penalty;
int (*sub_motion_search)(struct MpegEncContext *s,
int *mx_ptr, int *my_ptr, int dmin,
int src_index, int ref_index,
@@ -118,16 +115,16 @@ int ff_pre_estimate_p_frame_motion(struct MpegEncContext *s,
int ff_epzs_motion_search(struct MpegEncContext *s, int *mx_ptr, int *my_ptr,
int P[10][2], int src_index, int ref_index,
- int16_t (*last_mv)[2], int ref_mv_scale, int size,
- int h);
+ const int16_t (*last_mv)[2], int ref_mv_scale,
+ int size, int h);
int ff_get_mb_score(struct MpegEncContext *s, int mx, int my, int src_index,
int ref_index, int size, int h, int add_rate);
int ff_get_best_fcode(struct MpegEncContext *s,
- int16_t (*mv_table)[2], int type);
+ const int16_t (*mv_table)[2], int type);
-void ff_fix_long_p_mvs(struct MpegEncContext *s);
+void ff_fix_long_p_mvs(struct MpegEncContext *s, int type);
void ff_fix_long_mvs(struct MpegEncContext *s, uint8_t *field_select_table,
int field_select, int16_t (*mv_table)[2], int f_code,
int type, int truncate);
diff --git a/media/ffvpx/libavcodec/moz.build b/media/ffvpx/libavcodec/moz.build
index 861f19b1ae..2e1c2c9bfe 100644
--- a/media/ffvpx/libavcodec/moz.build
+++ b/media/ffvpx/libavcodec/moz.build
@@ -11,6 +11,7 @@ if CONFIG['FFVPX_ASFLAGS']:
SharedLibrary('mozavcodec')
SOURCES += [
'allcodecs.c',
+ 'avcodec.c',
'avpacket.c',
'avpicture.c',
'bitstream.c',
@@ -18,13 +19,16 @@ SOURCES += [
'bitstream_filters.c',
'bsf.c',
'codec_desc.c',
+ 'codec_par.c',
'decode.c',
'dummy_funcs.c',
+ 'encode.c',
'flac.c',
'flac_parser.c',
'flacdata.c',
'flacdec.c',
'flacdsp.c',
+ 'get_buffer.c',
'golomb.c',
'h264pred.c',
'imgconvert.c',
@@ -42,9 +46,10 @@ SOURCES += [
'raw.c',
'reverse.c',
'utils.c',
+ 'version.c',
'videodsp.c',
+ 'vlc.c',
'vorbis_parser.c',
- 'vp56rac.c',
'vp8.c',
'vp8_parser.c',
'vp8dsp.c',
@@ -61,6 +66,7 @@ SOURCES += [
'vp9mvs.c',
'vp9prob.c',
'vp9recon.c',
+ 'vpx_rac.c',
'xiph.c'
]
diff --git a/media/ffvpx/libavcodec/mpeg12data.h b/media/ffvpx/libavcodec/mpeg12data.h
index f51faf4607..bc39655fbf 100644
--- a/media/ffvpx/libavcodec/mpeg12data.h
+++ b/media/ffvpx/libavcodec/mpeg12data.h
@@ -30,7 +30,6 @@
#include <stdint.h>
#include "libavutil/rational.h"
-#include "rl.h"
extern const uint16_t ff_mpeg1_default_intra_matrix[];
extern const uint16_t ff_mpeg1_default_non_intra_matrix[64];
@@ -40,9 +39,6 @@ extern const unsigned char ff_mpeg12_vlc_dc_lum_bits[12];
extern const uint16_t ff_mpeg12_vlc_dc_chroma_code[12];
extern const unsigned char ff_mpeg12_vlc_dc_chroma_bits[12];
-extern RLTable ff_rl_mpeg1;
-extern RLTable ff_rl_mpeg2;
-
extern const uint8_t ff_mpeg12_mbAddrIncrTable[36][2];
extern const uint8_t ff_mpeg12_mbPatTable[64][2];
diff --git a/media/ffvpx/libavcodec/mpegpicture.h b/media/ffvpx/libavcodec/mpegpicture.h
index 2db3d6733a..7919aa402c 100644
--- a/media/ffvpx/libavcodec/mpegpicture.h
+++ b/media/ffvpx/libavcodec/mpegpicture.h
@@ -27,8 +27,9 @@
#include "avcodec.h"
#include "motion_est.h"
-#include "thread.h"
+#include "threadframe.h"
+#define MPEGVIDEO_MAX_PLANES 4
#define MAX_PICTURE_COUNT 36
#define EDGE_WIDTH 16
@@ -61,33 +62,23 @@ typedef struct Picture {
AVBufferRef *ref_index_buf[2];
int8_t *ref_index[2];
- AVBufferRef *mb_var_buf;
- uint16_t *mb_var; ///< Table for MB variances
-
- AVBufferRef *mc_mb_var_buf;
- uint16_t *mc_mb_var; ///< Table for motion compensated MB variances
-
int alloc_mb_width; ///< mb_width used to allocate tables
int alloc_mb_height; ///< mb_height used to allocate tables
-
- AVBufferRef *mb_mean_buf;
- uint8_t *mb_mean; ///< Table for MB luminance
+ int alloc_mb_stride; ///< mb_stride used to allocate tables
AVBufferRef *hwaccel_priv_buf;
void *hwaccel_picture_private; ///< Hardware accelerator private data
int field_picture; ///< whether or not the picture was encoded in separate fields
- int64_t mb_var_sum; ///< sum of MB variance for current frame
- int64_t mc_mb_var_sum; ///< motion compensated MB variance for current frame
-
int b_frame_score;
int needs_realloc; ///< Picture needs to be reallocated (eg due to a frame size change)
int reference;
int shared;
- uint64_t encoding_error[AV_NUM_DATA_POINTERS];
+ int display_picture_number;
+ int coded_picture_number;
} Picture;
/**
@@ -106,8 +97,8 @@ int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src);
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *picture);
-void ff_free_picture_tables(Picture *pic);
-int ff_update_picture_tables(Picture *dst, Picture *src);
+void ff_mpv_picture_free(AVCodecContext *avctx, Picture *pic);
+int ff_update_picture_tables(Picture *dst, const Picture *src);
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared);
diff --git a/media/ffvpx/libavcodec/mpegutils.h b/media/ffvpx/libavcodec/mpegutils.h
index 1ed21c19be..386110bb8c 100644
--- a/media/ffvpx/libavcodec/mpegutils.h
+++ b/media/ffvpx/libavcodec/mpegutils.h
@@ -26,7 +26,6 @@
#include "libavutil/frame.h"
#include "avcodec.h"
-#include "version.h"
/**
* Return value for header parsers if frame is not coded.
@@ -38,12 +37,6 @@
#define PICT_BOTTOM_FIELD 2
#define PICT_FRAME 3
-/**
- * Value of Picture.reference when Picture is not a reference picture, but
- * is held for delayed output.
- */
-#define DELAYED_PIC_REF 4
-
#define MAX_MB_BYTES (30 * 16 * 16 * 3 / 8 + 120)
#define MAX_FCODE 7
@@ -125,6 +118,7 @@ enum OutputFormat {
FMT_H261,
FMT_H263,
FMT_MJPEG,
+ FMT_SPEEDHQ,
};
@@ -133,16 +127,16 @@ enum OutputFormat {
*
* @param h is the normal height, this will be reduced automatically if needed
*/
-void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last,
+void ff_draw_horiz_band(AVCodecContext *avctx, const AVFrame *cur, const AVFrame *last,
int y, int h, int picture_structure, int first_field,
int low_delay);
/**
* Print debugging info for the given picture.
*/
-void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
- uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
- int *low_delay,
- int mb_width, int mb_height, int mb_stride, int quarter_sample);
+void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict,
+ const uint8_t *mbskip_table, const uint32_t *mbtype_table,
+ const int8_t *qscale_table, int16_t (*const motion_val[2])[2],
+ int mb_width, int mb_height, int mb_stride, int quarter_sample);
#endif /* AVCODEC_MPEGUTILS_H */
diff --git a/media/ffvpx/libavcodec/mpegvideo.h b/media/ffvpx/libavcodec/mpegvideo.h
index e1ff5f97dc..55828e6102 100644
--- a/media/ffvpx/libavcodec/mpegvideo.h
+++ b/media/ffvpx/libavcodec/mpegvideo.h
@@ -28,8 +28,6 @@
#ifndef AVCODEC_MPEGVIDEO_H
#define AVCODEC_MPEGVIDEO_H
-#include <float.h>
-
#include "avcodec.h"
#include "blockdsp.h"
#include "error_resilience.h"
@@ -39,41 +37,29 @@
#include "h263dsp.h"
#include "hpeldsp.h"
#include "idctdsp.h"
-#include "internal.h"
#include "me_cmp.h"
#include "motion_est.h"
#include "mpegpicture.h"
-#include "mpegvideodsp.h"
#include "mpegvideoencdsp.h"
-#include "mpegvideodata.h"
#include "pixblockdsp.h"
#include "put_bits.h"
#include "ratecontrol.h"
-#include "parser.h"
#include "mpegutils.h"
-#include "mpeg12data.h"
#include "qpeldsp.h"
-#include "thread.h"
#include "videodsp.h"
-#include "libavutil/opt.h"
-#include "libavutil/timecode.h"
-
#define MAX_THREADS 32
#define MAX_B_FRAMES 16
-/* Start codes. */
-#define SEQ_END_CODE 0x000001b7
-#define SEQ_START_CODE 0x000001b3
-#define GOP_START_CODE 0x000001b8
-#define PICTURE_START_CODE 0x00000100
-#define SLICE_MIN_START_CODE 0x00000101
-#define SLICE_MAX_START_CODE 0x000001af
-#define EXT_START_CODE 0x000001b5
-#define USER_START_CODE 0x000001b2
-#define SLICE_START_CODE 0x000001b7
-
+/**
+ * Scantable.
+ */
+typedef struct ScanTable {
+ const uint8_t *scantable;
+ uint8_t permutated[64];
+ uint8_t raster_end[64];
+} ScanTable;
/**
* MpegEncContext.
@@ -88,14 +74,18 @@ typedef struct MpegEncContext {
/* scantables */
ScanTable inter_scantable; ///< if inter == intra then intra should be used to reduce the cache usage
- ScanTable intra_scantable;
- ScanTable intra_h_scantable;
- ScanTable intra_v_scantable;
/* WARNING: changes above this line require updates to hardcoded
* offsets used in ASM. */
+ ScanTable intra_scantable;
+ uint8_t permutated_intra_h_scantable[64];
+ uint8_t permutated_intra_v_scantable[64];
+
struct AVCodecContext *avctx;
+ /* The following pointer is intended for codecs sharing code
+ * between decoder and encoder and in need of a common context to do so. */
+ void *private_ctx;
/* the following parameters must be initialized before encoding */
int width, height;///< picture size. must be a multiple of 16
int gop_size;
@@ -115,7 +105,6 @@ typedef struct MpegEncContext {
int max_b_frames; ///< max number of B-frames for encoding
int luma_elim_threshold;
int chroma_elim_threshold;
- int strict_std_compliance; ///< strictly follow the std (MPEG-4, ...)
int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically
int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag
/* the following fields are managed internally by the encoder */
@@ -125,6 +114,7 @@ typedef struct MpegEncContext {
int input_picture_number; ///< used to set pic->display_picture_number, should not be used for/by anything else
int coded_picture_number; ///< used to set pic->coded_picture_number, should not be used for/by anything else
int picture_number; //FIXME remove, unclear definition
+ int extradata_parsed;
int picture_in_gop_number; ///< 0-> first pic in gop, ...
int mb_width, mb_height; ///< number of MBs horizontally & vertically
int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
@@ -137,7 +127,7 @@ typedef struct MpegEncContext {
Picture **input_picture; ///< next pictures on display order for encoding
Picture **reordered_input_picture; ///< pointer to the next pictures in coded order for encoding
- int64_t user_specified_pts; ///< last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
+ int64_t user_specified_pts; ///< last non-zero pts from AVFrame which was passed into avcodec_send_frame()
/**
* pts difference between the first and second input frame, used for
* calculating dts of the first frame when there's a delay */
@@ -168,10 +158,10 @@ typedef struct MpegEncContext {
Picture next_picture;
/**
- * copy of the source picture structure for encoding.
+ * Reference to the source picture for encoding.
* note, linesize & data, might not match the source picture (for field pictures)
*/
- Picture new_picture;
+ AVFrame *new_picture;
/**
* copy of the current picture structure.
@@ -182,6 +172,7 @@ typedef struct MpegEncContext {
Picture *last_picture_ptr; ///< pointer to the previous picture.
Picture *next_picture_ptr; ///< pointer to the next picture (for bidir pred)
Picture *current_picture_ptr; ///< pointer to the current picture
+ int skipped_last_frame;
int last_dc[3]; ///< last DC values for MPEG-1
int16_t *dc_val_base;
int16_t *dc_val[3]; ///< used for MPEG-4 DC prediction, all 3 arrays must be continuous
@@ -208,14 +199,11 @@ typedef struct MpegEncContext {
int *lambda_table;
int adaptive_quant; ///< use adaptive quantization
int dquant; ///< qscale difference to prev qscale
- int closed_gop; ///< MPEG1/2 GOP is closed
int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int vbv_delay;
int last_pict_type; //FIXME removes
int last_non_b_pict_type; ///< used for MPEG-4 gmc B-frames & ratecontrol
int droppable;
- int frame_rate_index;
- AVRational mpeg2_frame_rate_ext;
int last_lambda_for[5]; ///< last lambda for a specific pict type
int skipdct; ///< skip dct and code zero residual
@@ -229,7 +217,6 @@ typedef struct MpegEncContext {
HpelDSPContext hdsp;
IDCTDSPContext idsp;
MECmpContext mecc;
- MpegVideoDSPContext mdsp;
MpegvideoEncDSPContext mpvencdsp;
PixblockDSPContext pdsp;
QpelDSPContext qdsp;
@@ -243,8 +230,8 @@ typedef struct MpegEncContext {
int16_t (*b_bidir_forw_mv_table_base)[2];
int16_t (*b_bidir_back_mv_table_base)[2];
int16_t (*b_direct_mv_table_base)[2];
- int16_t (*p_field_mv_table_base[2][2])[2];
- int16_t (*b_field_mv_table_base[2][2][2])[2];
+ int16_t (*p_field_mv_table_base)[2];
+ int16_t (*b_field_mv_table_base)[2];
int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) P-frame encoding
int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode B-frame encoding
int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode B-frame encoding
@@ -253,8 +240,17 @@ typedef struct MpegEncContext {
int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode B-frame encoding
int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced P-frame encoding
int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced B-frame encoding
- uint8_t (*p_field_select_table[2]);
- uint8_t (*b_field_select_table[2][2]);
+ uint8_t (*p_field_select_table[2]); ///< Only the first element is allocated
+ uint8_t (*b_field_select_table[2][2]); ///< Only the first element is allocated
+
+ /* The following fields are encoder-only */
+ uint16_t *mb_var; ///< Table for MB variances
+ uint16_t *mc_mb_var; ///< Table for motion compensated MB variances
+ uint8_t *mb_mean; ///< Table for MB luminance
+ int64_t mb_var_sum; ///< sum of MB variance for current frame
+ int64_t mc_mb_var_sum; ///< motion compensated MB variance for current frame
+ uint64_t encoding_error[MPEGVIDEO_MAX_PLANES];
+
int motion_est; ///< ME algorithm
int me_penalty_compensation;
int me_pre; ///< prepass for motion estimation
@@ -276,7 +272,7 @@ typedef struct MpegEncContext {
int mv[2][4][2];
int field_select[2][2];
int last_mv[2][2][2]; ///< last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
- uint8_t *fcode_tab; ///< smallest fcode needed for each MV
+ const uint8_t *fcode_tab; ///< smallest fcode needed for each MV
int16_t direct_scale_mv[2][64]; ///< precomputed to avoid divisions in ff_mpeg4_set_direct_mv
MotionEstContext me;
@@ -301,7 +297,6 @@ typedef struct MpegEncContext {
uint16_t chroma_intra_matrix[64];
uint16_t inter_matrix[64];
uint16_t chroma_inter_matrix[64];
- int force_duplicated_matrix; ///< Force duplication of mjpeg matrices, useful for rtp streaming
int intra_quant_bias; ///< bias for the quantizer
int inter_quant_bias; ///< bias for the quantizer
@@ -315,7 +310,6 @@ typedef struct MpegEncContext {
uint8_t *inter_ac_vlc_length;
uint8_t *inter_ac_vlc_last_length;
uint8_t *luma_dc_vlc_length;
-#define UNI_AC_ENC_INDEX(run,level) ((run)*128 + (level))
int coded_score[12];
@@ -346,8 +340,6 @@ typedef struct MpegEncContext {
int i_tex_bits;
int p_tex_bits;
int i_count;
- int f_count;
- int b_count;
int skip_count;
int misc_bits; ///< cbp, mb_type
int last_bits; ///< temp var used for calculating the above vars
@@ -357,9 +349,6 @@ typedef struct MpegEncContext {
int resync_mb_y; ///< y position of last resync marker
GetBitContext last_resync_gb; ///< used to search for the next resync marker
int mb_num_left; ///< number of MBs left in this video packet (for partitioned Slices only)
- int next_p_frame_damaged; ///< set if the next p frame is damaged, to avoid showing trashed B-frames
-
- ParseContext parse_context;
/* H.263 specific */
int gob_index;
@@ -369,7 +358,6 @@ typedef struct MpegEncContext {
uint8_t *mb_info_ptr;
int mb_info_size;
int ehc_mode;
- int rc_strategy; ///< deprecated
/* H.263+ specific */
int umvplus; ///< == H.263+ && unrestricted_mv
@@ -393,18 +381,12 @@ typedef struct MpegEncContext {
uint16_t pb_time; ///< time distance between the last b and p,s,i frame
uint16_t pp_field_time;
uint16_t pb_field_time; ///< like above, just for interlaced
- int real_sprite_warping_points;
- int sprite_offset[2][2]; ///< sprite offset[isChroma][isMVY]
- int sprite_delta[2][2]; ///< sprite_delta [isY][isMVY]
int mcsel;
int quant_precision;
int quarter_sample; ///< 1->qpel, 0->half pel ME/MC
- int aspect_ratio_info; //FIXME remove
- int sprite_warping_accuracy;
int data_partitioning; ///< data partitioning flag from header
int partitioned_frame; ///< is current frame partitioned
int low_delay; ///< no reordering needed / has no B-frames
- int vo_type;
PutBitContext tex_pb; ///< used for data partitioned VOPs
PutBitContext pb2; ///< used for data partitioned VOPs
int mpeg_quant;
@@ -423,8 +405,6 @@ typedef struct MpegEncContext {
/* MJPEG specific */
struct MJpegContext *mjpeg_ctx;
int esc_pos;
- int pred;
- int huffman;
/* MSMPEG4 specific */
int mv_table_index;
@@ -439,8 +419,6 @@ typedef struct MpegEncContext {
int per_mb_rl_table;
int esc3_level_length;
int esc3_run_length;
- /** [mb_intra][isChroma][level][run][last] */
- int (*ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2];
int inter_intra_pred;
int mspel;
@@ -448,19 +426,16 @@ typedef struct MpegEncContext {
GetBitContext gb;
/* MPEG-1 specific */
- int gop_picture_number; ///< index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
int last_mv_dir; ///< last mv_dir, used for B-frame encoding
- uint8_t *vbv_delay_ptr; ///< pointer to vbv_delay in the bitstream
+ int vbv_delay_pos; ///< offset of vbv_delay in the bitstream
/* MPEG-2-specific - I wished not to have to support this mess. */
int progressive_sequence;
int mpeg_f_code[2][2];
- int a53_cc;
// picture structure defines are loaded from mpegutils.h
int picture_structure;
- int64_t timecode_frame_start; ///< GOP timecode frame start number, in non drop frame format
int intra_dc_precision;
int frame_pred_frame_dct;
int top_field_first;
@@ -469,14 +444,6 @@ typedef struct MpegEncContext {
int brd_scale;
int intra_vlc_format;
int alternate_scan;
- int seq_disp_ext;
- int video_format;
-#define VIDEO_FORMAT_COMPONENT 0
-#define VIDEO_FORMAT_PAL 1
-#define VIDEO_FORMAT_NTSC 2
-#define VIDEO_FORMAT_SECAM 3
-#define VIDEO_FORMAT_MAC 4
-#define VIDEO_FORMAT_UNSPECIFIED 5
int repeat_first_field;
int chroma_420_type;
int chroma_format;
@@ -490,29 +457,18 @@ typedef struct MpegEncContext {
int full_pel[2];
int interlaced_dct;
int first_field; ///< is 1 for the first field of a field picture 0 otherwise
- int drop_frame_timecode; ///< timecode is in drop frame format.
- int scan_offset; ///< reserve space for SVCD scan offset user data.
/* RTP specific */
int rtp_mode;
int rtp_payload_size;
- char *tc_opt_str; ///< timecode option string
- AVTimecode tc; ///< timecode context
-
uint8_t *ptr_lastgob;
- int swap_uv; //vcr2 codec is an MPEG-2 variant with U and V swapped
- int pack_pblocks; //xvmc needs to keep blocks without gaps.
int16_t (*pblocks[12])[64];
int16_t (*block)[64]; ///< points to one of the following blocks
int16_t (*blocks)[12][64]; // for HQ mode we need to keep the best block
int (*decode_mb)(struct MpegEncContext *s, int16_t block[12][64]); // used by some codecs to avoid a switch()
- int32_t (*block32)[12][64];
- int dpcm_direction; // 0 = DCT, 1 = DPCM top to bottom scan, -1 = DPCM bottom to top scan
- int16_t (*dpcm_macroblock)[3][256];
-
#define SLICE_OK 0
#define SLICE_ERROR -1
#define SLICE_END -2 ///<end marker found
@@ -580,93 +536,10 @@ typedef struct MpegEncContext {
int scenechange_threshold;
int noise_reduction;
+
+ int intra_penalty;
} MpegEncContext;
-/* mpegvideo_enc common options */
-#define FF_MPV_FLAG_SKIP_RD 0x0001
-#define FF_MPV_FLAG_STRICT_GOP 0x0002
-#define FF_MPV_FLAG_QP_RD 0x0004
-#define FF_MPV_FLAG_CBP_RD 0x0008
-#define FF_MPV_FLAG_NAQ 0x0010
-#define FF_MPV_FLAG_MV0 0x0020
-
-#define FF_MPV_OPT_CMP_FUNC \
-{ "sad", "Sum of absolute differences, fast", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_SAD }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "sse", "Sum of squared errors", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_SSE }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "satd", "Sum of absolute Hadamard transformed differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_SATD }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "dct", "Sum of absolute DCT transformed differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_DCT }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "psnr", "Sum of squared quantization errors, low quality", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_PSNR }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "bit", "Number of bits needed for the block", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_BIT }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "rd", "Rate distortion optimal, slow", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_RD }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "zero", "Zero", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_ZERO }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "vsad", "Sum of absolute vertical differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_VSAD }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "vsse", "Sum of squared vertical differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_VSSE }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "nsse", "Noise preserving sum of squared differences", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_NSSE }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "dct264", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_DCT264 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "dctmax", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_DCTMAX }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "chroma", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_CHROMA }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{ "msad", "Sum of absolute differences, median predicted", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_MEDIAN_SAD }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }
-
-#ifndef FF_MPV_OFFSET
-#define FF_MPV_OFFSET(x) offsetof(MpegEncContext, x)
-#endif
-#define FF_MPV_OPT_FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
-#define FF_MPV_COMMON_OPTS \
-FF_MPV_OPT_CMP_FUNC, \
-{ "mpv_flags", "Flags common for all mpegvideo-based encoders.", FF_MPV_OFFSET(mpv_flags), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "mpv_flags" },\
-{ "skip_rd", "RD optimal MB level residual skipping", 0, AV_OPT_TYPE_CONST, { .i64 = FF_MPV_FLAG_SKIP_RD }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
-{ "strict_gop", "Strictly enforce gop size", 0, AV_OPT_TYPE_CONST, { .i64 = FF_MPV_FLAG_STRICT_GOP }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
-{ "qp_rd", "Use rate distortion optimization for qp selection", 0, AV_OPT_TYPE_CONST, { .i64 = FF_MPV_FLAG_QP_RD }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
-{ "cbp_rd", "use rate distortion optimization for CBP", 0, AV_OPT_TYPE_CONST, { .i64 = FF_MPV_FLAG_CBP_RD }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
-{ "naq", "normalize adaptive quantization", 0, AV_OPT_TYPE_CONST, { .i64 = FF_MPV_FLAG_NAQ }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
-{ "mv0", "always try a mb with mv=<0,0>", 0, AV_OPT_TYPE_CONST, { .i64 = FF_MPV_FLAG_MV0 }, 0, 0, FF_MPV_OPT_FLAGS, "mpv_flags" },\
-{ "luma_elim_threshold", "single coefficient elimination threshold for luminance (negative values also consider dc coefficient)",\
- FF_MPV_OFFSET(luma_elim_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS },\
-{ "chroma_elim_threshold", "single coefficient elimination threshold for chrominance (negative values also consider dc coefficient)",\
- FF_MPV_OFFSET(chroma_elim_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS },\
-{ "quantizer_noise_shaping", NULL, FF_MPV_OFFSET(quantizer_noise_shaping), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FF_MPV_OPT_FLAGS },\
-{ "error_rate", "Simulate errors in the bitstream to test error concealment.", \
- FF_MPV_OFFSET(error_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FF_MPV_OPT_FLAGS },\
-{"qsquish", "how to keep quantizer between qmin and qmax (0 = clip, 1 = use differentiable function)", \
- FF_MPV_OFFSET(rc_qsquish), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, 0, 99, FF_MPV_OPT_FLAGS}, \
-{"rc_qmod_amp", "experimental quantizer modulation", FF_MPV_OFFSET(rc_qmod_amp), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, FF_MPV_OPT_FLAGS}, \
-{"rc_qmod_freq", "experimental quantizer modulation", FF_MPV_OFFSET(rc_qmod_freq), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS}, \
-{"rc_eq", "Set rate control equation. When computing the expression, besides the standard functions " \
- "defined in the section 'Expression Evaluation', the following functions are available: " \
- "bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv " \
- "fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.", \
- FF_MPV_OFFSET(rc_eq), AV_OPT_TYPE_STRING, .flags = FF_MPV_OPT_FLAGS }, \
-{"rc_init_cplx", "initial complexity for 1-pass encoding", FF_MPV_OFFSET(rc_initial_cplx), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, FF_MPV_OPT_FLAGS}, \
-{"rc_buf_aggressivity", "currently useless", FF_MPV_OFFSET(rc_buffer_aggressivity), AV_OPT_TYPE_FLOAT, {.dbl = 1.0 }, -FLT_MAX, FLT_MAX, FF_MPV_OPT_FLAGS}, \
-{"border_mask", "increase the quantizer for macroblocks close to borders", FF_MPV_OFFSET(border_masking), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, -FLT_MAX, FLT_MAX, FF_MPV_OPT_FLAGS}, \
-{"lmin", "minimum Lagrange factor (VBR)", FF_MPV_OFFSET(lmin), AV_OPT_TYPE_INT, {.i64 = 2*FF_QP2LAMBDA }, 0, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"lmax", "maximum Lagrange factor (VBR)", FF_MPV_OFFSET(lmax), AV_OPT_TYPE_INT, {.i64 = 31*FF_QP2LAMBDA }, 0, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"ibias", "intra quant bias", FF_MPV_OFFSET(intra_quant_bias), AV_OPT_TYPE_INT, {.i64 = FF_DEFAULT_QUANT_BIAS }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"pbias", "inter quant bias", FF_MPV_OFFSET(inter_quant_bias), AV_OPT_TYPE_INT, {.i64 = FF_DEFAULT_QUANT_BIAS }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"rc_strategy", "ratecontrol method", FF_MPV_OFFSET(rc_strategy), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FF_MPV_OPT_FLAGS | AV_OPT_FLAG_DEPRECATED, "rc_strategy" }, \
- { "ffmpeg", "deprecated, does nothing", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FF_MPV_OPT_FLAGS | AV_OPT_FLAG_DEPRECATED, "rc_strategy" }, \
- { "xvid", "deprecated, does nothing", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FF_MPV_OPT_FLAGS | AV_OPT_FLAG_DEPRECATED, "rc_strategy" }, \
-{"motion_est", "motion estimation algorithm", FF_MPV_OFFSET(motion_est), AV_OPT_TYPE_INT, {.i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_XONE, FF_MPV_OPT_FLAGS, "motion_est" }, \
-{ "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, FF_MPV_OPT_FLAGS, "motion_est" }, \
-{ "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, FF_MPV_OPT_FLAGS, "motion_est" }, \
-{ "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, FF_MPV_OPT_FLAGS, "motion_est" }, \
-{ "force_duplicated_matrix", "Always write luma and chroma matrix for mjpeg, useful for rtp streaming.", FF_MPV_OFFSET(force_duplicated_matrix), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, FF_MPV_OPT_FLAGS }, \
-{"b_strategy", "Strategy to choose between I/P/B-frames", FF_MPV_OFFSET(b_frame_strategy), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 2, FF_MPV_OPT_FLAGS }, \
-{"b_sensitivity", "Adjust sensitivity of b_frame_strategy 1", FF_MPV_OFFSET(b_sensitivity), AV_OPT_TYPE_INT, {.i64 = 40 }, 1, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"brd_scale", "Downscale frames for dynamic B-frame decision", FF_MPV_OFFSET(brd_scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 3, FF_MPV_OPT_FLAGS }, \
-{"skip_threshold", "Frame skip threshold", FF_MPV_OFFSET(frame_skip_threshold), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"skip_factor", "Frame skip factor", FF_MPV_OFFSET(frame_skip_factor), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"skip_exp", "Frame skip exponent", FF_MPV_OFFSET(frame_skip_exp), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"skip_cmp", "Frame skip compare function", FF_MPV_OFFSET(frame_skip_cmp), AV_OPT_TYPE_INT, {.i64 = FF_CMP_DCTMAX }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }, \
-{"sc_threshold", "Scene change threshold", FF_MPV_OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"noise_reduction", "Noise reduction", FF_MPV_OFFSET(noise_reduction), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"mpeg_quant", "Use MPEG quantizers instead of H.263", FF_MPV_OFFSET(mpeg_quant), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FF_MPV_OPT_FLAGS }, \
-{"ps", "RTP payload size in bytes", FF_MPV_OFFSET(rtp_payload_size), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"mepc", "Motion estimation bitrate penalty compensation (1.0 = 256)", FF_MPV_OFFSET(me_penalty_compensation), AV_OPT_TYPE_INT, {.i64 = 256 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"mepre", "pre motion estimation", FF_MPV_OFFSET(me_pre), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS }, \
-{"a53cc", "Use A53 Closed Captions (if available)", FF_MPV_OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FF_MPV_OPT_FLAGS }, \
-
-extern const AVOption ff_mpv_generic_options[];
/**
* Set the given MpegEncContext to common defaults (same for encoding
@@ -675,8 +548,6 @@ extern const AVOption ff_mpv_generic_options[];
*/
void ff_mpv_common_defaults(MpegEncContext *s);
-void ff_dct_encode_init_x86(MpegEncContext *s);
-
int ff_mpv_common_init(MpegEncContext *s);
void ff_mpv_common_init_arm(MpegEncContext *s);
void ff_mpv_common_init_axp(MpegEncContext *s);
@@ -684,59 +555,48 @@ void ff_mpv_common_init_neon(MpegEncContext *s);
void ff_mpv_common_init_ppc(MpegEncContext *s);
void ff_mpv_common_init_x86(MpegEncContext *s);
void ff_mpv_common_init_mips(MpegEncContext *s);
+/**
+ * Initialize an MpegEncContext's thread contexts. Presumes that
+ * slice_context_count is already set and that all the fields
+ * that are freed/reset in free_duplicate_context() are NULL.
+ */
+int ff_mpv_init_duplicate_contexts(MpegEncContext *s);
+/**
+ * Initialize and allocate MpegEncContext fields dependent on the resolution.
+ */
+int ff_mpv_init_context_frame(MpegEncContext *s);
+/**
+ * Frees and resets MpegEncContext fields depending on the resolution
+ * as well as the slice thread contexts.
+ * It is used during resolution changes to avoid a full reinitialization of the
+ * codec.
+ */
+void ff_mpv_free_context_frame(MpegEncContext *s);
-int ff_mpv_common_frame_size_change(MpegEncContext *s);
void ff_mpv_common_end(MpegEncContext *s);
-void ff_mpv_decode_defaults(MpegEncContext *s);
-void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx);
-void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64]);
-void ff_mpv_report_decode_progress(MpegEncContext *s);
-
-int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx);
-void ff_mpv_frame_end(MpegEncContext *s);
-
-int ff_mpv_encode_init(AVCodecContext *avctx);
-void ff_mpv_encode_init_x86(MpegEncContext *s);
-
-int ff_mpv_encode_end(AVCodecContext *avctx);
-int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
- const AVFrame *frame, int *got_packet);
-int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase);
-
void ff_clean_intra_table_entries(MpegEncContext *s);
-void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h);
-void ff_mpeg_flush(AVCodecContext *avctx);
-
-void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict);
-
-int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type);
-void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
-
-int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
-int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
+int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src);
void ff_set_qscale(MpegEncContext * s, int qscale);
void ff_mpv_idct_init(MpegEncContext *s);
-int ff_dct_encode_init(MpegEncContext *s);
-void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[2][64],
- const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra);
-int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
-void ff_block_permute(int16_t *block, uint8_t *permutation,
- const uint8_t *scantable, int last);
+void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
+ const uint8_t *src_scantable);
void ff_init_block_index(MpegEncContext *s);
void ff_mpv_motion(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb,
uint8_t *dest_cr, int dir,
- uint8_t **ref_picture,
+ uint8_t *const *ref_picture,
op_pixels_func (*pix_op)[4],
qpel_mc_func (*qpix_op)[16]);
-static inline void ff_update_block_index(MpegEncContext *s){
- const int bytes_per_pixel = 1 + (s->avctx->bits_per_raw_sample > 8);
- const int block_size= (8*bytes_per_pixel) >> s->avctx->lowres;
+static inline void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample,
+ int lowres, int chroma_x_shift)
+{
+ const int bytes_per_pixel = 1 + (bits_per_raw_sample > 8);
+ const int block_size = (8 * bytes_per_pixel) >> lowres;
s->block_index[0]+=2;
s->block_index[1]+=2;
@@ -745,26 +605,8 @@ static inline void ff_update_block_index(MpegEncContext *s){
s->block_index[4]++;
s->block_index[5]++;
s->dest[0]+= 2*block_size;
- s->dest[1]+= (2 >> s->chroma_x_shift) * block_size;
- s->dest[2]+= (2 >> s->chroma_x_shift) * block_size;
-}
-
-static inline int get_bits_diff(MpegEncContext *s){
- const int bits= put_bits_count(&s->pb);
- const int last= s->last_bits;
-
- s->last_bits = bits;
-
- return bits - last;
-}
-
-static inline int mpeg_get_qscale(MpegEncContext *s)
-{
- int qscale = get_bits(&s->gb, 5);
- if (s->q_scale_type)
- return ff_mpeg2_non_linear_qscale[qscale];
- else
- return qscale << 1;
+ s->dest[1] += (2 >> chroma_x_shift) * block_size;
+ s->dest[2] += (2 >> chroma_x_shift) * block_size;
}
#endif /* AVCODEC_MPEGVIDEO_H */
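The new doc comments above split the old monolithic frame-size reinitialization into resolution-dependent teardown/setup plus slice-context setup. A minimal, hypothetical sketch of how a caller could handle a mid-stream resolution change with these helpers (not part of this patch; error handling is abbreviated and the s->width/s->height field names are assumed from MpegEncContext):

#include "mpegvideo.h"

static int change_resolution(MpegEncContext *s, int new_width, int new_height)
{
    int ret;

    /* drop everything that depends on the old dimensions,
     * including the per-slice thread contexts */
    ff_mpv_free_context_frame(s);

    s->width  = new_width;
    s->height = new_height;

    /* reallocate the resolution-dependent buffers ... */
    if ((ret = ff_mpv_init_context_frame(s)) < 0)
        return ret;

    /* ... and recreate the slice thread contexts
     * (slice_context_count is still set from the initial setup) */
    return ff_mpv_init_duplicate_contexts(s);
}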
diff --git a/media/ffvpx/libavcodec/mpegvideodata.h b/media/ffvpx/libavcodec/mpegvideodata.h
index 14f4806d66..42c9d6c293 100644
--- a/media/ffvpx/libavcodec/mpegvideodata.h
+++ b/media/ffvpx/libavcodec/mpegvideodata.h
@@ -21,15 +21,19 @@
#include <stdint.h>
+#include "libavutil/attributes_internal.h"
+
+FF_VISIBILITY_PUSH_HIDDEN
/* encoding scans */
extern const uint8_t ff_alternate_horizontal_scan[64];
extern const uint8_t ff_alternate_vertical_scan[64];
-extern const uint8_t ff_mpeg1_dc_scale_table[128];
-extern const uint8_t * const ff_mpeg2_dc_scale_table[4];
+extern const uint8_t ff_mpeg12_dc_scale_table[4][32];
+static const uint8_t *const ff_mpeg1_dc_scale_table = ff_mpeg12_dc_scale_table[0];
extern const uint8_t ff_mpeg2_non_linear_qscale[32];
extern const uint8_t ff_default_chroma_qscale_table[32];
+FF_VISIBILITY_POP_HIDDEN
#endif /* AVCODEC_MPEGVIDEODATA_H */
diff --git a/media/ffvpx/libavcodec/mpegvideoencdsp.h b/media/ffvpx/libavcodec/mpegvideoencdsp.h
index 33f0282fcc..95084679d9 100644
--- a/media/ffvpx/libavcodec/mpegvideoencdsp.h
+++ b/media/ffvpx/libavcodec/mpegvideoencdsp.h
@@ -30,12 +30,12 @@
#define EDGE_BOTTOM 2
typedef struct MpegvideoEncDSPContext {
- int (*try_8x8basis)(int16_t rem[64], int16_t weight[64],
- int16_t basis[64], int scale);
- void (*add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale);
+ int (*try_8x8basis)(const int16_t rem[64], const int16_t weight[64],
+ const int16_t basis[64], int scale);
+ void (*add_8x8basis)(int16_t rem[64], const int16_t basis[64], int scale);
- int (*pix_sum)(uint8_t *pix, int line_size);
- int (*pix_norm1)(uint8_t *pix, int line_size);
+ int (*pix_sum)(const uint8_t *pix, int line_size);
+ int (*pix_norm1)(const uint8_t *pix, int line_size);
void (*shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src,
int src_wrap, int width, int height);
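The const-correctness change above only touches the function-pointer signatures; implementations keep the same shape. As an illustration, a toy C version matching the new pix_sum prototype (assumption: it sums one 16x16 block, as the in-tree reference implementation does):

#include <stdint.h>

static int pix_sum_c(const uint8_t *pix, int line_size)
{
    int sum = 0;

    /* accumulate all samples of one 16x16 block */
    for (int y = 0; y < 16; y++) {
        for (int x = 0; x < 16; x++)
            sum += pix[x];
        pix += line_size;  /* advance to the next row of the plane */
    }
    return sum;
}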
diff --git a/media/ffvpx/libavcodec/null_bsf.c b/media/ffvpx/libavcodec/null_bsf.c
index 24d26dfb1a..28237076fb 100644
--- a/media/ffvpx/libavcodec/null_bsf.c
+++ b/media/ffvpx/libavcodec/null_bsf.c
@@ -21,15 +21,9 @@
* Null bitstream filter -- pass the input through unchanged.
*/
-#include "avcodec.h"
-#include "bsf.h"
+#include "bsf_internal.h"
-static int null_filter(AVBSFContext *ctx, AVPacket *pkt)
-{
- return ff_bsf_get_packet_ref(ctx, pkt);
-}
-
-const AVBitStreamFilter ff_null_bsf = {
- .name = "null",
- .filter = null_filter,
+const FFBitStreamFilter ff_null_bsf = {
+ .p.name = "null",
+ .filter = ff_bsf_get_packet_ref,
};
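The filter itself now only supplies a name and the generic ff_bsf_get_packet_ref callback; everything else goes through the public bitstream-filter API. A hedged caller-side usage sketch (hypothetical helper, not part of this patch; error paths shortened):

#include <libavutil/error.h>
#include <libavcodec/bsf.h>

static int pass_through_null_bsf(AVPacket *pkt)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("null");
    AVBSFContext *ctx = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &ctx)) < 0)
        return ret;
    /* real callers copy codec parameters and time base into ctx here */
    if ((ret = av_bsf_init(ctx)) < 0)
        goto end;

    ret = av_bsf_send_packet(ctx, pkt);          /* hand the packet to the filter */
    if (ret >= 0)
        ret = av_bsf_receive_packet(ctx, pkt);   /* the null filter returns it unchanged */

end:
    av_bsf_free(&ctx);
    return ret;
}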
diff --git a/media/ffvpx/libavcodec/options.c b/media/ffvpx/libavcodec/options.c
index 35e8ac9313..a9b35ee1c3 100644
--- a/media/ffvpx/libavcodec/options.c
+++ b/media/ffvpx/libavcodec/options.c
@@ -24,8 +24,10 @@
* Options definition for AVCodecContext.
*/
+#include "config_components.h"
+
#include "avcodec.h"
-#include "internal.h"
+#include "codec_internal.h"
#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
@@ -39,7 +41,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
static const char* context_to_name(void* ptr) {
AVCodecContext *avc= ptr;
- if(avc && avc->codec && avc->codec->name)
+ if (avc && avc->codec)
return avc->codec->name;
else
return "NULL";
@@ -53,17 +55,11 @@ static void *codec_child_next(void *obj, void *prev)
return NULL;
}
-static const AVClass *codec_child_class_next(const AVClass *prev)
+static const AVClass *codec_child_class_iterate(void **iter)
{
- AVCodec *c = NULL;
-
- /* find the codec that corresponds to prev */
- while (prev && (c = av_codec_next(c)))
- if (c->priv_class == prev)
- break;
-
+ const AVCodec *c;
/* find next codec with priv options */
- while (c = av_codec_next(c))
+ while (c = av_codec_iterate(iter))
if (c->priv_class)
return c->priv_class;
return NULL;
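child_class_next-style callbacks, which had to rediscover their position from the previous return value, are being replaced by *_iterate callbacks that keep their position in an opaque pointer. A small caller-side sketch of the same pattern using the public av_codec_iterate() (hypothetical example, not from this patch):

#include <stdio.h>
#include <libavcodec/avcodec.h>

static void list_codecs_with_private_options(void)
{
    void *iter = NULL;
    const AVCodec *c;

    /* the iteration state lives entirely in 'iter'; no previous
     * codec handle has to be passed back in, unlike av_codec_next() */
    while ((c = av_codec_iterate(&iter)))
        if (c->priv_class)
            printf("%s has private options\n", c->name);
}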
@@ -72,8 +68,10 @@ static const AVClass *codec_child_class_next(const AVClass *prev)
static AVClassCategory get_category(void *ptr)
{
AVCodecContext* avctx = ptr;
- if(avctx->codec && avctx->codec->decode) return AV_CLASS_CATEGORY_DECODER;
- else return AV_CLASS_CATEGORY_ENCODER;
+ if (avctx->codec && av_codec_is_decoder(avctx->codec))
+ return AV_CLASS_CATEGORY_DECODER;
+ else
+ return AV_CLASS_CATEGORY_ENCODER;
}
static const AVClass av_codec_context_class = {
@@ -83,13 +81,14 @@ static const AVClass av_codec_context_class = {
.version = LIBAVUTIL_VERSION_INT,
.log_level_offset_offset = offsetof(AVCodecContext, log_level_offset),
.child_next = codec_child_next,
- .child_class_next = codec_child_class_next,
+ .child_class_iterate = codec_child_class_iterate,
.category = AV_CLASS_CATEGORY_ENCODER,
.get_category = get_category,
};
static int init_context_defaults(AVCodecContext *s, const AVCodec *codec)
{
+ const FFCodec *const codec2 = ffcodec(codec);
int flags=0;
memset(s, 0, sizeof(AVCodecContext));
@@ -109,34 +108,39 @@ static int init_context_defaults(AVCodecContext *s, const AVCodec *codec)
flags= AV_OPT_FLAG_SUBTITLE_PARAM;
av_opt_set_defaults2(s, flags, flags);
+ av_channel_layout_uninit(&s->ch_layout);
+
s->time_base = (AVRational){0,1};
s->framerate = (AVRational){ 0, 1 };
s->pkt_timebase = (AVRational){ 0, 1 };
s->get_buffer2 = avcodec_default_get_buffer2;
s->get_format = avcodec_default_get_format;
+ s->get_encode_buffer = avcodec_default_get_encode_buffer;
s->execute = avcodec_default_execute;
s->execute2 = avcodec_default_execute2;
s->sample_aspect_ratio = (AVRational){0,1};
+ s->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
s->pix_fmt = AV_PIX_FMT_NONE;
s->sw_pix_fmt = AV_PIX_FMT_NONE;
s->sample_fmt = AV_SAMPLE_FMT_NONE;
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
s->reordered_opaque = AV_NOPTS_VALUE;
- if(codec && codec->priv_data_size){
- if(!s->priv_data){
- s->priv_data= av_mallocz(codec->priv_data_size);
- if (!s->priv_data) {
- return AVERROR(ENOMEM);
- }
- }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ if(codec && codec2->priv_data_size){
+ s->priv_data = av_mallocz(codec2->priv_data_size);
+ if (!s->priv_data)
+ return AVERROR(ENOMEM);
if(codec->priv_class){
*(const AVClass**)s->priv_data = codec->priv_class;
av_opt_set_defaults(s->priv_data);
}
}
- if (codec && codec->defaults) {
+ if (codec && codec2->defaults) {
int ret;
- const AVCodecDefault *d = codec->defaults;
+ const FFCodecDefault *d = codec2->defaults;
while (d->key) {
ret = av_opt_set(s, d->key, d->value, 0);
av_assert0(ret >= 0);
@@ -146,13 +150,6 @@ static int init_context_defaults(AVCodecContext *s, const AVCodec *codec)
return 0;
}
-#if FF_API_GET_CONTEXT_DEFAULTS
-int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec)
-{
- return init_context_defaults(s, codec);
-}
-#endif
-
AVCodecContext *avcodec_alloc_context3(const AVCodec *codec)
{
AVCodecContext *avctx= av_malloc(sizeof(AVCodecContext));
@@ -182,147 +179,16 @@ void avcodec_free_context(AVCodecContext **pavctx)
av_freep(&avctx->intra_matrix);
av_freep(&avctx->inter_matrix);
av_freep(&avctx->rc_override);
+ av_channel_layout_uninit(&avctx->ch_layout);
av_freep(pavctx);
}
-#if FF_API_COPY_CONTEXT
-static void copy_context_reset(AVCodecContext *avctx)
-{
- int i;
-
- av_opt_free(avctx);
-#if FF_API_CODED_FRAME
-FF_DISABLE_DEPRECATION_WARNINGS
- av_frame_free(&avctx->coded_frame);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
- av_freep(&avctx->rc_override);
- av_freep(&avctx->intra_matrix);
- av_freep(&avctx->inter_matrix);
- av_freep(&avctx->extradata);
- av_freep(&avctx->subtitle_header);
- av_buffer_unref(&avctx->hw_frames_ctx);
- av_buffer_unref(&avctx->hw_device_ctx);
- for (i = 0; i < avctx->nb_coded_side_data; i++)
- av_freep(&avctx->coded_side_data[i].data);
- av_freep(&avctx->coded_side_data);
- avctx->subtitle_header_size = 0;
- avctx->nb_coded_side_data = 0;
- avctx->extradata_size = 0;
-}
-
-int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
-{
- const AVCodec *orig_codec = dest->codec;
- uint8_t *orig_priv_data = dest->priv_data;
-
- if (avcodec_is_open(dest)) { // check that the dest context is uninitialized
- av_log(dest, AV_LOG_ERROR,
- "Tried to copy AVCodecContext %p into already-initialized %p\n",
- src, dest);
- return AVERROR(EINVAL);
- }
-
- copy_context_reset(dest);
-
- memcpy(dest, src, sizeof(*dest));
- av_opt_copy(dest, src);
-
- dest->priv_data = orig_priv_data;
- dest->codec = orig_codec;
-
- if (orig_priv_data && src->codec && src->codec->priv_class &&
- dest->codec && dest->codec->priv_class)
- av_opt_copy(orig_priv_data, src->priv_data);
-
-
- /* set values specific to opened codecs back to their default state */
- dest->slice_offset = NULL;
- dest->hwaccel = NULL;
- dest->internal = NULL;
-#if FF_API_CODED_FRAME
-FF_DISABLE_DEPRECATION_WARNINGS
- dest->coded_frame = NULL;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
- /* reallocate values that should be allocated separately */
- dest->extradata = NULL;
- dest->coded_side_data = NULL;
- dest->intra_matrix = NULL;
- dest->inter_matrix = NULL;
- dest->rc_override = NULL;
- dest->subtitle_header = NULL;
- dest->hw_frames_ctx = NULL;
- dest->hw_device_ctx = NULL;
- dest->nb_coded_side_data = 0;
-
-#define alloc_and_copy_or_fail(obj, size, pad) \
- if (src->obj && size > 0) { \
- dest->obj = av_malloc(size + pad); \
- if (!dest->obj) \
- goto fail; \
- memcpy(dest->obj, src->obj, size); \
- if (pad) \
- memset(((uint8_t *) dest->obj) + size, 0, pad); \
- }
- alloc_and_copy_or_fail(extradata, src->extradata_size,
- AV_INPUT_BUFFER_PADDING_SIZE);
- dest->extradata_size = src->extradata_size;
- alloc_and_copy_or_fail(intra_matrix, 64 * sizeof(int16_t), 0);
- alloc_and_copy_or_fail(inter_matrix, 64 * sizeof(int16_t), 0);
- alloc_and_copy_or_fail(rc_override, src->rc_override_count * sizeof(*src->rc_override), 0);
- alloc_and_copy_or_fail(subtitle_header, src->subtitle_header_size, 1);
- av_assert0(dest->subtitle_header_size == src->subtitle_header_size);
-#undef alloc_and_copy_or_fail
-
- if (src->hw_frames_ctx) {
- dest->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
- if (!dest->hw_frames_ctx)
- goto fail;
- }
-
- return 0;
-
-fail:
- copy_context_reset(dest);
- return AVERROR(ENOMEM);
-}
-#endif
-
const AVClass *avcodec_get_class(void)
{
return &av_codec_context_class;
}
-#define FOFFSET(x) offsetof(AVFrame,x)
-
-static const AVOption frame_options[]={
-{"best_effort_timestamp", "", FOFFSET(best_effort_timestamp), AV_OPT_TYPE_INT64, {.i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, 0},
-{"pkt_pos", "", FOFFSET(pkt_pos), AV_OPT_TYPE_INT64, {.i64 = -1 }, INT64_MIN, INT64_MAX, 0},
-{"pkt_size", "", FOFFSET(pkt_size), AV_OPT_TYPE_INT64, {.i64 = -1 }, INT64_MIN, INT64_MAX, 0},
-{"sample_aspect_ratio", "", FOFFSET(sample_aspect_ratio), AV_OPT_TYPE_RATIONAL, {.dbl = 0 }, 0, INT_MAX, 0},
-{"width", "", FOFFSET(width), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
-{"height", "", FOFFSET(height), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
-{"format", "", FOFFSET(format), AV_OPT_TYPE_INT, {.i64 = -1 }, 0, INT_MAX, 0},
-{"channel_layout", "", FOFFSET(channel_layout), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, 0},
-{"sample_rate", "", FOFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, 0},
-{NULL},
-};
-
-static const AVClass av_frame_class = {
- .class_name = "AVFrame",
- .item_name = NULL,
- .option = frame_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-const AVClass *avcodec_get_frame_class(void)
-{
- return &av_frame_class;
-}
-
#define SROFFSET(x) offsetof(AVSubtitleRect,x)
static const AVOption subtitle_rect_options[]={
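With avcodec_copy_context(), avcodec_get_context_defaults3() and avcodec_get_frame_class() removed above, settings travel between contexts via the AVCodecParameters API instead. A hedged sketch of that replacement pattern (caller-side helper with assumed names, not part of this patch):

#include <libavcodec/avcodec.h>

/* Carry the stream-level parameters of src into a fresh context for
 * 'codec'; returns NULL on any failure. */
static AVCodecContext *context_from_parameters(const AVCodecContext *src,
                                               const AVCodec *codec)
{
    AVCodecParameters *par = avcodec_parameters_alloc();
    AVCodecContext    *dst = avcodec_alloc_context3(codec);

    if (!par || !dst)
        goto fail;
    if (avcodec_parameters_from_context(par, src) < 0 ||
        avcodec_parameters_to_context(dst, par) < 0)
        goto fail;
    avcodec_parameters_free(&par);
    return dst;

fail:
    avcodec_parameters_free(&par);
    avcodec_free_context(&dst);
    return NULL;
}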
diff --git a/media/ffvpx/libavcodec/options_table.h b/media/ffvpx/libavcodec/options_table.h
index 4a266eca16..4fea57673a 100644
--- a/media/ffvpx/libavcodec/options_table.h
+++ b/media/ffvpx/libavcodec/options_table.h
@@ -22,13 +22,15 @@
#ifndef AVCODEC_OPTIONS_TABLE_H
#define AVCODEC_OPTIONS_TABLE_H
+#include "config_components.h"
+
#include <float.h>
#include <limits.h>
#include <stdint.h>
#include "libavutil/opt.h"
#include "avcodec.h"
-#include "version.h"
+#include "version_major.h"
#define OFFSET(x) offsetof(AVCodecContext,x)
#define DEFAULT 0 //should be NAN but it does not work as it is not a constant in glibc as required by ANSI/ISO C
@@ -38,6 +40,7 @@
#define S AV_OPT_FLAG_SUBTITLE_PARAM
#define E AV_OPT_FLAG_ENCODING_PARAM
#define D AV_OPT_FLAG_DECODING_PARAM
+#define CC AV_OPT_FLAG_CHILD_CONSTS
#define AV_CODEC_DEFAULT_BITRATE 200*1000
@@ -54,11 +57,13 @@ static const AVOption avcodec_options[] = {
{"qpel", "use 1/4-pel motion compensation", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_QPEL }, INT_MIN, INT_MAX, V|E, "flags"},
{"loop", "use loop filter", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOOP_FILTER }, INT_MIN, INT_MAX, V|E, "flags"},
{"qscale", "use fixed qscale", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_QSCALE }, INT_MIN, INT_MAX, 0, "flags"},
+{"recon_frame", "export reconstructed frames", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_RECON_FRAME}, .unit = "flags"},
+{"copy_opaque", "propagate opaque values", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_COPY_OPAQUE}, .unit = "flags"},
+{"frame_duration", "use frame durations", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_FRAME_DURATION}, .unit = "flags"},
{"pass1", "use internal 2-pass ratecontrol in first pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PASS1 }, INT_MIN, INT_MAX, 0, "flags"},
{"pass2", "use internal 2-pass ratecontrol in second pass mode", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PASS2 }, INT_MIN, INT_MAX, 0, "flags"},
{"gray", "only decode/encode grayscale", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_GRAY }, INT_MIN, INT_MAX, V|E|D, "flags"},
{"psnr", "error[?] variables will be set during encoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_PSNR }, INT_MIN, INT_MAX, V|E, "flags"},
-{"truncated", "Input bitstream might be randomly truncated", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_TRUNCATED }, INT_MIN, INT_MAX, V|D, "flags"},
{"ildct", "use interlaced DCT", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_INTERLACED_DCT }, INT_MIN, INT_MAX, V|E, "flags"},
{"low_delay", "force low delay", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOW_DELAY }, INT_MIN, INT_MAX, V|D|E, "flags"},
{"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
@@ -68,7 +73,7 @@ static const AVOption avcodec_options[] = {
{"cgop", "closed GOP", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
{"output_corrupt", "Output even potentially corrupted frames", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_OUTPUT_CORRUPT }, INT_MIN, INT_MAX, V|D, "flags"},
{"drop_changed", "Drop frames whose parameters differ from first decoded frame", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_DROPCHANGED }, INT_MIN, INT_MAX, A|V|D, "flags"},
-{"flags2", NULL, OFFSET(flags2), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT}, 0, UINT_MAX, V|A|E|D, "flags2"},
+{"flags2", NULL, OFFSET(flags2), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT}, 0, UINT_MAX, V|A|E|D|S, "flags2"},
{"fast", "allow non-spec-compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
{"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
{"ignorecrop", "ignore cropping information from sps", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_IGNORE_CROP }, INT_MIN, INT_MAX, V|D, "flags2"},
@@ -76,15 +81,23 @@ static const AVOption avcodec_options[] = {
{"chunks", "Frame data might be split into multiple chunks", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_CHUNKS }, INT_MIN, INT_MAX, V|D, "flags2"},
{"showall", "Show all frames before the first keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_SHOW_ALL }, INT_MIN, INT_MAX, V|D, "flags2"},
{"export_mvs", "export motion vectors through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_EXPORT_MVS}, INT_MIN, INT_MAX, V|D, "flags2"},
-{"skip_manual", "do not skip samples and export skip information as frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_SKIP_MANUAL}, INT_MIN, INT_MAX, V|D, "flags2"},
+{"skip_manual", "do not skip samples and export skip information as frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_SKIP_MANUAL}, INT_MIN, INT_MAX, A|D, "flags2"},
{"ass_ro_flush_noop", "do not reset ASS ReadOrder field on flush", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_RO_FLUSH_NOOP}, INT_MIN, INT_MAX, S|D, "flags2"},
+{"icc_profiles", "generate/parse embedded ICC profiles from/to colorimetry tags", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_ICC_PROFILES}, INT_MIN, INT_MAX, S|D, "flags2"},
+{"export_side_data", "Export metadata as side data", OFFSET(export_side_data), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT}, 0, UINT_MAX, A|V|S|D|E, "export_side_data"},
+{"mvs", "export motion vectors through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_MVS}, INT_MIN, INT_MAX, V|D, "export_side_data"},
+{"prft", "export Producer Reference Time through packet side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_PRFT}, INT_MIN, INT_MAX, A|V|S|E, "export_side_data"},
+{"venc_params", "export video encoding parameters through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS}, INT_MIN, INT_MAX, V|D, "export_side_data"},
+{"film_grain", "export film grain parameters through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_FILM_GRAIN}, INT_MIN, INT_MAX, V|D, "export_side_data"},
{"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, 0, INT_MAX},
{"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
+#if FF_API_OLD_CHANNEL_LAYOUT
{"ac", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
+#endif
{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, A|E},
{"frame_size", NULL, OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|E},
-{"frame_number", NULL, OFFSET(frame_number), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
+{"frame_number", NULL, OFFSET(frame_num), AV_OPT_TYPE_INT64, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
{"delay", NULL, OFFSET(delay), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
{"qcomp", "video quantizer scale compression (VBR). Constant of ratecontrol equation. "
"Recommended range for default rc_eq: 0.0-1.0",
@@ -95,21 +108,6 @@ static const AVOption avcodec_options[] = {
{"qdiff", "maximum difference between the quantizer scales (VBR)", OFFSET(max_qdiff), AV_OPT_TYPE_INT, {.i64 = 3 }, INT_MIN, INT_MAX, V|E},
{"bf", "set maximum number of B-frames between non-B-frames", OFFSET(max_b_frames), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, -1, INT_MAX, V|E},
{"b_qfactor", "QP factor between P- and B-frames", OFFSET(b_quant_factor), AV_OPT_TYPE_FLOAT, {.dbl = 1.25 }, -FLT_MAX, FLT_MAX, V|E},
-#if FF_API_PRIVATE_OPT
-{"b_strategy", "strategy to choose between I/P/B-frames", OFFSET(b_frame_strategy), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, V|E},
-{"ps", "RTP payload size in bytes", OFFSET(rtp_payload_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-#endif
-#if FF_API_STAT_BITS
-{"mv_bits", NULL, OFFSET(mv_bits), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
-{"header_bits", NULL, OFFSET(header_bits), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
-{"i_tex_bits", NULL, OFFSET(i_tex_bits), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
-{"p_tex_bits", NULL, OFFSET(p_tex_bits), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
-{"i_count", NULL, OFFSET(i_count), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
-{"p_count", NULL, OFFSET(p_count), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
-{"skip_count", NULL, OFFSET(skip_count), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
-{"misc_bits", NULL, OFFSET(misc_bits), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
-{"frame_bits", NULL, OFFSET(frame_bits), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
-#endif
{"codec_tag", NULL, OFFSET(codec_tag), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
{"bug", "work around not autodetected encoder bugs", OFFSET(workaround_bugs), AV_OPT_TYPE_FLAGS, {.i64 = FF_BUG_AUTODETECT }, INT_MIN, INT_MAX, V|D, "bug"},
{"autodetect", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_BUG_AUTODETECT }, INT_MIN, INT_MAX, V|D, "bug"},
@@ -134,20 +132,17 @@ static const AVOption avcodec_options[] = {
{"unofficial", "allow unofficial extensions", 0, AV_OPT_TYPE_CONST, {.i64 = FF_COMPLIANCE_UNOFFICIAL }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
{"experimental", "allow non-standardized experimental things", 0, AV_OPT_TYPE_CONST, {.i64 = FF_COMPLIANCE_EXPERIMENTAL }, INT_MIN, INT_MAX, A|V|D|E, "strict"},
{"b_qoffset", "QP offset between P- and B-frames", OFFSET(b_quant_offset), AV_OPT_TYPE_FLOAT, {.dbl = 1.25 }, -FLT_MAX, FLT_MAX, V|E},
-{"err_detect", "set error detection flags", OFFSET(err_recognition), AV_OPT_TYPE_FLAGS, {.i64 = 0 }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
-{"crccheck", "verify embedded CRCs", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CRCCHECK }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
-{"bitstream", "detect bitstream specification deviations", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_BITSTREAM }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
-{"buffer", "detect improper bitstream length", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_BUFFER }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
-{"explode", "abort decoding on minor error detection", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_EXPLODE }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
-{"ignore_err", "ignore errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_IGNORE_ERR }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
-{"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
-{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
-{"aggressive", "consider things that a sane encoder should not do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
+{"err_detect", "set error detection flags", OFFSET(err_recognition), AV_OPT_TYPE_FLAGS, {.i64 = 0 }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"crccheck", "verify embedded CRCs", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CRCCHECK }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"bitstream", "detect bitstream specification deviations", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_BITSTREAM }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"buffer", "detect improper bitstream length", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_BUFFER }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"explode", "abort decoding on minor error detection", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_EXPLODE }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"ignore_err", "ignore errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_IGNORE_ERR }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT | AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
+{"aggressive", "consider things that a sane encoder should not do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE | AV_EF_COMPLIANT | AV_EF_CAREFUL}, INT_MIN, INT_MAX, A|V|S|D|E, "err_detect"},
{"has_b_frames", NULL, OFFSET(has_b_frames), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
{"block_align", NULL, OFFSET(block_align), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
-#if FF_API_PRIVATE_OPT
-{"mpeg_quant", "use MPEG quantizers instead of H.263", OFFSET(mpeg_quant), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-#endif
{"rc_override_count", NULL, OFFSET(rc_override_count), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
{"maxrate", "maximum bitrate (in bits/s). Used for VBV together with bufsize.", OFFSET(rc_max_rate), AV_OPT_TYPE_INT64, {.i64 = DEFAULT }, 0, INT_MAX, V|A|E},
{"minrate", "minimum bitrate (in bits/s). Most useful in setting up a CBR encode. It is of little use otherwise.",
@@ -188,12 +183,6 @@ static const AVOption avcodec_options[] = {
{"deblock", "use strong deblock filter for damaged MBs", 0, AV_OPT_TYPE_CONST, {.i64 = FF_EC_DEBLOCK }, INT_MIN, INT_MAX, V|D, "ec"},
{"favor_inter", "favor predicting from the previous frame", 0, AV_OPT_TYPE_CONST, {.i64 = FF_EC_FAVOR_INTER }, INT_MIN, INT_MAX, V|D, "ec"},
{"bits_per_coded_sample", NULL, OFFSET(bits_per_coded_sample), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
-#if FF_API_PRIVATE_OPT
-{"pred", "prediction method", OFFSET(prediction_method), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E, "pred"},
-{"left", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PRED_LEFT }, INT_MIN, INT_MAX, V|E, "pred"},
-{"plane", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PRED_PLANE }, INT_MIN, INT_MAX, V|E, "pred"},
-{"median", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PRED_MEDIAN }, INT_MIN, INT_MAX, V|E, "pred"},
-#endif
{"aspect", "sample aspect ratio", OFFSET(sample_aspect_ratio), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, 0, 10, V|E},
{"sar", "sample aspect ratio", OFFSET(sample_aspect_ratio), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, 0, 10, V|E},
{"debug", "print specific debug info", OFFSET(debug), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT }, 0, INT_MAX, V|A|S|E|D, "debug"},
@@ -202,9 +191,6 @@ static const AVOption avcodec_options[] = {
{"bitstream", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_BITSTREAM }, INT_MIN, INT_MAX, V|D, "debug"},
{"mb_type", "macroblock (MB) type", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_MB_TYPE }, INT_MIN, INT_MAX, V|D, "debug"},
{"qp", "per-block quantization parameter (QP)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_QP }, INT_MIN, INT_MAX, V|D, "debug"},
-#if FF_API_DEBUG_MV
-{"mv", "motion vector", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_MV }, INT_MIN, INT_MAX, V|D, "debug"},
-#endif
{"dct_coeff", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_DCT_COEFF }, INT_MIN, INT_MAX, V|D, "debug"},
{"green_metadata", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_GREEN_MD }, INT_MIN, INT_MAX, V|D, "debug"},
{"skip", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_SKIP }, INT_MIN, INT_MAX, V|D, "debug"},
@@ -212,43 +198,20 @@ static const AVOption avcodec_options[] = {
{"er", "error recognition", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_ER }, INT_MIN, INT_MAX, V|D, "debug"},
{"mmco", "memory management control operations (H.264)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_MMCO }, INT_MIN, INT_MAX, V|D, "debug"},
{"bugs", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_BUGS }, INT_MIN, INT_MAX, V|D, "debug"},
-#if FF_API_DEBUG_MV
-{"vis_qp", "visualize quantization parameter (QP), lower QP are tinted greener", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_VIS_QP }, INT_MIN, INT_MAX, V|D, "debug"},
-{"vis_mb_type", "visualize block types", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_VIS_MB_TYPE }, INT_MIN, INT_MAX, V|D, "debug"},
-#endif
{"buffers", "picture buffer allocations", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_BUFFERS }, INT_MIN, INT_MAX, V|D, "debug"},
{"thread_ops", "threading operations", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_THREADS }, INT_MIN, INT_MAX, V|A|D, "debug"},
{"nomc", "skip motion compensation", 0, AV_OPT_TYPE_CONST, {.i64 = FF_DEBUG_NOMC }, INT_MIN, INT_MAX, V|A|D, "debug"},
{"dia_size", "diamond type & size for motion estimation", OFFSET(dia_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"last_pred", "amount of motion predictors from the previous frame", OFFSET(last_predictor_count), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-#if FF_API_PRIVATE_OPT
-{"preme", "pre motion estimation", OFFSET(pre_me), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-#endif
{"pre_dia_size", "diamond type & size for motion estimation pre-pass", OFFSET(pre_dia_size), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"subq", "sub-pel motion estimation quality", OFFSET(me_subpel_quality), AV_OPT_TYPE_INT, {.i64 = 8 }, INT_MIN, INT_MAX, V|E},
{"me_range", "limit motion vectors range (1023 for DivX player)", OFFSET(me_range), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"global_quality", NULL, OFFSET(global_quality), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
-#if FF_API_CODER_TYPE
-{"coder", NULL, OFFSET(coder_type), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E, "coder"},
-{"vlc", "variable length coder / Huffman coder", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CODER_TYPE_VLC }, INT_MIN, INT_MAX, V|E, "coder"},
-{"ac", "arithmetic coder", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CODER_TYPE_AC }, INT_MIN, INT_MAX, V|E, "coder"},
-{"raw", "raw (no encoding)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CODER_TYPE_RAW }, INT_MIN, INT_MAX, V|E, "coder"},
-{"rle", "run-length coder", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CODER_TYPE_RLE }, INT_MIN, INT_MAX, V|E, "coder"},
-#endif /* FF_API_CODER_TYPE */
-#if FF_API_PRIVATE_OPT
-{"context", "context model", OFFSET(context_model), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-#endif
{"slice_flags", NULL, OFFSET(slice_flags), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
{"mbd", "macroblock decision algorithm (high quality mode)", OFFSET(mb_decision), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, 2, V|E, "mbd"},
{"simple", "use mbcmp", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MB_DECISION_SIMPLE }, INT_MIN, INT_MAX, V|E, "mbd"},
{"bits", "use fewest bits", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MB_DECISION_BITS }, INT_MIN, INT_MAX, V|E, "mbd"},
{"rd", "use best rate distortion", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MB_DECISION_RD }, INT_MIN, INT_MAX, V|E, "mbd"},
-#if FF_API_PRIVATE_OPT
-{"sc_threshold", "scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-#endif
-#if FF_API_PRIVATE_OPT
-{"nr", "noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-#endif
{"rc_init_occupancy", "number of bits which should be loaded into the rc buffer before decoding starts", OFFSET(rc_initial_buffer_occupancy), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
{"threads", "set the number of threads", OFFSET(thread_count), AV_OPT_TYPE_INT, {.i64 = 1 }, 0, INT_MAX, V|A|E|D, "threads"},
{"auto", "autodetect a suitable number of threads to use", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, INT_MIN, INT_MAX, V|E|D, "threads"},
@@ -256,38 +219,12 @@ static const AVOption avcodec_options[] = {
{"nssew", "nsse weight", OFFSET(nsse_weight), AV_OPT_TYPE_INT, {.i64 = 8 }, INT_MIN, INT_MAX, V|E},
{"skip_top", "number of macroblock rows at the top which are skipped", OFFSET(skip_top), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|D},
{"skip_bottom", "number of macroblock rows at the bottom which are skipped", OFFSET(skip_bottom), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|D},
-{"profile", NULL, OFFSET(profile), AV_OPT_TYPE_INT, {.i64 = FF_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "profile"},
-{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "profile"},
-{"aac_main", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_AAC_MAIN }, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_low", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_AAC_LOW }, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_ssr", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_AAC_SSR }, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_ltp", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_AAC_LTP }, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_he", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_AAC_HE }, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_he_v2", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_AAC_HE_V2 }, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_ld", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_AAC_LD }, INT_MIN, INT_MAX, A|E, "profile"},
-{"aac_eld", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_AAC_ELD }, INT_MIN, INT_MAX, A|E, "profile"},
-{"mpeg2_aac_low", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_MPEG2_AAC_LOW }, INT_MIN, INT_MAX, A|E, "profile"},
-{"mpeg2_aac_he", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_MPEG2_AAC_HE }, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_DTS }, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts_es", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_DTS_ES }, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts_96_24", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_DTS_96_24 }, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts_hd_hra", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_DTS_HD_HRA }, INT_MIN, INT_MAX, A|E, "profile"},
-{"dts_hd_ma", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_DTS_HD_MA }, INT_MIN, INT_MAX, A|E, "profile"},
-{"mpeg4_sp", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_MPEG4_SIMPLE }, INT_MIN, INT_MAX, V|E, "profile"},
-{"mpeg4_core", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_MPEG4_CORE }, INT_MIN, INT_MAX, V|E, "profile"},
-{"mpeg4_main", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_MPEG4_MAIN }, INT_MIN, INT_MAX, V|E, "profile"},
-{"mpeg4_asp", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_MPEG4_ADVANCED_SIMPLE }, INT_MIN, INT_MAX, V|E, "profile"},
-{"main10", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_HEVC_MAIN_10 }, INT_MIN, INT_MAX, V|E, "profile"},
-{"msbc", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_SBC_MSBC }, INT_MIN, INT_MAX, A|E, "profile"},
-{"level", NULL, OFFSET(level), AV_OPT_TYPE_INT, {.i64 = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "level"},
-{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "level"},
+{"profile", NULL, OFFSET(profile), AV_OPT_TYPE_INT, {.i64 = FF_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, V|A|E|CC, "avctx.profile"},
+{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "avctx.profile"},
+{"main10", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_PROFILE_HEVC_MAIN_10 }, INT_MIN, INT_MAX, V|E, "avctx.profile"},
+{"level", NULL, OFFSET(level), AV_OPT_TYPE_INT, {.i64 = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E|CC, "avctx.level"},
+{"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_LEVEL_UNKNOWN }, INT_MIN, INT_MAX, V|A|E, "avctx.level"},
{"lowres", "decode at 1= 1/2, 2=1/4, 3=1/8 resolutions", OFFSET(lowres), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, V|A|D},
-#if FF_API_PRIVATE_OPT
-{"skip_threshold", "frame skip threshold", OFFSET(frame_skip_threshold), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-{"skip_factor", "frame skip factor", OFFSET(frame_skip_factor), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-{"skip_exp", "frame skip exponent", OFFSET(frame_skip_exp), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-{"skipcmp", "frame skip compare function", OFFSET(frame_skip_cmp), AV_OPT_TYPE_INT, {.i64 = FF_CMP_DCTMAX }, INT_MIN, INT_MAX, V|E, "cmp_func"},
-#endif
{"cmp", "full-pel ME compare function", OFFSET(me_cmp), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"subcmp", "sub-pel ME compare function", OFFSET(me_sub_cmp), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"mbcmp", "macroblock compare function", OFFSET(mb_cmp), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E, "cmp_func"},
@@ -313,9 +250,6 @@ static const AVOption avcodec_options[] = {
{"msad", "sum of absolute differences, median predicted", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_MEDIAN_SAD }, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"mblmin", "minimum macroblock Lagrange factor (VBR)", OFFSET(mb_lmin), AV_OPT_TYPE_INT, {.i64 = FF_QP2LAMBDA * 2 }, 1, FF_LAMBDA_MAX, V|E},
{"mblmax", "maximum macroblock Lagrange factor (VBR)", OFFSET(mb_lmax), AV_OPT_TYPE_INT, {.i64 = FF_QP2LAMBDA * 31 }, 1, FF_LAMBDA_MAX, V|E},
-#if FF_API_PRIVATE_OPT
-{"mepc", "motion estimation bitrate penalty compensation (1.0 = 256)", OFFSET(me_penalty_compensation), AV_OPT_TYPE_INT, {.i64 = 256 }, INT_MIN, INT_MAX, V|E},
-#endif
{"skip_loop_filter", "skip loop filtering process for the selected frames", OFFSET(skip_loop_filter), AV_OPT_TYPE_INT, {.i64 = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"skip_idct" , "skip IDCT/dequantization for the selected frames", OFFSET(skip_idct), AV_OPT_TYPE_INT, {.i64 = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"skip_frame" , "skip decoding for the selected frames", OFFSET(skip_frame), AV_OPT_TYPE_INT, {.i64 = AVDISCARD_DEFAULT }, INT_MIN, INT_MAX, V|D, "avdiscard"},
@@ -327,28 +261,17 @@ static const AVOption avcodec_options[] = {
{"nointra" , "discard all frames except I frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_NONINTRA}, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"all" , "discard all frames", 0, AV_OPT_TYPE_CONST, {.i64 = AVDISCARD_ALL }, INT_MIN, INT_MAX, V|D, "avdiscard"},
{"bidir_refine", "refine the two motion vectors used in bidirectional macroblocks", OFFSET(bidir_refine), AV_OPT_TYPE_INT, {.i64 = 1 }, 0, 4, V|E},
-#if FF_API_PRIVATE_OPT
-{"brd_scale", "downscale frames for dynamic B-frame decision", OFFSET(brd_scale), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, 10, V|E},
-#endif
{"keyint_min", "minimum interval between IDR-frames", OFFSET(keyint_min), AV_OPT_TYPE_INT, {.i64 = 25 }, INT_MIN, INT_MAX, V|E},
{"refs", "reference frames to consider for motion compensation", OFFSET(refs), AV_OPT_TYPE_INT, {.i64 = 1 }, INT_MIN, INT_MAX, V|E},
-#if FF_API_PRIVATE_OPT
-{"chromaoffset", "chroma QP offset from luma", OFFSET(chromaoffset), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|E},
-#endif
{"trellis", "rate-distortion optimal quantization", OFFSET(trellis), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX, V|A|E},
{"mv0_threshold", NULL, OFFSET(mv0_threshold), AV_OPT_TYPE_INT, {.i64 = 256 }, 0, INT_MAX, V|E},
-#if FF_API_PRIVATE_OPT
-{"b_sensitivity", "adjust sensitivity of b_frame_strategy 1", OFFSET(b_sensitivity), AV_OPT_TYPE_INT, {.i64 = 40 }, 1, INT_MAX, V|E},
-#endif
{"compression_level", NULL, OFFSET(compression_level), AV_OPT_TYPE_INT, {.i64 = FF_COMPRESSION_DEFAULT }, INT_MIN, INT_MAX, V|A|E},
-#if FF_API_PRIVATE_OPT
-{"min_prediction_order", NULL, OFFSET(min_prediction_order), AV_OPT_TYPE_INT, {.i64 = -1 }, INT_MIN, INT_MAX, A|E},
-{"max_prediction_order", NULL, OFFSET(max_prediction_order), AV_OPT_TYPE_INT, {.i64 = -1 }, INT_MIN, INT_MAX, A|E},
-{"timecode_frame_start", "GOP timecode frame start number, in non-drop-frame format", OFFSET(timecode_frame_start), AV_OPT_TYPE_INT64, {.i64 = -1 }, -1, INT64_MAX, V|E},
-#endif
{"bits_per_raw_sample", NULL, OFFSET(bits_per_raw_sample), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
-{"channel_layout", NULL, OFFSET(channel_layout), AV_OPT_TYPE_UINT64, {.i64 = DEFAULT }, 0, UINT64_MAX, A|E|D, "channel_layout"},
-{"request_channel_layout", NULL, OFFSET(request_channel_layout), AV_OPT_TYPE_UINT64, {.i64 = DEFAULT }, 0, UINT64_MAX, A|D, "request_channel_layout"},
+{"ch_layout", NULL, OFFSET(ch_layout), AV_OPT_TYPE_CHLAYOUT, {.str = NULL }, 0, 0, A|E|D, "ch_layout"},
+#if FF_API_OLD_CHANNEL_LAYOUT
+{"channel_layout", NULL, OFFSET(channel_layout), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64 = DEFAULT }, 0, UINT64_MAX, A|E|D, "channel_layout"},
+{"request_channel_layout", NULL, OFFSET(request_channel_layout), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64 = DEFAULT }, 0, UINT64_MAX, A|D, "request_channel_layout"},
+#endif
{"rc_max_vbv_use", NULL, OFFSET(rc_max_available_vbv_use), AV_OPT_TYPE_FLOAT, {.dbl = 0 }, 0.0, FLT_MAX, V|E},
{"rc_min_vbv_use", NULL, OFFSET(rc_min_vbv_overflow_use), AV_OPT_TYPE_FLOAT, {.dbl = 3 }, 0.0, FLT_MAX, V|E},
{"ticks_per_frame", NULL, OFFSET(ticks_per_frame), AV_OPT_TYPE_INT, {.i64 = 1 }, 1, INT_MAX, A|V|E|D},
@@ -366,6 +289,7 @@ static const AVOption avcodec_options[] = {
{"smpte431", "SMPTE 431-2", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_SMPTE431 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
{"smpte432", "SMPTE 422-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_SMPTE432 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
{"jedec-p22", "JEDEC P22", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_JEDEC_P22 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
+{"ebu3213", "EBU 3213-E", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_EBU3213 }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
{"unspecified", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_PRI_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "color_primaries_type"},
{"color_trc", "color transfer characteristics", OFFSET(color_trc), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_UNSPECIFIED }, 1, INT_MAX, V|E|D, "color_trc_type"},
{"bt709", "BT.709", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
@@ -395,21 +319,24 @@ static const AVOption avcodec_options[] = {
{"bt2020_12bit", "BT.2020 - 12 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_12 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
{"smpte428_1", "SMPTE 428-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE428 }, INT_MIN, INT_MAX, V|E|D, "color_trc_type"},
{"colorspace", "color space", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64 = AVCOL_SPC_UNSPECIFIED }, 0, INT_MAX, V|E|D, "colorspace_type"},
-{"rgb", "RGB", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_RGB }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"bt709", "BT.709", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709 }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"unknown", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"fcc", "FCC", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_FCC }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"bt470bg", "BT.470 BG", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT470BG }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"smpte170m", "SMPTE 170 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_SMPTE170M }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"smpte240m", "SMPTE 240 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_SMPTE240M }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"ycgco", "YCGCO", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_YCGCO }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"bt2020nc", "BT.2020 NCL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"bt2020c", "BT.2020 CL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_CL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"smpte2085", "SMPTE 2085", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_SMPTE2085 }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"unspecified", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"ycocg", "YCGCO", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_YCGCO }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"bt2020_ncl", "BT.2020 NCL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
-{"bt2020_cl", "BT.2020 CL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_CL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"rgb", "RGB", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_RGB }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt709", "BT.709", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT709 }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"unknown", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"fcc", "FCC", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_FCC }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt470bg", "BT.470 BG", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT470BG }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"smpte170m", "SMPTE 170 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_SMPTE170M }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"smpte240m", "SMPTE 240 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_SMPTE240M }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"ycgco", "YCGCO", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_YCGCO }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt2020nc", "BT.2020 NCL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt2020c", "BT.2020 CL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_CL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"smpte2085", "SMPTE 2085", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_SMPTE2085 }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"chroma-derived-nc", "Chroma-derived NCL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_CHROMA_DERIVED_NCL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"chroma-derived-c", "Chroma-derived CL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_CHROMA_DERIVED_CL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"ictcp", "ICtCp", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_ICTCP }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"unspecified", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"ycocg", "YCGCO", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_YCGCO }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt2020_ncl", "BT.2020 NCL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_NCL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
+{"bt2020_cl", "BT.2020 CL", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_SPC_BT2020_CL }, INT_MIN, INT_MAX, V|E|D, "colorspace_type"},
{"color_range", "color range", OFFSET(color_range), AV_OPT_TYPE_INT, {.i64 = AVCOL_RANGE_UNSPECIFIED }, 0, INT_MAX, V|E|D, "color_range_type"},
{"unknown", "Unspecified", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_UNSPECIFIED }, INT_MIN, INT_MAX, V|E|D, "color_range_type"},
{"tv", "MPEG (219*2^(n-8))", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_RANGE_MPEG }, INT_MIN, INT_MAX, V|E|D, "color_range_type"},
@@ -443,25 +370,12 @@ static const AVOption avcodec_options[] = {
{"ka", "Karaoke", 0, AV_OPT_TYPE_CONST, {.i64 = AV_AUDIO_SERVICE_TYPE_KARAOKE }, INT_MIN, INT_MAX, A|E, "audio_service_type"},
{"request_sample_fmt", "sample format audio decoders should prefer", OFFSET(request_sample_fmt), AV_OPT_TYPE_SAMPLE_FMT, {.i64=AV_SAMPLE_FMT_NONE}, -1, INT_MAX, A|D, "request_sample_fmt"},
{"pkt_timebase", NULL, OFFSET(pkt_timebase), AV_OPT_TYPE_RATIONAL, {.dbl = 0 }, 0, INT_MAX, 0},
-{"sub_charenc", "set input text subtitles character encoding", OFFSET(sub_charenc), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, S|D},
+{"sub_charenc", "set input text subtitles character encoding", OFFSET(sub_charenc), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, S|D},
{"sub_charenc_mode", "set input text subtitles character encoding mode", OFFSET(sub_charenc_mode), AV_OPT_TYPE_FLAGS, {.i64 = FF_SUB_CHARENC_MODE_AUTOMATIC}, -1, INT_MAX, S|D, "sub_charenc_mode"},
{"do_nothing", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_DO_NOTHING}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
{"auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_AUTOMATIC}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
{"pre_decoder", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_PRE_DECODER}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
{"ignore", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_CHARENC_MODE_IGNORE}, INT_MIN, INT_MAX, S|D, "sub_charenc_mode"},
-#if FF_API_ASS_TIMING
-{"sub_text_format", "set decoded text subtitle format", OFFSET(sub_text_format), AV_OPT_TYPE_INT, {.i64 = FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS}, 0, 1, S|D, "sub_text_format"},
-#else
-{"sub_text_format", "set decoded text subtitle format", OFFSET(sub_text_format), AV_OPT_TYPE_INT, {.i64 = FF_SUB_TEXT_FMT_ASS}, 0, 1, S|D, "sub_text_format"},
-#endif
-{"ass", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_TEXT_FMT_ASS}, INT_MIN, INT_MAX, S|D, "sub_text_format"},
-#if FF_API_ASS_TIMING
-{"ass_with_timings", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS}, INT_MIN, INT_MAX, S|D, "sub_text_format"},
-#endif
-{"refcounted_frames", NULL, OFFSET(refcounted_frames), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, A|V|D },
-#if FF_API_SIDEDATA_ONLY_PKT
-{"side_data_only_packets", NULL, OFFSET(side_data_only_packets), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, A|V|E },
-#endif
{"apply_cropping", NULL, OFFSET(apply_cropping), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, V | D },
{"skip_alpha", "Skip processing alpha", OFFSET(skip_alpha), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, V|D },
{"field_order", "Field order", OFFSET(field_order), AV_OPT_TYPE_INT, {.i64 = AV_FIELD_UNKNOWN }, 0, 5, V|D|E, "field_order" },
@@ -470,15 +384,17 @@ static const AVOption avcodec_options[] = {
{"bb", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_BB }, 0, 0, V|D|E, "field_order" },
{"tb", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_TB }, 0, 0, V|D|E, "field_order" },
{"bt", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = AV_FIELD_BT }, 0, 0, V|D|E, "field_order" },
-{"dump_separator", "set information dump field separator", OFFSET(dump_separator), AV_OPT_TYPE_STRING, {.str = NULL}, CHAR_MIN, CHAR_MAX, A|V|S|D|E},
-{"codec_whitelist", "List of decoders that are allowed to be used", OFFSET(codec_whitelist), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, A|V|S|D },
+{"dump_separator", "set information dump field separator", OFFSET(dump_separator), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, A|V|S|D|E},
+{"codec_whitelist", "List of decoders that are allowed to be used", OFFSET(codec_whitelist), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, A|V|S|D },
{"pixel_format", "set pixel format", OFFSET(pix_fmt), AV_OPT_TYPE_PIXEL_FMT, {.i64=AV_PIX_FMT_NONE}, -1, INT_MAX, 0 },
{"video_size", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str=NULL}, 0, INT_MAX, 0 },
{"max_pixels", "Maximum number of pixels", OFFSET(max_pixels), AV_OPT_TYPE_INT64, {.i64 = INT_MAX }, 0, INT_MAX, A|V|S|D|E },
+{"max_samples", "Maximum number of samples", OFFSET(max_samples), AV_OPT_TYPE_INT64, {.i64 = INT_MAX }, 0, INT_MAX, A|D|E },
{"hwaccel_flags", NULL, OFFSET(hwaccel_flags), AV_OPT_TYPE_FLAGS, {.i64 = AV_HWACCEL_FLAG_IGNORE_LEVEL }, 0, UINT_MAX, V|D, "hwaccel_flags"},
{"ignore_level", "ignore level even if the codec level used is unknown or higher than the maximum supported level reported by the hardware driver", 0, AV_OPT_TYPE_CONST, { .i64 = AV_HWACCEL_FLAG_IGNORE_LEVEL }, INT_MIN, INT_MAX, V | D, "hwaccel_flags" },
{"allow_high_depth", "allow to output YUV pixel formats with a different chroma sampling than 4:2:0 and/or other than 8 bits per component", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
{"allow_profile_mismatch", "attempt to decode anyway if HW accelerated decoder's supported profiles do not exactly match the stream", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
+{"unsafe_output", "allow potentially unsafe hwaccel frame output that might require special care to process successfully", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_UNSAFE_OUTPUT }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
{"extra_hw_frames", "Number of extra hardware frames to allocate for the user", OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, V|D },
{"discard_damaged_percentage", "Percentage of damaged samples to discard a frame", OFFSET(discard_damaged_percentage), AV_OPT_TYPE_INT, {.i64 = 95 }, 0, 100, V|D },
{NULL},
@@ -489,6 +405,7 @@ static const AVOption avcodec_options[] = {
#undef S
#undef E
#undef D
+#undef CC
#undef DEFAULT
#undef OFFSET
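
These option-table rows are resolved through the generic AVOptions machinery, so the entries added above can be set by name on an AVCodecContext. A minimal sketch, assuming a decoder context has already been allocated; only the option names ("max_samples", "hwaccel_flags") and the named flag constant come from the table above, the helper itself is illustrative:

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static int configure_decoder(AVCodecContext *avctx)
{
    int ret;

    /* Cap decoded audio frames at one million samples via the new "max_samples" option. */
    ret = av_opt_set_int(avctx, "max_samples", 1000000, 0);
    if (ret < 0)
        return ret;

    /* Flag-type options accept their named constants as strings. */
    return av_opt_set(avctx, "hwaccel_flags", "+allow_profile_mismatch", 0);
}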
diff --git a/media/ffvpx/libavcodec/packet.h b/media/ffvpx/libavcodec/packet.h
new file mode 100644
index 0000000000..f28e7e7011
--- /dev/null
+++ b/media/ffvpx/libavcodec/packet.h
@@ -0,0 +1,731 @@
+/*
+ * AVPacket public API
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PACKET_H
+#define AVCODEC_PACKET_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/buffer.h"
+#include "libavutil/dict.h"
+#include "libavutil/rational.h"
+#include "libavutil/version.h"
+
+#include "libavcodec/version_major.h"
+
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
+enum AVPacketSideDataType {
+ /**
+ * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE
+ * bytes worth of palette. This side data signals that a new palette is
+ * present.
+ */
+ AV_PKT_DATA_PALETTE,
+
+ /**
+ * AV_PKT_DATA_NEW_EXTRADATA side data is used to notify the codec or the format
+ * that the extradata buffer was changed and the receiving side should
+ * act upon it appropriately. The new extradata is embedded in the side
+ * data buffer and should be immediately used for processing the current
+ * frame or packet.
+ */
+ AV_PKT_DATA_NEW_EXTRADATA,
+
+ /**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * @code
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ * @endcode
+ */
+ AV_PKT_DATA_PARAM_CHANGE,
+
+ /**
+ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+ * structures with info about macroblocks relevant to splitting the
+ * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+ * That is, it does not necessarily contain info about all macroblocks,
+ * as long as the distance between macroblocks in the info is smaller
+ * than the target payload size.
+ * Each MB info structure is 12 bytes, and is laid out as follows:
+ * @code
+ * u32le bit offset from the start of the packet
+ * u8 current quantizer at the start of the macroblock
+ * u8 GOB number
+ * u16le macroblock address within the GOB
+ * u8 horizontal MV predictor
+ * u8 vertical MV predictor
+ * u8 horizontal MV predictor for block number 3
+ * u8 vertical MV predictor for block number 3
+ * @endcode
+ */
+ AV_PKT_DATA_H263_MB_INFO,
+
+ /**
+ * This side data should be associated with an audio stream and contains
+ * ReplayGain information in form of the AVReplayGain struct.
+ */
+ AV_PKT_DATA_REPLAYGAIN,
+
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the decoded video frames for
+ * correct presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_PKT_DATA_DISPLAYMATRIX,
+
+ /**
+ * This side data should be associated with a video stream and contains
+ * Stereoscopic 3D information in form of the AVStereo3D struct.
+ */
+ AV_PKT_DATA_STEREO3D,
+
+ /**
+ * This side data should be associated with an audio stream and corresponds
+ * to enum AVAudioServiceType.
+ */
+ AV_PKT_DATA_AUDIO_SERVICE_TYPE,
+
+ /**
+ * This side data contains quality related information from the encoder.
+ * @code
+ * u32le quality factor of the compressed frame. Allowed range is between 1 (good) and FF_LAMBDA_MAX (bad).
+ * u8 picture type
+ * u8 error count
+ * u16 reserved
+ * u64le[error count] sum of squared differences between encoder input and output
+ * @endcode
+ */
+ AV_PKT_DATA_QUALITY_STATS,
+
+ /**
+ * This side data contains an integer value representing the stream index
+ * of a "fallback" track. A fallback track indicates an alternate
+ * track to use when the current track cannot be decoded for some reason,
+ * e.g. no decoder available for the codec.
+ */
+ AV_PKT_DATA_FALLBACK_TRACK,
+
+ /**
+ * This side data corresponds to the AVCPBProperties struct.
+ */
+ AV_PKT_DATA_CPB_PROPERTIES,
+
+ /**
+ * Recommends skipping the specified number of samples
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_PKT_DATA_SKIP_SAMPLES,
+
+ /**
+ * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
+ * the packet may contain "dual mono" audio specific to Japanese DTV
+ * and, if it is true, recommends that only the selected channel be used.
+ * @code
+ * u8 selected channels (0=main/left, 1=sub/right, 2=both)
+ * @endcode
+ */
+ AV_PKT_DATA_JP_DUALMONO,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop.
+ */
+ AV_PKT_DATA_STRINGS_METADATA,
+
+ /**
+ * Subtitle event position
+ * @code
+ * u32le x1
+ * u32le y1
+ * u32le x2
+ * u32le y2
+ * @endcode
+ */
+ AV_PKT_DATA_SUBTITLE_POSITION,
+
+ /**
+ * Data found in BlockAdditional element of matroska container. There is
+ * no end marker for the data, so it is required to rely on the side data
+ * size to recognize the end. 8 byte id (as found in BlockAddId) followed
+ * by data.
+ */
+ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+
+ /**
+ * The optional first identifier line of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_IDENTIFIER,
+
+ /**
+ * The optional settings (rendering instructions) that immediately
+ * follow the timestamp specifier of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_SETTINGS,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop. This
+ * side data includes updated metadata which appeared in the stream.
+ */
+ AV_PKT_DATA_METADATA_UPDATE,
+
+ /**
+ * MPEGTS stream ID as uint8_t; this is required to pass the stream ID
+ * information from the demuxer to the corresponding muxer.
+ */
+ AV_PKT_DATA_MPEGTS_STREAM_ID,
+
+ /**
+ * Mastering display metadata (based on SMPTE-2086:2014). This metadata
+ * should be associated with a video stream and contains data in the form
+ * of the AVMasteringDisplayMetadata struct.
+ */
+ AV_PKT_DATA_MASTERING_DISPLAY_METADATA,
+
+ /**
+ * This side data should be associated with a video stream and corresponds
+ * to the AVSphericalMapping structure.
+ */
+ AV_PKT_DATA_SPHERICAL,
+
+ /**
+ * Content light level (based on CTA-861.3). This metadata should be
+ * associated with a video stream and contains data in the form of the
+ * AVContentLightMetadata struct.
+ */
+ AV_PKT_DATA_CONTENT_LIGHT_LEVEL,
+
+ /**
+ * ATSC A53 Part 4 Closed Captions. This metadata should be associated with
+ * a video stream. A53 CC bitstream is stored as uint8_t in AVPacketSideData.data.
+ * The number of bytes of CC data is AVPacketSideData.size.
+ */
+ AV_PKT_DATA_A53_CC,
+
+ /**
+ * This side data is encryption initialization data.
+ * The format is not part of ABI, use av_encryption_init_info_* methods to
+ * access.
+ */
+ AV_PKT_DATA_ENCRYPTION_INIT_INFO,
+
+ /**
+ * This side data contains encryption info for how to decrypt the packet.
+ * The format is not part of ABI, use av_encryption_info_* methods to access.
+ */
+ AV_PKT_DATA_ENCRYPTION_INFO,
+
+ /**
+ * Active Format Description data consisting of a single byte as specified
+ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+ */
+ AV_PKT_DATA_AFD,
+
+ /**
+ * Producer Reference Time data corresponding to the AVProducerReferenceTime struct,
+ * usually exported by some encoders (on demand through the prft flag set in the
+ * AVCodecContext export_side_data field).
+ */
+ AV_PKT_DATA_PRFT,
+
+ /**
+ * ICC profile data consisting of an opaque octet buffer following the
+ * format described by ISO 15076-1.
+ */
+ AV_PKT_DATA_ICC_PROFILE,
+
+ /**
+ * DOVI configuration
+ * ref:
+ * dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2.1.2, section 2.2
+ * dolby-vision-bitstreams-in-mpeg-2-transport-stream-multiplex-v1.2, section 3.3
+ * Tags are stored in struct AVDOVIDecoderConfigurationRecord.
+ */
+ AV_PKT_DATA_DOVI_CONF,
+
+ /**
+ * Timecode which conforms to SMPTE ST 12-1:2014. The data is an array of 4 uint32_t
+ * where the first uint32_t describes how many (1-3) of the other timecodes are used.
+ * The timecode format is described in the documentation of av_timecode_get_smpte_from_framenum()
+ * function in libavutil/timecode.h.
+ */
+ AV_PKT_DATA_S12M_TIMECODE,
+
+ /**
+ * HDR10+ dynamic metadata associated with a video frame. The metadata is in
+ * the form of the AVDynamicHDRPlus struct and contains
+ * information for color volume transform - application 4 of
+ * SMPTE 2094-40:2016 standard.
+ */
+ AV_PKT_DATA_DYNAMIC_HDR10_PLUS,
+
+ /**
+ * The number of side data types.
+ * This is not part of the public API/ABI in the sense that it may
+ * change when new side data types are added.
+ * This must stay the last enum value.
+ * If its value becomes huge, some code using it
+ * needs to be updated as it assumes it to be smaller than other limits.
+ */
+ AV_PKT_DATA_NB
+};
+
+#define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS //DEPRECATED
+
+typedef struct AVPacketSideData {
+ uint8_t *data;
+ size_t size;
+ enum AVPacketSideDataType type;
+} AVPacketSideData;
+
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames. Encoders are allowed to output empty
+ * packets, with no compressed data, containing only side data
+ * (e.g. to update some stream parameters at the end of encoding).
+ *
+ * The semantics of data ownership depends on the buf field.
+ * If it is set, the packet data is dynamically allocated and is
+ * valid indefinitely until a call to av_packet_unref() reduces the
+ * reference count to 0.
+ *
+ * If the buf field is not set av_packet_ref() would make a copy instead
+ * of increasing the reference count.
+ *
+ * The side data is always allocated with av_malloc(), copied by
+ * av_packet_ref() and freed by av_packet_unref().
+ *
+ * sizeof(AVPacket) being a part of the public ABI is deprecated. Once
+ * av_init_packet() is removed, new packets will only be able to be allocated
+ * with av_packet_alloc(), and new fields may be added to the end of the struct
+ * with a minor bump.
+ *
+ * @see av_packet_alloc
+ * @see av_packet_ref
+ * @see av_packet_unref
+ */
+typedef struct AVPacket {
+ /**
+ * A reference to the reference-counted buffer where the packet data is
+ * stored.
+ * May be NULL, then the packet data is not reference-counted.
+ */
+ AVBufferRef *buf;
+ /**
+ * Presentation timestamp in AVStream->time_base units; the time at which
+ * the decompressed packet will be presented to the user.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ * pts MUST be larger than or equal to dts as presentation cannot happen before
+ * decompression, unless one wants to view hex dumps. Some formats misuse
+ * the terms dts and pts/cts to mean something different. Such timestamps
+ * must be converted to true pts/dts before they are stored in AVPacket.
+ */
+ int64_t pts;
+ /**
+ * Decompression timestamp in AVStream->time_base units; the time at which
+ * the packet is decompressed.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ */
+ int64_t dts;
+ uint8_t *data;
+ int size;
+ int stream_index;
+ /**
+ * A combination of AV_PKT_FLAG values
+ */
+ int flags;
+ /**
+ * Additional packet data that can be provided by the container.
+ * Packet can contain several types of side information.
+ */
+ AVPacketSideData *side_data;
+ int side_data_elems;
+
+ /**
+ * Duration of this packet in AVStream->time_base units, 0 if unknown.
+ * Equals next_pts - this_pts in presentation order.
+ */
+ int64_t duration;
+
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+ /**
+ * for some private data of the user
+ */
+ void *opaque;
+
+ /**
+ * AVBufferRef for free use by the API user. FFmpeg will never check the
+ * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when
+ * the packet is unreferenced. av_packet_copy_props() calls create a new
+ * reference with av_buffer_ref() for the target packet's opaque_ref field.
+ *
+ * This is unrelated to the opaque field, although it serves a similar
+ * purpose.
+ */
+ AVBufferRef *opaque_ref;
+
+ /**
+ * Time base of the packet's timestamps.
+ * In the future, this field may be set on packets output by encoders or
+ * demuxers, but its value will by default be ignored on input to decoders
+ * or muxers.
+ */
+ AVRational time_base;
+} AVPacket;
+
+#if FF_API_INIT_PACKET
+attribute_deprecated
+typedef struct AVPacketList {
+ AVPacket pkt;
+ struct AVPacketList *next;
+} AVPacketList;
+#endif
+
+#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
+/**
+ * Flag is used to discard packets which are required to maintain valid
+ * decoder state but are not required for output and should be dropped
+ * after decoding.
+ **/
+#define AV_PKT_FLAG_DISCARD 0x0004
+/**
+ * The packet comes from a trusted source.
+ *
+ * Otherwise-unsafe constructs such as arbitrary pointers to data
+ * outside the packet may be followed.
+ */
+#define AV_PKT_FLAG_TRUSTED 0x0008
+/**
+ * Flag is used to indicate packets that contain frames that can
+ * be discarded by the decoder, i.e. non-reference frames.
+ */
+#define AV_PKT_FLAG_DISPOSABLE 0x0010
+
+enum AVSideDataParamChangeFlags {
+#if FF_API_OLD_CHANNEL_LAYOUT
+ /**
+ * @deprecated those are not used by any decoder
+ */
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
+#endif
+ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
+ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
+};
+
+/**
+ * Allocate an AVPacket and set its fields to default values. The resulting
+ * struct must be freed using av_packet_free().
+ *
+ * @return An AVPacket filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVPacket itself, not the data buffers. Those
+ * must be allocated through other means such as av_new_packet.
+ *
+ * @see av_new_packet
+ */
+AVPacket *av_packet_alloc(void);
+
+/**
+ * Create a new packet that references the same data as src.
+ *
+ * This is a shortcut for av_packet_alloc()+av_packet_ref().
+ *
+ * @return newly created AVPacket on success, NULL on error.
+ *
+ * @see av_packet_alloc
+ * @see av_packet_ref
+ */
+AVPacket *av_packet_clone(const AVPacket *src);
+
+/**
+ * Free the packet. If the packet is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param pkt packet to be freed. The pointer will be set to NULL.
+ * @note passing NULL is a no-op.
+ */
+void av_packet_free(AVPacket **pkt);
+
+#if FF_API_INIT_PACKET
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * Note, this does not touch the data and size members, which have to be
+ * initialized separately.
+ *
+ * @param pkt packet
+ *
+ * @see av_packet_alloc
+ * @see av_packet_unref
+ *
+ * @deprecated This function is deprecated. Once it's removed,
+ *             sizeof(AVPacket) will not be a part of the ABI anymore.
+ */
+attribute_deprecated
+void av_init_packet(AVPacket *pkt);
+#endif
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket *pkt, int size);
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket *pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket *pkt, int grow_by);
+
+/**
+ * Initialize a reference-counted packet from av_malloc()ed data.
+ *
+ * @param pkt packet to be initialized. This function will set the data, size,
+ * and buf fields, all others are left untouched.
+ * @param data Data allocated by av_malloc() to be used as packet data. If this
+ * function returns successfully, the data is owned by the underlying AVBuffer.
+ * The caller may not access the data through other means.
+ * @param size size of data in bytes, without the padding. I.e. the full buffer
+ * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
+
+/**
+ * Allocate new side data for a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to freshly allocated data or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ size_t size);
+
+/**
+ * Wrap an existing array as packet side data.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param data the side data array. It must be allocated with the av_malloc()
+ * family of functions. The ownership of the data is transferred to
+ * pkt.
+ * @param size side information size
+ * @return a non-negative number on success, a negative AVERROR code on
+ * failure. On failure, the packet is unchanged and the data remains
+ * owned by the caller.
+ */
+int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ uint8_t *data, size_t size);
+
+/**
+ * Shrink the already allocated side data buffer
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ size_t size);
+
+/**
+ * Get side information from packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size If supplied, *size will be set to the size of the side data
+ * or to zero if the desired side data is not present.
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type,
+ size_t *size);
+
+const char *av_packet_side_data_name(enum AVPacketSideDataType type);
+
+/**
+ * Pack a dictionary for use in side_data.
+ *
+ * @param dict The dictionary to pack.
+ * @param size pointer to store the size of the returned data
+ * @return pointer to data if successful, NULL otherwise
+ */
+uint8_t *av_packet_pack_dictionary(AVDictionary *dict, size_t *size);
+/**
+ * Unpack a dictionary from side_data.
+ *
+ * @param data data from side_data
+ * @param size size of the data
+ * @param dict the metadata storage dictionary
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_unpack_dictionary(const uint8_t *data, size_t size,
+ AVDictionary **dict);
+
+/**
+ * Convenience function to free all the side data stored.
+ * All the other fields stay untouched.
+ *
+ * @param pkt packet
+ */
+void av_packet_free_side_data(AVPacket *pkt);
+
+/**
+ * Set up a new reference to the data described by a given packet
+ *
+ * If src is reference-counted, setup dst as a new reference to the
+ * buffer in src. Otherwise allocate a new buffer in dst and copy the
+ * data from src into it.
+ *
+ * All the other fields are copied from src.
+ *
+ * @see av_packet_unref
+ *
+ * @param dst Destination packet. Will be completely overwritten.
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on error. On error, dst
+ * will be blank (as if returned by av_packet_alloc()).
+ */
+int av_packet_ref(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Wipe the packet.
+ *
+ * Unreference the buffer referenced by the packet and reset the
+ * remaining packet fields to their default values.
+ *
+ * @param pkt The packet to be unreferenced.
+ */
+void av_packet_unref(AVPacket *pkt);
+
+/**
+ * Move every field in src to dst and reset src.
+ *
+ * @see av_packet_unref
+ *
+ * @param src Source packet, will be reset
+ * @param dst Destination packet
+ */
+void av_packet_move_ref(AVPacket *dst, AVPacket *src);
+
+/**
+ * Copy only "properties" fields from src to dst.
+ *
+ * Properties for the purpose of this function are all the fields
+ * beside those related to the packet data (buf, data, size)
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on failure.
+ */
+int av_packet_copy_props(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Ensure the data described by a given packet is reference counted.
+ *
+ * @note This function does not ensure that the reference will be writable.
+ * Use av_packet_make_writable instead for that purpose.
+ *
+ * @see av_packet_ref
+ * @see av_packet_make_writable
+ *
+ * @param pkt packet whose data should be made reference counted.
+ *
+ * @return 0 on success, a negative AVERROR on error. On failure, the
+ * packet is unchanged.
+ */
+int av_packet_make_refcounted(AVPacket *pkt);
+
+/**
+ * Create a writable reference for the data described by a given packet,
+ * avoiding data copy if possible.
+ *
+ * @param pkt Packet whose data should be made writable.
+ *
+ * @return 0 on success, a negative AVERROR on failure. On failure, the
+ * packet is unchanged.
+ */
+int av_packet_make_writable(AVPacket *pkt);
+
+/**
+ * Convert valid timing fields (timestamps / durations) in a packet from one
+ * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
+ * ignored.
+ *
+ * @param pkt packet on which the conversion will be performed
+ * @param tb_src source timebase, in which the timing fields in pkt are
+ * expressed
+ * @param tb_dst destination timebase, to which the timing fields will be
+ * converted
+ */
+void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);
+
+/**
+ * @}
+ */
+
+#endif // AVCODEC_PACKET_H
diff --git a/media/ffvpx/libavcodec/packet_internal.h b/media/ffvpx/libavcodec/packet_internal.h
new file mode 100644
index 0000000000..92a0d4e6d5
--- /dev/null
+++ b/media/ffvpx/libavcodec/packet_internal.h
@@ -0,0 +1,73 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PACKET_INTERNAL_H
+#define AVCODEC_PACKET_INTERNAL_H
+
+#include <stdint.h>
+
+#include "packet.h"
+
+typedef struct PacketListEntry {
+ struct PacketListEntry *next;
+ AVPacket pkt;
+} PacketListEntry;
+
+typedef struct PacketList {
+ PacketListEntry *head, *tail;
+} PacketList;
+
+/**
+ * Append an AVPacket to the list.
+ *
+ * @param list A PacketList
+ * @param pkt The packet being appended. The data described in it will
+ * be made reference counted if it isn't already.
+ * @param copy A callback to copy the contents of the packet to the list.
+ *             May be null, in which case the packet's reference will be
+ *             moved to the list.
+ * @return 0 on success, negative AVERROR value on failure. On failure,
+ *             the packet and the list are unchanged.
+ */
+int avpriv_packet_list_put(PacketList *list, AVPacket *pkt,
+ int (*copy)(AVPacket *dst, const AVPacket *src),
+ int flags);
+
+/**
+ * Remove the oldest AVPacket in the list and return it.
+ *
+ * @note The pkt will be overwritten completely on success. The caller
+ * owns the packet and must unref it by itself.
+ *
+ * @param list A pointer to a PacketList struct
+ * @param pkt Pointer to an AVPacket struct
+ * @return 0 on success, and a packet is returned. AVERROR(EAGAIN) if
+ * the list was empty.
+ */
+int avpriv_packet_list_get(PacketList *list, AVPacket *pkt);
+
+/**
+ * Wipe the list and unref all the packets in it.
+ */
+void avpriv_packet_list_free(PacketList *list);
+
+int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type);
+
+int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp);
+
+#endif // AVCODEC_PACKET_INTERNAL_H
diff --git a/media/ffvpx/libavcodec/parser.c b/media/ffvpx/libavcodec/parser.c
index 3e19810a94..49de7e6a57 100644
--- a/media/ffvpx/libavcodec/parser.c
+++ b/media/ffvpx/libavcodec/parser.c
@@ -25,10 +25,8 @@
#include <string.h>
#include "libavutil/avassert.h"
-#include "libavutil/internal.h"
#include "libavutil/mem.h"
-#include "internal.h"
#include "parser.h"
AVCodecParserContext *av_parser_init(int codec_id)
@@ -46,7 +44,9 @@ AVCodecParserContext *av_parser_init(int codec_id)
parser->codec_ids[1] == codec_id ||
parser->codec_ids[2] == codec_id ||
parser->codec_ids[3] == codec_id ||
- parser->codec_ids[4] == codec_id)
+ parser->codec_ids[4] == codec_id ||
+ parser->codec_ids[5] == codec_id ||
+ parser->codec_ids[6] == codec_id)
goto found;
}
return NULL;
@@ -55,7 +55,7 @@ found:
s = av_mallocz(sizeof(AVCodecParserContext));
if (!s)
goto err_out;
- s->parser = (AVCodecParser*)parser;
+ s->parser = parser;
s->priv_data = av_mallocz(parser->priv_data_size);
if (!s->priv_data)
goto err_out;
@@ -67,11 +67,6 @@ found:
goto err_out;
}
s->key_frame = -1;
-#if FF_API_CONVERGENCE_DURATION
-FF_DISABLE_DEPRECATION_WARNINGS
- s->convergence_duration = 0;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
s->dts_sync_point = INT_MIN;
s->dts_ref_dts_delta = INT_MIN;
s->pts_dts_delta = INT_MIN;
@@ -132,7 +127,9 @@ int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx,
avctx->codec_id == s->parser->codec_ids[1] ||
avctx->codec_id == s->parser->codec_ids[2] ||
avctx->codec_id == s->parser->codec_ids[3] ||
- avctx->codec_id == s->parser->codec_ids[4]);
+ avctx->codec_id == s->parser->codec_ids[4] ||
+ avctx->codec_id == s->parser->codec_ids[5] ||
+ avctx->codec_id == s->parser->codec_ids[6]);
if (!(s->flags & PARSER_FLAG_FETCHED_OFFSET)) {
s->next_frame_offset =
@@ -179,6 +176,9 @@ int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx,
/* offset of the next frame */
s->next_frame_offset = s->cur_offset + index;
s->fetch_timestamp = 1;
+ } else {
+ /* Don't return a pointer to dummy_buf. */
+ *poutbuf = NULL;
}
if (index < 0)
index = 0;
@@ -186,41 +186,6 @@ int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx,
return index;
}
-int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx,
- uint8_t **poutbuf, int *poutbuf_size,
- const uint8_t *buf, int buf_size, int keyframe)
-{
- if (s && s->parser->split) {
- if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER ||
- avctx->flags2 & AV_CODEC_FLAG2_LOCAL_HEADER) {
- int i = s->parser->split(avctx, buf, buf_size);
- buf += i;
- buf_size -= i;
- }
- }
-
- /* cast to avoid warning about discarding qualifiers */
- *poutbuf = (uint8_t *) buf;
- *poutbuf_size = buf_size;
- if (avctx->extradata) {
- if (keyframe && (avctx->flags2 & AV_CODEC_FLAG2_LOCAL_HEADER)) {
- int size = buf_size + avctx->extradata_size;
-
- *poutbuf_size = size;
- *poutbuf = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (!*poutbuf)
- return AVERROR(ENOMEM);
-
- memcpy(*poutbuf, avctx->extradata, avctx->extradata_size);
- memcpy(*poutbuf + avctx->extradata_size, buf,
- buf_size + AV_INPUT_BUFFER_PADDING_SIZE);
- return 1;
- }
- }
-
- return 0;
-}
-
void av_parser_close(AVCodecParserContext *s)
{
if (s) {
@@ -295,6 +260,10 @@ int ff_combine_frame(ParseContext *pc, int next,
*buf = pc->buffer;
}
+ if (next < -8) {
+ pc->overread += -8 - next;
+ next = -8;
+ }
/* store overread bytes */
for (; next < 0; next++) {
pc->state = pc->state << 8 | pc->buffer[pc->last_index + next];
@@ -318,17 +287,3 @@ void ff_parse_close(AVCodecParserContext *s)
av_freep(&pc->buffer);
}
-
-int ff_mpeg4video_split(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
-{
- uint32_t state = -1;
- const uint8_t *ptr = buf, *end = buf + buf_size;
-
- while (ptr < end) {
- ptr = avpriv_find_start_code(ptr, end, &state);
- if (state == 0x1B3 || state == 0x1B6)
- return ptr - 4 - buf;
- }
-
- return 0;
-}
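
For context, av_parser_parse2() — the call adjusted above so *poutbuf is never left pointing at dummy_buf — is normally driven in a loop that feeds raw bytes and consumes whole frames as they are assembled. A minimal sketch, assuming the parser and codec contexts were created elsewhere; the defensive error check is illustrative:

#include <libavcodec/avcodec.h>

static int parse_all(AVCodecParserContext *parser, AVCodecContext *avctx,
                     const uint8_t *data, int size)
{
    while (size > 0) {
        uint8_t *frame = NULL;
        int frame_size = 0;
        int used = av_parser_parse2(parser, avctx, &frame, &frame_size,
                                    data, size,
                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (used < 0)
            return used;
        data += used;
        size -= used;
        if (frame_size > 0) {
            /* frame/frame_size now describe one complete frame ready to decode. */
        }
    }
    return 0;
}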
diff --git a/media/ffvpx/libavcodec/parser.h b/media/ffvpx/libavcodec/parser.h
index ef35547e9b..2cee5ae4ff 100644
--- a/media/ffvpx/libavcodec/parser.h
+++ b/media/ffvpx/libavcodec/parser.h
@@ -45,8 +45,6 @@ typedef struct ParseContext{
* AVERROR(ENOMEM) if there was a memory allocation error
*/
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size);
-int ff_mpeg4video_split(AVCodecContext *avctx, const uint8_t *buf,
- int buf_size);
void ff_parse_close(AVCodecParserContext *s);
/**
diff --git a/media/ffvpx/libavcodec/parser_list.c b/media/ffvpx/libavcodec/parser_list.c
index b60c60bce9..9006d3f33b 100644
--- a/media/ffvpx/libavcodec/parser_list.c
+++ b/media/ffvpx/libavcodec/parser_list.c
@@ -1,3 +1,5 @@
+#include "config_common.h"
+
static const AVCodecParser * const parser_list[] = {
#if CONFIG_VP8_PARSER
&ff_vp8_parser,
diff --git a/media/ffvpx/libavcodec/parsers.c b/media/ffvpx/libavcodec/parsers.c
index 33a71de8a0..d355808018 100644
--- a/media/ffvpx/libavcodec/parsers.c
+++ b/media/ffvpx/libavcodec/parsers.c
@@ -18,81 +18,69 @@
#include <stdint.h>
-#include "libavutil/thread.h"
-
#include "avcodec.h"
-extern AVCodecParser ff_aac_parser;
-extern AVCodecParser ff_aac_latm_parser;
-extern AVCodecParser ff_ac3_parser;
-extern AVCodecParser ff_adx_parser;
-extern AVCodecParser ff_av1_parser;
-extern AVCodecParser ff_avs2_parser;
-extern AVCodecParser ff_bmp_parser;
-extern AVCodecParser ff_cavsvideo_parser;
-extern AVCodecParser ff_cook_parser;
-extern AVCodecParser ff_dca_parser;
-extern AVCodecParser ff_dirac_parser;
-extern AVCodecParser ff_dnxhd_parser;
-extern AVCodecParser ff_dpx_parser;
-extern AVCodecParser ff_dvaudio_parser;
-extern AVCodecParser ff_dvbsub_parser;
-extern AVCodecParser ff_dvdsub_parser;
-extern AVCodecParser ff_dvd_nav_parser;
-extern AVCodecParser ff_flac_parser;
-extern AVCodecParser ff_g723_1_parser;
-extern AVCodecParser ff_g729_parser;
-extern AVCodecParser ff_gif_parser;
-extern AVCodecParser ff_gsm_parser;
-extern AVCodecParser ff_h261_parser;
-extern AVCodecParser ff_h263_parser;
-extern AVCodecParser ff_h264_parser;
-extern AVCodecParser ff_hevc_parser;
-extern AVCodecParser ff_mjpeg_parser;
-extern AVCodecParser ff_mlp_parser;
-extern AVCodecParser ff_mpeg4video_parser;
-extern AVCodecParser ff_mpegaudio_parser;
-extern AVCodecParser ff_mpegvideo_parser;
-extern AVCodecParser ff_opus_parser;
-extern AVCodecParser ff_png_parser;
-extern AVCodecParser ff_pnm_parser;
-extern AVCodecParser ff_rv30_parser;
-extern AVCodecParser ff_rv40_parser;
-extern AVCodecParser ff_sbc_parser;
-extern AVCodecParser ff_sipr_parser;
-extern AVCodecParser ff_tak_parser;
-extern AVCodecParser ff_vc1_parser;
-extern AVCodecParser ff_vorbis_parser;
-extern AVCodecParser ff_vp3_parser;
-extern AVCodecParser ff_vp8_parser;
-extern AVCodecParser ff_vp9_parser;
-extern AVCodecParser ff_xma_parser;
+extern const AVCodecParser ff_aac_parser;
+extern const AVCodecParser ff_aac_latm_parser;
+extern const AVCodecParser ff_ac3_parser;
+extern const AVCodecParser ff_adx_parser;
+extern const AVCodecParser ff_amr_parser;
+extern const AVCodecParser ff_av1_parser;
+extern const AVCodecParser ff_avs2_parser;
+extern const AVCodecParser ff_avs3_parser;
+extern const AVCodecParser ff_bmp_parser;
+extern const AVCodecParser ff_cavsvideo_parser;
+extern const AVCodecParser ff_cook_parser;
+extern const AVCodecParser ff_cri_parser;
+extern const AVCodecParser ff_dca_parser;
+extern const AVCodecParser ff_dirac_parser;
+extern const AVCodecParser ff_dnxhd_parser;
+extern const AVCodecParser ff_dolby_e_parser;
+extern const AVCodecParser ff_dpx_parser;
+extern const AVCodecParser ff_dvaudio_parser;
+extern const AVCodecParser ff_dvbsub_parser;
+extern const AVCodecParser ff_dvdsub_parser;
+extern const AVCodecParser ff_dvd_nav_parser;
+extern const AVCodecParser ff_flac_parser;
+extern const AVCodecParser ff_ftr_parser;
+extern const AVCodecParser ff_g723_1_parser;
+extern const AVCodecParser ff_g729_parser;
+extern const AVCodecParser ff_gif_parser;
+extern const AVCodecParser ff_gsm_parser;
+extern const AVCodecParser ff_h261_parser;
+extern const AVCodecParser ff_h263_parser;
+extern const AVCodecParser ff_h264_parser;
+extern const AVCodecParser ff_hevc_parser;
+extern const AVCodecParser ff_hdr_parser;
+extern const AVCodecParser ff_ipu_parser;
+extern const AVCodecParser ff_jpeg2000_parser;
+extern const AVCodecParser ff_misc4_parser;
+extern const AVCodecParser ff_mjpeg_parser;
+extern const AVCodecParser ff_mlp_parser;
+extern const AVCodecParser ff_mpeg4video_parser;
+extern const AVCodecParser ff_mpegaudio_parser;
+extern const AVCodecParser ff_mpegvideo_parser;
+extern const AVCodecParser ff_opus_parser;
+extern const AVCodecParser ff_png_parser;
+extern const AVCodecParser ff_pnm_parser;
+extern const AVCodecParser ff_qoi_parser;
+extern const AVCodecParser ff_rv30_parser;
+extern const AVCodecParser ff_rv40_parser;
+extern const AVCodecParser ff_sbc_parser;
+extern const AVCodecParser ff_sipr_parser;
+extern const AVCodecParser ff_tak_parser;
+extern const AVCodecParser ff_vc1_parser;
+extern const AVCodecParser ff_vorbis_parser;
+extern const AVCodecParser ff_vp3_parser;
+extern const AVCodecParser ff_vp8_parser;
+extern const AVCodecParser ff_vp9_parser;
+extern const AVCodecParser ff_webp_parser;
+extern const AVCodecParser ff_xbm_parser;
+extern const AVCodecParser ff_xma_parser;
+extern const AVCodecParser ff_xwd_parser;
#include "libavcodec/parser_list.c"
-static AVOnce av_parser_next_init = AV_ONCE_INIT;
-
-static void av_parser_init_next(void)
-{
- AVCodecParser *prev = NULL, *p;
- int i = 0;
- while ((p = (AVCodecParser*)parser_list[i++])) {
- if (prev)
- prev->next = p;
- prev = p;
- }
-}
-
-AVCodecParser *av_parser_next(const AVCodecParser *p)
-{
- ff_thread_once(&av_parser_next_init, av_parser_init_next);
-
- if (p)
- return p->next;
- else
- return (AVCodecParser*)parser_list[0];
-}
-
const AVCodecParser *av_parser_iterate(void **opaque)
{
uintptr_t i = (uintptr_t)*opaque;
@@ -103,8 +91,3 @@ const AVCodecParser *av_parser_iterate(void **opaque)
return p;
}
-
-void av_register_codec_parser(AVCodecParser *parser)
-{
- ff_thread_once(&av_parser_next_init, av_parser_init_next);
-}
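
With av_parser_next() and av_register_codec_parser() gone, enumerating parsers goes exclusively through the opaque-iterator API kept above. A small sketch listing the compiled-in parsers:

#include <stdio.h>
#include <libavcodec/avcodec.h>

static void list_parsers(void)
{
    void *opaque = NULL;
    const AVCodecParser *p;

    while ((p = av_parser_iterate(&opaque)))
        printf("parser for codec id %d\n", p->codec_ids[0]);
}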
diff --git a/media/ffvpx/libavcodec/pixblockdsp.h b/media/ffvpx/libavcodec/pixblockdsp.h
index e036700ff0..9b002aa3d6 100644
--- a/media/ffvpx/libavcodec/pixblockdsp.h
+++ b/media/ffvpx/libavcodec/pixblockdsp.h
@@ -29,6 +29,9 @@ typedef struct PixblockDSPContext {
void (*get_pixels)(int16_t *av_restrict block /* align 16 */,
const uint8_t *pixels /* align 8 */,
ptrdiff_t stride);
+ void (*get_pixels_unaligned)(int16_t *av_restrict block /* align 16 */,
+ const uint8_t *pixels,
+ ptrdiff_t stride);
void (*diff_pixels)(int16_t *av_restrict block /* align 16 */,
const uint8_t *s1 /* align 8 */,
const uint8_t *s2 /* align 8 */,
@@ -41,12 +44,16 @@ typedef struct PixblockDSPContext {
} PixblockDSPContext;
void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx);
+void ff_pixblockdsp_init_aarch64(PixblockDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
void ff_pixblockdsp_init_alpha(PixblockDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
void ff_pixblockdsp_init_arm(PixblockDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
void ff_pixblockdsp_init_ppc(PixblockDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
+void ff_pixblockdsp_init_riscv(PixblockDSPContext *c, AVCodecContext *avctx,
+ unsigned high_bit_depth);
void ff_pixblockdsp_init_x86(PixblockDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth);
void ff_pixblockdsp_init_mips(PixblockDSPContext *c, AVCodecContext *avctx,
diff --git a/media/ffvpx/libavcodec/profiles.c b/media/ffvpx/libavcodec/profiles.c
index eaf0d68d32..7af7fbeb13 100644
--- a/media/ffvpx/libavcodec/profiles.c
+++ b/media/ffvpx/libavcodec/profiles.c
@@ -74,6 +74,12 @@ const AVProfile ff_h264_profiles[] = {
{ FF_PROFILE_UNKNOWN },
};
+const AVProfile ff_vvc_profiles[] = {
+ { FF_PROFILE_VVC_MAIN_10, "Main 10" },
+ { FF_PROFILE_VVC_MAIN_10_444, "Main 10 4:4:4" },
+ { FF_PROFILE_UNKNOWN },
+};
+
const AVProfile ff_hevc_profiles[] = {
{ FF_PROFILE_HEVC_MAIN, "Main" },
{ FF_PROFILE_HEVC_MAIN_10, "Main 10" },
@@ -99,7 +105,6 @@ const AVProfile ff_mpeg2_video_profiles[] = {
{ FF_PROFILE_MPEG2_MAIN, "Main" },
{ FF_PROFILE_MPEG2_SIMPLE, "Simple" },
{ FF_PROFILE_RESERVED, "Reserved" },
- { FF_PROFILE_RESERVED, "Reserved" },
{ FF_PROFILE_UNKNOWN },
};
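
These AVProfile arrays back each codec's profiles table; a numeric profile id from the bitstream is mapped to its display name through the public helper. A small sketch, assuming the codec pointer and profile id come from an opened stream:

#include <stdio.h>
#include <libavcodec/avcodec.h>

static void print_profile(const AVCodec *codec, int profile)
{
    const char *name = av_get_profile_name(codec, profile);
    printf("%s profile: %s\n", codec->name, name ? name : "unknown");
}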
diff --git a/media/ffvpx/libavcodec/profiles.h b/media/ffvpx/libavcodec/profiles.h
index a53b67e7f2..41a19aa9ad 100644
--- a/media/ffvpx/libavcodec/profiles.h
+++ b/media/ffvpx/libavcodec/profiles.h
@@ -20,12 +20,48 @@
#define AVCODEC_PROFILES_H
#include "avcodec.h"
+#include "libavutil/opt.h"
+
+#define FF_AVCTX_PROFILE_OPTION(name, description, type, value) \
+ {name, description, 0, AV_OPT_TYPE_CONST, {.i64 = value }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_## type ##_PARAM, "avctx.profile"},
+
+#define FF_AAC_PROFILE_OPTS \
+ FF_AVCTX_PROFILE_OPTION("aac_main", NULL, AUDIO, FF_PROFILE_AAC_MAIN)\
+ FF_AVCTX_PROFILE_OPTION("aac_low", NULL, AUDIO, FF_PROFILE_AAC_LOW)\
+ FF_AVCTX_PROFILE_OPTION("aac_ssr", NULL, AUDIO, FF_PROFILE_AAC_SSR)\
+ FF_AVCTX_PROFILE_OPTION("aac_ltp", NULL, AUDIO, FF_PROFILE_AAC_LTP)\
+ FF_AVCTX_PROFILE_OPTION("aac_he", NULL, AUDIO, FF_PROFILE_AAC_HE)\
+ FF_AVCTX_PROFILE_OPTION("aac_he_v2", NULL, AUDIO, FF_PROFILE_AAC_HE_V2)\
+ FF_AVCTX_PROFILE_OPTION("aac_ld", NULL, AUDIO, FF_PROFILE_AAC_LD)\
+ FF_AVCTX_PROFILE_OPTION("aac_eld", NULL, AUDIO, FF_PROFILE_AAC_ELD)\
+ FF_AVCTX_PROFILE_OPTION("mpeg2_aac_low", NULL, AUDIO, FF_PROFILE_MPEG2_AAC_LOW)\
+ FF_AVCTX_PROFILE_OPTION("mpeg2_aac_he", NULL, AUDIO, FF_PROFILE_MPEG2_AAC_HE)\
+
+#define FF_MPEG4_PROFILE_OPTS \
+ FF_AVCTX_PROFILE_OPTION("mpeg4_sp", NULL, VIDEO, FF_PROFILE_MPEG4_SIMPLE)\
+ FF_AVCTX_PROFILE_OPTION("mpeg4_core", NULL, VIDEO, FF_PROFILE_MPEG4_CORE)\
+ FF_AVCTX_PROFILE_OPTION("mpeg4_main", NULL, VIDEO, FF_PROFILE_MPEG4_MAIN)\
+ FF_AVCTX_PROFILE_OPTION("mpeg4_asp", NULL, VIDEO, FF_PROFILE_MPEG4_ADVANCED_SIMPLE)\
+
+#define FF_MPEG2_PROFILE_OPTS \
+ FF_AVCTX_PROFILE_OPTION("422", NULL, VIDEO, FF_PROFILE_MPEG2_422)\
+ FF_AVCTX_PROFILE_OPTION("high", NULL, VIDEO, FF_PROFILE_MPEG2_HIGH)\
+ FF_AVCTX_PROFILE_OPTION("ss", NULL, VIDEO, FF_PROFILE_MPEG2_SS)\
+ FF_AVCTX_PROFILE_OPTION("snr", NULL, VIDEO, FF_PROFILE_MPEG2_SNR_SCALABLE)\
+ FF_AVCTX_PROFILE_OPTION("main", NULL, VIDEO, FF_PROFILE_MPEG2_MAIN)\
+ FF_AVCTX_PROFILE_OPTION("simple", NULL, VIDEO, FF_PROFILE_MPEG2_SIMPLE)\
+
+#define FF_AV1_PROFILE_OPTS \
+ FF_AVCTX_PROFILE_OPTION("main", NULL, VIDEO, FF_PROFILE_AV1_MAIN)\
+ FF_AVCTX_PROFILE_OPTION("high", NULL, VIDEO, FF_PROFILE_AV1_HIGH)\
+ FF_AVCTX_PROFILE_OPTION("professional", NULL, VIDEO, FF_PROFILE_AV1_PROFESSIONAL)\
extern const AVProfile ff_aac_profiles[];
extern const AVProfile ff_dca_profiles[];
extern const AVProfile ff_dnxhd_profiles[];
extern const AVProfile ff_h264_profiles[];
extern const AVProfile ff_hevc_profiles[];
+extern const AVProfile ff_vvc_profiles[];
extern const AVProfile ff_jpeg2000_profiles[];
extern const AVProfile ff_mpeg2_video_profiles[];
extern const AVProfile ff_mpeg4_video_profiles[];
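
The FF_*_PROFILE_OPTS macros expand to AV_OPT_TYPE_CONST rows under the "avctx.profile" unit, so an encoder's option table can expose named profiles without spelling out each entry. A hedged sketch of how a table might splice them in; the table name is illustrative:

#include <limits.h>
#include "libavutil/opt.h"
#include "profiles.h"

static const AVOption sketch_options[] = {
    FF_MPEG2_PROFILE_OPTS
    { NULL },
};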
diff --git a/media/ffvpx/libavcodec/pthread.c b/media/ffvpx/libavcodec/pthread.c
index 572471586d..ca84b81391 100644
--- a/media/ffvpx/libavcodec/pthread.c
+++ b/media/ffvpx/libavcodec/pthread.c
@@ -29,8 +29,10 @@
* @see doc/multithreading.txt
*/
+#include "libavutil/thread.h"
+
#include "avcodec.h"
-#include "internal.h"
+#include "codec_internal.h"
#include "pthread_internal.h"
#include "thread.h"
@@ -46,7 +48,6 @@
static void validate_thread_parameters(AVCodecContext *avctx)
{
int frame_threading_supported = (avctx->codec->capabilities & AV_CODEC_CAP_FRAME_THREADS)
- && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED)
&& !(avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
&& !(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS);
if (avctx->thread_count == 1) {
@@ -56,7 +57,7 @@ static void validate_thread_parameters(AVCodecContext *avctx)
} else if (avctx->codec->capabilities & AV_CODEC_CAP_SLICE_THREADS &&
avctx->thread_type & FF_THREAD_SLICE) {
avctx->active_thread_type = FF_THREAD_SLICE;
- } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_AUTO_THREADS)) {
+ } else if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_AUTO_THREADS)) {
avctx->thread_count = 1;
avctx->active_thread_type = 0;
}
@@ -86,3 +87,39 @@ void ff_thread_free(AVCodecContext *avctx)
else
ff_slice_thread_free(avctx);
}
+
+av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
+{
+ unsigned cnt = *(unsigned*)((char*)obj + offsets[0]);
+ const unsigned *cur_offset = offsets;
+
+ *(unsigned*)((char*)obj + offsets[0]) = 0;
+
+ for (; *(++cur_offset) != THREAD_SENTINEL && cnt; cnt--)
+ pthread_mutex_destroy((pthread_mutex_t*)((char*)obj + *cur_offset));
+ for (; *(++cur_offset) != THREAD_SENTINEL && cnt; cnt--)
+ pthread_cond_destroy ((pthread_cond_t *)((char*)obj + *cur_offset));
+}
+
+av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
+{
+ const unsigned *cur_offset = offsets;
+ unsigned cnt = 0;
+ int err;
+
+#define PTHREAD_INIT_LOOP(type) \
+ for (; *(++cur_offset) != THREAD_SENTINEL; cnt++) { \
+ pthread_ ## type ## _t *dst = (void*)((char*)obj + *cur_offset); \
+ err = pthread_ ## type ## _init(dst, NULL); \
+ if (err) { \
+ err = AVERROR(err); \
+ goto fail; \
+ } \
+ }
+ PTHREAD_INIT_LOOP(mutex)
+ PTHREAD_INIT_LOOP(cond)
+
+fail:
+ *(unsigned*)((char*)obj + offsets[0]) = cnt;
+ return err;
+}
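
ff_pthread_init() and ff_pthread_free() walk an offsets table: entry 0 locates an init counter inside the context, then come the mutex offsets, a THREAD_SENTINEL terminator, the condition-variable offsets, and a final THREAD_SENTINEL. A hedged sketch of a context wired up this way; struct and function names are illustrative:

#include <stddef.h>
#include "libavutil/thread.h"
#include "pthread_internal.h"

typedef struct SketchContext {
    unsigned        pthread_init_cnt;   /* how many primitives were initialized */
    pthread_mutex_t lock;
    pthread_cond_t  cond;
} SketchContext;

static const unsigned sketch_offsets[] = {
    offsetof(SketchContext, pthread_init_cnt),
    offsetof(SketchContext, lock), THREAD_SENTINEL,   /* mutexes */
    offsetof(SketchContext, cond), THREAD_SENTINEL,   /* condition variables */
};

static int sketch_init(SketchContext *c)
{
    /* ff_pthread_free(c, sketch_offsets) later undoes whatever was initialized. */
    return ff_pthread_init(c, sketch_offsets);
}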
diff --git a/media/ffvpx/libavcodec/pthread_frame.c b/media/ffvpx/libavcodec/pthread_frame.c
index 36ac0ac1e5..d9d5afaa82 100644
--- a/media/ffvpx/libavcodec/pthread_frame.c
+++ b/media/ffvpx/libavcodec/pthread_frame.c
@@ -28,11 +28,14 @@
#include <stdint.h>
#include "avcodec.h"
-#include "hwaccel.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "hwconfig.h"
#include "internal.h"
#include "pthread_internal.h"
#include "thread.h"
-#include "version.h"
+#include "threadframe.h"
+#include "version_major.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
@@ -64,6 +67,12 @@ enum {
STATE_SETUP_FINISHED,
};
+enum {
+ UNINITIALIZED, ///< Thread has not been created, AVCodec->close mustn't be called
+ NEEDS_CLOSE, ///< FFCodec->close needs to be called
+ INITIALIZED, ///< Thread has been properly set up
+};
+
/**
* Context used by codec threads and stored in their AVCodecInternal thread_ctx.
*/
@@ -72,6 +81,7 @@ typedef struct PerThreadContext {
pthread_t thread;
int thread_init;
+ unsigned pthread_init_cnt;///< Number of successfully initialized mutexes/conditions
pthread_cond_t input_cond; ///< Used to wait for a new packet from the main thread.
pthread_cond_t progress_cond; ///< Used by child threads to wait for progress to change.
pthread_cond_t output_cond; ///< Used by the main thread to wait for frames to finish.
@@ -81,7 +91,7 @@ typedef struct PerThreadContext {
AVCodecContext *avctx; ///< Context used to decode packets passed to this thread.
- AVPacket avpkt; ///< Input packet (for decoding) or output (for encoding).
+ AVPacket *avpkt; ///< Input packet (for decoding) or output (for encoding).
AVFrame *frame; ///< Output frame (for decoding) or input (for encoding).
int got_frame; ///< The output of got_picture_ptr from the last avcodec_decode_video() call.
@@ -89,20 +99,6 @@ typedef struct PerThreadContext {
atomic_int state;
- /**
- * Array of frames passed to ff_thread_release_buffer().
- * Frames are released after all threads referencing them are finished.
- */
- AVFrame *released_buffers;
- int num_released_buffers;
- int released_buffers_allocated;
-
- AVFrame *requested_frame; ///< AVFrame the codec passed to get_buffer()
- int requested_flags; ///< flags passed to get_buffer() for requested_frame
-
- const enum AVPixelFormat *available_formats; ///< Format array for get_format()
- enum AVPixelFormat result_format; ///< get_format() result
-
int die; ///< Set when the thread should exit.
int hwaccel_serializing;
@@ -118,6 +114,7 @@ typedef struct FrameThreadContext {
PerThreadContext *threads; ///< The contexts for each thread.
PerThreadContext *prev_thread; ///< The last thread submit_packet() was called on.
+ unsigned pthread_init_cnt; ///< Number of successfully initialized mutexes/conditions
pthread_mutex_t buffer_mutex; ///< Mutex used to protect get/release_buffer().
/**
* This lock is used for ensuring threads run in serial when hwaccel
@@ -135,10 +132,13 @@ typedef struct FrameThreadContext {
* Set for the first N packets, where N is the number of threads.
* While it is set, ff_thread_en/decode_frame won't return any results.
*/
-} FrameThreadContext;
-#define THREAD_SAFE_CALLBACKS(avctx) \
-((avctx)->thread_safe_callbacks || (avctx)->get_buffer2 == avcodec_default_get_buffer2)
+ /* hwaccel state is temporarily stored here in order to transfer its ownership
+ * to the next decoding thread without the need for extra synchronization */
+ const AVHWAccel *stash_hwaccel;
+ void *stash_hwaccel_context;
+ void *stash_hwaccel_priv;
+} FrameThreadContext;
static void async_lock(FrameThreadContext *fctx)
{
@@ -158,6 +158,17 @@ static void async_unlock(FrameThreadContext *fctx)
pthread_mutex_unlock(&fctx->async_mutex);
}
+static void thread_set_name(PerThreadContext *p)
+{
+ AVCodecContext *avctx = p->avctx;
+ int idx = p - p->parent->threads;
+ char name[16];
+
+ snprintf(name, sizeof(name), "av:%.7s:df%d", avctx->codec->name, idx);
+
+ ff_thread_setname(name);
+}
+
/**
* Codec worker thread.
*
@@ -169,7 +180,9 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
{
PerThreadContext *p = arg;
AVCodecContext *avctx = p->avctx;
- const AVCodec *codec = avctx->codec;
+ const FFCodec *codec = ffcodec(avctx->codec);
+
+ thread_set_name(p);
pthread_mutex_lock(&p->mutex);
while (1) {
@@ -178,7 +191,7 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
if (p->die) break;
- if (!codec->update_thread_context && THREAD_SAFE_CALLBACKS(avctx))
+ if (!codec->update_thread_context)
ff_thread_finish_setup(avctx);
/* If a decoder supports hwaccel, then it must call ff_get_format().
@@ -198,22 +211,26 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
av_frame_unref(p->frame);
p->got_frame = 0;
- p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);
+ p->result = codec->cb.decode(avctx, p->frame, &p->got_frame, p->avpkt);
- if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
- if (avctx->internal->allocate_progress)
- av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
- "free the frame on failure. This is a bug, please report it.\n");
- av_frame_unref(p->frame);
- }
+ if ((p->result < 0 || !p->got_frame) && p->frame->buf[0])
+ ff_thread_release_buffer(avctx, p->frame);
if (atomic_load(&p->state) == STATE_SETTING_UP)
ff_thread_finish_setup(avctx);
if (p->hwaccel_serializing) {
+ /* wipe hwaccel state to avoid stale pointers lying around;
+ * the state was transferred to FrameThreadContext in
+ * ff_thread_finish_setup(), so nothing is leaked */
+ avctx->hwaccel = NULL;
+ avctx->hwaccel_context = NULL;
+ avctx->internal->hwaccel_priv_data = NULL;
+
p->hwaccel_serializing = 0;
pthread_mutex_unlock(&p->parent->hwaccel_mutex);
}
+ av_assert0(!avctx->hwaccel);
if (p->async_serializing) {
p->async_serializing = 0;
@@ -244,9 +261,10 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
*/
static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
{
+ const FFCodec *const codec = ffcodec(dst->codec);
int err = 0;
- if (dst != src && (for_user || !(src->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY))) {
+ if (dst != src && (for_user || codec->update_thread_context)) {
dst->time_base = src->time_base;
dst->framerate = src->framerate;
dst->width = src->width;
@@ -259,6 +277,7 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src,
dst->has_b_frames = src->has_b_frames;
dst->idct_algo = src->idct_algo;
+ dst->properties = src->properties;
dst->bits_per_coded_sample = src->bits_per_coded_sample;
dst->sample_aspect_ratio = src->sample_aspect_ratio;
@@ -275,14 +294,17 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src,
dst->color_range = src->color_range;
dst->chroma_sample_location = src->chroma_sample_location;
- dst->hwaccel = src->hwaccel;
- dst->hwaccel_context = src->hwaccel_context;
-
- dst->channels = src->channels;
dst->sample_rate = src->sample_rate;
dst->sample_fmt = src->sample_fmt;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ dst->channels = src->channels;
dst->channel_layout = src->channel_layout;
- dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ err = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
+ if (err < 0)
+ return err;
if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
(dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
@@ -296,18 +318,18 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src,
}
dst->hwaccel_flags = src->hwaccel_flags;
+
+ err = av_buffer_replace(&dst->internal->pool, src->internal->pool);
+ if (err < 0)
+ return err;
}
if (for_user) {
- dst->delay = src->thread_count - 1;
-#if FF_API_CODED_FRAME
-FF_DISABLE_DEPRECATION_WARNINGS
- dst->coded_frame = src->coded_frame;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
+ if (codec->update_thread_context_for_user)
+ err = codec->update_thread_context_for_user(dst, src);
} else {
- if (dst->codec->update_thread_context)
- err = dst->codec->update_thread_context(dst, src);
+ if (codec->update_thread_context)
+ err = codec->update_thread_context(dst, src);
}
return err;
@@ -322,7 +344,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
*/
static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
{
-#define copy_fields(s, e) memcpy(&dst->s, &src->s, (char*)&dst->e - (char*)&dst->s);
+ int err;
+
dst->flags = src->flags;
dst->draw_horiz_band= src->draw_horiz_band;
@@ -330,16 +353,26 @@ static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
dst->opaque = src->opaque;
dst->debug = src->debug;
- dst->debug_mv = src->debug_mv;
dst->slice_flags = src->slice_flags;
dst->flags2 = src->flags2;
+ dst->export_side_data = src->export_side_data;
- copy_fields(skip_loop_filter, subtitle_header);
+ dst->skip_loop_filter = src->skip_loop_filter;
+ dst->skip_idct = src->skip_idct;
+ dst->skip_frame = src->skip_frame;
+ dst->frame_num = src->frame_num;
+#if FF_API_AVCTX_FRAME_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
dst->frame_number = src->frame_number;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
dst->reordered_opaque = src->reordered_opaque;
- dst->thread_safe_callbacks = src->thread_safe_callbacks;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
if (src->slice_count && src->slice_offset) {
if (dst->slice_count < src->slice_count) {
@@ -352,29 +385,13 @@ static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
src->slice_count * sizeof(*dst->slice_offset));
}
dst->slice_count = src->slice_count;
- return 0;
-#undef copy_fields
-}
-/// Releases the buffers that this decoding thread was the last user of.
-static void release_delayed_buffers(PerThreadContext *p)
-{
- FrameThreadContext *fctx = p->parent;
-
- while (p->num_released_buffers > 0) {
- AVFrame *f;
-
- pthread_mutex_lock(&fctx->buffer_mutex);
+ av_packet_unref(dst->internal->last_pkt_props);
+ err = av_packet_copy_props(dst->internal->last_pkt_props, src->internal->last_pkt_props);
+ if (err < 0)
+ return err;
- // fix extended data in case the caller screwed it up
- av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
- p->avctx->codec_type == AVMEDIA_TYPE_AUDIO);
- f = &p->released_buffers[--p->num_released_buffers];
- f->extended_data = f->data;
- av_frame_unref(f);
-
- pthread_mutex_unlock(&fctx->buffer_mutex);
- }
+ return 0;
}
static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
@@ -399,8 +416,6 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
(p->avctx->debug & FF_DEBUG_THREADS) != 0,
memory_order_relaxed);
- release_delayed_buffers(p);
-
if (prev_thread) {
int err;
if (atomic_load(&prev_thread->state) == STATE_SETTING_UP) {
@@ -417,8 +432,14 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
}
}
- av_packet_unref(&p->avpkt);
- ret = av_packet_ref(&p->avpkt, avpkt);
+ /* transfer the stashed hwaccel state, if any */
+ av_assert0(!p->avctx->hwaccel);
+ FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
+ FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
+ FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
+
+ av_packet_unref(p->avpkt);
+ ret = av_packet_ref(p->avpkt, avpkt);
if (ret < 0) {
pthread_mutex_unlock(&p->mutex);
av_log(p->avctx, AV_LOG_ERROR, "av_packet_ref() failed in submit_packet()\n");
@@ -429,40 +450,6 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
pthread_cond_signal(&p->input_cond);
pthread_mutex_unlock(&p->mutex);
- /*
- * If the client doesn't have a thread-safe get_buffer(),
- * then decoding threads call back to the main thread,
- * and it calls back to the client here.
- */
-
- if (!p->avctx->thread_safe_callbacks && (
- p->avctx->get_format != avcodec_default_get_format ||
- p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
- while (atomic_load(&p->state) != STATE_SETUP_FINISHED && atomic_load(&p->state) != STATE_INPUT_READY) {
- int call_done = 1;
- pthread_mutex_lock(&p->progress_mutex);
- while (atomic_load(&p->state) == STATE_SETTING_UP)
- pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
-
- switch (atomic_load_explicit(&p->state, memory_order_acquire)) {
- case STATE_GET_BUFFER:
- p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
- break;
- case STATE_GET_FORMAT:
- p->result_format = ff_get_format(p->avctx, p->available_formats);
- break;
- default:
- call_done = 0;
- break;
- }
- if (call_done) {
- atomic_store(&p->state, STATE_SETTING_UP);
- pthread_cond_signal(&p->progress_cond);
- }
- pthread_mutex_unlock(&p->progress_mutex);
- }
- }
-
fctx->prev_thread = p;
fctx->next_decoding++;
@@ -525,7 +512,7 @@ int ff_thread_decode_frame(AVCodecContext *avctx,
av_frame_move_ref(picture, p->frame);
*got_picture_ptr = p->got_frame;
- picture->pkt_dts = p->avpkt.dts;
+ picture->pkt_dts = p->avpkt->dts;
err = p->result;
/*
@@ -577,7 +564,7 @@ void ff_thread_report_progress(ThreadFrame *f, int n, int field)
pthread_mutex_unlock(&p->progress_mutex);
}
-void ff_thread_await_progress(ThreadFrame *f, int n, int field)
+void ff_thread_await_progress(const ThreadFrame *f, int n, int field)
{
PerThreadContext *p;
atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;
@@ -616,6 +603,14 @@ void ff_thread_finish_setup(AVCodecContext *avctx) {
async_lock(p->parent);
}
+ /* save hwaccel state for passing to the next thread;
+ * this is done here so that this worker thread can wipe its own hwaccel
+ * state after decoding, without requiring synchronization */
+ av_assert0(!p->parent->stash_hwaccel);
+ p->parent->stash_hwaccel = avctx->hwaccel;
+ p->parent->stash_hwaccel_context = avctx->hwaccel_context;
+ p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data;
+
pthread_mutex_lock(&p->progress_mutex);
if(atomic_load(&p->state) == STATE_SETUP_FINISHED){
av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
@@ -649,91 +644,165 @@ static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count
async_lock(fctx);
}
+#define OFF(member) offsetof(FrameThreadContext, member)
+DEFINE_OFFSET_ARRAY(FrameThreadContext, thread_ctx, pthread_init_cnt,
+ (OFF(buffer_mutex), OFF(hwaccel_mutex), OFF(async_mutex)),
+ (OFF(async_cond)));
+#undef OFF
+
+#define OFF(member) offsetof(PerThreadContext, member)
+DEFINE_OFFSET_ARRAY(PerThreadContext, per_thread, pthread_init_cnt,
+ (OFF(progress_mutex), OFF(mutex)),
+ (OFF(input_cond), OFF(progress_cond), OFF(output_cond)));
+#undef OFF
+
void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
{
FrameThreadContext *fctx = avctx->internal->thread_ctx;
- const AVCodec *codec = avctx->codec;
+ const FFCodec *codec = ffcodec(avctx->codec);
int i;
park_frame_worker_threads(fctx, thread_count);
- if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
- if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) {
- av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n");
- fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy;
- fctx->threads->avctx->internal->is_copy = 1;
- }
-
for (i = 0; i < thread_count; i++) {
PerThreadContext *p = &fctx->threads[i];
+ AVCodecContext *ctx = p->avctx;
- pthread_mutex_lock(&p->mutex);
- p->die = 1;
- pthread_cond_signal(&p->input_cond);
- pthread_mutex_unlock(&p->mutex);
-
- if (p->thread_init)
- pthread_join(p->thread, NULL);
- p->thread_init=0;
+ if (ctx->internal) {
+ if (p->thread_init == INITIALIZED) {
+ pthread_mutex_lock(&p->mutex);
+ p->die = 1;
+ pthread_cond_signal(&p->input_cond);
+ pthread_mutex_unlock(&p->mutex);
- if (codec->close && p->avctx)
- codec->close(p->avctx);
+ pthread_join(p->thread, NULL);
+ }
+ if (codec->close && p->thread_init != UNINITIALIZED)
+ codec->close(ctx);
- release_delayed_buffers(p);
- av_frame_free(&p->frame);
- }
+ if (ctx->priv_data) {
+ if (codec->p.priv_class)
+ av_opt_free(ctx->priv_data);
+ av_freep(&ctx->priv_data);
+ }
- for (i = 0; i < thread_count; i++) {
- PerThreadContext *p = &fctx->threads[i];
+ av_freep(&ctx->slice_offset);
- pthread_mutex_destroy(&p->mutex);
- pthread_mutex_destroy(&p->progress_mutex);
- pthread_cond_destroy(&p->input_cond);
- pthread_cond_destroy(&p->progress_cond);
- pthread_cond_destroy(&p->output_cond);
- av_packet_unref(&p->avpkt);
- av_freep(&p->released_buffers);
-
- if (i && p->avctx) {
- av_freep(&p->avctx->priv_data);
- av_freep(&p->avctx->slice_offset);
+ av_buffer_unref(&ctx->internal->pool);
+ av_packet_free(&ctx->internal->last_pkt_props);
+ av_freep(&ctx->internal);
+ av_buffer_unref(&ctx->hw_frames_ctx);
}
- if (p->avctx) {
- av_freep(&p->avctx->internal);
- av_buffer_unref(&p->avctx->hw_frames_ctx);
- }
+ av_frame_free(&p->frame);
+
+ ff_pthread_free(p, per_thread_offsets);
+ av_packet_free(&p->avpkt);
av_freep(&p->avctx);
}
av_freep(&fctx->threads);
- pthread_mutex_destroy(&fctx->buffer_mutex);
- pthread_mutex_destroy(&fctx->hwaccel_mutex);
- pthread_mutex_destroy(&fctx->async_mutex);
- pthread_cond_destroy(&fctx->async_cond);
+ ff_pthread_free(fctx, thread_ctx_offsets);
+
+ /* if we have stashed hwaccel state, move it to the user-facing context,
+ * so it will be freed in avcodec_close() */
+ av_assert0(!avctx->hwaccel);
+ FFSWAP(const AVHWAccel*, avctx->hwaccel, fctx->stash_hwaccel);
+ FFSWAP(void*, avctx->hwaccel_context, fctx->stash_hwaccel_context);
+ FFSWAP(void*, avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
av_freep(&avctx->internal->thread_ctx);
+}
+
+static av_cold int init_thread(PerThreadContext *p, int *threads_to_free,
+ FrameThreadContext *fctx, AVCodecContext *avctx,
+ const FFCodec *codec, int first)
+{
+ AVCodecContext *copy;
+ int err;
+
+ atomic_init(&p->state, STATE_INPUT_READY);
+
+ copy = av_memdup(avctx, sizeof(*avctx));
+ if (!copy)
+ return AVERROR(ENOMEM);
+ copy->priv_data = NULL;
+
+ /* From now on, this PerThreadContext will be cleaned up by
+ * ff_frame_thread_free in case of errors. */
+ (*threads_to_free)++;
+
+ p->parent = fctx;
+ p->avctx = copy;
+
+ copy->internal = av_mallocz(sizeof(*copy->internal));
+ if (!copy->internal)
+ return AVERROR(ENOMEM);
+ copy->internal->thread_ctx = p;
+
+ copy->delay = avctx->delay;
+
+ if (codec->priv_data_size) {
+ copy->priv_data = av_mallocz(codec->priv_data_size);
+ if (!copy->priv_data)
+ return AVERROR(ENOMEM);
+
+ if (codec->p.priv_class) {
+ *(const AVClass **)copy->priv_data = codec->p.priv_class;
+ err = av_opt_copy(copy->priv_data, avctx->priv_data);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ err = ff_pthread_init(p, per_thread_offsets);
+ if (err < 0)
+ return err;
+
+ if (!(p->frame = av_frame_alloc()) ||
+ !(p->avpkt = av_packet_alloc()))
+ return AVERROR(ENOMEM);
+
+ if (!first)
+ copy->internal->is_copy = 1;
+
+ copy->internal->last_pkt_props = av_packet_alloc();
+ if (!copy->internal->last_pkt_props)
+ return AVERROR(ENOMEM);
+
+ if (codec->init) {
+ err = codec->init(copy);
+ if (err < 0) {
+ if (codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP)
+ p->thread_init = NEEDS_CLOSE;
+ return err;
+ }
+ }
+ p->thread_init = NEEDS_CLOSE;
+
+ if (first)
+ update_context_from_thread(avctx, copy, 1);
- if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
- av_opt_free(avctx->priv_data);
- avctx->codec = NULL;
+ atomic_init(&p->debug_threads, (copy->debug & FF_DEBUG_THREADS) != 0);
+
+ err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
+ if (err < 0)
+ return err;
+ p->thread_init = INITIALIZED;
+
+ return 0;
}
int ff_frame_thread_init(AVCodecContext *avctx)
{
int thread_count = avctx->thread_count;
- const AVCodec *codec = avctx->codec;
- AVCodecContext *src = avctx;
+ const FFCodec *codec = ffcodec(avctx->codec);
FrameThreadContext *fctx;
- int i, err = 0;
+ int err, i = 0;
if (!thread_count) {
int nb_cpus = av_cpu_count();
-#if FF_API_DEBUG_MV
- if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || avctx->debug_mv)
- nb_cpus = 1;
-#endif
// use number of cores + 1 as thread count if there is more than one
if (nb_cpus > 1)
thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
@@ -750,92 +819,38 @@ int ff_frame_thread_init(AVCodecContext *avctx)
if (!fctx)
return AVERROR(ENOMEM);
- fctx->threads = av_mallocz_array(thread_count, sizeof(PerThreadContext));
- if (!fctx->threads) {
+ err = ff_pthread_init(fctx, thread_ctx_offsets);
+ if (err < 0) {
+ ff_pthread_free(fctx, thread_ctx_offsets);
av_freep(&avctx->internal->thread_ctx);
- return AVERROR(ENOMEM);
+ return err;
}
- pthread_mutex_init(&fctx->buffer_mutex, NULL);
- pthread_mutex_init(&fctx->hwaccel_mutex, NULL);
- pthread_mutex_init(&fctx->async_mutex, NULL);
- pthread_cond_init(&fctx->async_cond, NULL);
-
fctx->async_lock = 1;
fctx->delaying = 1;
- for (i = 0; i < thread_count; i++) {
- AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
- PerThreadContext *p = &fctx->threads[i];
-
- pthread_mutex_init(&p->mutex, NULL);
- pthread_mutex_init(&p->progress_mutex, NULL);
- pthread_cond_init(&p->input_cond, NULL);
- pthread_cond_init(&p->progress_cond, NULL);
- pthread_cond_init(&p->output_cond, NULL);
-
- p->frame = av_frame_alloc();
- if (!p->frame) {
- av_freep(&copy);
- err = AVERROR(ENOMEM);
- goto error;
- }
-
- p->parent = fctx;
- p->avctx = copy;
+ if (codec->p.type == AVMEDIA_TYPE_VIDEO)
+ avctx->delay = avctx->thread_count - 1;
- if (!copy) {
- err = AVERROR(ENOMEM);
- goto error;
- }
-
- *copy = *src;
-
- copy->internal = av_malloc(sizeof(AVCodecInternal));
- if (!copy->internal) {
- copy->priv_data = NULL;
- err = AVERROR(ENOMEM);
- goto error;
- }
- *copy->internal = *src->internal;
- copy->internal->thread_ctx = p;
- copy->internal->last_pkt_props = &p->avpkt;
-
- if (!i) {
- src = copy;
-
- if (codec->init)
- err = codec->init(copy);
-
- update_context_from_thread(avctx, copy, 1);
- } else {
- copy->priv_data = av_malloc(codec->priv_data_size);
- if (!copy->priv_data) {
- err = AVERROR(ENOMEM);
- goto error;
- }
- memcpy(copy->priv_data, src->priv_data, codec->priv_data_size);
- copy->internal->is_copy = 1;
-
- if (codec->init_thread_copy)
- err = codec->init_thread_copy(copy);
- }
-
- if (err) goto error;
+ fctx->threads = av_calloc(thread_count, sizeof(*fctx->threads));
+ if (!fctx->threads) {
+ err = AVERROR(ENOMEM);
+ goto error;
+ }
- atomic_init(&p->debug_threads, (copy->debug & FF_DEBUG_THREADS) != 0);
+ for (; i < thread_count; ) {
+ PerThreadContext *p = &fctx->threads[i];
+ int first = !i;
- err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
- p->thread_init= !err;
- if(!p->thread_init)
+ err = init_thread(p, &i, fctx, avctx, codec, first);
+ if (err < 0)
goto error;
}
return 0;
error:
- ff_frame_thread_free(avctx, i+1);
-
+ ff_frame_thread_free(avctx, i);
return err;
}
@@ -862,106 +877,49 @@ void ff_thread_flush(AVCodecContext *avctx)
av_frame_unref(p->frame);
p->result = 0;
- release_delayed_buffers(p);
-
- if (avctx->codec->flush)
- avctx->codec->flush(p->avctx);
+ if (ffcodec(avctx->codec)->flush)
+ ffcodec(avctx->codec)->flush(p->avctx);
}
}
int ff_thread_can_start_frame(AVCodecContext *avctx)
{
PerThreadContext *p = avctx->internal->thread_ctx;
+
if ((avctx->active_thread_type&FF_THREAD_FRAME) && atomic_load(&p->state) != STATE_SETTING_UP &&
- (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
+ ffcodec(avctx->codec)->update_thread_context) {
return 0;
}
+
return 1;
}
-static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int flags)
+static int thread_get_buffer_internal(AVCodecContext *avctx, AVFrame *f, int flags)
{
- PerThreadContext *p = avctx->internal->thread_ctx;
+ PerThreadContext *p;
int err;
- f->owner[0] = f->owner[1] = avctx;
-
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
- return ff_get_buffer(avctx, f->f, flags);
+ return ff_get_buffer(avctx, f, flags);
+ p = avctx->internal->thread_ctx;
+FF_DISABLE_DEPRECATION_WARNINGS
if (atomic_load(&p->state) != STATE_SETTING_UP &&
- (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
+ ffcodec(avctx->codec)->update_thread_context) {
+FF_ENABLE_DEPRECATION_WARNINGS
av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
return -1;
}
- if (avctx->internal->allocate_progress) {
- atomic_int *progress;
- f->progress = av_buffer_alloc(2 * sizeof(*progress));
- if (!f->progress) {
- return AVERROR(ENOMEM);
- }
- progress = (atomic_int*)f->progress->data;
-
- atomic_init(&progress[0], -1);
- atomic_init(&progress[1], -1);
- }
-
pthread_mutex_lock(&p->parent->buffer_mutex);
- if (THREAD_SAFE_CALLBACKS(avctx)) {
- err = ff_get_buffer(avctx, f->f, flags);
- } else {
- pthread_mutex_lock(&p->progress_mutex);
- p->requested_frame = f->f;
- p->requested_flags = flags;
- atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
- pthread_cond_broadcast(&p->progress_cond);
-
- while (atomic_load(&p->state) != STATE_SETTING_UP)
- pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
-
- err = p->result;
-
- pthread_mutex_unlock(&p->progress_mutex);
-
- }
- if (!THREAD_SAFE_CALLBACKS(avctx) && !avctx->codec->update_thread_context)
- ff_thread_finish_setup(avctx);
- if (err)
- av_buffer_unref(&f->progress);
+ err = ff_get_buffer(avctx, f, flags);
pthread_mutex_unlock(&p->parent->buffer_mutex);
return err;
}
-enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
-{
- enum AVPixelFormat res;
- PerThreadContext *p = avctx->internal->thread_ctx;
- if (!(avctx->active_thread_type & FF_THREAD_FRAME) || avctx->thread_safe_callbacks ||
- avctx->get_format == avcodec_default_get_format)
- return ff_get_format(avctx, fmt);
- if (atomic_load(&p->state) != STATE_SETTING_UP) {
- av_log(avctx, AV_LOG_ERROR, "get_format() cannot be called after ff_thread_finish_setup()\n");
- return -1;
- }
- pthread_mutex_lock(&p->progress_mutex);
- p->available_formats = fmt;
- atomic_store(&p->state, STATE_GET_FORMAT);
- pthread_cond_broadcast(&p->progress_cond);
-
- while (atomic_load(&p->state) != STATE_SETTING_UP)
- pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
-
- res = p->result_format;
-
- pthread_mutex_unlock(&p->progress_mutex);
-
- return res;
-}
-
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
{
int ret = thread_get_buffer_internal(avctx, f, flags);
if (ret < 0)
@@ -969,45 +927,51 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
return ret;
}
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
+int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
- PerThreadContext *p = avctx->internal->thread_ctx;
- FrameThreadContext *fctx;
- AVFrame *dst, *tmp;
- int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) ||
- THREAD_SAFE_CALLBACKS(avctx);
-
- if (!f->f || !f->f->buf[0])
- return;
+ int ret;
- if (avctx->debug & FF_DEBUG_BUFFERS)
- av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
+ f->owner[0] = f->owner[1] = avctx;
+ /* Hint: It is possible for this function to be called with codecs
+ * that don't support frame threading at all, namely in case
+ * a frame-threaded decoder shares code with codecs that are not.
+     * This currently affects non-MPEG-4 mpegvideo codecs and VP7.
+ * The following check will always be true for them. */
+ if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+ return ff_get_buffer(avctx, f->f, flags);
- av_buffer_unref(&f->progress);
- f->owner[0] = f->owner[1] = NULL;
+ if (ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
+ atomic_int *progress;
+ f->progress = av_buffer_alloc(2 * sizeof(*progress));
+ if (!f->progress) {
+ return AVERROR(ENOMEM);
+ }
+ progress = (atomic_int*)f->progress->data;
- if (can_direct_free) {
- av_frame_unref(f->f);
- return;
+ atomic_init(&progress[0], -1);
+ atomic_init(&progress[1], -1);
}
- fctx = p->parent;
- pthread_mutex_lock(&fctx->buffer_mutex);
+ ret = ff_thread_get_buffer(avctx, f->f, flags);
+ if (ret)
+ av_buffer_unref(&f->progress);
+ return ret;
+}
- if (p->num_released_buffers + 1 >= INT_MAX / sizeof(*p->released_buffers))
- goto fail;
- tmp = av_fast_realloc(p->released_buffers, &p->released_buffers_allocated,
- (p->num_released_buffers + 1) *
- sizeof(*p->released_buffers));
- if (!tmp)
- goto fail;
- p->released_buffers = tmp;
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
+{
+ if (!f)
+ return;
- dst = &p->released_buffers[p->num_released_buffers];
- av_frame_move_ref(dst, f->f);
+ if (avctx->debug & FF_DEBUG_BUFFERS)
+ av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
- p->num_released_buffers++;
+ av_frame_unref(f);
+}
-fail:
- pthread_mutex_unlock(&fctx->buffer_mutex);
+void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
+{
+ av_buffer_unref(&f->progress);
+ f->owner[0] = f->owner[1] = NULL;
+ ff_thread_release_buffer(avctx, f->f);
}
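
For context, a minimal sketch (not part of the patch) of how a frame-threaded decoder is expected to pair the new ff_thread_get_ext_buffer()/ff_thread_release_ext_buffer() helpers with the progress calls from threadframe.h; the example_* names are placeholders and error handling is abbreviated.

#include <limits.h>
#include "libavutil/frame.h"
#include "avcodec.h"
#include "threadframe.h"

static int example_alloc_ref(AVCodecContext *avctx, ThreadFrame *tf)
{
    int ret;

    tf->f = av_frame_alloc();
    if (!tf->f)
        return AVERROR(ENOMEM);

    /* Falls back to ff_get_buffer() when frame threading is not active,
     * so the same call works for single-threaded decoding. */
    ret = ff_thread_get_ext_buffer(avctx, tf, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0) {
        av_frame_free(&tf->f);
        return ret;
    }

    /* ... decode into tf->f, then mark the whole picture as available ... */
    ff_thread_report_progress(tf, INT_MAX, 0);
    return 0;
}

static void example_release_ref(AVCodecContext *avctx, ThreadFrame *tf)
{
    /* Unrefs the progress buffer and the frame data. */
    ff_thread_release_ext_buffer(avctx, tf);
    av_frame_free(&tf->f);
}
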
diff --git a/media/ffvpx/libavcodec/pthread_internal.h b/media/ffvpx/libavcodec/pthread_internal.h
index d2115cbbaf..d0b6a7a673 100644
--- a/media/ffvpx/libavcodec/pthread_internal.h
+++ b/media/ffvpx/libavcodec/pthread_internal.h
@@ -31,4 +31,36 @@ void ff_slice_thread_free(AVCodecContext *avctx);
int ff_frame_thread_init(AVCodecContext *avctx);
void ff_frame_thread_free(AVCodecContext *avctx, int thread_count);
+#define THREAD_SENTINEL 0 // This forbids putting a mutex/condition variable at the front.
+/**
+ * Initialize/destroy a list of mutexes/conditions contained in a structure.
+ * The positions of these mutexes/conditions in the structure are given by
+ * their offsets. Because it is undefined behaviour to destroy
+ * an uninitialized mutex/condition, ff_pthread_init() stores the number
+ * of successfully initialized mutexes and conditions in the object itself
+ * and ff_pthread_free() uses this number to destroy exactly the mutexes and
+ * condition variables that have been successfully initialized.
+ *
+ * @param obj The object containing the mutexes/conditions.
+ * @param[in] offsets An array of offsets. Its first member gives the offset
+ * of the variable that contains the count of successfully
+ * initialized mutexes/condition variables; said variable
+ * must be an unsigned int. Two arrays of offsets, each
+ *                    delimited by a THREAD_SENTINEL, follow. The first
+ * contains the offsets of all the mutexes, the second
+ * contains the offsets of all the condition variables.
+ */
+int ff_pthread_init(void *obj, const unsigned offsets[]);
+void ff_pthread_free(void *obj, const unsigned offsets[]);
+
+/**
+ * Macros to help create the above lists. mutexes and conds need
+ * to be parentheses-enclosed lists of offsets in the containing structure.
+ */
+#define OFFSET_ARRAY(...) __VA_ARGS__, THREAD_SENTINEL
+#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds) \
+static const unsigned name ## _offsets[] = { offsetof(type, cnt_variable), \
+ OFFSET_ARRAY mutexes, \
+ OFFSET_ARRAY conds }
+
#endif // AVCODEC_PTHREAD_INTERNAL_H
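
To illustrate the helpers documented above, here is a small hypothetical example (not in the patch) of a struct using DEFINE_OFFSET_ARRAY together with ff_pthread_init()/ff_pthread_free(); ExampleThreadState and example_state_setup() are invented names.

#include <stddef.h>
#include "libavutil/thread.h"
#include "pthread_internal.h"

typedef struct ExampleThreadState {
    unsigned pthread_init_cnt;  // count of successfully initialized mutexes/conds
    pthread_mutex_t lock;
    pthread_cond_t  cond;
} ExampleThreadState;

#define OFF(member) offsetof(ExampleThreadState, member)
DEFINE_OFFSET_ARRAY(ExampleThreadState, example_state, pthread_init_cnt,
                    (OFF(lock)), (OFF(cond)));
#undef OFF

static int example_state_setup(ExampleThreadState *s)
{
    /* On failure, ff_pthread_free() destroys only what was initialized,
     * which is exactly what the counter tracks. */
    int err = ff_pthread_init(s, example_state_offsets);
    if (err < 0)
        ff_pthread_free(s, example_state_offsets);
    return err;
}
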
diff --git a/media/ffvpx/libavcodec/pthread_slice.c b/media/ffvpx/libavcodec/pthread_slice.c
index 77cfe3c9f6..a4d31c6f4d 100644
--- a/media/ffvpx/libavcodec/pthread_slice.c
+++ b/media/ffvpx/libavcodec/pthread_slice.c
@@ -25,6 +25,7 @@
#include "config.h"
#include "avcodec.h"
+#include "codec_internal.h"
#include "internal.h"
#include "pthread_internal.h"
#include "thread.h"
@@ -40,6 +41,11 @@ typedef int (action_func)(AVCodecContext *c, void *arg);
typedef int (action_func2)(AVCodecContext *c, void *arg, int jobnr, int threadnr);
typedef int (main_func)(AVCodecContext *c);
+typedef struct Progress {
+ pthread_cond_t cond;
+ pthread_mutex_t mutex;
+} Progress;
+
typedef struct SliceThreadContext {
AVSliceThread *thread;
action_func *func;
@@ -52,8 +58,7 @@ typedef struct SliceThreadContext {
int *entries;
int entries_count;
int thread_count;
- pthread_cond_t *progress_cond;
- pthread_mutex_t *progress_mutex;
+ Progress *progress;
} SliceThreadContext;
static void main_function(void *priv) {
@@ -82,13 +87,13 @@ void ff_slice_thread_free(AVCodecContext *avctx)
avpriv_slicethread_free(&c->thread);
for (i = 0; i < c->thread_count; i++) {
- pthread_mutex_destroy(&c->progress_mutex[i]);
- pthread_cond_destroy(&c->progress_cond[i]);
+ Progress *const progress = &c->progress[i];
+ pthread_mutex_destroy(&progress->mutex);
+ pthread_cond_destroy(&progress->cond);
}
av_freep(&c->entries);
- av_freep(&c->progress_mutex);
- av_freep(&c->progress_cond);
+ av_freep(&c->progress);
av_freep(&avctx->internal->thread_ctx);
}
@@ -130,7 +135,7 @@ int ff_slice_thread_init(AVCodecContext *avctx)
{
SliceThreadContext *c;
int thread_count = avctx->thread_count;
- static void (*mainfunc)(void *);
+ void (*mainfunc)(void *);
     // We cannot do this in the encoder init as the threads are created before the encoder's init is called
if (av_codec_is_encoder(avctx->codec) &&
@@ -155,7 +160,7 @@ int ff_slice_thread_init(AVCodecContext *avctx)
}
avctx->internal->thread_ctx = c = av_mallocz(sizeof(*c));
- mainfunc = avctx->codec->caps_internal & FF_CODEC_CAP_SLICE_THREAD_HAS_MF ? &main_function : NULL;
+ mainfunc = ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SLICE_THREAD_HAS_MF ? &main_function : NULL;
if (!c || (thread_count = avpriv_slicethread_create(&c->thread, avctx, worker_func, mainfunc, thread_count)) <= 1) {
if (c)
avpriv_slicethread_free(&c->thread);
@@ -171,72 +176,85 @@ int ff_slice_thread_init(AVCodecContext *avctx)
return 0;
}
+int av_cold ff_slice_thread_init_progress(AVCodecContext *avctx)
+{
+ SliceThreadContext *const p = avctx->internal->thread_ctx;
+ int err, i = 0, thread_count = avctx->thread_count;
+
+ p->progress = av_calloc(thread_count, sizeof(*p->progress));
+ if (!p->progress) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (; i < thread_count; i++) {
+ Progress *const progress = &p->progress[i];
+ err = pthread_mutex_init(&progress->mutex, NULL);
+ if (err) {
+ err = AVERROR(err);
+ goto fail;
+ }
+ err = pthread_cond_init (&progress->cond, NULL);
+ if (err) {
+ err = AVERROR(err);
+ pthread_mutex_destroy(&progress->mutex);
+ goto fail;
+ }
+ }
+ err = 0;
+fail:
+ p->thread_count = i;
+ return err;
+}
+
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
{
SliceThreadContext *p = avctx->internal->thread_ctx;
+ Progress *const progress = &p->progress[thread];
int *entries = p->entries;
- pthread_mutex_lock(&p->progress_mutex[thread]);
+ pthread_mutex_lock(&progress->mutex);
entries[field] +=n;
- pthread_cond_signal(&p->progress_cond[thread]);
- pthread_mutex_unlock(&p->progress_mutex[thread]);
+ pthread_cond_signal(&progress->cond);
+ pthread_mutex_unlock(&progress->mutex);
}
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
{
SliceThreadContext *p = avctx->internal->thread_ctx;
+ Progress *progress;
int *entries = p->entries;
if (!entries || !field) return;
thread = thread ? thread - 1 : p->thread_count - 1;
+ progress = &p->progress[thread];
- pthread_mutex_lock(&p->progress_mutex[thread]);
+ pthread_mutex_lock(&progress->mutex);
while ((entries[field - 1] - entries[field]) < shift){
- pthread_cond_wait(&p->progress_cond[thread], &p->progress_mutex[thread]);
+ pthread_cond_wait(&progress->cond, &progress->mutex);
}
- pthread_mutex_unlock(&p->progress_mutex[thread]);
+ pthread_mutex_unlock(&progress->mutex);
}
-int ff_alloc_entries(AVCodecContext *avctx, int count)
+int ff_slice_thread_allocz_entries(AVCodecContext *avctx, int count)
{
- int i;
-
if (avctx->active_thread_type & FF_THREAD_SLICE) {
SliceThreadContext *p = avctx->internal->thread_ctx;
- if (p->entries) {
- av_assert0(p->thread_count == avctx->thread_count);
- av_freep(&p->entries);
+ if (p->entries_count == count) {
+ memset(p->entries, 0, p->entries_count * sizeof(*p->entries));
+ return 0;
}
+ av_freep(&p->entries);
- p->thread_count = avctx->thread_count;
- p->entries = av_mallocz_array(count, sizeof(int));
-
- if (!p->progress_mutex) {
- p->progress_mutex = av_malloc_array(p->thread_count, sizeof(pthread_mutex_t));
- p->progress_cond = av_malloc_array(p->thread_count, sizeof(pthread_cond_t));
- }
-
- if (!p->entries || !p->progress_mutex || !p->progress_cond) {
- av_freep(&p->entries);
- av_freep(&p->progress_mutex);
- av_freep(&p->progress_cond);
+ p->entries = av_calloc(count, sizeof(*p->entries));
+ if (!p->entries) {
+ p->entries_count = 0;
return AVERROR(ENOMEM);
}
p->entries_count = count;
-
- for (i = 0; i < p->thread_count; i++) {
- pthread_mutex_init(&p->progress_mutex[i], NULL);
- pthread_cond_init(&p->progress_cond[i], NULL);
- }
}
return 0;
}
-
-void ff_reset_entries(AVCodecContext *avctx)
-{
- SliceThreadContext *p = avctx->internal->thread_ctx;
- memset(p->entries, 0, p->entries_count * sizeof(int));
-}
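
As a rough usage sketch (not part of the patch), a slice-threaded decoder would call the renamed helpers roughly as below; mb_rows, field and the example_* names are placeholders.

#include "avcodec.h"
#include "thread.h"

static int example_init(AVCodecContext *avctx)
{
    /* Allocate one Progress (mutex/cond) pair per worker thread, once. */
    if (avctx->active_thread_type & FF_THREAD_SLICE)
        return ff_slice_thread_init_progress(avctx);
    return 0;
}

static int example_start_frame(AVCodecContext *avctx, int mb_rows)
{
    /* Zero (or reallocate) one entry counter per macroblock row;
     * this is a no-op when slice threading is not active. */
    return ff_slice_thread_allocz_entries(avctx, mb_rows);
}

static void example_row_sync(AVCodecContext *avctx, int field, int self)
{
    /* Report one unit of progress on this field's counter, signalling
     * worker "self" ... */
    ff_thread_report_progress2(avctx, field, self, 1);
    /* ... then wait until the worker handling the preceding slice is far
     * enough ahead before reading from it. */
    ff_thread_await_progress2(avctx, field, self, 1);
}
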
diff --git a/media/ffvpx/libavcodec/put_bits.h b/media/ffvpx/libavcodec/put_bits.h
index 1ceb1cc766..4561dc131a 100644
--- a/media/ffvpx/libavcodec/put_bits.h
+++ b/media/ffvpx/libavcodec/put_bits.h
@@ -29,14 +29,28 @@
#include <stdint.h>
#include <stddef.h>
+#include "config.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+
+#if ARCH_X86_64
+// TODO: Benchmark and optionally enable on other 64-bit architectures.
+typedef uint64_t BitBuf;
+#define AV_WBBUF AV_WB64
+#define AV_WLBUF AV_WL64
+#else
+typedef uint32_t BitBuf;
+#define AV_WBBUF AV_WB32
+#define AV_WLBUF AV_WL32
+#endif
+
+static const int BUF_BITS = 8 * sizeof(BitBuf);
typedef struct PutBitContext {
- uint32_t bit_buf;
+ BitBuf bit_buf;
int bit_left;
uint8_t *buf, *buf_ptr, *buf_end;
- int size_in_bits;
} PutBitContext;
/**
@@ -53,46 +67,74 @@ static inline void init_put_bits(PutBitContext *s, uint8_t *buffer,
buffer = NULL;
}
- s->size_in_bits = 8 * buffer_size;
s->buf = buffer;
s->buf_end = s->buf + buffer_size;
s->buf_ptr = s->buf;
- s->bit_left = 32;
+ s->bit_left = BUF_BITS;
s->bit_buf = 0;
}
/**
+ * @return the total number of bits written to the bitstream.
+ */
+static inline int put_bits_count(PutBitContext *s)
+{
+ return (s->buf_ptr - s->buf) * 8 + BUF_BITS - s->bit_left;
+}
+
+/**
+ * @return the number of bytes output so far; may only be called
+ * when the PutBitContext is freshly initialized or flushed.
+ */
+static inline int put_bytes_output(const PutBitContext *s)
+{
+ av_assert2(s->bit_left == BUF_BITS);
+ return s->buf_ptr - s->buf;
+}
+
+/**
+ * @param round_up When set, the number of bits written so far will be
+ * rounded up to the next byte.
+ * @return the number of bytes output so far.
+ */
+static inline int put_bytes_count(const PutBitContext *s, int round_up)
+{
+ return s->buf_ptr - s->buf + ((BUF_BITS - s->bit_left + (round_up ? 7 : 0)) >> 3);
+}
+
+/**
* Rebase the bit writer onto a reallocated buffer.
*
* @param buffer the buffer where to put bits
* @param buffer_size the size in bytes of buffer,
- * must be larger than the previous size
+ * must be large enough to hold everything written so far
*/
static inline void rebase_put_bits(PutBitContext *s, uint8_t *buffer,
int buffer_size)
{
- av_assert0(8*buffer_size > s->size_in_bits);
+ av_assert0(8*buffer_size >= put_bits_count(s));
s->buf_end = buffer + buffer_size;
s->buf_ptr = buffer + (s->buf_ptr - s->buf);
s->buf = buffer;
- s->size_in_bits = 8 * buffer_size;
}
/**
- * @return the total number of bits written to the bitstream.
+ * @return the number of bits available in the bitstream.
*/
-static inline int put_bits_count(PutBitContext *s)
+static inline int put_bits_left(PutBitContext* s)
{
- return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left;
+ return (s->buf_end - s->buf_ptr) * 8 - BUF_BITS + s->bit_left;
}
/**
- * @return the number of bits available in the bitstream.
+ * @param round_up When set, the number of bits written will be
+ * rounded up to the next byte.
+ * @return the number of bytes left.
*/
-static inline int put_bits_left(PutBitContext* s)
+static inline int put_bytes_left(const PutBitContext *s, int round_up)
{
- return (s->buf_end - s->buf_ptr) * 8 - 32 + s->bit_left;
+ return s->buf_end - s->buf_ptr - ((BUF_BITS - s->bit_left + (round_up ? 7 : 0)) >> 3);
}
/**
@@ -101,52 +143,47 @@ static inline int put_bits_left(PutBitContext* s)
static inline void flush_put_bits(PutBitContext *s)
{
#ifndef BITSTREAM_WRITER_LE
- if (s->bit_left < 32)
+ if (s->bit_left < BUF_BITS)
s->bit_buf <<= s->bit_left;
#endif
- while (s->bit_left < 32) {
+ while (s->bit_left < BUF_BITS) {
av_assert0(s->buf_ptr < s->buf_end);
#ifdef BITSTREAM_WRITER_LE
*s->buf_ptr++ = s->bit_buf;
s->bit_buf >>= 8;
#else
- *s->buf_ptr++ = s->bit_buf >> 24;
+ *s->buf_ptr++ = s->bit_buf >> (BUF_BITS - 8);
s->bit_buf <<= 8;
#endif
s->bit_left += 8;
}
- s->bit_left = 32;
+ s->bit_left = BUF_BITS;
s->bit_buf = 0;
}
static inline void flush_put_bits_le(PutBitContext *s)
{
- while (s->bit_left < 32) {
+ while (s->bit_left < BUF_BITS) {
av_assert0(s->buf_ptr < s->buf_end);
*s->buf_ptr++ = s->bit_buf;
s->bit_buf >>= 8;
s->bit_left += 8;
}
- s->bit_left = 32;
+ s->bit_left = BUF_BITS;
s->bit_buf = 0;
}
#ifdef BITSTREAM_WRITER_LE
-#define avpriv_align_put_bits align_put_bits_unsupported_here
-#define avpriv_put_string ff_put_string_unsupported_here
-#define avpriv_copy_bits avpriv_copy_bits_unsupported_here
+#define ff_put_string ff_put_string_unsupported_here
+#define ff_copy_bits ff_copy_bits_unsupported_here
#else
-/**
- * Pad the bitstream with zeros up to the next byte boundary.
- */
-void avpriv_align_put_bits(PutBitContext *s);
/**
* Put the string string in the bitstream.
*
* @param terminate_string 0-terminates the written string if value is 1
*/
-void avpriv_put_string(PutBitContext *pb, const char *string,
+void ff_put_string(PutBitContext *pb, const char *string,
int terminate_string);
/**
@@ -154,36 +191,30 @@ void avpriv_put_string(PutBitContext *pb, const char *string,
*
* @param length the number of bits of src to copy
*/
-void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length);
+void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length);
#endif
-/**
- * Write up to 31 bits into a bitstream.
- * Use put_bits32 to write 32 bits.
- */
-static inline void put_bits(PutBitContext *s, int n, unsigned int value)
+static inline void put_bits_no_assert(PutBitContext *s, int n, BitBuf value)
{
- unsigned int bit_buf;
+ BitBuf bit_buf;
int bit_left;
- av_assert2(n <= 31 && value < (1U << n));
-
bit_buf = s->bit_buf;
bit_left = s->bit_left;
/* XXX: optimize */
#ifdef BITSTREAM_WRITER_LE
- bit_buf |= value << (32 - bit_left);
+ bit_buf |= value << (BUF_BITS - bit_left);
if (n >= bit_left) {
- if (3 < s->buf_end - s->buf_ptr) {
- AV_WL32(s->buf_ptr, bit_buf);
- s->buf_ptr += 4;
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WLBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
} else {
av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
av_assert2(0);
}
bit_buf = value >> bit_left;
- bit_left += 32;
+ bit_left += BUF_BITS;
}
bit_left -= n;
#else
@@ -193,14 +224,14 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
} else {
bit_buf <<= bit_left;
bit_buf |= value >> (n - bit_left);
- if (3 < s->buf_end - s->buf_ptr) {
- AV_WB32(s->buf_ptr, bit_buf);
- s->buf_ptr += 4;
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WBBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
} else {
av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
av_assert2(0);
}
- bit_left += 32 - n;
+ bit_left += BUF_BITS - n;
bit_buf = value;
}
#endif
@@ -209,27 +240,37 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
s->bit_left = bit_left;
}
-static inline void put_bits_le(PutBitContext *s, int n, unsigned int value)
+/**
+ * Write up to 31 bits into a bitstream.
+ * Use put_bits32 to write 32 bits.
+ */
+static inline void put_bits(PutBitContext *s, int n, BitBuf value)
+{
+ av_assert2(n <= 31 && value < (1UL << n));
+ put_bits_no_assert(s, n, value);
+}
+
+static inline void put_bits_le(PutBitContext *s, int n, BitBuf value)
{
- unsigned int bit_buf;
+ BitBuf bit_buf;
int bit_left;
- av_assert2(n <= 31 && value < (1U << n));
+ av_assert2(n <= 31 && value < (1UL << n));
bit_buf = s->bit_buf;
bit_left = s->bit_left;
- bit_buf |= value << (32 - bit_left);
+ bit_buf |= value << (BUF_BITS - bit_left);
if (n >= bit_left) {
- if (3 < s->buf_end - s->buf_ptr) {
- AV_WL32(s->buf_ptr, bit_buf);
- s->buf_ptr += 4;
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WLBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
} else {
av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
av_assert2(0);
}
bit_buf = value >> bit_left;
- bit_left += 32;
+ bit_left += BUF_BITS;
}
bit_left -= n;
@@ -249,17 +290,22 @@ static inline void put_sbits(PutBitContext *pb, int n, int32_t value)
*/
static void av_unused put_bits32(PutBitContext *s, uint32_t value)
{
- unsigned int bit_buf;
+ BitBuf bit_buf;
int bit_left;
+ if (BUF_BITS > 32) {
+ put_bits_no_assert(s, 32, value);
+ return;
+ }
+
bit_buf = s->bit_buf;
bit_left = s->bit_left;
#ifdef BITSTREAM_WRITER_LE
- bit_buf |= value << (32 - bit_left);
- if (3 < s->buf_end - s->buf_ptr) {
- AV_WL32(s->buf_ptr, bit_buf);
- s->buf_ptr += 4;
+ bit_buf |= (BitBuf)value << (BUF_BITS - bit_left);
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WLBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
} else {
av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
av_assert2(0);
@@ -267,10 +313,10 @@ static void av_unused put_bits32(PutBitContext *s, uint32_t value)
bit_buf = (uint64_t)value >> bit_left;
#else
bit_buf = (uint64_t)bit_buf << bit_left;
- bit_buf |= value >> (32 - bit_left);
- if (3 < s->buf_end - s->buf_ptr) {
- AV_WB32(s->buf_ptr, bit_buf);
- s->buf_ptr += 4;
+ bit_buf |= (BitBuf)value >> (BUF_BITS - bit_left);
+ if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
+ AV_WBBUF(s->buf_ptr, bit_buf);
+ s->buf_ptr += sizeof(BitBuf);
} else {
av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
av_assert2(0);
@@ -317,6 +363,13 @@ static inline void put_bits64(PutBitContext *s, int n, uint64_t value)
}
}
+static inline void put_sbits63(PutBitContext *pb, int n, int64_t value)
+{
+ av_assert2(n >= 0 && n < 64);
+
+ put_bits64(pb, n, (uint64_t)(value) & (~(UINT64_MAX << n)));
+}
+
/**
* Return the pointer to the byte where the bitstream writer will put
* the next bit.
@@ -333,7 +386,7 @@ static inline uint8_t *put_bits_ptr(PutBitContext *s)
static inline void skip_put_bytes(PutBitContext *s, int n)
{
av_assert2((put_bits_count(s) & 7) == 0);
- av_assert2(s->bit_left == 32);
+ av_assert2(s->bit_left == BUF_BITS);
av_assert0(n <= s->buf_end - s->buf_ptr);
s->buf_ptr += n;
}
@@ -341,13 +394,13 @@ static inline void skip_put_bytes(PutBitContext *s, int n)
/**
* Skip the given number of bits.
* Must only be used if the actual values in the bitstream do not matter.
- * If n is 0 the behavior is undefined.
+ * If n is < 0 the behavior is undefined.
*/
static inline void skip_put_bits(PutBitContext *s, int n)
{
- s->bit_left -= n;
- s->buf_ptr -= 4 * (s->bit_left >> 5);
- s->bit_left &= 31;
+ unsigned bits = BUF_BITS - s->bit_left + n;
+ s->buf_ptr += sizeof(BitBuf) * (bits / BUF_BITS);
+ s->bit_left = BUF_BITS - (bits & (BUF_BITS - 1));
}
/**
@@ -357,9 +410,19 @@ static inline void skip_put_bits(PutBitContext *s, int n)
*/
static inline void set_put_bits_buffer_size(PutBitContext *s, int size)
{
- av_assert0(size <= INT_MAX/8 - 32);
+ av_assert0(size <= INT_MAX/8 - BUF_BITS);
s->buf_end = s->buf + size;
- s->size_in_bits = 8*size;
}
+/**
+ * Pad the bitstream with zeros up to the next byte boundary.
+ */
+static inline void align_put_bits(PutBitContext *s)
+{
+ put_bits(s, s->bit_left & 7, 0);
+}
+
+#undef AV_WBBUF
+#undef AV_WLBUF
+
#endif /* AVCODEC_PUT_BITS_H */
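
A brief, self-contained sketch (not part of the patch) of the bit writer after the switch to the BitBuf accumulator; example_write_header() and the values written are invented for illustration.

#include <stdint.h>
#include "put_bits.h"

static int example_write_header(uint8_t *buf, int buf_size /* >= 16 here */)
{
    PutBitContext pb;

    init_put_bits(&pb, buf, buf_size);
    put_bits(&pb, 3, 5);                    /* up to 31 bits per call */
    put_bits32(&pb, 0xDEADBEEF);            /* exactly 32 bits */
    put_bits64(&pb, 48, 0x0123456789ABULL);
    align_put_bits(&pb);                    /* zero-pad to a byte boundary */
    flush_put_bits(&pb);

    /* put_bytes_output() may only be called on a flushed context. */
    return put_bytes_output(&pb);
}
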
diff --git a/media/ffvpx/libavcodec/ratecontrol.h b/media/ffvpx/libavcodec/ratecontrol.h
index 2a7aaec644..4de80fad90 100644
--- a/media/ffvpx/libavcodec/ratecontrol.h
+++ b/media/ffvpx/libavcodec/ratecontrol.h
@@ -80,9 +80,6 @@ typedef struct RateControlContext{
int frame_count[5];
int last_non_b_pict_type;
- void *non_lavc_opaque; ///< context for non lavc rc code (for example xvid)
- float dry_run_qscale; ///< for xvid rc
- int last_picture_number; ///< for xvid rc
AVExpr * rc_eq_eval;
}RateControlContext;
diff --git a/media/ffvpx/libavcodec/raw.c b/media/ffvpx/libavcodec/raw.c
index b6fb91c1c6..1e5b48d1e0 100644
--- a/media/ffvpx/libavcodec/raw.c
+++ b/media/ffvpx/libavcodec/raw.c
@@ -24,11 +24,11 @@
* Raw Video Codec
*/
+#include "libavutil/macros.h"
#include "avcodec.h"
#include "raw.h"
-#include "libavutil/common.h"
-const PixelFormatTag ff_raw_pix_fmt_tags[] = {
+static const PixelFormatTag raw_pix_fmt_tags[] = {
{ AV_PIX_FMT_YUV420P, MKTAG('I', '4', '2', '0') }, /* Planar formats */
{ AV_PIX_FMT_YUV420P, MKTAG('I', 'Y', 'U', 'V') },
{ AV_PIX_FMT_YUV420P, MKTAG('y', 'v', '1', '2') },
@@ -72,6 +72,7 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = {
{ AV_PIX_FMT_GRAY8, MKTAG('G', 'R', 'E', 'Y') },
{ AV_PIX_FMT_NV12, MKTAG('N', 'V', '1', '2') },
{ AV_PIX_FMT_NV21, MKTAG('N', 'V', '2', '1') },
+ { AV_PIX_FMT_VUYA, MKTAG('A', 'Y', 'U', 'V') }, /* MS 4:4:4:4 */
/* nut */
{ AV_PIX_FMT_RGB555LE, MKTAG('R', 'G', 'B', 15) },
@@ -246,6 +247,7 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = {
{ AV_PIX_FMT_GRAY16BE,MKTAG('b', '1', '6', 'g') },
{ AV_PIX_FMT_RGB48BE, MKTAG('b', '4', '8', 'r') },
{ AV_PIX_FMT_RGBA64BE,MKTAG('b', '6', '4', 'a') },
+ { AV_PIX_FMT_BAYER_RGGB16BE, MKTAG('B', 'G', 'G', 'R') },
/* vlc */
{ AV_PIX_FMT_YUV410P, MKTAG('I', '4', '1', '0') },
@@ -298,12 +300,12 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = {
const struct PixelFormatTag *avpriv_get_raw_pix_fmt_tags(void)
{
- return ff_raw_pix_fmt_tags;
+ return raw_pix_fmt_tags;
}
unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat fmt)
{
- const PixelFormatTag *tags = ff_raw_pix_fmt_tags;
+ const PixelFormatTag *tags = raw_pix_fmt_tags;
while (tags->pix_fmt >= 0) {
if (tags->pix_fmt == fmt)
return tags->fourcc;
@@ -312,7 +314,7 @@ unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat fmt)
return 0;
}
-const PixelFormatTag avpriv_pix_fmt_bps_avi[] = {
+static const PixelFormatTag pix_fmt_bps_avi[] = {
{ AV_PIX_FMT_PAL8, 1 },
{ AV_PIX_FMT_PAL8, 2 },
{ AV_PIX_FMT_PAL8, 4 },
@@ -325,7 +327,7 @@ const PixelFormatTag avpriv_pix_fmt_bps_avi[] = {
{ AV_PIX_FMT_NONE, 0 },
};
-const PixelFormatTag avpriv_pix_fmt_bps_mov[] = {
+static const PixelFormatTag pix_fmt_bps_mov[] = {
{ AV_PIX_FMT_PAL8, 1 },
{ AV_PIX_FMT_PAL8, 2 },
{ AV_PIX_FMT_PAL8, 4 },
@@ -336,3 +338,33 @@ const PixelFormatTag avpriv_pix_fmt_bps_mov[] = {
{ AV_PIX_FMT_PAL8, 33 },
{ AV_PIX_FMT_NONE, 0 },
};
+
+static enum AVPixelFormat find_pix_fmt(const PixelFormatTag *tags,
+ unsigned int fourcc)
+{
+ while (tags->pix_fmt != AV_PIX_FMT_NONE) {
+ if (tags->fourcc == fourcc)
+ return tags->pix_fmt;
+ tags++;
+ }
+ return AV_PIX_FMT_NONE;
+}
+
+enum AVPixelFormat avpriv_pix_fmt_find(enum PixelFormatTagLists list,
+ unsigned fourcc)
+{
+ const PixelFormatTag *tags;
+
+ switch (list) {
+ case PIX_FMT_LIST_RAW:
+ tags = raw_pix_fmt_tags;
+ break;
+ case PIX_FMT_LIST_AVI:
+ tags = pix_fmt_bps_avi;
+ break;
+ case PIX_FMT_LIST_MOV:
+ tags = pix_fmt_bps_mov;
+ break;
+ }
+ return find_pix_fmt(tags, fourcc);
+}
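
With the tables now static, lookups go through avpriv_pix_fmt_find(); a hypothetical caller (not part of the patch) might combine the raw-fourcc and AVI bits-per-pixel lists like this.

#include "libavutil/pixfmt.h"
#include "raw.h"

static enum AVPixelFormat example_lookup(unsigned int fourcc, int bits_per_pixel)
{
    /* Try the raw fourcc list first, e.g. 'I420' maps to AV_PIX_FMT_YUV420P. */
    enum AVPixelFormat fmt = avpriv_pix_fmt_find(PIX_FMT_LIST_RAW, fourcc);
    if (fmt != AV_PIX_FMT_NONE)
        return fmt;
    /* Otherwise fall back to the AVI table, which is keyed on bit depth. */
    return avpriv_pix_fmt_find(PIX_FMT_LIST_AVI, bits_per_pixel);
}
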
diff --git a/media/ffvpx/libavcodec/raw.h b/media/ffvpx/libavcodec/raw.h
index 28a27b1f9e..9a4ddef8fc 100644
--- a/media/ffvpx/libavcodec/raw.h
+++ b/media/ffvpx/libavcodec/raw.h
@@ -27,22 +27,22 @@
#ifndef AVCODEC_RAW_H
#define AVCODEC_RAW_H
-#include "avcodec.h"
-#include "internal.h"
-#include "libavutil/internal.h"
+#include "libavutil/pixfmt.h"
typedef struct PixelFormatTag {
enum AVPixelFormat pix_fmt;
unsigned int fourcc;
} PixelFormatTag;
-extern const PixelFormatTag ff_raw_pix_fmt_tags[]; // exposed through avpriv_get_raw_pix_fmt_tags()
-
const struct PixelFormatTag *avpriv_get_raw_pix_fmt_tags(void);
-enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags, unsigned int fourcc);
+enum PixelFormatTagLists {
+ PIX_FMT_LIST_RAW,
+ PIX_FMT_LIST_AVI,
+ PIX_FMT_LIST_MOV,
+};
-extern av_export_avcodec const PixelFormatTag avpriv_pix_fmt_bps_avi[];
-extern av_export_avcodec const PixelFormatTag avpriv_pix_fmt_bps_mov[];
+enum AVPixelFormat avpriv_pix_fmt_find(enum PixelFormatTagLists list,
+ unsigned fourcc);
#endif /* AVCODEC_RAW_H */
diff --git a/media/ffvpx/libavcodec/rdft.c b/media/ffvpx/libavcodec/rdft.c
index 6ba7484238..ac6f5d6781 100644
--- a/media/ffvpx/libavcodec/rdft.c
+++ b/media/ffvpx/libavcodec/rdft.c
@@ -20,6 +20,7 @@
*/
#include <stdlib.h>
#include <math.h>
+#include "libavutil/error.h"
#include "libavutil/mathematics.h"
#include "rdft.h"
@@ -106,7 +107,9 @@ av_cold int ff_rdft_init(RDFTContext *s, int nbits, enum RDFTransformType trans)
s->tsin = ff_cos_tabs[nbits] + (n >> 2);
s->rdft_calc = rdft_calc_c;
- if (ARCH_ARM) ff_rdft_init_arm(s);
+#if ARCH_ARM
+ ff_rdft_init_arm(s);
+#endif
return 0;
}
diff --git a/media/ffvpx/libavcodec/rl.h b/media/ffvpx/libavcodec/rl.h
index 9a767bc5fd..4380fda272 100644
--- a/media/ffvpx/libavcodec/rl.h
+++ b/media/ffvpx/libavcodec/rl.h
@@ -49,30 +49,55 @@ typedef struct RLTable {
} RLTable;
/**
- * @param static_store static uint8_t array[2][2*MAX_RUN + MAX_LEVEL + 3] which will hold
- * the level and run tables, if this is NULL av_malloc() will be used
+ * Initialize max_level and index_run from table_run and table_level;
+ * this is equivalent to initializing RLTable.max_level[0] and
+ * RLTable.index_run[0] with ff_rl_init().
*/
-int ff_rl_init(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3]);
-void ff_rl_init_vlc(RLTable *rl, unsigned static_size);
+void ff_rl_init_level_run(uint8_t max_level[MAX_LEVEL + 1],
+ uint8_t index_run[MAX_RUN + 1],
+ const uint8_t table_run[/* n */],
+ const uint8_t table_level[/* n*/], int n);
+
+/**
+ * Initialize index_run, max_level and max_run from n, last, table_vlc,
+ * table_run and table_level.
+ * @param static_store static uint8_t array[2][2*MAX_RUN + MAX_LEVEL + 3]
+ * to hold the level and run tables.
+ * @note This function does not touch rl_vlc at all, hence there is no need
+ * to synchronize calls to ff_rl_init() and ff_rl_init_vlc() using the
+ * same RLTable.
+ */
+void ff_rl_init(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3]);
/**
- * Free the contents of a dynamically allocated table.
+ * Initialize rl_vlc from n, last, table_vlc, table_run and table_level.
+ * All rl_vlc pointers to be initialized must already point to a static
+ * buffer of `static_size` RL_VLC_ELEM elements; if a pointer is NULL,
+ * initializing further VLCs stops.
+ * @note This function does not touch what ff_rl_init() initializes at all,
+ * hence there is no need to synchronize calls to ff_rl_init() and
+ * ff_rl_init_vlc() using the same RLTable.
*/
-void ff_rl_free(RLTable *rl);
+void ff_rl_init_vlc(RLTable *rl, unsigned static_size);
#define INIT_VLC_RL(rl, static_size)\
{\
- int q;\
static RL_VLC_ELEM rl_vlc_table[32][static_size];\
\
- if(!rl.rl_vlc[0]){\
- for(q=0; q<32; q++)\
- rl.rl_vlc[q]= rl_vlc_table[q];\
+ for (int q = 0; q < 32; q++) \
+ rl.rl_vlc[q] = rl_vlc_table[q]; \
\
- ff_rl_init_vlc(&rl, static_size);\
- }\
+ ff_rl_init_vlc(&rl, static_size); \
}
+#define INIT_FIRST_VLC_RL(rl, static_size) \
+do { \
+ static RL_VLC_ELEM rl_vlc_table[static_size]; \
+ \
+ rl.rl_vlc[0] = rl_vlc_table; \
+ ff_rl_init_vlc(&rl, static_size); \
+} while (0)
+
static inline int get_rl_index(const RLTable *rl, int last, int run, int level)
{
int index;
diff --git a/media/ffvpx/libavcodec/startcode.h b/media/ffvpx/libavcodec/startcode.h
new file mode 100644
index 0000000000..8b75832aaf
--- /dev/null
+++ b/media/ffvpx/libavcodec/startcode.h
@@ -0,0 +1,36 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Accelerated start code search function for start codes common to
+ * MPEG-1/2/4 video, VC-1, H.264/5
+ */
+
+#ifndef AVCODEC_STARTCODE_H
+#define AVCODEC_STARTCODE_H
+
+#include <stdint.h>
+
+const uint8_t *avpriv_find_start_code(const uint8_t *p,
+ const uint8_t *end,
+ uint32_t *state);
+
+int ff_startcode_find_candidate_c(const uint8_t *buf, int size);
+
+#endif /* AVCODEC_STARTCODE_H */
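
For reference, a short sketch (not part of the patch) of the usual calling pattern for avpriv_find_start_code(); example_scan() is a placeholder name.

#include <stdint.h>
#include "startcode.h"

static void example_scan(const uint8_t *buf, const uint8_t *end)
{
    while (buf < end) {
        uint32_t start_code = UINT32_MAX;
        buf = avpriv_find_start_code(buf, end, &start_code);
        if ((start_code & 0xFFFFFF00) != 0x100)
            break;                 /* no further start code in this buffer */
        /* start_code holds 0x000001xx; buf now points just past it. */
    }
}
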
diff --git a/media/ffvpx/libavcodec/thread.h b/media/ffvpx/libavcodec/thread.h
index 540135fbc9..88a14cfeb1 100644
--- a/media/ffvpx/libavcodec/thread.h
+++ b/media/ffvpx/libavcodec/thread.h
@@ -31,14 +31,6 @@
#include "avcodec.h"
-typedef struct ThreadFrame {
- AVFrame *f;
- AVCodecContext *owner[2];
- // progress->data is an array of 2 ints holding progress for top/bottom
- // fields
- AVBufferRef *progress;
-} ThreadFrame;
-
/**
* Wait for decoding threads to finish and reset internal state.
* Called by avcodec_flush_buffers().
@@ -52,10 +44,10 @@ void ff_thread_flush(AVCodecContext *avctx);
* Returns the next available frame in picture. *got_picture_ptr
* will be 0 if none is available.
* The return value on success is the size of the consumed packet for
- * compatibility with avcodec_decode_video2(). This means the decoder
+ * compatibility with FFCodec.decode. This means the decoder
* has to consume the full packet.
*
- * Parameters are the same as avcodec_decode_video2().
+ * Parameters are the same as FFCodec.decode.
*/
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr, AVPacket *avpkt);
@@ -70,41 +62,7 @@ int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture,
*/
void ff_thread_finish_setup(AVCodecContext *avctx);
-/**
- * Notify later decoding threads when part of their reference picture is ready.
- * Call this when some part of the picture is finished decoding.
- * Later calls with lower values of progress have no effect.
- *
- * @param f The picture being decoded.
- * @param progress Value, in arbitrary units, of how much of the picture has decoded.
- * @param field The field being decoded, for field-picture codecs.
- * 0 for top field or frame pictures, 1 for bottom field.
- */
-void ff_thread_report_progress(ThreadFrame *f, int progress, int field);
-
-/**
- * Wait for earlier decoding threads to finish reference pictures.
- * Call this before accessing some part of a picture, with a given
- * value for progress, and it will return after the responsible decoding
- * thread calls ff_thread_report_progress() with the same or
- * higher value for progress.
- *
- * @param f The picture being referenced.
- * @param progress Value, in arbitrary units, to wait for.
- * @param field The field being referenced, for field-picture codecs.
- * 0 for top field or frame pictures, 1 for bottom field.
- */
-void ff_thread_await_progress(ThreadFrame *f, int progress, int field);
-
-/**
- * Wrapper around get_format() for frame-multithreaded codecs.
- * Call this function instead of avctx->get_format().
- * Cannot be called after the codec has called ff_thread_finish_setup().
- *
- * @param avctx The current context.
- * @param fmt The list of available formats.
- */
-enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt);
+#define ff_thread_get_format ff_get_format
/**
* Wrapper around get_buffer() for frame-multithreaded codecs.
@@ -114,7 +72,7 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
* @param avctx The current context.
* @param f The frame to write into.
*/
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags);
/**
 * Wrapper around release_buffer() for frame-multithreaded codecs.
@@ -127,17 +85,15 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
* @param avctx The current context.
* @param f The picture being released.
*/
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f);
-
-int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src);
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f);
int ff_thread_init(AVCodecContext *s);
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx,
int (*action_func2)(AVCodecContext *c, void *arg, int jobnr, int threadnr),
int (*main_func)(AVCodecContext *c), void *arg, int *ret, int job_count);
void ff_thread_free(AVCodecContext *s);
-int ff_alloc_entries(AVCodecContext *avctx, int count);
-void ff_reset_entries(AVCodecContext *avctx);
+int ff_slice_thread_allocz_entries(AVCodecContext *avctx, int count);
+int ff_slice_thread_init_progress(AVCodecContext *avctx);
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n);
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift);
diff --git a/media/ffvpx/libavcodec/threadframe.h b/media/ffvpx/libavcodec/threadframe.h
new file mode 100644
index 0000000000..d2f93c5cd0
--- /dev/null
+++ b/media/ffvpx/libavcodec/threadframe.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2022 Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_THREADFRAME_H
+#define AVCODEC_THREADFRAME_H
+
+#include "libavutil/frame.h"
+#include "avcodec.h"
+
+typedef struct ThreadFrame {
+ AVFrame *f;
+ AVCodecContext *owner[2];
+ // progress->data is an array of 2 ints holding progress for top/bottom
+ // fields
+ AVBufferRef *progress;
+} ThreadFrame;
+
+/**
+ * Notify later decoding threads when part of their reference picture is ready.
+ * Call this when some part of the picture is finished decoding.
+ * Later calls with lower values of progress have no effect.
+ *
+ * @param f The picture being decoded.
+ * @param progress Value, in arbitrary units, of how much of the picture has decoded.
+ * @param field The field being decoded, for field-picture codecs.
+ * 0 for top field or frame pictures, 1 for bottom field.
+ */
+void ff_thread_report_progress(ThreadFrame *f, int progress, int field);
+
+/**
+ * Wait for earlier decoding threads to finish reference pictures.
+ * Call this before accessing some part of a picture, with a given
+ * value for progress, and it will return after the responsible decoding
+ * thread calls ff_thread_report_progress() with the same or
+ * higher value for progress.
+ *
+ * @param f The picture being referenced.
+ * @param progress Value, in arbitrary units, to wait for.
+ * @param field The field being referenced, for field-picture codecs.
+ * 0 for top field or frame pictures, 1 for bottom field.
+ */
+void ff_thread_await_progress(const ThreadFrame *f, int progress, int field);
+
+/**
+ * Wrapper around ff_get_buffer() for frame-multithreaded codecs.
+ * Call this function instead of ff_get_buffer() if you might need
+ * to wait for progress on this frame.
+ * Cannot be called after the codec has called ff_thread_finish_setup().
+ *
+ * @param avctx The current context.
+ * @param f The frame to write into.
+ * @note: It is fine to call this with codecs that do not support
+ * frame threading.
+ */
+int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
+
+/**
+ * Unref a ThreadFrame.
+ *
+ * This is basically a wrapper around av_frame_unref() and should
+ * be called instead of it.
+ *
+ * @param avctx The current context.
+ * @param f The picture being released.
+ */
+void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f);
+
+int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
+
+int ff_thread_can_start_frame(AVCodecContext *avctx);
+
+#endif
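
A minimal sketch (not part of the patch) of how another decoding thread references a picture through this header; example_use_reference() and last_row_needed are placeholders, and ref->f is assumed to be a freshly allocated, empty AVFrame.

#include "libavutil/frame.h"
#include "threadframe.h"

static int example_use_reference(ThreadFrame *ref, const ThreadFrame *src,
                                 int last_row_needed)
{
    /* Takes a new reference to src->f and its progress buffer. */
    int ret = ff_thread_ref_frame(ref, src);
    if (ret < 0)
        return ret;

    /* Block until the owning thread has reported at least this row
     * (field 0 covers frame pictures and top fields). */
    ff_thread_await_progress(ref, last_row_needed, 0);

    /* ... read prediction data from ref->f; release with
     * ff_thread_release_ext_buffer() when done ... */
    return 0;
}
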
diff --git a/media/ffvpx/libavcodec/utils.c b/media/ffvpx/libavcodec/utils.c
index bdd9a60c39..599da21dba 100644
--- a/media/ffvpx/libavcodec/utils.c
+++ b/media/ffvpx/libavcodec/utils.c
@@ -26,46 +26,25 @@
*/
#include "config.h"
-#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
-#include "libavutil/avstring.h"
-#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
-#include "libavutil/crc.h"
-#include "libavutil/frame.h"
-#include "libavutil/hwcontext.h"
-#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/mem_internal.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
-#include "libavutil/samplefmt.h"
-#include "libavutil/dict.h"
-#include "libavutil/thread.h"
+#include "libavutil/pixfmt.h"
#include "avcodec.h"
+#include "codec.h"
+#include "codec_internal.h"
#include "decode.h"
-#include "hwaccel.h"
-#include "libavutil/opt.h"
-#include "mpegvideo.h"
+#include "hwconfig.h"
#include "thread.h"
-#include "frame_thread_encoder.h"
+#include "threadframe.h"
#include "internal.h"
-#include "raw.h"
-#include "bytestream.h"
-#include "version.h"
+#include "put_bits.h"
+#include "startcode.h"
#include <stdlib.h>
-#include <stdarg.h>
-#include <stdatomic.h>
#include <limits.h>
-#include <float.h>
-#if CONFIG_ICONV
-# include <iconv.h>
-#endif
-
-#include "libavutil/ffversion.h"
-const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
-
-static AVMutex codec_mutex = AV_MUTEX_INITIALIZER;
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
{
@@ -75,7 +54,8 @@ void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
*size = 0;
return;
}
- if (!ff_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE, 1))
+ av_fast_mallocz(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (*p)
memset(*p + min_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
}
@@ -87,18 +67,25 @@ void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
*size = 0;
return;
}
- if (!ff_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE, 1))
+ av_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (*p)
memset(*p, 0, min_size + AV_INPUT_BUFFER_PADDING_SIZE);
}
-int av_codec_is_encoder(const AVCodec *codec)
+int av_codec_is_encoder(const AVCodec *avcodec)
{
- return codec && (codec->encode_sub || codec->encode2 ||codec->send_frame);
+ const FFCodec *const codec = ffcodec(avcodec);
+ return codec && (codec->cb_type == FF_CODEC_CB_TYPE_ENCODE ||
+ codec->cb_type == FF_CODEC_CB_TYPE_ENCODE_SUB ||
+ codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_PACKET);
}
-int av_codec_is_decoder(const AVCodec *codec)
+int av_codec_is_decoder(const AVCodec *avcodec)
{
- return codec && (codec->decode || codec->receive_frame);
+ const FFCodec *const codec = ffcodec(avcodec);
+ return codec && (codec->cb_type == FF_CODEC_CB_TYPE_DECODE ||
+ codec->cb_type == FF_CODEC_CB_TYPE_DECODE_SUB ||
+ codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME);
}
int ff_set_dimensions(AVCodecContext *s, int width, int height)
@@ -256,6 +243,8 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
case AV_PIX_FMT_GBRAP16BE:
w_align = 16; //FIXME assume 16 pixel per macroblock
h_align = 16 * 2; // interlaced needs 2 macroblocks height
+ if (s->codec_id == AV_CODEC_ID_BINKVIDEO)
+ w_align = 16*2;
break;
case AV_PIX_FMT_YUV411P:
case AV_PIX_FMT_YUVJ411P:
@@ -288,6 +277,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
h_align = 4;
}
if (s->codec_id == AV_CODEC_ID_JV ||
+ s->codec_id == AV_CODEC_ID_ARGO ||
s->codec_id == AV_CODEC_ID_INTERPLAY_VIDEO) {
w_align = 8;
h_align = 8;
@@ -316,6 +306,12 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
h_align = 4;
}
break;
+ case AV_PIX_FMT_BGR0:
+ if (s->codec_id == AV_CODEC_ID_ARGO) {
+ w_align = 8;
+ h_align = 8;
+ }
+ break;
default:
break;
}
@@ -327,6 +323,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
*width = FFALIGN(*width, w_align);
*height = FFALIGN(*height, h_align);
if (s->codec_id == AV_CODEC_ID_H264 || s->lowres ||
+ s->codec_id == AV_CODEC_ID_VC1 || s->codec_id == AV_CODEC_ID_WMV3 ||
s->codec_id == AV_CODEC_ID_VP5 || s->codec_id == AV_CODEC_ID_VP6 ||
s->codec_id == AV_CODEC_ID_VP6F || s->codec_id == AV_CODEC_ID_VP6A
) {
@@ -340,6 +337,9 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
// the next rounded up width is 32
*width = FFMAX(*width, 32);
}
+ if (s->codec_id == AV_CODEC_ID_SVQ3) {
+ *width = FFMAX(*width, 32);
+ }
for (i = 0; i < 4; i++)
linesize_align[i] = STRIDE_ALIGN;
@@ -359,29 +359,17 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height)
align = FFMAX3(align, linesize_align[1], linesize_align[2]);
*width = FFALIGN(*width, align);
}
-
+#if FF_API_AVCODEC_CHROMA_POS
int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos)
{
- if (pos <= AVCHROMA_LOC_UNSPECIFIED || pos >= AVCHROMA_LOC_NB)
- return AVERROR(EINVAL);
- pos--;
-
- *xpos = (pos&1) * 128;
- *ypos = ((pos>>1)^(pos<4)) * 128;
-
- return 0;
+ return av_chroma_location_enum_to_pos(xpos, ypos, pos);
}
enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos)
{
- int pos, xout, yout;
-
- for (pos = AVCHROMA_LOC_UNSPECIFIED + 1; pos < AVCHROMA_LOC_NB; pos++) {
- if (avcodec_enum_to_chroma_pos(&xout, &yout, pos) == 0 && xout == xpos && yout == ypos)
- return pos;
- }
- return AVCHROMA_LOC_UNSPECIFIED;
+ return av_chroma_location_pos_to_enum(xpos, ypos);
}
+#endif
int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
enum AVSampleFormat sample_fmt, const uint8_t *buf,
@@ -397,8 +385,7 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
planar = av_sample_fmt_is_planar(sample_fmt);
if (planar && nb_channels > AV_NUM_DATA_POINTERS) {
- if (!(frame->extended_data = av_mallocz_array(nb_channels,
- sizeof(*frame->extended_data))))
+ if (!FF_ALLOCZ_TYPED_ARRAY(frame->extended_data, nb_channels))
return AVERROR(ENOMEM);
} else {
frame->extended_data = frame->data;
@@ -448,742 +435,14 @@ void ff_color_frame(AVFrame *frame, const int c[4])
}
}
-int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- int r = func(c, (char *)arg + i * size);
- if (ret)
- ret[i] = r;
- }
- emms_c();
- return 0;
-}
-
-int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- int r = func(c, arg, i, 0);
- if (ret)
- ret[i] = r;
- }
- emms_c();
- return 0;
-}
-
-enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags,
- unsigned int fourcc)
-{
- while (tags->pix_fmt >= 0) {
- if (tags->fourcc == fourcc)
- return tags->pix_fmt;
- tags++;
- }
- return AV_PIX_FMT_NONE;
-}
-
-#if FF_API_CODEC_GET_SET
-MAKE_ACCESSORS(AVCodecContext, codec, AVRational, pkt_timebase)
-MAKE_ACCESSORS(AVCodecContext, codec, const AVCodecDescriptor *, codec_descriptor)
-MAKE_ACCESSORS(AVCodecContext, codec, int, lowres)
-MAKE_ACCESSORS(AVCodecContext, codec, int, seek_preroll)
-MAKE_ACCESSORS(AVCodecContext, codec, uint16_t*, chroma_intra_matrix)
-
-unsigned av_codec_get_codec_properties(const AVCodecContext *codec)
-{
- return codec->properties;
-}
-
-int av_codec_get_max_lowres(const AVCodec *codec)
-{
- return codec->max_lowres;
-}
-#endif
-
int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec){
- return !!(codec->caps_internal & FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM);
-}
-
-static int64_t get_bit_rate(AVCodecContext *ctx)
-{
- int64_t bit_rate;
- int bits_per_sample;
-
- switch (ctx->codec_type) {
- case AVMEDIA_TYPE_VIDEO:
- case AVMEDIA_TYPE_DATA:
- case AVMEDIA_TYPE_SUBTITLE:
- case AVMEDIA_TYPE_ATTACHMENT:
- bit_rate = ctx->bit_rate;
- break;
- case AVMEDIA_TYPE_AUDIO:
- bits_per_sample = av_get_bits_per_sample(ctx->codec_id);
- if (bits_per_sample) {
- bit_rate = ctx->sample_rate * (int64_t)ctx->channels;
- if (bit_rate > INT64_MAX / bits_per_sample) {
- bit_rate = 0;
- } else
- bit_rate *= bits_per_sample;
- } else
- bit_rate = ctx->bit_rate;
- break;
- default:
- bit_rate = 0;
- break;
- }
- return bit_rate;
-}
-
-
-static void ff_lock_avcodec(AVCodecContext *log_ctx, const AVCodec *codec)
-{
- if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init)
- ff_mutex_lock(&codec_mutex);
-}
-
-static void ff_unlock_avcodec(const AVCodec *codec)
-{
- if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init)
- ff_mutex_unlock(&codec_mutex);
-}
-
-int attribute_align_arg ff_codec_open2_recursive(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
-{
- int ret = 0;
-
- ff_unlock_avcodec(codec);
-
- ret = avcodec_open2(avctx, codec, options);
-
- ff_lock_avcodec(avctx, codec);
- return ret;
-}
-
-int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
-{
- int ret = 0;
- int codec_init_ok = 0;
- AVDictionary *tmp = NULL;
- const AVPixFmtDescriptor *pixdesc;
-
- if (avcodec_is_open(avctx))
- return 0;
-
- if ((!codec && !avctx->codec)) {
- av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n");
- return AVERROR(EINVAL);
- }
- if ((codec && avctx->codec && codec != avctx->codec)) {
- av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
- "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name);
- return AVERROR(EINVAL);
- }
- if (!codec)
- codec = avctx->codec;
-
- if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
- return AVERROR(EINVAL);
-
- if (options)
- av_dict_copy(&tmp, *options, 0);
-
- ff_lock_avcodec(avctx, codec);
-
- avctx->internal = av_mallocz(sizeof(*avctx->internal));
- if (!avctx->internal) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
-
- avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool));
- if (!avctx->internal->pool) {
- ret = AVERROR(ENOMEM);
- goto free_and_end;
- }
-
- avctx->internal->to_free = av_frame_alloc();
- if (!avctx->internal->to_free) {
- ret = AVERROR(ENOMEM);
- goto free_and_end;
- }
-
- avctx->internal->compat_decode_frame = av_frame_alloc();
- if (!avctx->internal->compat_decode_frame) {
- ret = AVERROR(ENOMEM);
- goto free_and_end;
- }
-
- avctx->internal->buffer_frame = av_frame_alloc();
- if (!avctx->internal->buffer_frame) {
- ret = AVERROR(ENOMEM);
- goto free_and_end;
- }
-
- avctx->internal->buffer_pkt = av_packet_alloc();
- if (!avctx->internal->buffer_pkt) {
- ret = AVERROR(ENOMEM);
- goto free_and_end;
- }
-
- avctx->internal->ds.in_pkt = av_packet_alloc();
- if (!avctx->internal->ds.in_pkt) {
- ret = AVERROR(ENOMEM);
- goto free_and_end;
- }
-
- avctx->internal->last_pkt_props = av_packet_alloc();
- if (!avctx->internal->last_pkt_props) {
- ret = AVERROR(ENOMEM);
- goto free_and_end;
- }
-
- avctx->internal->skip_samples_multiplier = 1;
-
- if (codec->priv_data_size > 0) {
- if (!avctx->priv_data) {
- avctx->priv_data = av_mallocz(codec->priv_data_size);
- if (!avctx->priv_data) {
- ret = AVERROR(ENOMEM);
- goto end;
- }
- if (codec->priv_class) {
- *(const AVClass **)avctx->priv_data = codec->priv_class;
- av_opt_set_defaults(avctx->priv_data);
- }
- }
- if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0)
- goto free_and_end;
- } else {
- avctx->priv_data = NULL;
- }
- if ((ret = av_opt_set_dict(avctx, &tmp)) < 0)
- goto free_and_end;
-
- if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) {
- av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist \'%s\'\n", codec->name, avctx->codec_whitelist);
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
-
- // only call ff_set_dimensions() for non H.264/VP6F/DXV codecs so as not to overwrite previously setup dimensions
- if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height &&
- (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) {
- if (avctx->coded_width && avctx->coded_height)
- ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
- else if (avctx->width && avctx->height)
- ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
- if (ret < 0)
- goto free_and_end;
- }
-
- if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
- && ( av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0
- || av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)) {
- av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n");
- ff_set_dimensions(avctx, 0, 0);
- }
-
- if (avctx->width > 0 && avctx->height > 0) {
- if (av_image_check_sar(avctx->width, avctx->height,
- avctx->sample_aspect_ratio) < 0) {
- av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
- avctx->sample_aspect_ratio.num,
- avctx->sample_aspect_ratio.den);
- avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
- }
- }
-
- /* if the decoder init function was already called previously,
- * free the already allocated subtitle_header before overwriting it */
- if (av_codec_is_decoder(codec))
- av_freep(&avctx->subtitle_header);
-
- if (avctx->channels > FF_SANE_NB_CHANNELS || avctx->channels < 0) {
- av_log(avctx, AV_LOG_ERROR, "Too many or invalid channels: %d\n", avctx->channels);
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- if (avctx->sample_rate < 0) {
- av_log(avctx, AV_LOG_ERROR, "Invalid sample rate: %d\n", avctx->sample_rate);
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- if (avctx->block_align < 0) {
- av_log(avctx, AV_LOG_ERROR, "Invalid block align: %d\n", avctx->block_align);
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
-
- avctx->codec = codec;
- if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) &&
- avctx->codec_id == AV_CODEC_ID_NONE) {
- avctx->codec_type = codec->type;
- avctx->codec_id = codec->id;
- }
- if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
- && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
- av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n");
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- avctx->frame_number = 0;
- avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);
-
- if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) &&
- avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
- const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder";
- AVCodec *codec2;
- av_log(avctx, AV_LOG_ERROR,
- "The %s '%s' is experimental but experimental codecs are not enabled, "
- "add '-strict %d' if you want to use it.\n",
- codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL);
- codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id);
- if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
- av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n",
- codec_string, codec2->name);
- ret = AVERROR_EXPERIMENTAL;
- goto free_and_end;
- }
-
- if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
- (!avctx->time_base.num || !avctx->time_base.den)) {
- avctx->time_base.num = 1;
- avctx->time_base.den = avctx->sample_rate;
- }
-
- if (!HAVE_THREADS)
- av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n");
-
- if (CONFIG_FRAME_THREAD_ENCODER && av_codec_is_encoder(avctx->codec)) {
- ff_unlock_avcodec(codec); //we will instantiate a few encoders thus kick the counter to prevent false detection of a problem
- ret = ff_frame_thread_encoder_init(avctx, options ? *options : NULL);
- ff_lock_avcodec(avctx, codec);
- if (ret < 0)
- goto free_and_end;
- }
-
- if (av_codec_is_decoder(avctx->codec)) {
- ret = ff_decode_bsfs_init(avctx);
- if (ret < 0)
- goto free_and_end;
- }
-
- if (HAVE_THREADS
- && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) {
- ret = ff_thread_init(avctx);
- if (ret < 0) {
- goto free_and_end;
- }
- }
- if (!HAVE_THREADS && !(codec->capabilities & AV_CODEC_CAP_AUTO_THREADS))
- avctx->thread_count = 1;
-
- if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) {
- av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
- avctx->codec->max_lowres);
- avctx->lowres = avctx->codec->max_lowres;
- }
-
- if (av_codec_is_encoder(avctx->codec)) {
- int i;
-#if FF_API_CODED_FRAME
-FF_DISABLE_DEPRECATION_WARNINGS
- avctx->coded_frame = av_frame_alloc();
- if (!avctx->coded_frame) {
- ret = AVERROR(ENOMEM);
- goto free_and_end;
- }
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
- if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
- av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
-
- if (avctx->codec->sample_fmts) {
- for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
- if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
- break;
- if (avctx->channels == 1 &&
- av_get_planar_sample_fmt(avctx->sample_fmt) ==
- av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
- avctx->sample_fmt = avctx->codec->sample_fmts[i];
- break;
- }
- }
- if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
- char buf[128];
- snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
- av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
- (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- }
- if (avctx->codec->pix_fmts) {
- for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
- if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
- break;
- if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE
- && !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG)
- && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) {
- char buf[128];
- snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
- av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
- (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
- avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
- avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
- avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
- avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
- avctx->color_range = AVCOL_RANGE_JPEG;
- }
- if (avctx->codec->supported_samplerates) {
- for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
- if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
- break;
- if (avctx->codec->supported_samplerates[i] == 0) {
- av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
- avctx->sample_rate);
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- }
- if (avctx->sample_rate < 0) {
- av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
- avctx->sample_rate);
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- if (avctx->codec->channel_layouts) {
- if (!avctx->channel_layout) {
- av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n");
- } else {
- for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
- if (avctx->channel_layout == avctx->codec->channel_layouts[i])
- break;
- if (avctx->codec->channel_layouts[i] == 0) {
- char buf[512];
- av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
- av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- }
- }
- if (avctx->channel_layout && avctx->channels) {
- int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
- if (channels != avctx->channels) {
- char buf[512];
- av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
- av_log(avctx, AV_LOG_ERROR,
- "Channel layout '%s' with %d channels does not match number of specified channels %d\n",
- buf, channels, avctx->channels);
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- } else if (avctx->channel_layout) {
- avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
- }
- if (avctx->channels < 0) {
- av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n",
- avctx->channels);
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- if(avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
- pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
- if ( avctx->bits_per_raw_sample < 0
- || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
- av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
- avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
- avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
- }
- if (avctx->width <= 0 || avctx->height <= 0) {
- av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- }
- if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
- && avctx->bit_rate>0 && avctx->bit_rate<1000) {
- av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
- }
-
- if (!avctx->rc_initial_buffer_occupancy)
- avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;
-
- if (avctx->ticks_per_frame && avctx->time_base.num &&
- avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
- av_log(avctx, AV_LOG_ERROR,
- "ticks_per_frame %d too large for the timebase %d/%d.",
- avctx->ticks_per_frame,
- avctx->time_base.num,
- avctx->time_base.den);
- goto free_and_end;
- }
-
- if (avctx->hw_frames_ctx) {
- AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
- if (frames_ctx->format != avctx->pix_fmt) {
- av_log(avctx, AV_LOG_ERROR,
- "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE &&
- avctx->sw_pix_fmt != frames_ctx->sw_format) {
- av_log(avctx, AV_LOG_ERROR,
- "Mismatching AVCodecContext.sw_pix_fmt (%s) "
- "and AVHWFramesContext.sw_format (%s)\n",
- av_get_pix_fmt_name(avctx->sw_pix_fmt),
- av_get_pix_fmt_name(frames_ctx->sw_format));
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- avctx->sw_pix_fmt = frames_ctx->sw_format;
- }
- }
-
- avctx->pts_correction_num_faulty_pts =
- avctx->pts_correction_num_faulty_dts = 0;
- avctx->pts_correction_last_pts =
- avctx->pts_correction_last_dts = INT64_MIN;
-
- if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY
- && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO)
- av_log(avctx, AV_LOG_WARNING,
- "gray decoding requested but not enabled at configuration time\n");
-
- if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME)
- || avctx->internal->frame_thread_encoder)) {
- ret = avctx->codec->init(avctx);
- if (ret < 0) {
- goto free_and_end;
- }
- codec_init_ok = 1;
- }
-
- ret=0;
-
- if (av_codec_is_decoder(avctx->codec)) {
- if (!avctx->bit_rate)
- avctx->bit_rate = get_bit_rate(avctx);
- /* validate channel layout from the decoder */
- if (avctx->channel_layout) {
- int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
- if (!avctx->channels)
- avctx->channels = channels;
- else if (channels != avctx->channels) {
- char buf[512];
- av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
- av_log(avctx, AV_LOG_WARNING,
- "Channel layout '%s' with %d channels does not match specified number of channels %d: "
- "ignoring specified channel layout\n",
- buf, channels, avctx->channels);
- avctx->channel_layout = 0;
- }
- }
- if (avctx->channels && avctx->channels < 0 ||
- avctx->channels > FF_SANE_NB_CHANNELS) {
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- if (avctx->bits_per_coded_sample < 0) {
- ret = AVERROR(EINVAL);
- goto free_and_end;
- }
- if (avctx->sub_charenc) {
- if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) {
- av_log(avctx, AV_LOG_ERROR, "Character encoding is only "
- "supported with subtitles codecs\n");
- ret = AVERROR(EINVAL);
- goto free_and_end;
- } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) {
- av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, "
- "subtitles character encoding will be ignored\n",
- avctx->codec_descriptor->name);
- avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING;
- } else {
- /* input character encoding is set for a text based subtitle
- * codec at this point */
- if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC)
- avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER;
-
- if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) {
-#if CONFIG_ICONV
- iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc);
- if (cd == (iconv_t)-1) {
- ret = AVERROR(errno);
- av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context "
- "with input character encoding \"%s\"\n", avctx->sub_charenc);
- goto free_and_end;
- }
- iconv_close(cd);
-#else
- av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles "
- "conversion needs a libavcodec built with iconv support "
- "for this codec\n");
- ret = AVERROR(ENOSYS);
- goto free_and_end;
-#endif
- }
- }
- }
-
-#if FF_API_AVCTX_TIMEBASE
- if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
- avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
-#endif
- }
- if (codec->priv_data_size > 0 && avctx->priv_data && codec->priv_class) {
- av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class);
- }
-
-end:
- ff_unlock_avcodec(codec);
- if (options) {
- av_dict_free(options);
- *options = tmp;
- }
-
- return ret;
-free_and_end:
- if (avctx->codec && avctx->codec->close &&
- (codec_init_ok ||
- (avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP)))
- avctx->codec->close(avctx);
-
- if (HAVE_THREADS && avctx->internal->thread_ctx)
- ff_thread_free(avctx);
-
- if (codec->priv_class && codec->priv_data_size)
- av_opt_free(avctx->priv_data);
- av_opt_free(avctx);
-
-#if FF_API_CODED_FRAME
-FF_DISABLE_DEPRECATION_WARNINGS
- av_frame_free(&avctx->coded_frame);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
- av_dict_free(&tmp);
- av_freep(&avctx->priv_data);
- av_freep(&avctx->subtitle_header);
- if (avctx->internal) {
- av_frame_free(&avctx->internal->to_free);
- av_frame_free(&avctx->internal->compat_decode_frame);
- av_frame_free(&avctx->internal->buffer_frame);
- av_packet_free(&avctx->internal->buffer_pkt);
- av_packet_free(&avctx->internal->last_pkt_props);
-
- av_packet_free(&avctx->internal->ds.in_pkt);
- ff_decode_bsfs_uninit(avctx);
-
- av_freep(&avctx->internal->pool);
- }
- av_freep(&avctx->internal);
- avctx->codec = NULL;
- goto end;
-}
-
-void avsubtitle_free(AVSubtitle *sub)
-{
- int i;
-
- for (i = 0; i < sub->num_rects; i++) {
- av_freep(&sub->rects[i]->data[0]);
- av_freep(&sub->rects[i]->data[1]);
- av_freep(&sub->rects[i]->data[2]);
- av_freep(&sub->rects[i]->data[3]);
- av_freep(&sub->rects[i]->text);
- av_freep(&sub->rects[i]->ass);
- av_freep(&sub->rects[i]);
- }
-
- av_freep(&sub->rects);
-
- memset(sub, 0, sizeof(*sub));
-}
-
-av_cold int avcodec_close(AVCodecContext *avctx)
-{
- int i;
-
- if (!avctx)
- return 0;
-
- if (avcodec_is_open(avctx)) {
- FramePool *pool = avctx->internal->pool;
- if (CONFIG_FRAME_THREAD_ENCODER &&
- avctx->internal->frame_thread_encoder && avctx->thread_count > 1) {
- ff_frame_thread_encoder_free(avctx);
- }
- if (HAVE_THREADS && avctx->internal->thread_ctx)
- ff_thread_free(avctx);
- if (avctx->codec && avctx->codec->close)
- avctx->codec->close(avctx);
- avctx->internal->byte_buffer_size = 0;
- av_freep(&avctx->internal->byte_buffer);
- av_frame_free(&avctx->internal->to_free);
- av_frame_free(&avctx->internal->compat_decode_frame);
- av_frame_free(&avctx->internal->buffer_frame);
- av_packet_free(&avctx->internal->buffer_pkt);
- av_packet_free(&avctx->internal->last_pkt_props);
-
- av_packet_free(&avctx->internal->ds.in_pkt);
-
- for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
- av_buffer_pool_uninit(&pool->pools[i]);
- av_freep(&avctx->internal->pool);
-
- if (avctx->hwaccel && avctx->hwaccel->uninit)
- avctx->hwaccel->uninit(avctx);
- av_freep(&avctx->internal->hwaccel_priv_data);
-
- ff_decode_bsfs_uninit(avctx);
-
- av_freep(&avctx->internal);
- }
-
- for (i = 0; i < avctx->nb_coded_side_data; i++)
- av_freep(&avctx->coded_side_data[i].data);
- av_freep(&avctx->coded_side_data);
- avctx->nb_coded_side_data = 0;
-
- av_buffer_unref(&avctx->hw_frames_ctx);
- av_buffer_unref(&avctx->hw_device_ctx);
-
- if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
- av_opt_free(avctx->priv_data);
- av_opt_free(avctx);
- av_freep(&avctx->priv_data);
- if (av_codec_is_encoder(avctx->codec)) {
- av_freep(&avctx->extradata);
-#if FF_API_CODED_FRAME
-FF_DISABLE_DEPRECATION_WARNINGS
- av_frame_free(&avctx->coded_frame);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
- }
- avctx->codec = NULL;
- avctx->active_thread_type = 0;
-
- return 0;
+ return !!(ffcodec(codec)->caps_internal & FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM);
}
const char *avcodec_get_name(enum AVCodecID id)
{
const AVCodecDescriptor *cd;
- AVCodec *codec;
+ const AVCodec *codec;
if (id == AV_CODEC_ID_NONE)
return "none";
@@ -1200,221 +459,6 @@ const char *avcodec_get_name(enum AVCodecID id)
return "unknown_codec";
}
-size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag)
-{
- int i, len, ret = 0;
-
-#define TAG_PRINT(x) \
- (((x) >= '0' && (x) <= '9') || \
- ((x) >= 'a' && (x) <= 'z') || ((x) >= 'A' && (x) <= 'Z') || \
- ((x) == '.' || (x) == ' ' || (x) == '-' || (x) == '_'))
-
- for (i = 0; i < 4; i++) {
- len = snprintf(buf, buf_size,
- TAG_PRINT(codec_tag & 0xFF) ? "%c" : "[%d]", codec_tag & 0xFF);
- buf += len;
- buf_size = buf_size > len ? buf_size - len : 0;
- ret += len;
- codec_tag >>= 8;
- }
- return ret;
-}
-
-void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
-{
- const char *codec_type;
- const char *codec_name;
- const char *profile = NULL;
- int64_t bitrate;
- int new_line = 0;
- AVRational display_aspect_ratio;
- const char *separator = enc->dump_separator ? (const char *)enc->dump_separator : ", ";
-
- if (!buf || buf_size <= 0)
- return;
- codec_type = av_get_media_type_string(enc->codec_type);
- codec_name = avcodec_get_name(enc->codec_id);
- profile = avcodec_profile_name(enc->codec_id, enc->profile);
-
- snprintf(buf, buf_size, "%s: %s", codec_type ? codec_type : "unknown",
- codec_name);
- buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */
-
- if (enc->codec && strcmp(enc->codec->name, codec_name))
- snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", enc->codec->name);
-
- if (profile)
- snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", profile);
- if ( enc->codec_type == AVMEDIA_TYPE_VIDEO
- && av_log_get_level() >= AV_LOG_VERBOSE
- && enc->refs)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", %d reference frame%s",
- enc->refs, enc->refs > 1 ? "s" : "");
-
- if (enc->codec_tag)
- snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s / 0x%04X)",
- av_fourcc2str(enc->codec_tag), enc->codec_tag);
-
- switch (enc->codec_type) {
- case AVMEDIA_TYPE_VIDEO:
- {
- char detail[256] = "(";
-
- av_strlcat(buf, separator, buf_size);
-
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- "%s", enc->pix_fmt == AV_PIX_FMT_NONE ? "none" :
- av_get_pix_fmt_name(enc->pix_fmt));
- if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE &&
- enc->bits_per_raw_sample < av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth)
- av_strlcatf(detail, sizeof(detail), "%d bpc, ", enc->bits_per_raw_sample);
- if (enc->color_range != AVCOL_RANGE_UNSPECIFIED)
- av_strlcatf(detail, sizeof(detail), "%s, ",
- av_color_range_name(enc->color_range));
-
- if (enc->colorspace != AVCOL_SPC_UNSPECIFIED ||
- enc->color_primaries != AVCOL_PRI_UNSPECIFIED ||
- enc->color_trc != AVCOL_TRC_UNSPECIFIED) {
- if (enc->colorspace != (int)enc->color_primaries ||
- enc->colorspace != (int)enc->color_trc) {
- new_line = 1;
- av_strlcatf(detail, sizeof(detail), "%s/%s/%s, ",
- av_color_space_name(enc->colorspace),
- av_color_primaries_name(enc->color_primaries),
- av_color_transfer_name(enc->color_trc));
- } else
- av_strlcatf(detail, sizeof(detail), "%s, ",
- av_get_colorspace_name(enc->colorspace));
- }
-
- if (enc->field_order != AV_FIELD_UNKNOWN) {
- const char *field_order = "progressive";
- if (enc->field_order == AV_FIELD_TT)
- field_order = "top first";
- else if (enc->field_order == AV_FIELD_BB)
- field_order = "bottom first";
- else if (enc->field_order == AV_FIELD_TB)
- field_order = "top coded first (swapped)";
- else if (enc->field_order == AV_FIELD_BT)
- field_order = "bottom coded first (swapped)";
-
- av_strlcatf(detail, sizeof(detail), "%s, ", field_order);
- }
-
- if (av_log_get_level() >= AV_LOG_VERBOSE &&
- enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED)
- av_strlcatf(detail, sizeof(detail), "%s, ",
- av_chroma_location_name(enc->chroma_sample_location));
-
- if (strlen(detail) > 1) {
- detail[strlen(detail) - 2] = 0;
- av_strlcatf(buf, buf_size, "%s)", detail);
- }
- }
-
- if (enc->width) {
- av_strlcat(buf, new_line ? separator : ", ", buf_size);
-
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- "%dx%d",
- enc->width, enc->height);
-
- if (av_log_get_level() >= AV_LOG_VERBOSE &&
- (enc->width != enc->coded_width ||
- enc->height != enc->coded_height))
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- " (%dx%d)", enc->coded_width, enc->coded_height);
-
- if (enc->sample_aspect_ratio.num) {
- av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
- enc->width * (int64_t)enc->sample_aspect_ratio.num,
- enc->height * (int64_t)enc->sample_aspect_ratio.den,
- 1024 * 1024);
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- " [SAR %d:%d DAR %d:%d]",
- enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den,
- display_aspect_ratio.num, display_aspect_ratio.den);
- }
- if (av_log_get_level() >= AV_LOG_DEBUG) {
- int g = av_gcd(enc->time_base.num, enc->time_base.den);
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", %d/%d",
- enc->time_base.num / g, enc->time_base.den / g);
- }
- }
- if (encode) {
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", q=%d-%d", enc->qmin, enc->qmax);
- } else {
- if (enc->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", Closed Captions");
- if (enc->properties & FF_CODEC_PROPERTY_LOSSLESS)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", lossless");
- }
- break;
- case AVMEDIA_TYPE_AUDIO:
- av_strlcat(buf, separator, buf_size);
-
- if (enc->sample_rate) {
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- "%d Hz, ", enc->sample_rate);
- }
- av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout);
- if (enc->sample_fmt != AV_SAMPLE_FMT_NONE) {
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", %s", av_get_sample_fmt_name(enc->sample_fmt));
- }
- if ( enc->bits_per_raw_sample > 0
- && enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- " (%d bit)", enc->bits_per_raw_sample);
- if (av_log_get_level() >= AV_LOG_VERBOSE) {
- if (enc->initial_padding)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", delay %d", enc->initial_padding);
- if (enc->trailing_padding)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", padding %d", enc->trailing_padding);
- }
- break;
- case AVMEDIA_TYPE_DATA:
- if (av_log_get_level() >= AV_LOG_DEBUG) {
- int g = av_gcd(enc->time_base.num, enc->time_base.den);
- if (g)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", %d/%d",
- enc->time_base.num / g, enc->time_base.den / g);
- }
- break;
- case AVMEDIA_TYPE_SUBTITLE:
- if (enc->width)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", %dx%d", enc->width, enc->height);
- break;
- default:
- return;
- }
- if (encode) {
- if (enc->flags & AV_CODEC_FLAG_PASS1)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", pass 1");
- if (enc->flags & AV_CODEC_FLAG_PASS2)
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", pass 2");
- }
- bitrate = get_bit_rate(enc);
- if (bitrate != 0) {
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", %"PRId64" kb/s", bitrate / 1000);
- } else if (enc->rc_max_rate > 0) {
- snprintf(buf + strlen(buf), buf_size - strlen(buf),
- ", max. %"PRId64" kb/s", enc->rc_max_rate / 1000);
- }
-}
-
const char *av_get_profile_name(const AVCodec *codec, int profile)
{
const AVProfile *p;
@@ -1443,37 +487,23 @@ const char *avcodec_profile_name(enum AVCodecID codec_id, int profile)
return NULL;
}
-unsigned avcodec_version(void)
-{
- av_assert0(AV_CODEC_ID_PCM_S8_PLANAR==65563);
- av_assert0(AV_CODEC_ID_ADPCM_G722==69660);
- av_assert0(AV_CODEC_ID_SRT==94216);
- av_assert0(LIBAVCODEC_VERSION_MICRO >= 100);
-
- return LIBAVCODEC_VERSION_INT;
-}
-
-const char *avcodec_configuration(void)
-{
- return FFMPEG_CONFIGURATION;
-}
-
-const char *avcodec_license(void)
-{
-#define LICENSE_PREFIX "libavcodec license: "
- return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
-}
-
int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
{
switch (codec_id) {
+ case AV_CODEC_ID_DFPWM:
+ return 1;
case AV_CODEC_ID_8SVX_EXP:
case AV_CODEC_ID_8SVX_FIB:
+ case AV_CODEC_ID_ADPCM_ARGO:
case AV_CODEC_ID_ADPCM_CT:
+ case AV_CODEC_ID_ADPCM_IMA_ALP:
+ case AV_CODEC_ID_ADPCM_IMA_AMV:
case AV_CODEC_ID_ADPCM_IMA_APC:
+ case AV_CODEC_ID_ADPCM_IMA_APM:
case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
case AV_CODEC_ID_ADPCM_IMA_OKI:
case AV_CODEC_ID_ADPCM_IMA_WS:
+ case AV_CODEC_ID_ADPCM_IMA_SSI:
case AV_CODEC_ID_ADPCM_G722:
case AV_CODEC_ID_ADPCM_YAMAHA:
case AV_CODEC_ID_ADPCM_AICA:
@@ -1487,9 +517,12 @@ int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
case AV_CODEC_ID_PCM_VIDC:
case AV_CODEC_ID_PCM_S8:
case AV_CODEC_ID_PCM_S8_PLANAR:
+ case AV_CODEC_ID_PCM_SGA:
case AV_CODEC_ID_PCM_U8:
- case AV_CODEC_ID_PCM_ZORK:
case AV_CODEC_ID_SDX2_DPCM:
+ case AV_CODEC_ID_CBD2_DPCM:
+ case AV_CODEC_ID_DERF_DPCM:
+ case AV_CODEC_ID_WADY_DPCM:
return 8;
case AV_CODEC_ID_PCM_S16BE:
case AV_CODEC_ID_PCM_S16BE_PLANAR:
@@ -1527,7 +560,7 @@ int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
{
- static const enum AVCodecID map[AV_SAMPLE_FMT_NB][2] = {
+ static const enum AVCodecID map[][2] = {
[AV_SAMPLE_FMT_U8 ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 },
[AV_SAMPLE_FMT_S16 ] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE },
[AV_SAMPLE_FMT_S32 ] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE },
@@ -1540,7 +573,7 @@ enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
[AV_SAMPLE_FMT_FLTP] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE },
[AV_SAMPLE_FMT_DBLP] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE },
};
- if (fmt < 0 || fmt >= AV_SAMPLE_FMT_NB)
+ if (fmt < 0 || fmt >= FF_ARRAY_ELEMS(map))
return AV_CODEC_ID_NONE;
if (be < 0 || be > 1)
be = AV_NE(1, 0);
@@ -1550,6 +583,8 @@ enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
int av_get_bits_per_sample(enum AVCodecID codec_id)
{
switch (codec_id) {
+ case AV_CODEC_ID_DFPWM:
+ return 1;
case AV_CODEC_ID_ADPCM_SBPRO_2:
return 2;
case AV_CODEC_ID_ADPCM_SBPRO_3:
@@ -1600,6 +635,7 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
case AV_CODEC_ID_MP2:
case AV_CODEC_ID_MUSEPACK7: return 1152;
case AV_CODEC_ID_AC3: return 1536;
+ case AV_CODEC_ID_FTR: return 1024;
}
if (sr > 0) {
@@ -1608,14 +644,10 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
return 256 * sr / 245;
else if (id == AV_CODEC_ID_DST)
return 588 * sr / 44100;
-
- if (ch > 0) {
- /* calc from sample rate and channels */
- if (id == AV_CODEC_ID_BINKAUDIO_DCT) {
- if (sr / 22050 > 22)
- return 0;
- return (480 << (sr / 22050)) / ch;
- }
+ else if (id == AV_CODEC_ID_BINKAUDIO_DCT) {
+ if (sr / 22050 > 22)
+ return 0;
+ return (480 << (sr / 22050));
}
if (id == AV_CODEC_ID_MP3)
@@ -1647,6 +679,10 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
return 256 * (frame_bytes / 64);
if (id == AV_CODEC_ID_RA_144)
return 160 * (frame_bytes / 20);
+ if (id == AV_CODEC_ID_APTX)
+ return 4 * (frame_bytes / 4);
+ if (id == AV_CODEC_ID_APTX_HD)
+ return 4 * (frame_bytes / 6);
if (bps > 0) {
/* calc from frame_bytes and bits_per_coded_sample */
@@ -1657,6 +693,10 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
if (ch > 0 && ch < INT_MAX/16) {
/* calc from frame_bytes and channels */
switch (id) {
+ case AV_CODEC_ID_FASTAUDIO:
+ return frame_bytes / (40 * ch) * 256;
+ case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
+ return (frame_bytes - 4 * ch) / (128 * ch) * 256;
case AV_CODEC_ID_ADPCM_AFC:
return frame_bytes / (9 * ch) * 16;
case AV_CODEC_ID_ADPCM_PSX:
@@ -1666,13 +706,14 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
return 0;
return frame_bytes * 28;
case AV_CODEC_ID_ADPCM_4XM:
+ case AV_CODEC_ID_ADPCM_IMA_ACORN:
case AV_CODEC_ID_ADPCM_IMA_DAT4:
case AV_CODEC_ID_ADPCM_IMA_ISS:
return (frame_bytes - 4 * ch) * 2 / ch;
case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
return (frame_bytes - 4) * 2 / ch;
case AV_CODEC_ID_ADPCM_IMA_AMV:
- return (frame_bytes - 8) * 2 / ch;
+ return (frame_bytes - 8) * 2;
case AV_CODEC_ID_ADPCM_THP:
case AV_CODEC_ID_ADPCM_THP_LE:
if (extradata)
@@ -1732,6 +773,9 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
case AV_CODEC_ID_ADPCM_MTAF:
tmp = blocks * (ba - 16LL) * 2 / ch;
break;
+ case AV_CODEC_ID_ADPCM_XMD:
+ tmp = blocks * 32;
+ break;
}
if (tmp) {
if (tmp != (int)tmp)
@@ -1774,8 +818,16 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
{
- int duration = get_audio_frame_duration(avctx->codec_id, avctx->sample_rate,
- avctx->channels, avctx->block_align,
+ int channels = avctx->ch_layout.nb_channels;
+ int duration;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!channels)
+ channels = avctx->channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ duration = get_audio_frame_duration(avctx->codec_id, avctx->sample_rate,
+ channels, avctx->block_align,
avctx->codec_tag, avctx->bits_per_coded_sample,
avctx->bit_rate, avctx->extradata, avctx->frame_size,
frame_bytes);
@@ -1784,8 +836,16 @@ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes)
{
- int duration = get_audio_frame_duration(par->codec_id, par->sample_rate,
- par->channels, par->block_align,
+ int channels = par->ch_layout.nb_channels;
+ int duration;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!channels)
+ channels = par->channels;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ duration = get_audio_frame_duration(par->codec_id, par->sample_rate,
+ channels, par->block_align,
par->codec_tag, par->bits_per_coded_sample,
par->bit_rate, par->extradata, par->frame_size,
frame_bytes);
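For context only: the two hunks above make the duration helpers read the channel count from the new ch_layout API, falling back to the deprecated channels field while FF_API_OLD_CHANNEL_LAYOUT is still defined. A small caller-side sketch using only public API (codec id, layout and byte count are arbitrary illustration values, not taken from this patch):

/* Illustration only: 4096 bytes of interleaved 16-bit stereo PCM
 * is 1024 samples per channel. All values are arbitrary. */
#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>

int main(void)
{
    AVCodecContext *ctx = avcodec_alloc_context3(NULL);
    if (!ctx)
        return 1;
    ctx->codec_id    = AV_CODEC_ID_PCM_S16LE;
    ctx->sample_rate = 48000;
    av_channel_layout_default(&ctx->ch_layout, 2);   /* stereo */

    printf("%d samples\n", av_get_audio_frame_duration(ctx, 4096));

    avcodec_free_context(&ctx);
    return 0;
}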
@@ -1821,8 +881,9 @@ int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
return i;
}
-const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index)
+const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *avcodec, int index)
{
+ const FFCodec *const codec = ffcodec(avcodec);
int i;
if (!codec->hw_configs || index < 0)
return NULL;
@@ -1832,33 +893,7 @@ const AVCodecHWConfig *avcodec_get_hw_config(const AVCodec *codec, int index)
return &codec->hw_configs[index]->public;
}
-#if FF_API_USER_VISIBLE_AVHWACCEL
-AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel)
-{
- return NULL;
-}
-
-void av_register_hwaccel(AVHWAccel *hwaccel)
-{
-}
-#endif
-
-#if FF_API_LOCKMGR
-int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op))
-{
- return 0;
-}
-#endif
-
-unsigned int avpriv_toupper4(unsigned int x)
-{
- return av_toupper(x & 0xFF) +
- (av_toupper((x >> 8) & 0xFF) << 8) +
- (av_toupper((x >> 16) & 0xFF) << 16) +
-((unsigned)av_toupper((x >> 24) & 0xFF) << 24);
-}
-
-int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
+int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
{
int ret;
@@ -1873,7 +908,7 @@ int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
if (src->progress &&
!(dst->progress = av_buffer_ref(src->progress))) {
- ff_thread_release_buffer(dst->owner[0], dst);
+ ff_thread_release_ext_buffer(dst->owner[0], dst);
return AVERROR(ENOMEM);
}
@@ -1882,19 +917,26 @@ int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
#if !HAVE_THREADS
-enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
{
- return ff_get_format(avctx, fmt);
+ return ff_get_buffer(avctx, f, flags);
}
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
f->owner[0] = f->owner[1] = avctx;
return ff_get_buffer(avctx, f->f, flags);
}
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
+{
+ if (f)
+ av_frame_unref(f);
+}
+
+void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
+ f->owner[0] = f->owner[1] = NULL;
if (f->f)
av_frame_unref(f->f);
}
@@ -1907,7 +949,7 @@ void ff_thread_report_progress(ThreadFrame *f, int progress, int field)
{
}
-void ff_thread_await_progress(ThreadFrame *f, int progress, int field)
+void ff_thread_await_progress(const ThreadFrame *f, int progress, int field)
{
}
@@ -1916,13 +958,14 @@ int ff_thread_can_start_frame(AVCodecContext *avctx)
return 1;
}
-int ff_alloc_entries(AVCodecContext *avctx, int count)
+int ff_slice_thread_init_progress(AVCodecContext *avctx)
{
return 0;
}
-void ff_reset_entries(AVCodecContext *avctx)
+int ff_slice_thread_allocz_entries(AVCodecContext *avctx, int count)
{
+ return 0;
}
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
@@ -1935,34 +978,6 @@ void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, in
#endif
-int avcodec_is_open(AVCodecContext *s)
-{
- return !!s->internal;
-}
-
-int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf)
-{
- int ret;
- char *str;
-
- ret = av_bprint_finalize(buf, &str);
- if (ret < 0)
- return ret;
- if (!av_bprint_is_complete(buf)) {
- av_free(str);
- return AVERROR(ENOMEM);
- }
-
- avctx->extradata = str;
- /* Note: the string is NUL terminated (so extradata can be read as a
- * string), but the ending character is not accounted in the size (in
- * binary formats you are likely not supposed to mux that character). When
- * extradata is copied, it is also padded with AV_INPUT_BUFFER_PADDING_SIZE
- * zeros. */
- avctx->extradata_size = buf->len;
- return 0;
-}
-
const uint8_t *avpriv_find_start_code(const uint8_t *av_restrict p,
const uint8_t *end,
uint32_t *av_restrict state)
@@ -2015,6 +1030,11 @@ AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx)
AVPacketSideData *tmp;
AVCPBProperties *props;
size_t size;
+ int i;
+
+ for (i = 0; i < avctx->nb_coded_side_data; i++)
+ if (avctx->coded_side_data[i].type == AV_PKT_DATA_CPB_PROPERTIES)
+ return (AVCPBProperties *)avctx->coded_side_data[i].data;
props = av_cpb_properties_alloc(&size);
if (!props)
@@ -2036,218 +1056,75 @@ AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx)
return props;
}
-static void codec_parameters_reset(AVCodecParameters *par)
+static unsigned bcd2uint(uint8_t bcd)
{
- av_freep(&par->extradata);
-
- memset(par, 0, sizeof(*par));
-
- par->codec_type = AVMEDIA_TYPE_UNKNOWN;
- par->codec_id = AV_CODEC_ID_NONE;
- par->format = -1;
- par->field_order = AV_FIELD_UNKNOWN;
- par->color_range = AVCOL_RANGE_UNSPECIFIED;
- par->color_primaries = AVCOL_PRI_UNSPECIFIED;
- par->color_trc = AVCOL_TRC_UNSPECIFIED;
- par->color_space = AVCOL_SPC_UNSPECIFIED;
- par->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
- par->sample_aspect_ratio = (AVRational){ 0, 1 };
- par->profile = FF_PROFILE_UNKNOWN;
- par->level = FF_LEVEL_UNKNOWN;
-}
-
-AVCodecParameters *avcodec_parameters_alloc(void)
-{
- AVCodecParameters *par = av_mallocz(sizeof(*par));
-
- if (!par)
- return NULL;
- codec_parameters_reset(par);
- return par;
-}
-
-void avcodec_parameters_free(AVCodecParameters **ppar)
-{
- AVCodecParameters *par = *ppar;
-
- if (!par)
- return;
- codec_parameters_reset(par);
-
- av_freep(ppar);
-}
-
-int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
-{
- codec_parameters_reset(dst);
- memcpy(dst, src, sizeof(*dst));
-
- dst->extradata = NULL;
- dst->extradata_size = 0;
- if (src->extradata) {
- dst->extradata = av_mallocz(src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (!dst->extradata)
- return AVERROR(ENOMEM);
- memcpy(dst->extradata, src->extradata, src->extradata_size);
- dst->extradata_size = src->extradata_size;
- }
-
- return 0;
-}
-
-int avcodec_parameters_from_context(AVCodecParameters *par,
- const AVCodecContext *codec)
-{
- codec_parameters_reset(par);
-
- par->codec_type = codec->codec_type;
- par->codec_id = codec->codec_id;
- par->codec_tag = codec->codec_tag;
-
- par->bit_rate = codec->bit_rate;
- par->bits_per_coded_sample = codec->bits_per_coded_sample;
- par->bits_per_raw_sample = codec->bits_per_raw_sample;
- par->profile = codec->profile;
- par->level = codec->level;
-
- switch (par->codec_type) {
- case AVMEDIA_TYPE_VIDEO:
- par->format = codec->pix_fmt;
- par->width = codec->width;
- par->height = codec->height;
- par->field_order = codec->field_order;
- par->color_range = codec->color_range;
- par->color_primaries = codec->color_primaries;
- par->color_trc = codec->color_trc;
- par->color_space = codec->colorspace;
- par->chroma_location = codec->chroma_sample_location;
- par->sample_aspect_ratio = codec->sample_aspect_ratio;
- par->video_delay = codec->has_b_frames;
- break;
- case AVMEDIA_TYPE_AUDIO:
- par->format = codec->sample_fmt;
- par->channel_layout = codec->channel_layout;
- par->channels = codec->channels;
- par->sample_rate = codec->sample_rate;
- par->block_align = codec->block_align;
- par->frame_size = codec->frame_size;
- par->initial_padding = codec->initial_padding;
- par->trailing_padding = codec->trailing_padding;
- par->seek_preroll = codec->seek_preroll;
- break;
- case AVMEDIA_TYPE_SUBTITLE:
- par->width = codec->width;
- par->height = codec->height;
- break;
- }
-
- if (codec->extradata) {
- par->extradata = av_mallocz(codec->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (!par->extradata)
- return AVERROR(ENOMEM);
- memcpy(par->extradata, codec->extradata, codec->extradata_size);
- par->extradata_size = codec->extradata_size;
- }
-
- return 0;
-}
-
-int avcodec_parameters_to_context(AVCodecContext *codec,
- const AVCodecParameters *par)
-{
- codec->codec_type = par->codec_type;
- codec->codec_id = par->codec_id;
- codec->codec_tag = par->codec_tag;
-
- codec->bit_rate = par->bit_rate;
- codec->bits_per_coded_sample = par->bits_per_coded_sample;
- codec->bits_per_raw_sample = par->bits_per_raw_sample;
- codec->profile = par->profile;
- codec->level = par->level;
-
- switch (par->codec_type) {
- case AVMEDIA_TYPE_VIDEO:
- codec->pix_fmt = par->format;
- codec->width = par->width;
- codec->height = par->height;
- codec->field_order = par->field_order;
- codec->color_range = par->color_range;
- codec->color_primaries = par->color_primaries;
- codec->color_trc = par->color_trc;
- codec->colorspace = par->color_space;
- codec->chroma_sample_location = par->chroma_location;
- codec->sample_aspect_ratio = par->sample_aspect_ratio;
- codec->has_b_frames = par->video_delay;
- break;
- case AVMEDIA_TYPE_AUDIO:
- codec->sample_fmt = par->format;
- codec->channel_layout = par->channel_layout;
- codec->channels = par->channels;
- codec->sample_rate = par->sample_rate;
- codec->block_align = par->block_align;
- codec->frame_size = par->frame_size;
- codec->delay =
- codec->initial_padding = par->initial_padding;
- codec->trailing_padding = par->trailing_padding;
- codec->seek_preroll = par->seek_preroll;
- break;
- case AVMEDIA_TYPE_SUBTITLE:
- codec->width = par->width;
- codec->height = par->height;
- break;
- }
-
- if (par->extradata) {
- av_freep(&codec->extradata);
- codec->extradata = av_mallocz(par->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (!codec->extradata)
- return AVERROR(ENOMEM);
- memcpy(codec->extradata, par->extradata, par->extradata_size);
- codec->extradata_size = par->extradata_size;
- }
-
- return 0;
+ unsigned low = bcd & 0xf;
+ unsigned high = bcd >> 4;
+ if (low > 9 || high > 9)
+ return 0;
+ return low + 10*high;
}
-int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len,
+int ff_alloc_timecode_sei(const AVFrame *frame, AVRational rate, size_t prefix_len,
void **data, size_t *sei_size)
{
- AVFrameSideData *side_data = NULL;
+ AVFrameSideData *sd = NULL;
uint8_t *sei_data;
+ PutBitContext pb;
+ uint32_t *tc;
+ int m;
if (frame)
- side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
+ sd = av_frame_get_side_data(frame, AV_FRAME_DATA_S12M_TIMECODE);
- if (!side_data) {
+ if (!sd) {
*data = NULL;
return 0;
}
+ tc = (uint32_t*)sd->data;
+ m = tc[0] & 3;
- *sei_size = side_data->size + 11;
+ *sei_size = sizeof(uint32_t) * 4;
*data = av_mallocz(*sei_size + prefix_len);
if (!*data)
return AVERROR(ENOMEM);
sei_data = (uint8_t*)*data + prefix_len;
- // country code
- sei_data[0] = 181;
- sei_data[1] = 0;
- sei_data[2] = 49;
-
- /**
- * 'GA94' is standard in North America for ATSC, but hard coding
- * this style may not be the right thing to do -- other formats
- * do exist. This information is not available in the side_data
- * so we are going with this right now.
- */
- AV_WL32(sei_data + 3, MKTAG('G', 'A', '9', '4'));
- sei_data[7] = 3;
- sei_data[8] = ((side_data->size/3) & 0x1f) | 0x40;
- sei_data[9] = 0;
-
- memcpy(sei_data + 10, side_data->data, side_data->size);
-
- sei_data[side_data->size+10] = 255;
+ init_put_bits(&pb, sei_data, *sei_size);
+ put_bits(&pb, 2, m); // num_clock_ts
+
+ for (int j = 1; j <= m; j++) {
+ uint32_t tcsmpte = tc[j];
+ unsigned hh = bcd2uint(tcsmpte & 0x3f); // 6-bit hours
+ unsigned mm = bcd2uint(tcsmpte>>8 & 0x7f); // 7-bit minutes
+ unsigned ss = bcd2uint(tcsmpte>>16 & 0x7f); // 7-bit seconds
+ unsigned ff = bcd2uint(tcsmpte>>24 & 0x3f); // 6-bit frames
+ unsigned drop = tcsmpte & 1<<30 && !0; // 1-bit drop if not arbitrary bit
+
+ /* Calculate frame number of HEVC by SMPTE ST 12-1:2014 Sec 12.2 if rate > 30FPS */
+ if (av_cmp_q(rate, (AVRational) {30, 1}) == 1) {
+ unsigned pc;
+ ff *= 2;
+ if (av_cmp_q(rate, (AVRational) {50, 1}) == 0)
+ pc = !!(tcsmpte & 1 << 7);
+ else
+ pc = !!(tcsmpte & 1 << 23);
+ ff = (ff + pc) & 0x7f;
+ }
+
+ put_bits(&pb, 1, 1); // clock_timestamp_flag
+ put_bits(&pb, 1, 1); // units_field_based_flag
+ put_bits(&pb, 5, 0); // counting_type
+ put_bits(&pb, 1, 1); // full_timestamp_flag
+ put_bits(&pb, 1, 0); // discontinuity_flag
+ put_bits(&pb, 1, drop);
+ put_bits(&pb, 9, ff);
+ put_bits(&pb, 6, ss);
+ put_bits(&pb, 6, mm);
+ put_bits(&pb, 5, hh);
+ put_bits(&pb, 5, 0);
+ }
+ flush_put_bits(&pb);
return 0;
}
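A standalone illustration of the BCD unpacking performed by bcd2uint() above on the 32-bit SMPTE 12M words carried in AV_FRAME_DATA_S12M_TIMECODE side data; the timecode word below is invented for the example:

/* Standalone sketch of the BCD field layout used above; the sample
 * word is made up, not taken from this patch. */
#include <stdint.h>
#include <stdio.h>

static unsigned bcd2uint(uint8_t bcd)
{
    unsigned low  = bcd & 0xf;
    unsigned high = bcd >> 4;
    if (low > 9 || high > 9)
        return 0;
    return low + 10 * high;
}

int main(void)
{
    /* hh in bits 0-5, mm in 8-14, ss in 16-22, ff in 24-29, drop in bit 30 */
    uint32_t tcsmpte = (0x12u << 24) | (0x34u << 16) | (0x56u << 8) | 0x21u;

    printf("%02u:%02u:%02u:%02u drop=%u\n",
           bcd2uint(tcsmpte & 0x3f),        /* hours   -> 21 */
           bcd2uint(tcsmpte >> 8  & 0x7f),  /* minutes -> 56 */
           bcd2uint(tcsmpte >> 16 & 0x7f),  /* seconds -> 34 */
           bcd2uint(tcsmpte >> 24 & 0x3f),  /* frames  -> 12 */
           !!(tcsmpte & 1u << 30));
    return 0;
}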
diff --git a/media/ffvpx/libavcodec/version.c b/media/ffvpx/libavcodec/version.c
new file mode 100644
index 0000000000..d7966b2015
--- /dev/null
+++ b/media/ffvpx/libavcodec/version.c
@@ -0,0 +1,50 @@
+/*
+ * Version functions.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/avassert.h"
+#include "avcodec.h"
+#include "codec_id.h"
+#include "version.h"
+
+#include "libavutil/ffversion.h"
+const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
+
+unsigned avcodec_version(void)
+{
+ av_assert0(AV_CODEC_ID_PCM_S8_PLANAR==65563);
+ av_assert0(AV_CODEC_ID_ADPCM_G722==69660);
+ av_assert0(AV_CODEC_ID_SRT==94216);
+ av_assert0(LIBAVCODEC_VERSION_MICRO >= 100);
+
+ return LIBAVCODEC_VERSION_INT;
+}
+
+const char *avcodec_configuration(void)
+{
+ return FFMPEG_CONFIGURATION;
+}
+
+const char *avcodec_license(void)
+{
+#define LICENSE_PREFIX "libavcodec license: "
+ return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
+}
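A minimal usage sketch (not from the patch, assuming the installed public headers) of how the packed value returned by avcodec_version() relates to the version macros defined in version.h and version_major.h below:

    #include <stdio.h>
    #include <libavcodec/avcodec.h>   /* avcodec_version(), LIBAVCODEC_VERSION_* */
    #include <libavutil/version.h>    /* AV_VERSION_MAJOR/MINOR/MICRO            */

    int main(void)
    {
        unsigned v = avcodec_version();   /* packed as (major << 16) | (minor << 8) | micro */
        printf("lavc runtime %u.%u.%u, built against %d.%d.%d\n",
               AV_VERSION_MAJOR(v), AV_VERSION_MINOR(v), AV_VERSION_MICRO(v),
               LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR,
               LIBAVCODEC_VERSION_MICRO);
        return 0;
    }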
diff --git a/media/ffvpx/libavcodec/version.h b/media/ffvpx/libavcodec/version.h
index 3331d47300..43794ea588 100644
--- a/media/ffvpx/libavcodec/version.h
+++ b/media/ffvpx/libavcodec/version.h
@@ -27,8 +27,9 @@
#include "libavutil/version.h"
-#define LIBAVCODEC_VERSION_MAJOR 58
-#define LIBAVCODEC_VERSION_MINOR 54
+#include "version_major.h"
+
+#define LIBAVCODEC_VERSION_MINOR 3
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
@@ -41,100 +42,4 @@
#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
-/**
- * FF_API_* defines may be placed below to indicate public API that will be
- * dropped at a future version bump. The defines themselves are not part of
- * the public API and may change, break or disappear at any time.
- *
- * @note, when bumping the major version it is recommended to manually
- * disable each FF_API_* in its own commit instead of disabling them all
- * at once through the bump. This improves the git bisect-ability of the change.
- */
-
-#ifndef FF_API_LOWRES
-#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_DEBUG_MV
-#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 58)
-#endif
-#ifndef FF_API_AVCTX_TIMEBASE
-#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_CODED_FRAME
-#define FF_API_CODED_FRAME (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_SIDEDATA_ONLY_PKT
-#define FF_API_SIDEDATA_ONLY_PKT (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_VDPAU_PROFILE
-#define FF_API_VDPAU_PROFILE (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_CONVERGENCE_DURATION
-#define FF_API_CONVERGENCE_DURATION (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_AVPICTURE
-#define FF_API_AVPICTURE (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_AVPACKET_OLD_API
-#define FF_API_AVPACKET_OLD_API (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_RTP_CALLBACK
-#define FF_API_RTP_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_VBV_DELAY
-#define FF_API_VBV_DELAY (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_CODER_TYPE
-#define FF_API_CODER_TYPE (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_STAT_BITS
-#define FF_API_STAT_BITS (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_PRIVATE_OPT
-#define FF_API_PRIVATE_OPT (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_ASS_TIMING
-#define FF_API_ASS_TIMING (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_OLD_BSF
-#define FF_API_OLD_BSF (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_COPY_CONTEXT
-#define FF_API_COPY_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_GET_CONTEXT_DEFAULTS
-#define FF_API_GET_CONTEXT_DEFAULTS (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_NVENC_OLD_NAME
-#define FF_API_NVENC_OLD_NAME (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_STRUCT_VAAPI_CONTEXT
-#define FF_API_STRUCT_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_MERGE_SD_API
-#define FF_API_MERGE_SD_API (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_TAG_STRING
-#define FF_API_TAG_STRING (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_GETCHROMA
-#define FF_API_GETCHROMA (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_CODEC_GET_SET
-#define FF_API_CODEC_GET_SET (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_USER_VISIBLE_AVHWACCEL
-#define FF_API_USER_VISIBLE_AVHWACCEL (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_LOCKMGR
-#define FF_API_LOCKMGR (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_NEXT
-#define FF_API_NEXT (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-#ifndef FF_API_UNSANITIZED_BITRATES
-#define FF_API_UNSANITIZED_BITRATES (LIBAVCODEC_VERSION_MAJOR < 59)
-#endif
-
-
#endif /* AVCODEC_VERSION_H */
diff --git a/media/ffvpx/libavcodec/version_major.h b/media/ffvpx/libavcodec/version_major.h
new file mode 100644
index 0000000000..c2f118b262
--- /dev/null
+++ b/media/ffvpx/libavcodec/version_major.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_MAJOR_H
+#define AVCODEC_VERSION_MAJOR_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#define LIBAVCODEC_VERSION_MAJOR 60
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @note, when bumping the major version it is recommended to manually
+ * disable each FF_API_* in its own commit instead of disabling them all
+ * at once through the bump. This improves the git bisect-ability of the change.
+ */
+
+#define FF_API_INIT_PACKET (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_IDCT_NONE (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_SVTAV1_OPTS (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AYUV_CODECID (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_VT_OUTPUT_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AVCODEC_CHROMA_POS (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_VT_HWACCEL_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 61)
+#define FF_API_AVCTX_FRAME_NUMBER (LIBAVCODEC_VERSION_MAJOR < 61)
+
+// reminder to remove CrystalHD decoders on next major bump
+#define FF_CODEC_CRYSTAL_HD (LIBAVCODEC_VERSION_MAJOR < 61)
+
+#endif /* AVCODEC_VERSION_MAJOR_H */
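The FF_API_* windows above are consumed with ordinary preprocessor guards. A hypothetical sketch of the pattern (the guarded declaration is a placeholder, not taken from this patch):

    #include "libavutil/attributes.h"   /* attribute_deprecated */
    #include "version_major.h"

    #if FF_API_INIT_PACKET
    /* Code in this block still compiles while LIBAVCODEC_VERSION_MAJOR < 61;
     * the whole block, guard included, is removed at the next major bump. */
    attribute_deprecated
    void example_legacy_call(void);     /* hypothetical name for illustration */
    #endif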
diff --git a/media/ffvpx/libavcodec/videodsp.c b/media/ffvpx/libavcodec/videodsp.c
index 4f082a4267..ef0f21f99a 100644
--- a/media/ffvpx/libavcodec/videodsp.c
+++ b/media/ffvpx/libavcodec/videodsp.c
@@ -18,9 +18,10 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
-#include "libavutil/common.h"
+#include "libavutil/macros.h"
#include "videodsp.h"
#define BIT_DEPTH 8
@@ -31,7 +32,7 @@
#include "videodsp_template.c"
#undef BIT_DEPTH
-static void just_return(uint8_t *buf, ptrdiff_t stride, int h)
+static void just_return(const uint8_t *buf, ptrdiff_t stride, int h)
{
}
@@ -44,15 +45,17 @@ av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
ctx->emulated_edge_mc = ff_emulated_edge_mc_16;
}
- #if ARCH_AARCH64 == 1
- ff_videodsp_init_aarch64(ctx, bpc);
- #elif ARCH_ARM == 1
- ff_videodsp_init_arm(ctx, bpc);
- #elif ARCH_PPC == 1
- ff_videodsp_init_ppc(ctx, bpc);
- #elif ARCH_X86 == 1
- ff_videodsp_init_x86(ctx, bpc);
- #elif ARCH_MIPS == 1
- ff_videodsp_init_mips(ctx, bpc);
- #endif
+#if ARCH_AARCH64 == 1
+ ff_videodsp_init_aarch64(ctx, bpc);
+#elif ARCH_ARM == 1
+ ff_videodsp_init_arm(ctx, bpc);
+#elif ARCH_PPC == 1
+ ff_videodsp_init_ppc(ctx, bpc);
+#elif ARCH_X86 == 1
+ ff_videodsp_init_x86(ctx, bpc);
+#elif ARCH_MIPS == 1
+ ff_videodsp_init_mips(ctx, bpc);
+#elif ARCH_LOONGARCH64 == 1
+ ff_videodsp_init_loongarch(ctx, bpc);
+#endif
}
diff --git a/media/ffvpx/libavcodec/videodsp.h b/media/ffvpx/libavcodec/videodsp.h
index c0545f22b0..e8960b609d 100644
--- a/media/ffvpx/libavcodec/videodsp.h
+++ b/media/ffvpx/libavcodec/videodsp.h
@@ -36,7 +36,6 @@ void ff_emulated_edge_mc_ ## depth(uint8_t *dst, const uint8_t *src, \
int src_x, int src_y, int w, int h);
EMULATED_EDGE(8)
-EMULATED_EDGE(16)
typedef struct VideoDSPContext {
/**
@@ -73,7 +72,7 @@ typedef struct VideoDSPContext {
* @param stride distance between two lines of buf (in bytes)
* @param h number of lines to prefetch
*/
- void (*prefetch)(uint8_t *buf, ptrdiff_t stride, int h);
+ void (*prefetch)(const uint8_t *buf, ptrdiff_t stride, int h);
} VideoDSPContext;
void ff_videodsp_init(VideoDSPContext *ctx, int bpc);
@@ -84,5 +83,6 @@ void ff_videodsp_init_arm(VideoDSPContext *ctx, int bpc);
void ff_videodsp_init_ppc(VideoDSPContext *ctx, int bpc);
void ff_videodsp_init_x86(VideoDSPContext *ctx, int bpc);
void ff_videodsp_init_mips(VideoDSPContext *ctx, int bpc);
+void ff_videodsp_init_loongarch(VideoDSPContext *ctx, int bpc);
#endif /* AVCODEC_VIDEODSP_H */
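As a rough illustration of what the emulated_edge_mc hook above is for, here is a simplified, self-contained sketch (not the ff_emulated_edge_mc implementation, which copies whole rows and replicates borders far more efficiently): build a block whose source coordinates may fall outside the picture by clamping every sample position to the nearest edge.

    #include <stddef.h>
    #include <stdint.h>

    static int clampi(int v, int lo, int hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    /* Copy a block_w x block_h block starting at (src_x, src_y); positions
     * outside the pic_w x pic_h picture reuse the nearest edge pixel. */
    static void edge_emu_block(uint8_t *dst, ptrdiff_t dst_stride,
                               const uint8_t *src, ptrdiff_t src_stride,
                               int block_w, int block_h,
                               int src_x, int src_y, int pic_w, int pic_h)
    {
        for (int y = 0; y < block_h; y++) {
            int sy = clampi(src_y + y, 0, pic_h - 1);
            for (int x = 0; x < block_w; x++) {
                int sx = clampi(src_x + x, 0, pic_w - 1);
                dst[y * dst_stride + x] = src[sy * src_stride + sx];
            }
        }
    }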
diff --git a/media/ffvpx/libavcodec/videodsp_template.c b/media/ffvpx/libavcodec/videodsp_template.c
index 55123a5844..d653f4d524 100644
--- a/media/ffvpx/libavcodec/videodsp_template.c
+++ b/media/ffvpx/libavcodec/videodsp_template.c
@@ -20,6 +20,10 @@
*/
#include "bit_depth_template.c"
+#if BIT_DEPTH != 8
+// ff_emulated_edge_mc_8 is used by the x86 MpegVideoDSP API.
+static
+#endif
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
ptrdiff_t buf_linesize,
ptrdiff_t src_linesize,
@@ -60,7 +64,7 @@ void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
av_assert2(start_x < end_x && block_w);
w = end_x - start_x;
- src += start_y * src_linesize + start_x * sizeof(pixel);
+ src += start_y * src_linesize + start_x * (ptrdiff_t)sizeof(pixel);
buf += start_x * sizeof(pixel);
// top
@@ -83,7 +87,7 @@ void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src,
buf += buf_linesize;
}
- buf -= block_h * buf_linesize + start_x * sizeof(pixel);
+ buf -= block_h * buf_linesize + start_x * (ptrdiff_t)sizeof(pixel);
while (block_h--) {
pixel *bufp = (pixel *) buf;
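The FUNC()/pixel names in the template above come from bit_depth_template.c, which is parameterized by BIT_DEPTH and included once per depth. A condensed single-file analogue of that pattern (illustrative only, not the actual template machinery):

    #include <stdint.h>
    #include <stdio.h>

    #define FUNC3(name, depth) name ## _ ## depth
    #define FUNC2(name, depth) FUNC3(name, depth)

    /* "Instantiate" the template for 8-bit pixels... */
    #define BIT_DEPTH 8
    #define pixel uint8_t
    #define FUNC(name) FUNC2(name, BIT_DEPTH)
    static unsigned FUNC(read_sample)(const uint8_t *p) { return *(const pixel *)p; }
    #undef pixel
    #undef FUNC
    #undef BIT_DEPTH

    /* ...and again for 16-bit pixels, yielding read_sample_8 and read_sample_16. */
    #define BIT_DEPTH 16
    #define pixel uint16_t
    #define FUNC(name) FUNC2(name, BIT_DEPTH)
    static unsigned FUNC(read_sample)(const uint8_t *p) { return *(const pixel *)p; }
    #undef pixel
    #undef FUNC
    #undef BIT_DEPTH

    int main(void)
    {
        uint8_t  s8[1]  = { 42 };
        uint16_t s16[1] = { 4242 };
        printf("%u %u\n", read_sample_8(s8), read_sample_16((const uint8_t *)s16));
        return 0;
    }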
diff --git a/media/ffvpx/libavcodec/vlc.c b/media/ffvpx/libavcodec/vlc.c
new file mode 100644
index 0000000000..96f2b28c7e
--- /dev/null
+++ b/media/ffvpx/libavcodec/vlc.c
@@ -0,0 +1,378 @@
+/*
+ * API for creating VLC trees
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2010 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/error.h"
+#include "libavutil/internal.h"
+#include "libavutil/log.h"
+#include "libavutil/macros.h"
+#include "libavutil/mem.h"
+#include "libavutil/qsort.h"
+#include "libavutil/reverse.h"
+#include "vlc.h"
+
+#define GET_DATA(v, table, i, wrap, size) \
+{ \
+ const uint8_t *ptr = (const uint8_t *)table + i * wrap; \
+ switch(size) { \
+ case 1: \
+ v = *(const uint8_t *)ptr; \
+ break; \
+ case 2: \
+ v = *(const uint16_t *)ptr; \
+ break; \
+ case 4: \
+ default: \
+ av_assert1(size == 4); \
+ v = *(const uint32_t *)ptr; \
+ break; \
+ } \
+}
+
+
+static int alloc_table(VLC *vlc, int size, int use_static)
+{
+ int index = vlc->table_size;
+
+ vlc->table_size += size;
+ if (vlc->table_size > vlc->table_allocated) {
+ if (use_static)
+ abort(); // cannot do anything, init_vlc() is used with too little memory
+ vlc->table_allocated += (1 << vlc->bits);
+ vlc->table = av_realloc_f(vlc->table, vlc->table_allocated, sizeof(*vlc->table));
+ if (!vlc->table) {
+ vlc->table_allocated = 0;
+ vlc->table_size = 0;
+ return AVERROR(ENOMEM);
+ }
+ memset(vlc->table + vlc->table_allocated - (1 << vlc->bits), 0, sizeof(*vlc->table) << vlc->bits);
+ }
+ return index;
+}
+
+#define LOCALBUF_ELEMS 1500 // the maximum currently needed is 1296 by rv34
+
+static av_always_inline uint32_t bitswap_32(uint32_t x)
+{
+ return (uint32_t)ff_reverse[ x & 0xFF] << 24 |
+ (uint32_t)ff_reverse[(x >> 8) & 0xFF] << 16 |
+ (uint32_t)ff_reverse[(x >> 16) & 0xFF] << 8 |
+ (uint32_t)ff_reverse[ x >> 24];
+}
+
+typedef struct VLCcode {
+ uint8_t bits;
+ VLCBaseType symbol;
+ /** codeword, with the first bit-to-be-read in the msb
+ * (even if intended for a little-endian bitstream reader) */
+ uint32_t code;
+} VLCcode;
+
+static int vlc_common_init(VLC *vlc, int nb_bits, int nb_codes,
+ VLCcode **buf, int flags)
+{
+ vlc->bits = nb_bits;
+ vlc->table_size = 0;
+ if (flags & INIT_VLC_USE_NEW_STATIC) {
+ av_assert0(nb_codes <= LOCALBUF_ELEMS);
+ } else {
+ vlc->table = NULL;
+ vlc->table_allocated = 0;
+ }
+ if (nb_codes > LOCALBUF_ELEMS) {
+ *buf = av_malloc_array(nb_codes, sizeof(VLCcode));
+ if (!*buf)
+ return AVERROR(ENOMEM);
+ }
+
+ return 0;
+}
+
+static int compare_vlcspec(const void *a, const void *b)
+{
+ const VLCcode *sa = a, *sb = b;
+ return (sa->code >> 1) - (sb->code >> 1);
+}
+
+/**
+ * Build VLC decoding tables suitable for use with get_vlc().
+ *
+ * @param vlc the context to be initialized
+ *
+ * @param table_nb_bits max length of vlc codes to store directly in this table
+ * (Longer codes are delegated to subtables.)
+ *
+ * @param nb_codes number of elements in codes[]
+ *
+ * @param codes descriptions of the vlc codes
+ * These must be ordered such that codes going into the same subtable are contiguous.
+ * Sorting by VLCcode.code is sufficient, though not necessary.
+ */
+static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
+ VLCcode *codes, int flags)
+{
+ int table_size, table_index;
+ VLCElem *table;
+
+ if (table_nb_bits > 30)
+ return AVERROR(EINVAL);
+ table_size = 1 << table_nb_bits;
+ table_index = alloc_table(vlc, table_size, flags & INIT_VLC_USE_NEW_STATIC);
+ ff_dlog(NULL, "new table index=%d size=%d\n", table_index, table_size);
+ if (table_index < 0)
+ return table_index;
+ table = &vlc->table[table_index];
+
+ /* first pass: map codes and compute auxiliary table sizes */
+ for (int i = 0; i < nb_codes; i++) {
+ int n = codes[i].bits;
+ uint32_t code = codes[i].code;
+ int symbol = codes[i].symbol;
+ ff_dlog(NULL, "i=%d n=%d code=0x%"PRIx32"\n", i, n, code);
+ if (n <= table_nb_bits) {
+ /* no need to add another table */
+ int j = code >> (32 - table_nb_bits);
+ int nb = 1 << (table_nb_bits - n);
+ int inc = 1;
+
+ if (flags & INIT_VLC_OUTPUT_LE) {
+ j = bitswap_32(code);
+ inc = 1 << n;
+ }
+ for (int k = 0; k < nb; k++) {
+ int bits = table[j].len;
+ int oldsym = table[j].sym;
+ ff_dlog(NULL, "%4x: code=%d n=%d\n", j, i, n);
+ if ((bits || oldsym) && (bits != n || oldsym != symbol)) {
+ av_log(NULL, AV_LOG_ERROR, "incorrect codes\n");
+ return AVERROR_INVALIDDATA;
+ }
+ table[j].len = n;
+ table[j].sym = symbol;
+ j += inc;
+ }
+ } else {
+ /* fill auxiliary table recursively */
+ uint32_t code_prefix;
+ int index, subtable_bits, j, k;
+
+ n -= table_nb_bits;
+ code_prefix = code >> (32 - table_nb_bits);
+ subtable_bits = n;
+ codes[i].bits = n;
+ codes[i].code = code << table_nb_bits;
+ for (k = i + 1; k < nb_codes; k++) {
+ n = codes[k].bits - table_nb_bits;
+ if (n <= 0)
+ break;
+ code = codes[k].code;
+ if (code >> (32 - table_nb_bits) != code_prefix)
+ break;
+ codes[k].bits = n;
+ codes[k].code = code << table_nb_bits;
+ subtable_bits = FFMAX(subtable_bits, n);
+ }
+ subtable_bits = FFMIN(subtable_bits, table_nb_bits);
+ j = (flags & INIT_VLC_OUTPUT_LE) ? bitswap_32(code_prefix) >> (32 - table_nb_bits) : code_prefix;
+ table[j].len = -subtable_bits;
+ ff_dlog(NULL, "%4x: n=%d (subtable)\n",
+ j, codes[i].bits + table_nb_bits);
+ index = build_table(vlc, subtable_bits, k-i, codes+i, flags);
+ if (index < 0)
+ return index;
+ /* note: realloc has been done, so reload tables */
+ table = &vlc->table[table_index];
+ table[j].sym = index;
+ if (table[j].sym != index) {
+ avpriv_request_sample(NULL, "strange codes");
+ return AVERROR_PATCHWELCOME;
+ }
+ i = k-1;
+ }
+ }
+
+ for (int i = 0; i < table_size; i++) {
+ if (table[i].len == 0)
+ table[i].sym = -1;
+ }
+
+ return table_index;
+}
+
+static int vlc_common_end(VLC *vlc, int nb_bits, int nb_codes, VLCcode *codes,
+ int flags, VLCcode localbuf[LOCALBUF_ELEMS])
+{
+ int ret = build_table(vlc, nb_bits, nb_codes, codes, flags);
+
+ if (flags & INIT_VLC_USE_NEW_STATIC) {
+ if (vlc->table_size != vlc->table_allocated &&
+ !(flags & (INIT_VLC_STATIC_OVERLONG & ~INIT_VLC_USE_NEW_STATIC)))
+ av_log(NULL, AV_LOG_ERROR, "needed %d had %d\n", vlc->table_size, vlc->table_allocated);
+ av_assert0(ret >= 0);
+ } else {
+ if (codes != localbuf)
+ av_free(codes);
+ if (ret < 0) {
+ av_freep(&vlc->table);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/* Build VLC decoding tables suitable for use with get_vlc().
+
+ 'nb_bits' sets the decoding table size to 2^nb_bits entries. The
+ bigger it is, the faster the decoding, but it should not be too
+ big, in order to save memory and L1 cache. '9' is a good compromise.
+
+ 'nb_codes' : number of vlc codes
+
+ 'bits' : table which gives the size (in bits) of each vlc code.
+
+ 'codes' : table which gives the bit pattern of each vlc code.
+
+ 'symbols' : table which gives the values to be returned from get_vlc().
+
+ 'xxx_wrap' : gives the number of bytes between each entry of the
+ 'bits' or 'codes' tables.
+
+ 'xxx_size' : gives the number of bytes of each entry of the 'bits'
+ or 'codes' tables. Currently 1,2 and 4 are supported.
+
+ 'wrap' and 'size' make it possible to use any memory configuration and types
+ (byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.
+*/
+int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
+ const void *bits, int bits_wrap, int bits_size,
+ const void *codes, int codes_wrap, int codes_size,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ int flags)
+{
+ VLCcode localbuf[LOCALBUF_ELEMS], *buf = localbuf;
+ int j, ret;
+
+ ret = vlc_common_init(vlc, nb_bits, nb_codes, &buf, flags);
+ if (ret < 0)
+ return ret;
+
+ av_assert0(symbols_size <= 2 || !symbols);
+ j = 0;
+#define COPY(condition)\
+ for (int i = 0; i < nb_codes; i++) { \
+ unsigned len; \
+ GET_DATA(len, bits, i, bits_wrap, bits_size); \
+ if (!(condition)) \
+ continue; \
+ if (len > 3*nb_bits || len > 32) { \
+ av_log(NULL, AV_LOG_ERROR, "Too long VLC (%u) in init_vlc\n", len);\
+ if (buf != localbuf) \
+ av_free(buf); \
+ return AVERROR(EINVAL); \
+ } \
+ buf[j].bits = len; \
+ GET_DATA(buf[j].code, codes, i, codes_wrap, codes_size); \
+ if (buf[j].code >= (1LL<<buf[j].bits)) { \
+ av_log(NULL, AV_LOG_ERROR, "Invalid code %"PRIx32" for %d in " \
+ "init_vlc\n", buf[j].code, i); \
+ if (buf != localbuf) \
+ av_free(buf); \
+ return AVERROR(EINVAL); \
+ } \
+ if (flags & INIT_VLC_INPUT_LE) \
+ buf[j].code = bitswap_32(buf[j].code); \
+ else \
+ buf[j].code <<= 32 - buf[j].bits; \
+ if (symbols) \
+ GET_DATA(buf[j].symbol, symbols, i, symbols_wrap, symbols_size) \
+ else \
+ buf[j].symbol = i; \
+ j++; \
+ }
+ COPY(len > nb_bits);
+ // qsort is the slowest part of init_vlc, and could probably be improved or avoided
+ AV_QSORT(buf, j, struct VLCcode, compare_vlcspec);
+ COPY(len && len <= nb_bits);
+ nb_codes = j;
+
+ return vlc_common_end(vlc, nb_bits, nb_codes, buf,
+ flags, localbuf);
+}
+
+int ff_init_vlc_from_lengths(VLC *vlc, int nb_bits, int nb_codes,
+ const int8_t *lens, int lens_wrap,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ int offset, int flags, void *logctx)
+{
+ VLCcode localbuf[LOCALBUF_ELEMS], *buf = localbuf;
+ uint64_t code;
+ int ret, j, len_max = FFMIN(32, 3 * nb_bits);
+
+ ret = vlc_common_init(vlc, nb_bits, nb_codes, &buf, flags);
+ if (ret < 0)
+ return ret;
+
+ j = code = 0;
+ for (int i = 0; i < nb_codes; i++, lens += lens_wrap) {
+ int len = *lens;
+ if (len > 0) {
+ unsigned sym;
+
+ buf[j].bits = len;
+ if (symbols)
+ GET_DATA(sym, symbols, i, symbols_wrap, symbols_size)
+ else
+ sym = i;
+ buf[j].symbol = sym + offset;
+ buf[j++].code = code;
+ } else if (len < 0) {
+ len = -len;
+ } else
+ continue;
+ if (len > len_max || code & ((1U << (32 - len)) - 1)) {
+ av_log(logctx, AV_LOG_ERROR, "Invalid VLC (length %u)\n", len);
+ goto fail;
+ }
+ code += 1U << (32 - len);
+ if (code > UINT32_MAX + 1ULL) {
+ av_log(logctx, AV_LOG_ERROR, "Overdetermined VLC tree\n");
+ goto fail;
+ }
+ }
+ return vlc_common_end(vlc, nb_bits, j, buf, flags, localbuf);
+fail:
+ if (buf != localbuf)
+ av_free(buf);
+ return AVERROR_INVALIDDATA;
+}
+
+void ff_free_vlc(VLC *vlc)
+{
+ av_freep(&vlc->table);
+}
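A minimal usage sketch of ff_init_vlc_sparse() (not from the patch; the code table below is made up): four symbols with the prefix-free codes 0, 10, 110, 111, all short enough to land in the primary table.

    #include "vlc.h"

    static const uint8_t  ex_lens[4]  = { 1, 2, 3, 3 };
    static const uint16_t ex_codes[4] = { 0x0, 0x2, 0x6, 0x7 };

    static int build_example_vlc(VLC *vlc)
    {
        /* nb_bits = 3: every code fits in the primary table, no subtables. */
        int ret = ff_init_vlc_sparse(vlc, 3, 4,
                                     ex_lens,  1, 1,   /* bits:    wrap, size   */
                                     ex_codes, 2, 2,   /* codes:   wrap, size   */
                                     NULL,     0, 0,   /* symbols: default 0..3 */
                                     0);
        if (ret < 0)
            return ret;
        /* ... get_vlc2() reads against vlc->table would go here ... */
        ff_free_vlc(vlc);
        return 0;
    }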
diff --git a/media/ffvpx/libavcodec/vlc.h b/media/ffvpx/libavcodec/vlc.h
index 42ccddf3fc..e63c484755 100644
--- a/media/ffvpx/libavcodec/vlc.h
+++ b/media/ffvpx/libavcodec/vlc.h
@@ -21,11 +21,16 @@
#include <stdint.h>
-#define VLC_TYPE int16_t
+// When changing this, be sure to also update tableprint_vlc.h accordingly.
+typedef int16_t VLCBaseType;
+
+typedef struct VLCElem {
+ VLCBaseType sym, len;
+} VLCElem;
typedef struct VLC {
int bits;
- VLC_TYPE (*table)[2]; ///< code, bits
+ VLCElem *table;
int table_size, table_allocated;
} VLC;
@@ -49,28 +54,73 @@ int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
const void *codes, int codes_wrap, int codes_size,
const void *symbols, int symbols_wrap, int symbols_size,
int flags);
+
+/**
+ * Build VLC decoding tables suitable for use with get_vlc2()
+ *
+ * This function takes lengths and symbols and calculates the codes from them.
+ * For this the input lengths and symbols have to be sorted according to "left
+ * nodes in the corresponding tree first".
+ *
+ * @param[in,out] vlc The VLC to be initialized; table and table_allocated
+ * must have been set when initializing a static VLC,
+ * otherwise this will be treated as uninitialized.
+ * @param[in] nb_bits The number of bits to use for the VLC table;
+ * higher values take up more memory and cache, but
+ * allow codes to be read with fewer reads.
+ * @param[in] nb_codes The number of provided length and (if supplied) symbol
+ * entries.
+ * @param[in] lens The lengths of the codes. Entries > 0 correspond to
+ * valid codes; entries == 0 will be skipped and entries
+ * with len < 0 indicate that the tree is incomplete and
+ * has an open end of length -len at this position.
+ * @param[in] lens_wrap Stride (in bytes) of the lengths.
+ * @param[in] symbols The symbols, i.e. what is returned from get_vlc2()
+ * when the corresponding code is encountered.
+ * May be NULL, then 0, 1, 2, 3, 4,... will be used.
+ * @param[in] symbols_wrap Stride (in bytes) of the symbols.
+ * @param[in] symbols_size Size of the symbols. 1 and 2 are supported.
+ * @param[in] offset An offset to apply to all the valid symbols.
+ * @param[in] flags A combination of the INIT_VLC_* flags; notice that
+ * INIT_VLC_INPUT_LE is pointless and ignored.
+ */
+int ff_init_vlc_from_lengths(VLC *vlc, int nb_bits, int nb_codes,
+ const int8_t *lens, int lens_wrap,
+ const void *symbols, int symbols_wrap, int symbols_size,
+ int offset, int flags, void *logctx);
+
void ff_free_vlc(VLC *vlc);
-#define INIT_VLC_LE 2
+/* If INIT_VLC_INPUT_LE is set, the LSB of the codes used to
+ * initialize the VLC table is the first bit to be read. */
+#define INIT_VLC_INPUT_LE 2
+/* If set the VLC is intended for a little endian bitstream reader. */
+#define INIT_VLC_OUTPUT_LE 8
+#define INIT_VLC_LE (INIT_VLC_INPUT_LE | INIT_VLC_OUTPUT_LE)
#define INIT_VLC_USE_NEW_STATIC 4
+#define INIT_VLC_STATIC_OVERLONG (1 | INIT_VLC_USE_NEW_STATIC)
-#define INIT_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, h, i, j, static_size) \
+#define INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ h, i, j, flags, static_size) \
do { \
- static VLC_TYPE table[static_size][2]; \
+ static VLCElem table[static_size]; \
(vlc)->table = table; \
(vlc)->table_allocated = static_size; \
ff_init_vlc_sparse(vlc, bits, a, b, c, d, e, f, g, h, i, j, \
- INIT_VLC_USE_NEW_STATIC); \
+ flags | INIT_VLC_USE_NEW_STATIC); \
} while (0)
+#define INIT_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, h, i, j, static_size) \
+ INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ h, i, j, 0, static_size)
+
#define INIT_LE_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, h, i, j, static_size) \
- do { \
- static VLC_TYPE table[static_size][2]; \
- (vlc)->table = table; \
- (vlc)->table_allocated = static_size; \
- ff_init_vlc_sparse(vlc, bits, a, b, c, d, e, f, g, h, i, j, \
- INIT_VLC_USE_NEW_STATIC | INIT_VLC_LE); \
- } while (0)
+ INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ h, i, j, INIT_VLC_LE, static_size)
+
+#define INIT_CUSTOM_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, flags, static_size) \
+ INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
+ NULL, 0, 0, flags, static_size)
#define INIT_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size) \
INIT_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, NULL, 0, 0, static_size)
@@ -78,4 +128,17 @@ void ff_free_vlc(VLC *vlc);
#define INIT_LE_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size) \
INIT_LE_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, NULL, 0, 0, static_size)
+#define INIT_VLC_STATIC_FROM_LENGTHS(vlc, bits, nb_codes, lens, len_wrap, \
+ symbols, symbols_wrap, symbols_size, \
+ offset, flags, static_size) \
+ do { \
+ static VLCElem table[static_size]; \
+ (vlc)->table = table; \
+ (vlc)->table_allocated = static_size; \
+ ff_init_vlc_from_lengths(vlc, bits, nb_codes, lens, len_wrap, \
+ symbols, symbols_wrap, symbols_size, \
+ offset, flags | INIT_VLC_USE_NEW_STATIC, \
+ NULL); \
+ } while (0)
+
#endif /* AVCODEC_VLC_H */
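And a corresponding sketch for the length-only variant documented above (values illustrative): lengths are listed tree-left-first, so { 1, 2, 3, 3 } yields the canonical codes 0, 10, 110, 111 with symbols defaulting to 0..3.

    #include "vlc.h"

    static int build_example_vlc_from_lengths(void *logctx)
    {
        static const int8_t ex_lens[4] = { 1, 2, 3, 3 };
        VLC vlc;
        int ret = ff_init_vlc_from_lengths(&vlc, 3, 4,
                                           ex_lens, 1,   /* lens, lens_wrap     */
                                           NULL, 0, 0,   /* symbols: 0, 1, 2, 3 */
                                           0,            /* offset              */
                                           0, logctx);   /* flags, logctx       */
        if (ret < 0)
            return ret;
        ff_free_vlc(&vlc);
        return 0;
    }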
diff --git a/media/ffvpx/libavcodec/vorbis_parser.c b/media/ffvpx/libavcodec/vorbis_parser.c
index 0b2c97cde5..a7d15d4ce9 100644
--- a/media/ffvpx/libavcodec/vorbis_parser.c
+++ b/media/ffvpx/libavcodec/vorbis_parser.c
@@ -25,6 +25,8 @@
* Determines the duration for each packet.
*/
+#include "config_components.h"
+
#include "libavutil/log.h"
#include "get_bits.h"
@@ -332,7 +334,7 @@ static void vorbis_parser_close(AVCodecParserContext *ctx)
av_vorbis_parse_free(&s->vp);
}
-AVCodecParser ff_vorbis_parser = {
+const AVCodecParser ff_vorbis_parser = {
.codec_ids = { AV_CODEC_ID_VORBIS },
.priv_data_size = sizeof(VorbisParseContext),
.parser_parse = vorbis_parse,
diff --git a/media/ffvpx/libavcodec/vp3dsp.h b/media/ffvpx/libavcodec/vp3dsp.h
index 32b2cad0ef..3b849ec05d 100644
--- a/media/ffvpx/libavcodec/vp3dsp.h
+++ b/media/ffvpx/libavcodec/vp3dsp.h
@@ -43,6 +43,8 @@ typedef struct VP3DSPContext {
void (*idct_dc_add)(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void (*v_loop_filter)(uint8_t *src, ptrdiff_t stride, int *bounding_values);
void (*h_loop_filter)(uint8_t *src, ptrdiff_t stride, int *bounding_values);
+ void (*v_loop_filter_unaligned)(uint8_t *src, ptrdiff_t stride, int *bounding_values);
+ void (*h_loop_filter_unaligned)(uint8_t *src, ptrdiff_t stride, int *bounding_values);
} VP3DSPContext;
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values);
diff --git a/media/ffvpx/libavcodec/vp56.h b/media/ffvpx/libavcodec/vp56.h
index 65cf46870a..9dc0b9c7ad 100644
--- a/media/ffvpx/libavcodec/vp56.h
+++ b/media/ffvpx/libavcodec/vp56.h
@@ -26,14 +26,16 @@
#ifndef AVCODEC_VP56_H
#define AVCODEC_VP56_H
+#include "libavutil/mem_internal.h"
+
#include "avcodec.h"
#include "get_bits.h"
#include "hpeldsp.h"
-#include "bytestream.h"
#include "h264chroma.h"
#include "videodsp.h"
#include "vp3dsp.h"
#include "vp56dsp.h"
+#include "vpx_rac.h"
typedef struct vp56_context VP56Context;
@@ -42,7 +44,6 @@ typedef enum {
VP56_FRAME_CURRENT = 0,
VP56_FRAME_PREVIOUS = 1,
VP56_FRAME_GOLDEN = 2,
- VP56_FRAME_GOLDEN2 = 3,
} VP56Frame;
typedef enum {
@@ -82,16 +83,6 @@ typedef int (*VP56ParseCoeffModels)(VP56Context *s);
typedef int (*VP56ParseHeader)(VP56Context *s, const uint8_t *buf,
int buf_size);
-typedef struct VP56RangeCoder {
- int high;
- int bits; /* stored negated (i.e. negative "bits" is a positive number of
- bits left) in order to eliminate a negate in cache refilling */
- const uint8_t *buffer;
- const uint8_t *end;
- unsigned int code_word;
- int end_reached;
-} VP56RangeCoder;
-
typedef struct VP56RefDc {
uint8_t not_null_dc;
VP56Frame ref_frame;
@@ -132,9 +123,9 @@ struct vp56_context {
AVFrame *frames[4];
uint8_t *edge_emu_buffer_alloc;
uint8_t *edge_emu_buffer;
- VP56RangeCoder c;
- VP56RangeCoder cc;
- VP56RangeCoder *ccp;
+ VPXRangeCoder c;
+ VPXRangeCoder cc;
+ VPXRangeCoder *ccp;
int sub_version;
/* frame info */
@@ -214,174 +205,47 @@ struct vp56_context {
};
-int ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha);
+/**
+ * Initializes a VP56Context. Expects its caller to clean up
+ * in case of error.
+ */
int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
int flip, int has_alpha);
-int ff_vp56_free(AVCodecContext *avctx);
int ff_vp56_free_context(VP56Context *s);
void ff_vp56_init_dequant(VP56Context *s, int quantizer);
-int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
- AVPacket *avpkt);
+int ff_vp56_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *avpkt);
/**
* vp56 specific range coder implementation
*/
-extern const uint8_t ff_vp56_norm_shift[256];
-int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size);
-
-/**
- * vp5689 returns 1 if the end of the stream has been reached, 0 otherwise.
- */
-static av_always_inline int vpX_rac_is_end(VP56RangeCoder *c)
-{
- if (c->end <= c->buffer && c->bits >= 0)
- c->end_reached ++;
- return c->end_reached > 10;
-}
-
-static av_always_inline unsigned int vp56_rac_renorm(VP56RangeCoder *c)
-{
- int shift = ff_vp56_norm_shift[c->high];
- int bits = c->bits;
- unsigned int code_word = c->code_word;
-
- c->high <<= shift;
- code_word <<= shift;
- bits += shift;
- if(bits >= 0 && c->buffer < c->end) {
- code_word |= bytestream_get_be16(&c->buffer) << bits;
- bits -= 16;
- }
- c->bits = bits;
- return code_word;
-}
-
-#if ARCH_ARM
-#include "arm/vp56_arith.h"
-#elif ARCH_X86
-#include "x86/vp56_arith.h"
-#endif
-
-#ifndef vp56_rac_get_prob
-#define vp56_rac_get_prob vp56_rac_get_prob
-static av_always_inline int vp56_rac_get_prob(VP56RangeCoder *c, uint8_t prob)
-{
- unsigned int code_word = vp56_rac_renorm(c);
- unsigned int low = 1 + (((c->high - 1) * prob) >> 8);
- unsigned int low_shift = low << 16;
- int bit = code_word >= low_shift;
-
- c->high = bit ? c->high - low : low;
- c->code_word = bit ? code_word - low_shift : code_word;
-
- return bit;
-}
-#endif
-
-#ifndef vp56_rac_get_prob_branchy
-// branchy variant, to be used where there's a branch based on the bit decoded
-static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
-{
- unsigned long code_word = vp56_rac_renorm(c);
- unsigned low = 1 + (((c->high - 1) * prob) >> 8);
- unsigned low_shift = low << 16;
-
- if (code_word >= low_shift) {
- c->high -= low;
- c->code_word = code_word - low_shift;
- return 1;
- }
-
- c->high = low;
- c->code_word = code_word;
- return 0;
-}
-#endif
-
-static av_always_inline int vp56_rac_get(VP56RangeCoder *c)
-{
- unsigned int code_word = vp56_rac_renorm(c);
- /* equiprobable */
- int low = (c->high + 1) >> 1;
- unsigned int low_shift = low << 16;
- int bit = code_word >= low_shift;
- if (bit) {
- c->high -= low;
- code_word -= low_shift;
- } else {
- c->high = low;
- }
-
- c->code_word = code_word;
- return bit;
-}
-
-// rounding is different than vp56_rac_get, is vp56_rac_get wrong?
-static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
-{
- return vp56_rac_get_prob(c, 128);
-}
-
-static int vp56_rac_gets(VP56RangeCoder *c, int bits)
-{
- int value = 0;
-
- while (bits--) {
- value = (value << 1) | vp56_rac_get(c);
- }
-
- return value;
-}
-
-static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
+static int vp56_rac_gets(VPXRangeCoder *c, int bits)
{
int value = 0;
while (bits--) {
- value = (value << 1) | vp8_rac_get(c);
+ value = (value << 1) | vpx_rac_get(c);
}
return value;
}
-// fixme: add 1 bit to all the calls to this?
-static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
-{
- int v;
-
- if (!vp8_rac_get(c))
- return 0;
-
- v = vp8_rac_get_uint(c, bits);
-
- if (vp8_rac_get(c))
- v = -v;
-
- return v;
-}
-
// P(7)
-static av_unused int vp56_rac_gets_nn(VP56RangeCoder *c, int bits)
+static av_unused int vp56_rac_gets_nn(VPXRangeCoder *c, int bits)
{
int v = vp56_rac_gets(c, 7) << 1;
return v + !v;
}
-static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
-{
- int v = vp8_rac_get_uint(c, 7) << 1;
- return v + !v;
-}
-
static av_always_inline
-int vp56_rac_get_tree(VP56RangeCoder *c,
+int vp56_rac_get_tree(VPXRangeCoder *c,
const VP56Tree *tree,
const uint8_t *probs)
{
while (tree->val > 0) {
- if (vp56_rac_get_prob_branchy(c, probs[tree->prob_idx]))
+ if (vpx_rac_get_prob_branchy(c, probs[tree->prob_idx]))
tree += tree->val;
else
tree++;
@@ -389,30 +253,4 @@ int vp56_rac_get_tree(VP56RangeCoder *c,
return -tree->val;
}
-// how probabilities are associated with decisions is different I think
-// well, the new scheme fits in the old but this way has one fewer branches per decision
-static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t (*tree)[2],
- const uint8_t *probs)
-{
- int i = 0;
-
- do {
- i = tree[i][vp56_rac_get_prob(c, probs[i])];
- } while (i > 0);
-
- return -i;
-}
-
-// DCTextra
-static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
-{
- int v = 0;
-
- do {
- v = (v<<1) + vp56_rac_get_prob(c, *prob++);
- } while (*prob);
-
- return v;
-}
-
#endif /* AVCODEC_VP56_H */
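The removed vp56_rac_get_prob() above and the vpx_rac_get_prob() calls that replace it perform the same boolean arithmetic decode step. A self-contained didactic sketch of that step, following the bool-decoder model in RFC 6386 (VP8 Data Format) rather than the library's renormalization-amortized implementation:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct ToyBoolDec {
        const uint8_t *buf, *end;
        uint32_t value;     /* 16-bit decoding window                */
        uint32_t range;     /* current range, kept in [128, 255]     */
        int      bit_count; /* bits shifted in from the current byte */
    } ToyBoolDec;

    static uint32_t toy_next_byte(ToyBoolDec *d)
    {
        return d->buf < d->end ? *d->buf++ : 0;
    }

    static void toy_bool_init(ToyBoolDec *d, const uint8_t *buf, size_t size)
    {
        d->buf       = buf;
        d->end       = buf + size;
        d->value     = toy_next_byte(d) << 8;
        d->value    |= toy_next_byte(d);
        d->range     = 255;
        d->bit_count = 0;
    }

    static int toy_bool_get(ToyBoolDec *d, int prob)
    {
        uint32_t split = 1 + (((d->range - 1) * prob) >> 8);  /* same split as above   */
        uint32_t SPLIT = split << 8;                          /* aligned to the window */
        int bit;

        if (d->value >= SPLIT) {      /* upper subinterval: decode a 1 */
            bit       = 1;
            d->range -= split;
            d->value -= SPLIT;
        } else {                      /* lower subinterval: decode a 0 */
            bit       = 0;
            d->range  = split;
        }
        while (d->range < 128) {      /* renormalize one bit at a time */
            d->value <<= 1;
            d->range <<= 1;
            if (++d->bit_count == 8) {
                d->bit_count = 0;
                d->value    |= toy_next_byte(d);
            }
        }
        return bit;
    }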
diff --git a/media/ffvpx/libavcodec/vp8.c b/media/ffvpx/libavcodec/vp8.c
index 4b07ea8064..db2419deaf 100644
--- a/media/ffvpx/libavcodec/vp8.c
+++ b/media/ffvpx/libavcodec/vp8.c
@@ -24,28 +24,59 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavutil/imgutils.h"
+#include "config_components.h"
+
+#include "libavutil/mem_internal.h"
#include "avcodec.h"
-#include "hwaccel.h"
-#include "internal.h"
+#include "codec_internal.h"
+#include "decode.h"
+#include "hwconfig.h"
#include "mathops.h"
-#include "rectangle.h"
#include "thread.h"
+#include "threadframe.h"
#include "vp8.h"
+#include "vp89_rac.h"
#include "vp8data.h"
+#include "vpx_rac.h"
#if ARCH_ARM
# include "arm/vp8.h"
#endif
-#if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
-#define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
-#elif CONFIG_VP7_DECODER
-#define VPX(vp7, f) vp7_ ## f
-#else // CONFIG_VP8_DECODER
-#define VPX(vp7, f) vp8_ ## f
-#endif
+// fixme: add 1 bit to all the calls to this?
+static int vp8_rac_get_sint(VPXRangeCoder *c, int bits)
+{
+ int v;
+
+ if (!vp89_rac_get(c))
+ return 0;
+
+ v = vp89_rac_get_uint(c, bits);
+
+ if (vp89_rac_get(c))
+ v = -v;
+
+ return v;
+}
+
+static int vp8_rac_get_nn(VPXRangeCoder *c)
+{
+ int v = vp89_rac_get_uint(c, 7) << 1;
+ return v + !v;
+}
+
+// DCTextra
+static int vp8_rac_get_coeff(VPXRangeCoder *c, const uint8_t *prob)
+{
+ int v = 0;
+
+ do {
+ v = (v<<1) + vpx_rac_get_prob(c, *prob++);
+ } while (*prob);
+
+ return v;
+}
static void free_buffers(VP8Context *s)
{
@@ -70,8 +101,8 @@ static void free_buffers(VP8Context *s)
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
{
int ret;
- if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
- ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
+ if ((ret = ff_thread_get_ext_buffer(s->avctx, &f->tf,
+ ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
return ret;
if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height)))
goto fail;
@@ -88,7 +119,7 @@ static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
fail:
av_buffer_unref(&f->seg_map);
- ff_thread_release_buffer(s->avctx, &f->tf);
+ ff_thread_release_ext_buffer(s->avctx, &f->tf);
return AVERROR(ENOMEM);
}
@@ -97,11 +128,11 @@ static void vp8_release_frame(VP8Context *s, VP8Frame *f)
av_buffer_unref(&f->seg_map);
av_buffer_unref(&f->hwaccel_priv_buf);
f->hwaccel_picture_private = NULL;
- ff_thread_release_buffer(s->avctx, &f->tf);
+ ff_thread_release_ext_buffer(s->avctx, &f->tf);
}
#if CONFIG_VP8_DECODER
-static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
+static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, const VP8Frame *src)
{
int ret;
@@ -150,10 +181,10 @@ static VP8Frame *vp8_find_free_buffer(VP8Context *s)
// find a free buffer
for (i = 0; i < 5; i++)
- if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
- &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
- &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
- &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
+ if (&s->frames[i] != s->framep[VP8_FRAME_CURRENT] &&
+ &s->frames[i] != s->framep[VP8_FRAME_PREVIOUS] &&
+ &s->frames[i] != s->framep[VP8_FRAME_GOLDEN] &&
+ &s->frames[i] != s->framep[VP8_FRAME_ALTREF]) {
frame = &s->frames[i];
break;
}
@@ -187,7 +218,7 @@ static av_always_inline
int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
{
AVCodecContext *avctx = s->avctx;
- int i, ret;
+ int i, ret, dim_reset = 0;
if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
height != s->avctx->height) {
@@ -196,9 +227,12 @@ int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
ret = ff_set_dimensions(s->avctx, width, height);
if (ret < 0)
return ret;
+
+ dim_reset = (s->macroblocks_base != NULL);
}
- if (!s->actually_webp && !is_vp7) {
+ if ((s->pix_fmt == AV_PIX_FMT_NONE || dim_reset) &&
+ !s->actually_webp && !is_vp7) {
s->pix_fmt = get_pixel_format(s);
if (s->pix_fmt < 0)
return AVERROR(EINVAL);
@@ -258,14 +292,14 @@ static int vp8_update_dimensions(VP8Context *s, int width, int height)
static void parse_segment_info(VP8Context *s)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
int i;
- s->segmentation.update_map = vp8_rac_get(c);
- s->segmentation.update_feature_data = vp8_rac_get(c);
+ s->segmentation.update_map = vp89_rac_get(c);
+ s->segmentation.update_feature_data = vp89_rac_get(c);
if (s->segmentation.update_feature_data) {
- s->segmentation.absolute_vals = vp8_rac_get(c);
+ s->segmentation.absolute_vals = vp89_rac_get(c);
for (i = 0; i < 4; i++)
s->segmentation.base_quant[i] = vp8_rac_get_sint(c, 7);
@@ -275,28 +309,28 @@ static void parse_segment_info(VP8Context *s)
}
if (s->segmentation.update_map)
for (i = 0; i < 3; i++)
- s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
+ s->prob->segmentid[i] = vp89_rac_get(c) ? vp89_rac_get_uint(c, 8) : 255;
}
static void update_lf_deltas(VP8Context *s)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
int i;
for (i = 0; i < 4; i++) {
- if (vp8_rac_get(c)) {
- s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
+ if (vp89_rac_get(c)) {
+ s->lf_delta.ref[i] = vp89_rac_get_uint(c, 6);
- if (vp8_rac_get(c))
+ if (vp89_rac_get(c))
s->lf_delta.ref[i] = -s->lf_delta.ref[i];
}
}
for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
- if (vp8_rac_get(c)) {
- s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
+ if (vp89_rac_get(c)) {
+ s->lf_delta.mode[i] = vp89_rac_get_uint(c, 6);
- if (vp8_rac_get(c))
+ if (vp89_rac_get(c))
s->lf_delta.mode[i] = -s->lf_delta.mode[i];
}
}
@@ -308,7 +342,7 @@ static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
int i;
int ret;
- s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
+ s->num_coeff_partitions = 1 << vp89_rac_get_uint(&s->c, 2);
buf += 3 * (s->num_coeff_partitions - 1);
buf_size -= 3 * (s->num_coeff_partitions - 1);
@@ -321,7 +355,7 @@ static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
return -1;
s->coeff_partition_size[i] = size;
- ret = ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
+ ret = ff_vpx_init_range_decoder(&s->coeff_partition[i], buf, size);
if (ret < 0)
return ret;
buf += size;
@@ -329,21 +363,21 @@ static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
}
s->coeff_partition_size[i] = buf_size;
- ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
+ ff_vpx_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
return 0;
}
static void vp7_get_quants(VP8Context *s)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
- int yac_qi = vp8_rac_get_uint(c, 7);
- int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
- int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
- int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
- int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
- int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
+ int yac_qi = vp89_rac_get_uint(c, 7);
+ int ydc_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
+ int y2dc_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
+ int y2ac_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
+ int uvdc_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
+ int uvac_qi = vp89_rac_get(c) ? vp89_rac_get_uint(c, 7) : yac_qi;
s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
@@ -355,10 +389,10 @@ static void vp7_get_quants(VP8Context *s)
static void vp8_get_quants(VP8Context *s)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
int i, base_qi;
- s->quant.yac_qi = vp8_rac_get_uint(c, 7);
+ s->quant.yac_qi = vp89_rac_get_uint(c, 7);
s->quant.ydc_delta = vp8_rac_get_sint(c, 4);
s->quant.y2dc_delta = vp8_rac_get_sint(c, 4);
s->quant.y2ac_delta = vp8_rac_get_sint(c, 4);
@@ -391,28 +425,28 @@ static void vp8_get_quants(VP8Context *s)
* The spec isn't clear here, so I'm going by my understanding of what libvpx does
*
* Intra frames update all 3 references
- * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
+ * Inter frames update VP8_FRAME_PREVIOUS if the update_last flag is set
* If the update (golden|altref) flag is set, it's updated with the current frame
- * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
+ * if update_last is set, and VP8_FRAME_PREVIOUS otherwise.
* If the flag is not set, the number read means:
* 0: no update
- * 1: VP56_FRAME_PREVIOUS
+ * 1: VP8_FRAME_PREVIOUS
* 2: update golden with altref, or update altref with golden
*/
-static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
+static VP8FrameType ref_to_update(VP8Context *s, int update, VP8FrameType ref)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
if (update)
- return VP56_FRAME_CURRENT;
+ return VP8_FRAME_CURRENT;
- switch (vp8_rac_get_uint(c, 2)) {
+ switch (vp89_rac_get_uint(c, 2)) {
case 1:
- return VP56_FRAME_PREVIOUS;
+ return VP8_FRAME_PREVIOUS;
case 2:
- return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
+ return (ref == VP8_FRAME_GOLDEN) ? VP8_FRAME_ALTREF : VP8_FRAME_GOLDEN;
}
- return VP56_FRAME_NONE;
+ return VP8_FRAME_NONE;
}
static void vp78_reset_probability_tables(VP8Context *s)
@@ -426,15 +460,15 @@ static void vp78_reset_probability_tables(VP8Context *s)
static void vp78_update_probability_tables(VP8Context *s)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
int i, j, k, l, m;
for (i = 0; i < 4; i++)
for (j = 0; j < 8; j++)
for (k = 0; k < 3; k++)
for (l = 0; l < NUM_DCT_TOKENS-1; l++)
- if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
- int prob = vp8_rac_get_uint(c, 8);
+ if (vpx_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
+ int prob = vp89_rac_get_uint(c, 8);
for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
}
@@ -446,35 +480,35 @@ static void vp78_update_probability_tables(VP8Context *s)
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s,
int mvc_size)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
int i, j;
- if (vp8_rac_get(c))
+ if (vp89_rac_get(c))
for (i = 0; i < 4; i++)
- s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
- if (vp8_rac_get(c))
+ s->prob->pred16x16[i] = vp89_rac_get_uint(c, 8);
+ if (vp89_rac_get(c))
for (i = 0; i < 3; i++)
- s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
+ s->prob->pred8x8c[i] = vp89_rac_get_uint(c, 8);
// 17.2 MV probability update
for (i = 0; i < 2; i++)
for (j = 0; j < mvc_size; j++)
- if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
+ if (vpx_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
s->prob->mvc[i][j] = vp8_rac_get_nn(c);
}
static void update_refs(VP8Context *s)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
- int update_golden = vp8_rac_get(c);
- int update_altref = vp8_rac_get(c);
+ int update_golden = vp89_rac_get(c);
+ int update_altref = vp89_rac_get(c);
- s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
- s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
+ s->update_golden = ref_to_update(s, update_golden, VP8_FRAME_GOLDEN);
+ s->update_altref = ref_to_update(s, update_altref, VP8_FRAME_ALTREF);
}
-static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
+static void copy_chroma(AVFrame *dst, const AVFrame *src, int width, int height)
{
int i, j;
@@ -501,36 +535,32 @@ static void fade(uint8_t *dst, ptrdiff_t dst_linesize,
}
}
-static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
+static int vp7_fade_frame(VP8Context *s, int alpha, int beta)
{
- int alpha = (int8_t) vp8_rac_get_uint(c, 8);
- int beta = (int8_t) vp8_rac_get_uint(c, 8);
int ret;
- if (c->end <= c->buffer && c->bits >= 0)
- return AVERROR_INVALIDDATA;
-
if (!s->keyframe && (alpha || beta)) {
int width = s->mb_width * 16;
int height = s->mb_height * 16;
- AVFrame *src, *dst;
+ const AVFrame *src;
+ AVFrame *dst;
- if (!s->framep[VP56_FRAME_PREVIOUS] ||
- !s->framep[VP56_FRAME_GOLDEN]) {
+ if (!s->framep[VP8_FRAME_PREVIOUS] ||
+ !s->framep[VP8_FRAME_GOLDEN]) {
av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
return AVERROR_INVALIDDATA;
}
- dst =
- src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
+ src =
+ dst = s->framep[VP8_FRAME_PREVIOUS]->tf.f;
/* preserve the golden frame, write a new previous frame */
- if (s->framep[VP56_FRAME_GOLDEN] == s->framep[VP56_FRAME_PREVIOUS]) {
- s->framep[VP56_FRAME_PREVIOUS] = vp8_find_free_buffer(s);
- if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
+ if (s->framep[VP8_FRAME_GOLDEN] == s->framep[VP8_FRAME_PREVIOUS]) {
+ s->framep[VP8_FRAME_PREVIOUS] = vp8_find_free_buffer(s);
+ if ((ret = vp8_alloc_frame(s, s->framep[VP8_FRAME_PREVIOUS], 1)) < 0)
return ret;
- dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
+ dst = s->framep[VP8_FRAME_PREVIOUS]->tf.f;
copy_chroma(dst, src, width, height);
}
@@ -545,10 +575,13 @@ static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
int part1_size, hscale, vscale, i, j, ret;
int width = s->avctx->width;
int height = s->avctx->height;
+ int alpha = 0;
+ int beta = 0;
+ int fade_present = 1;
if (buf_size < 4) {
return AVERROR_INVALIDDATA;
@@ -574,7 +607,7 @@ static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
- ret = ff_vp56_init_range_decoder(c, buf, part1_size);
+ ret = ff_vpx_init_range_decoder(c, buf, part1_size);
if (ret < 0)
return ret;
buf += part1_size;
@@ -582,14 +615,14 @@ static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
/* A. Dimension information (keyframes only) */
if (s->keyframe) {
- width = vp8_rac_get_uint(c, 12);
- height = vp8_rac_get_uint(c, 12);
- hscale = vp8_rac_get_uint(c, 2);
- vscale = vp8_rac_get_uint(c, 2);
+ width = vp89_rac_get_uint(c, 12);
+ height = vp89_rac_get_uint(c, 12);
+ hscale = vp89_rac_get_uint(c, 2);
+ vscale = vp89_rac_get_uint(c, 2);
if (hscale || vscale)
avpriv_request_sample(s->avctx, "Upscaling");
- s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
+ s->update_golden = s->update_altref = VP8_FRAME_CURRENT;
vp78_reset_probability_tables(s);
memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
sizeof(s->prob->pred16x16));
@@ -608,18 +641,18 @@ static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
/* B. Decoding information for all four macroblock-level features */
for (i = 0; i < 4; i++) {
- s->feature_enabled[i] = vp8_rac_get(c);
+ s->feature_enabled[i] = vp89_rac_get(c);
if (s->feature_enabled[i]) {
- s->feature_present_prob[i] = vp8_rac_get_uint(c, 8);
+ s->feature_present_prob[i] = vp89_rac_get_uint(c, 8);
for (j = 0; j < 3; j++)
s->feature_index_prob[i][j] =
- vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
+ vp89_rac_get(c) ? vp89_rac_get_uint(c, 8) : 255;
if (vp7_feature_value_size[s->profile][i])
for (j = 0; j < 4; j++)
s->feature_value[i][j] =
- vp8_rac_get(c) ? vp8_rac_get_uint(c, vp7_feature_value_size[s->profile][i]) : 0;
+ vp89_rac_get(c) ? vp89_rac_get_uint(c, vp7_feature_value_size[s->profile][i]) : 0;
}
}
@@ -628,7 +661,7 @@ static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
s->lf_delta.enabled = 0;
s->num_coeff_partitions = 1;
- ret = ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
+ ret = ff_vpx_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
if (ret < 0)
return ret;
@@ -644,45 +677,44 @@ static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
/* D. Golden frame update flag (a Flag) for interframes only */
if (!s->keyframe) {
- s->update_golden = vp8_rac_get(c) ? VP56_FRAME_CURRENT : VP56_FRAME_NONE;
- s->sign_bias[VP56_FRAME_GOLDEN] = 0;
+ s->update_golden = vp89_rac_get(c) ? VP8_FRAME_CURRENT : VP8_FRAME_NONE;
+ s->sign_bias[VP8_FRAME_GOLDEN] = 0;
}
s->update_last = 1;
s->update_probabilities = 1;
- s->fade_present = 1;
if (s->profile > 0) {
- s->update_probabilities = vp8_rac_get(c);
+ s->update_probabilities = vp89_rac_get(c);
if (!s->update_probabilities)
s->prob[1] = s->prob[0];
if (!s->keyframe)
- s->fade_present = vp8_rac_get(c);
+ fade_present = vp89_rac_get(c);
}
- if (vpX_rac_is_end(c))
+ if (vpx_rac_is_end(c))
return AVERROR_INVALIDDATA;
/* E. Fading information for previous frame */
- if (s->fade_present && vp8_rac_get(c)) {
- if ((ret = vp7_fade_frame(s ,c)) < 0)
- return ret;
+ if (fade_present && vp89_rac_get(c)) {
+ alpha = (int8_t) vp89_rac_get_uint(c, 8);
+ beta = (int8_t) vp89_rac_get_uint(c, 8);
}
/* F. Loop filter type */
if (!s->profile)
- s->filter.simple = vp8_rac_get(c);
+ s->filter.simple = vp89_rac_get(c);
/* G. DCT coefficient ordering specification */
- if (vp8_rac_get(c))
+ if (vp89_rac_get(c))
for (i = 1; i < 16; i++)
- s->prob[0].scan[i] = ff_zigzag_scan[vp8_rac_get_uint(c, 4)];
+ s->prob[0].scan[i] = ff_zigzag_scan[vp89_rac_get_uint(c, 4)];
/* H. Loop filter levels */
if (s->profile > 0)
- s->filter.simple = vp8_rac_get(c);
- s->filter.level = vp8_rac_get_uint(c, 6);
- s->filter.sharpness = vp8_rac_get_uint(c, 3);
+ s->filter.simple = vp89_rac_get(c);
+ s->filter.level = vp89_rac_get_uint(c, 6);
+ s->filter.sharpness = vp89_rac_get_uint(c, 3);
/* I. DCT coefficient probability update; 13.3 Token Probability Updates */
vp78_update_probability_tables(s);
@@ -691,17 +723,23 @@ static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
/* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
if (!s->keyframe) {
- s->prob->intra = vp8_rac_get_uint(c, 8);
- s->prob->last = vp8_rac_get_uint(c, 8);
+ s->prob->intra = vp89_rac_get_uint(c, 8);
+ s->prob->last = vp89_rac_get_uint(c, 8);
vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP7_MVC_SIZE);
}
+ if (vpx_rac_is_end(c))
+ return AVERROR_INVALIDDATA;
+
+ if ((ret = vp7_fade_frame(s, alpha, beta)) < 0)
+ return ret;
+
return 0;
}
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
int header_size, hscale, vscale, ret;
int width = s->avctx->width;
int height = s->avctx->height;
@@ -751,7 +789,7 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
if (hscale || vscale)
avpriv_request_sample(s->avctx, "Upscaling");
- s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
+ s->update_golden = s->update_altref = VP8_FRAME_CURRENT;
vp78_reset_probability_tables(s);
memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
sizeof(s->prob->pred16x16));
@@ -763,30 +801,30 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
memset(&s->lf_delta, 0, sizeof(s->lf_delta));
}
- ret = ff_vp56_init_range_decoder(c, buf, header_size);
+ ret = ff_vpx_init_range_decoder(c, buf, header_size);
if (ret < 0)
return ret;
buf += header_size;
buf_size -= header_size;
if (s->keyframe) {
- s->colorspace = vp8_rac_get(c);
+ s->colorspace = vp89_rac_get(c);
if (s->colorspace)
av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
- s->fullrange = vp8_rac_get(c);
+ s->fullrange = vp89_rac_get(c);
}
- if ((s->segmentation.enabled = vp8_rac_get(c)))
+ if ((s->segmentation.enabled = vp89_rac_get(c)))
parse_segment_info(s);
else
s->segmentation.update_map = 0; // FIXME: move this to some init function?
- s->filter.simple = vp8_rac_get(c);
- s->filter.level = vp8_rac_get_uint(c, 6);
- s->filter.sharpness = vp8_rac_get_uint(c, 3);
+ s->filter.simple = vp89_rac_get(c);
+ s->filter.level = vp89_rac_get_uint(c, 6);
+ s->filter.sharpness = vp89_rac_get_uint(c, 3);
- if ((s->lf_delta.enabled = vp8_rac_get(c))) {
- s->lf_delta.update = vp8_rac_get(c);
+ if ((s->lf_delta.enabled = vp89_rac_get(c))) {
+ s->lf_delta.update = vp89_rac_get(c);
if (s->lf_delta.update)
update_lf_deltas(s);
}
@@ -806,31 +844,31 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
if (!s->keyframe) {
update_refs(s);
- s->sign_bias[VP56_FRAME_GOLDEN] = vp8_rac_get(c);
- s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
+ s->sign_bias[VP8_FRAME_GOLDEN] = vp89_rac_get(c);
+ s->sign_bias[VP8_FRAME_ALTREF] = vp89_rac_get(c);
}
// if we aren't saving this frame's probabilities for future frames,
// make a copy of the current probabilities
- if (!(s->update_probabilities = vp8_rac_get(c)))
+ if (!(s->update_probabilities = vp89_rac_get(c)))
s->prob[1] = s->prob[0];
- s->update_last = s->keyframe || vp8_rac_get(c);
+ s->update_last = s->keyframe || vp89_rac_get(c);
vp78_update_probability_tables(s);
- if ((s->mbskip_enabled = vp8_rac_get(c)))
- s->prob->mbskip = vp8_rac_get_uint(c, 8);
+ if ((s->mbskip_enabled = vp89_rac_get(c)))
+ s->prob->mbskip = vp89_rac_get_uint(c, 8);
if (!s->keyframe) {
- s->prob->intra = vp8_rac_get_uint(c, 8);
- s->prob->last = vp8_rac_get_uint(c, 8);
- s->prob->golden = vp8_rac_get_uint(c, 8);
+ s->prob->intra = vp89_rac_get_uint(c, 8);
+ s->prob->last = vp89_rac_get_uint(c, 8);
+ s->prob->golden = vp89_rac_get_uint(c, 8);
vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP8_MVC_SIZE);
}
// Record the entropy coder state here so that hwaccels can use it.
- s->c.code_word = vp56_rac_renorm(&s->c);
+ s->c.code_word = vpx_rac_renorm(&s->c);
s->coder_state_at_header_end.input = s->c.buffer - (-s->c.bits / 8);
s->coder_state_at_header_end.range = s->c.high;
s->coder_state_at_header_end.value = s->c.code_word >> 16;
@@ -840,7 +878,7 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
}
static av_always_inline
-void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
+void clamp_mv(const VP8mvbounds *s, VP8mv *dst, const VP8mv *src)
{
dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
@@ -851,40 +889,40 @@ void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
/**
* Motion vector coding, 17.1.
*/
-static av_always_inline int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
+static av_always_inline int read_mv_component(VPXRangeCoder *c, const uint8_t *p, int vp7)
{
int bit, x = 0;
- if (vp56_rac_get_prob_branchy(c, p[0])) {
+ if (vpx_rac_get_prob_branchy(c, p[0])) {
int i;
for (i = 0; i < 3; i++)
- x += vp56_rac_get_prob(c, p[9 + i]) << i;
+ x += vpx_rac_get_prob(c, p[9 + i]) << i;
for (i = (vp7 ? 7 : 9); i > 3; i--)
- x += vp56_rac_get_prob(c, p[9 + i]) << i;
- if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
+ x += vpx_rac_get_prob(c, p[9 + i]) << i;
+ if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vpx_rac_get_prob(c, p[12]))
x += 8;
} else {
// small_mvtree
const uint8_t *ps = p + 2;
- bit = vp56_rac_get_prob(c, *ps);
+ bit = vpx_rac_get_prob(c, *ps);
ps += 1 + 3 * bit;
x += 4 * bit;
- bit = vp56_rac_get_prob(c, *ps);
+ bit = vpx_rac_get_prob(c, *ps);
ps += 1 + bit;
x += 2 * bit;
- x += vp56_rac_get_prob(c, *ps);
+ x += vpx_rac_get_prob(c, *ps);
}
- return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
+ return (x && vpx_rac_get_prob(c, p[1])) ? -x : x;
}
-static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
+static int vp7_read_mv_component(VPXRangeCoder *c, const uint8_t *p)
{
return read_mv_component(c, p, 1);
}
-static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
+static int vp8_read_mv_component(VPXRangeCoder *c, const uint8_t *p)
{
return read_mv_component(c, p, 0);
}
@@ -907,18 +945,18 @@ const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
* @returns the number of motion vectors parsed (2, 4 or 16)
*/
static av_always_inline
-int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
+int decode_splitmvs(const VP8Context *s, VPXRangeCoder *c, VP8Macroblock *mb,
int layout, int is_vp7)
{
int part_idx;
int n, num;
- VP8Macroblock *top_mb;
- VP8Macroblock *left_mb = &mb[-1];
+ const VP8Macroblock *top_mb;
+ const VP8Macroblock *left_mb = &mb[-1];
const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
- VP56mv *top_mv;
- VP56mv *left_mv = left_mb->bmv;
- VP56mv *cur_mv = mb->bmv;
+ const VP8mv *top_mv;
+ const VP8mv *left_mv = left_mb->bmv;
+ const VP8mv *cur_mv = mb->bmv;
if (!layout) // layout is inlined, s->mb_layout is not
top_mb = &mb[2];
@@ -927,9 +965,9 @@ int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
mbsplits_top = vp8_mbsplits[top_mb->partitioning];
top_mv = top_mb->bmv;
- if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
- if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[1]))
- part_idx = VP8_SPLITMVMODE_16x8 + vp56_rac_get_prob(c, vp8_mbsplit_prob[2]);
+ if (vpx_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
+ if (vpx_rac_get_prob_branchy(c, vp8_mbsplit_prob[1]))
+ part_idx = VP8_SPLITMVMODE_16x8 + vpx_rac_get_prob(c, vp8_mbsplit_prob[2]);
else
part_idx = VP8_SPLITMVMODE_8x8;
} else {
@@ -957,9 +995,9 @@ int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
submv_prob = get_submv_prob(left, above, is_vp7);
- if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
- if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
- if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
+ if (vpx_rac_get_prob_branchy(c, submv_prob[0])) {
+ if (vpx_rac_get_prob_branchy(c, submv_prob[1])) {
+ if (vpx_rac_get_prob_branchy(c, submv_prob[2])) {
mb->bmv[n].y = mb->mv.y +
read_mv_component(c, s->prob->mvc[0], is_vp7);
mb->bmv[n].x = mb->mv.x +
@@ -1002,7 +1040,7 @@ static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
return 1;
}
-static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
+static const VP8mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
{
return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
}
@@ -1011,13 +1049,12 @@ static av_always_inline
void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb,
int mb_x, int mb_y, int layout)
{
- VP8Macroblock *mb_edge[12];
enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
int idx = CNT_ZERO;
- VP56mv near_mv[3];
+ VP8mv near_mv[3];
uint8_t cnt[3] = { 0 };
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
int i;
AV_ZERO32(&near_mv[0]);
@@ -1030,11 +1067,11 @@ void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb,
if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
pred->yoffset, !s->profile, &edge_x, &edge_y)) {
- VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
- ? s->macroblocks_base + 1 + edge_x +
- (s->mb_width + 1) * (edge_y + 1)
- : s->macroblocks + edge_x +
- (s->mb_height - edge_y - 1) * 2;
+ const VP8Macroblock *edge = (s->mb_layout == 1)
+ ? s->macroblocks_base + 1 + edge_x +
+ (s->mb_width + 1) * (edge_y + 1)
+ : s->macroblocks + edge_x +
+ (s->mb_height - edge_y - 1) * 2;
uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
if (mv) {
if (AV_RN32A(&near_mv[CNT_NEAREST])) {
@@ -1063,19 +1100,19 @@ void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb,
mb->partitioning = VP8_SPLITMVMODE_NONE;
- if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
+ if (vpx_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
mb->mode = VP8_MVMODE_MV;
- if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
+ if (vpx_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
- if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
+ if (vpx_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
else
AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
- if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
+ if (vpx_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
mb->mode = VP8_MVMODE_SPLIT;
mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
} else {
@@ -1099,7 +1136,7 @@ void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb,
}
static av_always_inline
-void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb,
+void vp8_decode_mvs(VP8Context *s, const VP8mvbounds *mv_bounds, VP8Macroblock *mb,
int mb_x, int mb_y, int layout)
{
VP8Macroblock *mb_edge[3] = { 0 /* top */,
@@ -1109,10 +1146,10 @@ void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb,
enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
int idx = CNT_ZERO;
int cur_sign_bias = s->sign_bias[mb->ref_frame];
- int8_t *sign_bias = s->sign_bias;
- VP56mv near_mv[4];
+ const int8_t *sign_bias = s->sign_bias;
+ VP8mv near_mv[4];
uint8_t cnt[4] = { 0 };
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
if (!layout) { // layout is inlined (s->mb_layout is not)
mb_edge[0] = mb + 2;
@@ -1129,9 +1166,9 @@ void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb,
/* Process MB on top, left and top-left */
#define MV_EDGE_CHECK(n) \
{ \
- VP8Macroblock *edge = mb_edge[n]; \
+ const VP8Macroblock *edge = mb_edge[n]; \
int edge_ref = edge->ref_frame; \
- if (edge_ref != VP56_FRAME_CURRENT) { \
+ if (edge_ref != VP8_FRAME_CURRENT) { \
uint32_t mv = AV_RN32A(&edge->mv); \
if (mv) { \
if (cur_sign_bias != sign_bias[edge_ref]) { \
@@ -1153,7 +1190,7 @@ void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb,
MV_EDGE_CHECK(2)
mb->partitioning = VP8_SPLITMVMODE_NONE;
- if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
+ if (vpx_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
mb->mode = VP8_MVMODE_MV;
/* If we have three distinct MVs, merge first and last if they're the same */
@@ -1164,18 +1201,18 @@ void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb,
/* Swap near and nearest if necessary */
if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
- FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
+ FFSWAP(VP8mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
}
- if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
- if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
+ if (vpx_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
+ if (vpx_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
/* Choose the best mv out of 0,0 and the nearest mv */
clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
(mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
(mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
- if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
+ if (vpx_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
mb->mode = VP8_MVMODE_SPLIT;
mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
} else {
@@ -1199,7 +1236,7 @@ void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb,
}
static av_always_inline
-void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
+void decode_intra4x4_modes(VP8Context *s, VPXRangeCoder *c, VP8Macroblock *mb,
int mb_x, int keyframe, int layout)
{
uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
@@ -1220,7 +1257,7 @@ void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
for (x = 0; x < 4; x++) {
const uint8_t *ctx;
ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
- *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
+ *intra4x4 = vp89_rac_get_tree(c, vp8_pred4x4_tree, ctx);
left[y] = top[x] = *intra4x4;
intra4x4++;
}
@@ -1228,17 +1265,17 @@ void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
} else {
int i;
for (i = 0; i < 16; i++)
- intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
- vp8_pred4x4_prob_inter);
+ intra4x4[i] = vp89_rac_get_tree(c, vp8_pred4x4_tree,
+ vp8_pred4x4_prob_inter);
}
}
static av_always_inline
-void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds,
+void decode_mb_mode(VP8Context *s, const VP8mvbounds *mv_bounds,
VP8Macroblock *mb, int mb_x, int mb_y,
- uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
+ uint8_t *segment, const uint8_t *ref, int layout, int is_vp7)
{
- VP56RangeCoder *c = &s->c;
+ VPXRangeCoder *c = &s->c;
static const char * const vp7_feature_name[] = { "q-index",
"lf-delta",
"partial-golden-update",
@@ -1248,9 +1285,9 @@ void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds,
*segment = 0;
for (i = 0; i < 4; i++) {
if (s->feature_enabled[i]) {
- if (vp56_rac_get_prob_branchy(c, s->feature_present_prob[i])) {
- int index = vp8_rac_get_tree(c, vp7_feature_index_tree,
- s->feature_index_prob[i]);
+ if (vpx_rac_get_prob_branchy(c, s->feature_present_prob[i])) {
+ int index = vp89_rac_get_tree(c, vp7_feature_index_tree,
+ s->feature_index_prob[i]);
av_log(s->avctx, AV_LOG_WARNING,
"Feature %s present in macroblock (value 0x%x)\n",
vp7_feature_name[i], s->feature_value[i][index]);
@@ -1258,17 +1295,17 @@ void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds,
}
}
} else if (s->segmentation.update_map) {
- int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
- *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
+ int bit = vpx_rac_get_prob(c, s->prob->segmentid[0]);
+ *segment = vpx_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
} else if (s->segmentation.enabled)
*segment = ref ? *ref : *segment;
mb->segment = *segment;
- mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
+ mb->skip = s->mbskip_enabled ? vpx_rac_get_prob(c, s->prob->mbskip) : 0;
if (s->keyframe) {
- mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra,
- vp8_pred16x16_prob_intra);
+ mb->mode = vp89_rac_get_tree(c, vp8_pred16x16_tree_intra,
+ vp8_pred16x16_prob_intra);
if (mb->mode == MODE_I4x4) {
decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
@@ -1282,17 +1319,17 @@ void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds,
AV_WN32A(s->intra4x4_pred_mode_left, modes);
}
- mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree,
- vp8_pred8x8c_prob_intra);
- mb->ref_frame = VP56_FRAME_CURRENT;
- } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
+ mb->chroma_pred_mode = vp89_rac_get_tree(c, vp8_pred8x8c_tree,
+ vp8_pred8x8c_prob_intra);
+ mb->ref_frame = VP8_FRAME_CURRENT;
+ } else if (vpx_rac_get_prob_branchy(c, s->prob->intra)) {
// inter MB, 16.2
- if (vp56_rac_get_prob_branchy(c, s->prob->last))
+ if (vpx_rac_get_prob_branchy(c, s->prob->last))
mb->ref_frame =
- (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
- : VP56_FRAME_GOLDEN;
+ (!is_vp7 && vpx_rac_get_prob(c, s->prob->golden)) ? VP8_FRAME_ALTREF
+ : VP8_FRAME_GOLDEN;
else
- mb->ref_frame = VP56_FRAME_PREVIOUS;
+ mb->ref_frame = VP8_FRAME_PREVIOUS;
s->ref_count[mb->ref_frame - 1]++;
// motion vectors, 16.3
@@ -1302,14 +1339,15 @@ void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds,
vp8_decode_mvs(s, mv_bounds, mb, mb_x, mb_y, layout);
} else {
// intra MB, 16.1
- mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);
+ mb->mode = vp89_rac_get_tree(c, vp8_pred16x16_tree_inter,
+ s->prob->pred16x16);
if (mb->mode == MODE_I4x4)
decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
- mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree,
- s->prob->pred8x8c);
- mb->ref_frame = VP56_FRAME_CURRENT;
+ mb->chroma_pred_mode = vp89_rac_get_tree(c, vp8_pred8x8c_tree,
+ s->prob->pred8x8c);
+ mb->ref_frame = VP8_FRAME_CURRENT;
mb->partitioning = VP8_SPLITMVMODE_NONE;
AV_ZERO32(&mb->bmv[0]);
}
@@ -1326,21 +1364,21 @@ void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds,
* otherwise, the index of the last coeff decoded plus one
*/
static av_always_inline
-int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16],
+int decode_block_coeffs_internal(VPXRangeCoder *r, int16_t block[16],
uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
- int i, uint8_t *token_prob, int16_t qmul[2],
+ int i, const uint8_t *token_prob, const int16_t qmul[2],
const uint8_t scan[16], int vp7)
{
- VP56RangeCoder c = *r;
+ VPXRangeCoder c = *r;
goto skip_eob;
do {
int coeff;
restart:
- if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
break;
skip_eob:
- if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
if (++i == 16)
break; // invalid input; blocks should end with EOB
token_prob = probs[i][0];
@@ -1349,28 +1387,28 @@ skip_eob:
goto skip_eob;
}
- if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
coeff = 1;
token_prob = probs[i + 1][1];
} else {
- if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
- coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
+ coeff = vpx_rac_get_prob_branchy(&c, token_prob[4]);
if (coeff)
- coeff += vp56_rac_get_prob(&c, token_prob[5]);
+ coeff += vpx_rac_get_prob(&c, token_prob[5]);
coeff += 2;
} else {
// DCT_CAT*
- if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
- if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
- coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[6])) {
+ if (!vpx_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
+ coeff = 5 + vpx_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
} else { // DCT_CAT2
coeff = 7;
- coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
- coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
+ coeff += vpx_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
+ coeff += vpx_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
}
} else { // DCT_CAT3 and up
- int a = vp56_rac_get_prob(&c, token_prob[8]);
- int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
+ int a = vpx_rac_get_prob(&c, token_prob[8]);
+ int b = vpx_rac_get_prob(&c, token_prob[9 + a]);
int cat = (a << 1) + b;
coeff = 3 + (8 << cat);
coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
@@ -1378,7 +1416,7 @@ skip_eob:
}
token_prob = probs[i + 1][2];
}
- block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
+ block[scan[i]] = (vp89_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
} while (++i < 16);
*r = c;
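
The token loop above copies the range coder into a stack-local `VPXRangeCoder c` and writes it back with `*r = c;` once the block is finished, so the coder state can stay in registers across the hot loop. A minimal sketch of that copy-in/copy-out pattern, using a hypothetical run-length reader (the helper name and probability layout are assumptions, not part of this diff):

    #include "vpx_rac.h"

    /* Hypothetical reader: counts consecutive set bits, at most 'max' of them.
     * The copy-in/copy-out mirrors decode_block_coeffs_internal() above. */
    static int read_run(VPXRangeCoder *r, const uint8_t *probs, int max)
    {
        VPXRangeCoder c = *r;   /* work on a local copy */
        int n = 0;

        while (n < max && vpx_rac_get_prob(&c, probs[n]))
            n++;

        *r = c;                 /* publish the advanced coder state */
        return n;
    }
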
@@ -1408,11 +1446,11 @@ int inter_predict_dc(int16_t block[16], int16_t pred[2])
return ret;
}
-static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r,
+static int vp7_decode_block_coeffs_internal(VPXRangeCoder *r,
int16_t block[16],
uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
- int i, uint8_t *token_prob,
- int16_t qmul[2],
+ int i, const uint8_t *token_prob,
+ const int16_t qmul[2],
const uint8_t scan[16])
{
return decode_block_coeffs_internal(r, block, probs, i,
@@ -1420,11 +1458,11 @@ static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r,
}
#ifndef vp8_decode_block_coeffs_internal
-static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r,
+static int vp8_decode_block_coeffs_internal(VPXRangeCoder *r,
int16_t block[16],
uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
- int i, uint8_t *token_prob,
- int16_t qmul[2])
+ int i, const uint8_t *token_prob,
+ const int16_t qmul[2])
{
return decode_block_coeffs_internal(r, block, probs, i,
token_prob, qmul, ff_zigzag_scan, IS_VP8);
@@ -1445,13 +1483,13 @@ static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r,
* otherwise, the index of the last coeff decoded plus one
*/
static av_always_inline
-int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16],
+int decode_block_coeffs(VPXRangeCoder *c, int16_t block[16],
uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
- int i, int zero_nhood, int16_t qmul[2],
+ int i, int zero_nhood, const int16_t qmul[2],
const uint8_t scan[16], int vp7)
{
- uint8_t *token_prob = probs[i][zero_nhood];
- if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
+ const uint8_t *token_prob = probs[i][zero_nhood];
+ if (!vpx_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
return 0;
return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
token_prob, qmul, scan)
@@ -1460,7 +1498,7 @@ int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16],
}
static av_always_inline
-void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c,
+void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VPXRangeCoder *c,
VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
int is_vp7)
{
@@ -1535,8 +1573,8 @@ void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c,
}
static av_always_inline
-void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
- uint8_t *src_cb, uint8_t *src_cr,
+void backup_mb_border(uint8_t *top_border, const uint8_t *src_y,
+ const uint8_t *src_cb, const uint8_t *src_cr,
ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
{
AV_COPY128(top_border, src_y + 15 * linesize);
@@ -1661,7 +1699,7 @@ int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
}
static av_always_inline
-void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
+void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *const dst[3],
VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
{
int x, y, mode, nnz;
@@ -1679,14 +1717,14 @@ void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
s->hpc.pred16x16[mode](dst[0], s->linesize);
} else {
uint8_t *ptr = dst[0];
- uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
+ const uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
const uint8_t lo = is_vp7 ? 128 : 127;
const uint8_t hi = is_vp7 ? 128 : 129;
- uint8_t tr_top[4] = { lo, lo, lo, lo };
+ const uint8_t tr_top[4] = { lo, lo, lo, lo };
// all blocks on the right edge of the macroblock use the bottom edge of
// the top macroblock for their topright edge
// all blocks on the right edge of the macroblock use the bottom edge of
- uint8_t *tr_right = ptr - s->linesize + 16;
+ const uint8_t *tr_right = ptr - s->linesize + 16;
// if we're on the right edge of the frame, said edge is extended
// from the top macroblock
@@ -1699,7 +1737,7 @@ void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
AV_ZERO128(td->non_zero_count_cache);
for (y = 0; y < 4; y++) {
- uint8_t *topright = ptr + 4 - s->linesize;
+ const uint8_t *topright = ptr + 4 - s->linesize;
for (x = 0; x < 4; x++) {
int copy = 0;
ptrdiff_t linesize = s->linesize;
@@ -1800,12 +1838,12 @@ static const uint8_t subpel_idx[3][8] = {
*/
static av_always_inline
void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
- ThreadFrame *ref, const VP56mv *mv,
+ const ThreadFrame *ref, const VP8mv *mv,
int x_off, int y_off, int block_w, int block_h,
int width, int height, ptrdiff_t linesize,
vp8_mc_func mc_func[3][3])
{
- uint8_t *src = ref->f->data[0];
+ const uint8_t *src = ref->f->data[0];
if (AV_RN32A(mv)) {
ptrdiff_t src_linesize = linesize;
@@ -1858,12 +1896,12 @@ void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
*/
static av_always_inline
void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1,
- uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
+ uint8_t *dst2, const ThreadFrame *ref, const VP8mv *mv,
int x_off, int y_off, int block_w, int block_h,
int width, int height, ptrdiff_t linesize,
vp8_mc_func mc_func[3][3])
{
- uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
+ const uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
if (AV_RN32A(mv)) {
int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
@@ -1907,12 +1945,12 @@ void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1,
}
static av_always_inline
-void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
- ThreadFrame *ref_frame, int x_off, int y_off,
+void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *const dst[3],
+ const ThreadFrame *ref_frame, int x_off, int y_off,
int bx_off, int by_off, int block_w, int block_h,
- int width, int height, VP56mv *mv)
+ int width, int height, const VP8mv *mv)
{
- VP56mv uvmv = *mv;
+ VP8mv uvmv = *mv;
/* Y */
vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
@@ -1945,8 +1983,8 @@ void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
/* Fetch pixels for estimated mv 4 macroblocks ahead.
* Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
static av_always_inline
-void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
- int mb_xy, int ref)
+void prefetch_motion(const VP8Context *s, const VP8Macroblock *mb,
+ int mb_x, int mb_y, int mb_xy, int ref)
{
/* Don't prefetch refs that haven't been used very often this frame. */
if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
@@ -1968,13 +2006,13 @@ void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
* Apply motion vectors to prediction buffer, chapter 18.
*/
static av_always_inline
-void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
+void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *const dst[3],
VP8Macroblock *mb, int mb_x, int mb_y)
{
int x_off = mb_x << 4, y_off = mb_y << 4;
int width = 16 * s->mb_width, height = 16 * s->mb_height;
- ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
- VP56mv *bmv = mb->bmv;
+ const ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
+ const VP8mv *bmv = mb->bmv;
switch (mb->partitioning) {
case VP8_SPLITMVMODE_NONE:
@@ -1983,7 +2021,7 @@ void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
break;
case VP8_SPLITMVMODE_4x4: {
int x, y;
- VP56mv uvmv;
+ VP8mv uvmv;
/* Y */
for (y = 0; y < 4; y++) {
@@ -2052,7 +2090,8 @@ void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
}
static av_always_inline
-void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
+void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *const dst[3],
+ const VP8Macroblock *mb)
{
int x, y, ch;
@@ -2114,7 +2153,7 @@ chroma_idct_end:
}
static av_always_inline
-void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb,
+void filter_level_for_mb(const VP8Context *s, const VP8Macroblock *mb,
VP8FilterStrength *f, int is_vp7)
{
int interior_limit, filter_level;
@@ -2147,7 +2186,7 @@ void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb,
}
static av_always_inline
-void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f,
+void filter_mb(const VP8Context *s, uint8_t *const dst[3], const VP8FilterStrength *f,
int mb_x, int mb_y, int is_vp7)
{
int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
@@ -2234,7 +2273,7 @@ void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f,
}
static av_always_inline
-void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f,
+void filter_mb_simple(const VP8Context *s, uint8_t *dst, const VP8FilterStrength *f,
int mb_x, int mb_y)
{
int mbedge_lim, bedge_lim;
@@ -2269,7 +2308,7 @@ void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f,
#define MARGIN (16 << 2)
static av_always_inline
int vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
- VP8Frame *prev_frame, int is_vp7)
+ const VP8Frame *prev_frame, int is_vp7)
{
VP8Context *s = avctx->priv_data;
int mb_x, mb_y;
@@ -2287,7 +2326,7 @@ int vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
s->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
- if (vpX_rac_is_end(&s->c)) {
+ if (vpx_rac_is_end(&s->c)) {
return AVERROR_INVALIDDATA;
}
if (mb_y == 0)
@@ -2306,13 +2345,13 @@ int vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
}
static int vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
- VP8Frame *prev_frame)
+ const VP8Frame *prev_frame)
{
return vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
}
static int vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
- VP8Frame *prev_frame)
+ const VP8Frame *prev_frame)
{
return vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
}
@@ -2363,8 +2402,10 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
int mb_y = atomic_load(&td->thread_mb_pos) >> 16;
int mb_x, mb_xy = mb_y * s->mb_width;
int num_jobs = s->num_jobs;
- VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
- VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
+ const VP8Frame *prev_frame = s->prev_frame;
+ VP8Frame *curframe = s->curframe;
+ VPXRangeCoder *coeff_c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
+
VP8Macroblock *mb;
uint8_t *dst[3] = {
curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
@@ -2372,7 +2413,7 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
};
- if (vpX_rac_is_end(c))
+ if (vpx_rac_is_end(&s->c))
return AVERROR_INVALIDDATA;
if (mb_y == 0)
@@ -2403,7 +2444,7 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
td->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
- if (vpX_rac_is_end(c))
+ if (vpx_rac_is_end(&s->c))
return AVERROR_INVALIDDATA;
// Wait for previous thread to read mb_x+2, and reach mb_y-1.
if (prev_td != td) {
@@ -2428,17 +2469,20 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
prev_frame && prev_frame->seg_map ?
prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
- prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
+ prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_PREVIOUS);
- if (!mb->skip)
- decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
+ if (!mb->skip) {
+ if (vpx_rac_is_end(coeff_c))
+ return AVERROR_INVALIDDATA;
+ decode_mb_coeffs(s, td, coeff_c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
+ }
if (mb->mode <= MODE_I4x4)
intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
else
inter_predict(s, td, dst, mb, mb_x, mb_y);
- prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
+ prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_GOLDEN);
if (!mb->skip) {
idct_mb(s, td, dst, mb);
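
The hunk above separates the header coder (`s->c`) from the per-row coefficient partition coder (`coeff_c`) and refuses to decode a macroblock's coefficients once that partition is exhausted. A reduced sketch of the guard (names follow the diff; wrapping it as a free-standing helper is an assumption):

    /* Return early instead of reading past the end of a coefficient partition. */
    static int decode_coeffs_guarded(VP8Context *s, VP8ThreadData *td,
                                     VPXRangeCoder *coeff_c, VP8Macroblock *mb,
                                     uint8_t *top_nnz, uint8_t *left_nnz, int is_vp7)
    {
        if (vpx_rac_is_end(coeff_c))          /* partition ran dry mid-row */
            return AVERROR_INVALIDDATA;
        decode_mb_coeffs(s, td, coeff_c, mb, top_nnz, left_nnz, is_vp7);
        return 0;
    }
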
@@ -2466,7 +2510,7 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
dst[1], dst[2], s->linesize, s->uvlinesize, 0);
}
- prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
+ prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_ALTREF);
dst[0] += 16;
dst[1] += 8;
@@ -2525,7 +2569,7 @@ static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
next_td = &s->thread_data[(jobnr + 1) % num_jobs];
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
- VP8FilterStrength *f = &td->filter_strength[mb_x];
+ const VP8FilterStrength *f = &td->filter_strength[mb_x];
if (prev_td != td)
check_thread_pos(td, prev_td,
(mb_x + 1) + (s->mb_width + 3), mb_y - 1);
@@ -2570,7 +2614,7 @@ static av_always_inline
int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
int threadnr, int is_vp7)
{
- VP8Context *s = avctx->priv_data;
+ const VP8Context *s = avctx->priv_data;
VP8ThreadData *td = &s->thread_data[jobnr];
VP8ThreadData *next_td = NULL, *prev_td = NULL;
VP8Frame *curframe = s->curframe;
@@ -2614,8 +2658,8 @@ static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
}
static av_always_inline
-int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
- AVPacket *avpkt, int is_vp7)
+int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
+ const AVPacket *avpkt, int is_vp7)
{
VP8Context *s = avctx->priv_data;
int ret, i, referenced, num_jobs;
@@ -2641,10 +2685,10 @@ int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
avctx->pix_fmt = s->pix_fmt;
}
- prev_frame = s->framep[VP56_FRAME_CURRENT];
+ prev_frame = s->framep[VP8_FRAME_CURRENT];
- referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
- s->update_altref == VP56_FRAME_CURRENT;
+ referenced = s->update_last || s->update_golden == VP8_FRAME_CURRENT ||
+ s->update_altref == VP8_FRAME_CURRENT;
skip_thresh = !referenced ? AVDISCARD_NONREF
: !s->keyframe ? AVDISCARD_NONKEY
@@ -2661,12 +2705,12 @@ int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
for (i = 0; i < 5; i++)
if (s->frames[i].tf.f->buf[0] &&
&s->frames[i] != prev_frame &&
- &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
- &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
- &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
+ &s->frames[i] != s->framep[VP8_FRAME_PREVIOUS] &&
+ &s->frames[i] != s->framep[VP8_FRAME_GOLDEN] &&
+ &s->frames[i] != s->framep[VP8_FRAME_ALTREF])
vp8_release_frame(s, &s->frames[i]);
- curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
+ curframe = s->framep[VP8_FRAME_CURRENT] = vp8_find_free_buffer(s);
if (!s->colorspace)
avctx->colorspace = AVCOL_SPC_BT470BG;
@@ -2679,9 +2723,9 @@ int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
* likely that the values we have on a random interframe are complete
* junk if we didn't start decode on a keyframe. So just don't display
* anything rather than junk. */
- if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
- !s->framep[VP56_FRAME_GOLDEN] ||
- !s->framep[VP56_FRAME_GOLDEN2])) {
+ if (!s->keyframe && (!s->framep[VP8_FRAME_PREVIOUS] ||
+ !s->framep[VP8_FRAME_GOLDEN] ||
+ !s->framep[VP8_FRAME_ALTREF])) {
av_log(avctx, AV_LOG_WARNING,
"Discarding interframe without a prior keyframe!\n");
ret = AVERROR_INVALIDDATA;
@@ -2695,24 +2739,25 @@ int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
goto err;
// check if golden and altref are swapped
- if (s->update_altref != VP56_FRAME_NONE)
- s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
+ if (s->update_altref != VP8_FRAME_NONE)
+ s->next_framep[VP8_FRAME_ALTREF] = s->framep[s->update_altref];
else
- s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2];
+ s->next_framep[VP8_FRAME_ALTREF] = s->framep[VP8_FRAME_ALTREF];
- if (s->update_golden != VP56_FRAME_NONE)
- s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
+ if (s->update_golden != VP8_FRAME_NONE)
+ s->next_framep[VP8_FRAME_GOLDEN] = s->framep[s->update_golden];
else
- s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN];
+ s->next_framep[VP8_FRAME_GOLDEN] = s->framep[VP8_FRAME_GOLDEN];
if (s->update_last)
- s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
+ s->next_framep[VP8_FRAME_PREVIOUS] = curframe;
else
- s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
+ s->next_framep[VP8_FRAME_PREVIOUS] = s->framep[VP8_FRAME_PREVIOUS];
- s->next_framep[VP56_FRAME_CURRENT] = curframe;
+ s->next_framep[VP8_FRAME_CURRENT] = curframe;
- ff_thread_finish_setup(avctx);
+ if (ffcodec(avctx->codec)->update_thread_context)
+ ff_thread_finish_setup(avctx);
if (avctx->hwaccel) {
ret = avctx->hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
@@ -2788,7 +2833,7 @@ skip_decode:
s->prob[0] = s->prob[1];
if (!s->invisible) {
- if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
+ if ((ret = av_frame_ref(rframe, curframe->tf.f)) < 0)
return ret;
*got_frame = 1;
}
@@ -2799,17 +2844,17 @@ err:
return ret;
}
-int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
- AVPacket *avpkt)
+int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *avpkt)
{
- return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
+ return vp78_decode_frame(avctx, frame, got_frame, avpkt, IS_VP8);
}
#if CONFIG_VP7_DECODER
-static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
- AVPacket *avpkt)
+static int vp7_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *avpkt)
{
- return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
+ return vp78_decode_frame(avctx, frame, got_frame, avpkt, IS_VP7);
}
#endif /* CONFIG_VP7_DECODER */
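
The frame-decoding entry points above switch from the old `void *data` output argument to a typed `AVFrame *`, which is what the `FF_CODEC_DECODE_CB()` registrations further down expect. A sketch of the resulting callback shape for a hypothetical decoder (all `foo_*` names are placeholders):

    static int foo_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                                int *got_frame, AVPacket *avpkt)
    {
        /* ...decode avpkt->data / avpkt->size into an internal picture... */

        /* On success, hand the picture to the caller through 'frame':
         *     ret = av_frame_ref(frame, internal_frame);
         *     if (ret < 0)
         *         return ret;
         *     *got_frame = 1;
         */
        return avpkt->size;
    }
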
@@ -2818,9 +2863,6 @@ av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
VP8Context *s = avctx->priv_data;
int i;
- if (!s)
- return 0;
-
vp8_decode_flush_impl(avctx, 1);
for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
av_frame_free(&s->frames[i].tf.f);
@@ -2849,7 +2891,6 @@ int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
s->pix_fmt = AV_PIX_FMT_NONE;
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- avctx->internal->allocate_progress = 1;
ff_videodsp_init(&s->vdsp, 8);
@@ -2891,21 +2932,6 @@ av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
#if CONFIG_VP8_DECODER
#if HAVE_THREADS
-static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
-{
- VP8Context *s = avctx->priv_data;
- int ret;
-
- s->avctx = avctx;
-
- if ((ret = vp8_init_frames(s)) < 0) {
- ff_vp8_decode_free(avctx);
- return ret;
- }
-
- return 0;
-}
-
#define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
static int vp8_decode_update_thread_context(AVCodecContext *dst,
@@ -2946,36 +2972,36 @@ static int vp8_decode_update_thread_context(AVCodecContext *dst,
#endif /* CONFIG_VP8_DECODER */
#if CONFIG_VP7_DECODER
-AVCodec ff_vp7_decoder = {
- .name = "vp7",
- .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_VP7,
+const FFCodec ff_vp7_decoder = {
+ .p.name = "vp7",
+ CODEC_LONG_NAME("On2 VP7"),
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_VP7,
.priv_data_size = sizeof(VP8Context),
.init = vp7_decode_init,
.close = ff_vp8_decode_free,
- .decode = vp7_decode_frame,
- .capabilities = AV_CODEC_CAP_DR1,
+ FF_CODEC_DECODE_CB(vp7_decode_frame),
+ .p.capabilities = AV_CODEC_CAP_DR1,
.flush = vp8_decode_flush,
};
#endif /* CONFIG_VP7_DECODER */
#if CONFIG_VP8_DECODER
-AVCodec ff_vp8_decoder = {
- .name = "vp8",
- .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_VP8,
+const FFCodec ff_vp8_decoder = {
+ .p.name = "vp8",
+ CODEC_LONG_NAME("On2 VP8"),
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_VP8,
.priv_data_size = sizeof(VP8Context),
.init = ff_vp8_decode_init,
.close = ff_vp8_decode_free,
- .decode = ff_vp8_decode_frame,
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
+ FF_CODEC_DECODE_CB(ff_vp8_decode_frame),
+ .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS,
+ .caps_internal = FF_CODEC_CAP_ALLOCATE_PROGRESS,
.flush = vp8_decode_flush,
- .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
- .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
- .hw_configs = (const AVCodecHWConfigInternal*[]) {
+ UPDATE_THREAD_CONTEXT(vp8_decode_update_thread_context),
+ .hw_configs = (const AVCodecHWConfigInternal *const []) {
#if CONFIG_VP8_VAAPI_HWACCEL
HWACCEL_VAAPI(vp8),
#endif
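
Both registrations above move from a plain `AVCodec` to the `FFCodec` wrapper: the public fields migrate under `.p.*`, while the long name and decode callback are supplied through macros. A minimal sketch of the new layout for a hypothetical decoder (context type, id and callbacks are placeholders, not part of this diff):

    const FFCodec ff_foo_decoder = {
        .p.name         = "foo",
        CODEC_LONG_NAME("Hypothetical Foo decoder"),
        .p.type         = AVMEDIA_TYPE_VIDEO,
        .p.id           = AV_CODEC_ID_NONE,        /* placeholder id */
        .priv_data_size = sizeof(FooContext),      /* hypothetical context */
        .init           = foo_decode_init,
        .close          = foo_decode_free,
        FF_CODEC_DECODE_CB(foo_decode_frame),
        .p.capabilities = AV_CODEC_CAP_DR1,
    };
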
diff --git a/media/ffvpx/libavcodec/vp8.h b/media/ffvpx/libavcodec/vp8.h
index 70d21e3c60..6f29156b53 100644
--- a/media/ffvpx/libavcodec/vp8.h
+++ b/media/ffvpx/libavcodec/vp8.h
@@ -29,15 +29,25 @@
#include <stdatomic.h>
#include "libavutil/buffer.h"
+#include "libavutil/mem_internal.h"
#include "libavutil/thread.h"
#include "h264pred.h"
-#include "thread.h"
-#include "vp56.h"
+#include "threadframe.h"
+#include "videodsp.h"
#include "vp8dsp.h"
+#include "vpx_rac.h"
#define VP8_MAX_QUANT 127
+typedef enum {
+ VP8_FRAME_NONE = -1,
+ VP8_FRAME_CURRENT = 0,
+ VP8_FRAME_PREVIOUS = 1,
+ VP8_FRAME_GOLDEN = 2,
+ VP8_FRAME_ALTREF = 3,
+} VP8FrameType;
+
enum dct_token {
DCT_0,
DCT_1,
@@ -72,6 +82,11 @@ enum inter_splitmvmode {
VP8_SPLITMVMODE_NONE, ///< (only used in prediction) no split MVs
};
+typedef struct VP8mv {
+ DECLARE_ALIGNED(4, int16_t, x);
+ int16_t y;
+} VP8mv;
+
typedef struct VP8FilterStrength {
uint8_t filter_level;
uint8_t inner_limit;
@@ -89,8 +104,8 @@ typedef struct VP8Macroblock {
uint8_t segment;
uint8_t intra4x4_pred_mode_mb[16];
DECLARE_ALIGNED(4, uint8_t, intra4x4_pred_mode_top)[4];
- VP56mv mv;
- VP56mv bmv[16];
+ VP8mv mv;
+ VP8mv bmv[16];
} VP8Macroblock;
typedef struct VP8intmv {
@@ -233,10 +248,10 @@ typedef struct VP8Context {
/**
* filter strength adjustment for macroblocks that reference:
- * [0] - intra / VP56_FRAME_CURRENT
- * [1] - VP56_FRAME_PREVIOUS
- * [2] - VP56_FRAME_GOLDEN
- * [3] - altref / VP56_FRAME_GOLDEN2
+ * [0] - intra / VP8_FRAME_CURRENT
+ * [1] - VP8_FRAME_PREVIOUS
+ * [2] - VP8_FRAME_GOLDEN
+ * [3] - altref / VP8_FRAME_ALTREF
*/
int8_t ref[4];
} lf_delta;
@@ -244,7 +259,7 @@ typedef struct VP8Context {
uint8_t (*top_border)[16 + 8 + 8];
uint8_t (*top_nnz)[9];
- VP56RangeCoder c; ///< header context, includes mb modes and motion vectors
+ VPXRangeCoder c; ///< header context, includes mb modes and motion vectors
/* This contains the entropy coder state at the end of the header
* block, in the form specified by the standard. For use by
@@ -281,8 +296,8 @@ typedef struct VP8Context {
VP8Macroblock *macroblocks_base;
int invisible;
- int update_last; ///< update VP56_FRAME_PREVIOUS with the current one
- int update_golden; ///< VP56_FRAME_NONE if not updated, or which frame to copy if so
+ int update_last; ///< update VP8_FRAME_PREVIOUS with the current one
+ int update_golden; ///< VP8_FRAME_NONE if not updated, or which frame to copy if so
int update_altref;
/**
@@ -296,7 +311,7 @@ typedef struct VP8Context {
* There can be 1, 2, 4, or 8 of these after the header context.
*/
int num_coeff_partitions;
- VP56RangeCoder coeff_partition[8];
+ VPXRangeCoder coeff_partition[8];
int coeff_partition_size[8];
VideoDSPContext vdsp;
VP8DSPContext vp8dsp;
@@ -321,14 +336,9 @@ typedef struct VP8Context {
int vp7;
/**
- * Fade bit present in bitstream (VP7)
- */
- int fade_present;
-
- /**
* Interframe DC prediction (VP7)
- * [0] VP56_FRAME_PREVIOUS
- * [1] VP56_FRAME_GOLDEN
+ * [0] VP8_FRAME_PREVIOUS
+ * [1] VP8_FRAME_GOLDEN
*/
uint16_t inter_dc_pred[2][2];
@@ -343,8 +353,8 @@ typedef struct VP8Context {
int ff_vp8_decode_init(AVCodecContext *avctx);
-int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
- AVPacket *avpkt);
+int ff_vp8_decode_frame(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, AVPacket *avpkt);
int ff_vp8_decode_free(AVCodecContext *avctx);
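
With the `VP56_FRAME_*` constants replaced by the local `VP8FrameType` enum above, reference selection in vp8.c reads as plain indexing into the `framep[]` array. A small illustrative call-site under that convention (the helper itself is not part of the diff):

    #include "vp8.h"

    /* Pick the reference picture a macroblock points at. */
    static VP8Frame *pick_reference(VP8Frame *const framep[4],
                                    VP8FrameType ref_frame)
    {
        switch (ref_frame) {
        case VP8_FRAME_GOLDEN:   return framep[VP8_FRAME_GOLDEN];
        case VP8_FRAME_ALTREF:   return framep[VP8_FRAME_ALTREF];
        case VP8_FRAME_PREVIOUS:
        default:                 return framep[VP8_FRAME_PREVIOUS];
        }
    }
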
diff --git a/media/ffvpx/libavcodec/vp89_rac.h b/media/ffvpx/libavcodec/vp89_rac.h
new file mode 100644
index 0000000000..bc0924c387
--- /dev/null
+++ b/media/ffvpx/libavcodec/vp89_rac.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Range decoder functions common to VP8 and VP9
+ */
+
+#ifndef AVCODEC_VP89_RAC_H
+#define AVCODEC_VP89_RAC_H
+
+#include <stdint.h>
+
+#include "libavutil/attributes.h"
+
+#include "vpx_rac.h"
+
+// rounding is different than vpx_rac_get, is vpx_rac_get wrong?
+static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
+{
+ return vpx_rac_get_prob(c, 128);
+}
+
+static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
+{
+ int value = 0;
+
+ while (bits--) {
+ value = (value << 1) | vp89_rac_get(c);
+ }
+
+ return value;
+}
+
+// how probabilities are associated with decisions is different I think
+// well, the new scheme fits in the old but this way has one fewer branch per decision
+static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t (*tree)[2],
+ const uint8_t *probs)
+{
+ int i = 0;
+
+ do {
+ i = tree[i][vpx_rac_get_prob(c, probs[i])];
+ } while (i > 0);
+
+ return -i;
+}
+
+#endif /* AVCODEC_VP89_RAC_H */
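
The two helpers above are straightforward to drive: `vp89_rac_get_uint()` reads a fixed-width literal MSB-first, and `vp89_rac_get_tree()` walks a binary tree whose negative leaves encode the returned symbol. A usage sketch with a made-up two-level tree and probabilities (purely illustrative):

    #include "vp89_rac.h"

    static const int8_t  demo_tree[2][2] = { { 1, -0 },     /* root: bit 0 -> node 1, bit 1 -> leaf 0 */
                                             { -1, -2 } };  /* node 1: leaves 1 and 2 */
    static const uint8_t demo_probs[2]   = { 128, 180 };

    static void demo_read(VPXRangeCoder *c)
    {
        int literal = vp89_rac_get_uint(c, 7);                     /* 7-bit value, MSB first */
        int symbol  = vp89_rac_get_tree(c, demo_tree, demo_probs); /* yields 0, 1 or 2 */
        (void)literal;
        (void)symbol;
    }
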
diff --git a/media/ffvpx/libavcodec/vp8_parser.c b/media/ffvpx/libavcodec/vp8_parser.c
index 7ce35e7535..98b752bfb9 100644
--- a/media/ffvpx/libavcodec/vp8_parser.c
+++ b/media/ffvpx/libavcodec/vp8_parser.c
@@ -73,7 +73,7 @@ static int parse(AVCodecParserContext *s,
return buf_size;
}
-AVCodecParser ff_vp8_parser = {
+const AVCodecParser ff_vp8_parser = {
.codec_ids = { AV_CODEC_ID_VP8 },
.parser_parse = parse,
};
diff --git a/media/ffvpx/libavcodec/vp8data.h b/media/ffvpx/libavcodec/vp8data.h
index 5e6dea7617..1fcce134eb 100644
--- a/media/ffvpx/libavcodec/vp8data.h
+++ b/media/ffvpx/libavcodec/vp8data.h
@@ -316,12 +316,6 @@ static const uint8_t vp8_pred4x4_prob_intra[10][10][9] = {
},
};
-static const int8_t vp8_segmentid_tree[][2] = {
- { 1, 2 },
- { -0, -1 }, // '00', '01'
- { -2, -3 }, // '10', '11'
-};
-
static const uint8_t vp8_coeff_band[16] = {
0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7
};
diff --git a/media/ffvpx/libavcodec/vp8dsp.c b/media/ffvpx/libavcodec/vp8dsp.c
index efaf3e71a7..4bffdea52f 100644
--- a/media/ffvpx/libavcodec/vp8dsp.c
+++ b/media/ffvpx/libavcodec/vp8dsp.c
@@ -25,6 +25,8 @@
* VP8 compatible video decoder
*/
+#include "config_components.h"
+
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
@@ -466,7 +468,7 @@ static const uint8_t subpel_filters[7][6] = {
#define PUT_PIXELS(WIDTH) \
static void put_vp8_pixels ## WIDTH ## _c(uint8_t *dst, ptrdiff_t dststride, \
- uint8_t *src, ptrdiff_t srcstride, \
+ const uint8_t *src, ptrdiff_t srcstride, \
int h, int x, int y) \
{ \
int i; \
@@ -490,7 +492,7 @@ PUT_PIXELS(4)
#define VP8_EPEL_H(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, \
ptrdiff_t dststride, \
- uint8_t *src, \
+ const uint8_t *src, \
ptrdiff_t srcstride, \
int h, int mx, int my) \
{ \
@@ -508,7 +510,7 @@ static void put_vp8_epel ## SIZE ## _h ## TAPS ## _c(uint8_t *dst, \
#define VP8_EPEL_V(SIZE, TAPS) \
static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, \
ptrdiff_t dststride, \
- uint8_t *src, \
+ const uint8_t *src, \
ptrdiff_t srcstride, \
int h, int mx, int my) \
{ \
@@ -527,7 +529,7 @@ static void put_vp8_epel ## SIZE ## _v ## TAPS ## _c(uint8_t *dst, \
static void \
put_vp8_epel ## SIZE ## _h ## HTAPS ## v ## VTAPS ## _c(uint8_t *dst, \
ptrdiff_t dststride, \
- uint8_t *src, \
+ const uint8_t *src, \
ptrdiff_t srcstride, \
int h, int mx, \
int my) \
@@ -584,7 +586,7 @@ VP8_EPEL_HV(4, 6, 6)
#define VP8_BILINEAR(SIZE) \
static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, \
- uint8_t *src, ptrdiff_t sstride, \
+ const uint8_t *src, ptrdiff_t sstride, \
int h, int mx, int my) \
{ \
int a = 8 - mx, b = mx; \
@@ -598,7 +600,7 @@ static void put_vp8_bilinear ## SIZE ## _h_c(uint8_t *dst, ptrdiff_t dstride, \
} \
\
static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, \
- uint8_t *src, ptrdiff_t sstride, \
+ const uint8_t *src, ptrdiff_t sstride, \
int h, int mx, int my) \
{ \
int c = 8 - my, d = my; \
@@ -613,7 +615,7 @@ static void put_vp8_bilinear ## SIZE ## _v_c(uint8_t *dst, ptrdiff_t dstride, \
\
static void put_vp8_bilinear ## SIZE ## _hv_c(uint8_t *dst, \
ptrdiff_t dstride, \
- uint8_t *src, \
+ const uint8_t *src, \
ptrdiff_t sstride, \
int h, int mx, int my) \
{ \
@@ -673,15 +675,15 @@ av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
VP78_BILINEAR_MC_FUNC(1, 8);
VP78_BILINEAR_MC_FUNC(2, 4);
- #if ARCH_AARCH64 == 1
- ff_vp78dsp_init_aarch64(dsp);
- #elif ARCH_ARM == 1
- ff_vp78dsp_init_arm(dsp);
- #elif ARCH_PPC == 1
- ff_vp78dsp_init_ppc(dsp);
- #elif ARCH_X86 == 1
- ff_vp78dsp_init_x86(dsp);
- #endif
+#if ARCH_AARCH64 == 1
+ ff_vp78dsp_init_aarch64(dsp);
+#elif ARCH_ARM == 1
+ ff_vp78dsp_init_arm(dsp);
+#elif ARCH_PPC == 1
+ ff_vp78dsp_init_ppc(dsp);
+#elif ARCH_X86 == 1
+ ff_vp78dsp_init_x86(dsp);
+#endif
}
#if CONFIG_VP7_DECODER
@@ -736,14 +738,16 @@ av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
dsp->vp8_v_loop_filter_simple = vp8_v_loop_filter_simple_c;
dsp->vp8_h_loop_filter_simple = vp8_h_loop_filter_simple_c;
- #if ARCH_AARCH64 == 1
- ff_vp8dsp_init_aarch64(dsp);
- #elif ARCH_ARM == 1
- ff_vp8dsp_init_arm(dsp);
- #elif ARCH_X86 == 1
- ff_vp8dsp_init_x86(dsp);
- #elif ARCH_MIPS == 1
- ff_vp8dsp_init_mips(dsp);
- #endif
+#if ARCH_AARCH64 == 1
+ ff_vp8dsp_init_aarch64(dsp);
+#elif ARCH_ARM == 1
+ ff_vp8dsp_init_arm(dsp);
+#elif ARCH_X86 == 1
+ ff_vp8dsp_init_x86(dsp);
+#elif ARCH_MIPS == 1
+ ff_vp8dsp_init_mips(dsp);
+#elif ARCH_LOONGARCH == 1
+ ff_vp8dsp_init_loongarch(dsp);
+#endif
}
#endif /* CONFIG_VP8_DECODER */
diff --git a/media/ffvpx/libavcodec/vp8dsp.h b/media/ffvpx/libavcodec/vp8dsp.h
index cfe1524b0b..16b5e9c35b 100644
--- a/media/ffvpx/libavcodec/vp8dsp.h
+++ b/media/ffvpx/libavcodec/vp8dsp.h
@@ -31,7 +31,7 @@
#include <stdint.h>
typedef void (*vp8_mc_func)(uint8_t *dst /* align 8 */, ptrdiff_t dstStride,
- uint8_t *src /* align 1 */, ptrdiff_t srcStride,
+ const uint8_t *src /* align 1 */, ptrdiff_t srcStride,
int h, int x, int y);
typedef struct VP8DSPContext {
@@ -81,13 +81,6 @@ typedef struct VP8DSPContext {
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3];
} VP8DSPContext;
-void ff_put_vp8_pixels16_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
- int h, int x, int y);
-void ff_put_vp8_pixels8_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
- int h, int x, int y);
-void ff_put_vp8_pixels4_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
- int h, int x, int y);
-
void ff_vp7dsp_init(VP8DSPContext *c);
void ff_vp78dsp_init(VP8DSPContext *c);
@@ -101,6 +94,7 @@ void ff_vp8dsp_init_aarch64(VP8DSPContext *c);
void ff_vp8dsp_init_arm(VP8DSPContext *c);
void ff_vp8dsp_init_x86(VP8DSPContext *c);
void ff_vp8dsp_init_mips(VP8DSPContext *c);
+void ff_vp8dsp_init_loongarch(VP8DSPContext *c);
#define IS_VP7 1
#define IS_VP8 0
diff --git a/media/ffvpx/libavcodec/vp9.c b/media/ffvpx/libavcodec/vp9.c
index f16462b1e9..7c0a246446 100644
--- a/media/ffvpx/libavcodec/vp9.c
+++ b/media/ffvpx/libavcodec/vp9.c
@@ -21,32 +21,34 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config_components.h"
+
#include "avcodec.h"
+#include "codec_internal.h"
+#include "decode.h"
#include "get_bits.h"
-#include "hwaccel.h"
-#include "internal.h"
+#include "hwconfig.h"
#include "profiles.h"
#include "thread.h"
+#include "threadframe.h"
+#include "pthread_internal.h"
+
#include "videodsp.h"
-#include "vp56.h"
+#include "vp89_rac.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dec.h"
+#include "vpx_rac.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
+#include "libavutil/video_enc_params.h"
#define VP9_SYNCCODE 0x498342
#if HAVE_THREADS
-static void vp9_free_entries(AVCodecContext *avctx) {
- VP9Context *s = avctx->priv_data;
-
- if (avctx->active_thread_type & FF_THREAD_SLICE) {
- pthread_mutex_destroy(&s->progress_mutex);
- pthread_cond_destroy(&s->progress_cond);
- av_freep(&s->entries);
- }
-}
+DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
+ (offsetof(VP9Context, progress_mutex)),
+ (offsetof(VP9Context, progress_cond)));
static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
VP9Context *s = avctx->priv_data;
@@ -57,17 +59,11 @@ static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
av_freep(&s->entries);
s->entries = av_malloc_array(n, sizeof(atomic_int));
-
- if (!s->entries) {
- av_freep(&s->entries);
+ if (!s->entries)
return AVERROR(ENOMEM);
- }
for (i = 0; i < n; i++)
atomic_init(&s->entries[i], 0);
-
- pthread_mutex_init(&s->progress_mutex, NULL);
- pthread_cond_init(&s->progress_cond, NULL);
}
return 0;
}
@@ -89,13 +85,19 @@ static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
pthread_mutex_unlock(&s->progress_mutex);
}
#else
-static void vp9_free_entries(AVCodecContext *avctx) {}
static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
#endif
+static void vp9_tile_data_free(VP9TileData *td)
+{
+ av_freep(&td->b_base);
+ av_freep(&td->block_base);
+ av_freep(&td->block_structure);
+}
+
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
{
- ff_thread_release_buffer(avctx, &f->tf);
+ ff_thread_release_ext_buffer(avctx, &f->tf);
av_buffer_unref(&f->extradata);
av_buffer_unref(&f->hwaccel_priv_buf);
f->segmentation_map = NULL;
@@ -107,15 +109,25 @@ static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
VP9Context *s = avctx->priv_data;
int ret, sz;
- ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
+ ret = ff_thread_get_ext_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
if (ret < 0)
return ret;
sz = 64 * s->sb_cols * s->sb_rows;
- f->extradata = av_buffer_allocz(sz * (1 + sizeof(VP9mvrefPair)));
+ if (sz != s->frame_extradata_pool_size) {
+ av_buffer_pool_uninit(&s->frame_extradata_pool);
+ s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
+ if (!s->frame_extradata_pool) {
+ s->frame_extradata_pool_size = 0;
+ goto fail;
+ }
+ s->frame_extradata_pool_size = sz;
+ }
+ f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
if (!f->extradata) {
goto fail;
}
+ memset(f->extradata->data, 0, f->extradata->size);
f->segmentation_map = f->extradata->data;
f->mv = (VP9mvrefPair *) (f->extradata->data + sz);
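
vp9_frame_alloc() now draws the per-frame segmentation/MV extradata from an `AVBufferPool` instead of allocating a fresh zeroed buffer every frame; the pool is rebuilt only when the required size changes, and the buffer is memset by the caller because pooled buffers come back dirty. A condensed sketch of that pattern (the helper name is an assumption):

    #include <string.h>
    #include "libavutil/buffer.h"

    static AVBufferRef *get_pooled_extradata(AVBufferPool **pool, int *pool_size, int sz)
    {
        AVBufferRef *buf;

        if (sz != *pool_size) {                   /* geometry changed: rebuild the pool */
            av_buffer_pool_uninit(pool);
            *pool = av_buffer_pool_init(sz, NULL);
            if (!*pool) {
                *pool_size = 0;
                return NULL;
            }
            *pool_size = sz;
        }

        buf = av_buffer_pool_get(*pool);
        if (buf)
            memset(buf->data, 0, buf->size);      /* pooled buffers are not zeroed */
        return buf;
    }
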
@@ -173,7 +185,9 @@ static int update_size(AVCodecContext *avctx, int w, int h)
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
CONFIG_VP9_NVDEC_HWACCEL + \
- CONFIG_VP9_VAAPI_HWACCEL)
+ CONFIG_VP9_VAAPI_HWACCEL + \
+ CONFIG_VP9_VDPAU_HWACCEL + \
+ CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
VP9Context *s = avctx->priv_data;
uint8_t *p;
@@ -202,6 +216,12 @@ static int update_size(AVCodecContext *avctx, int w, int h)
#if CONFIG_VP9_VAAPI_HWACCEL
*fmtp++ = AV_PIX_FMT_VAAPI;
#endif
+#if CONFIG_VP9_VDPAU_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VDPAU;
+#endif
+#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
+#endif
break;
case AV_PIX_FMT_YUV420P12:
#if CONFIG_VP9_NVDEC_HWACCEL
@@ -210,6 +230,16 @@ static int update_size(AVCodecContext *avctx, int w, int h)
#if CONFIG_VP9_VAAPI_HWACCEL
*fmtp++ = AV_PIX_FMT_VAAPI;
#endif
+#if CONFIG_VP9_VDPAU_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VDPAU;
+#endif
+ break;
+ case AV_PIX_FMT_YUV444P:
+ case AV_PIX_FMT_YUV444P10:
+ case AV_PIX_FMT_YUV444P12:
+#if CONFIG_VP9_VAAPI_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VAAPI;
+#endif
break;
}
@@ -252,7 +282,7 @@ static int update_size(AVCodecContext *avctx, int w, int h)
assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
assign(s->above_y_nnz_ctx, uint8_t *, 16);
assign(s->above_mode_ctx, uint8_t *, 16);
- assign(s->above_mv_ctx, VP56mv(*)[2], 16);
+ assign(s->above_mv_ctx, VP9mv(*)[2], 16);
assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
assign(s->above_partition_ctx, uint8_t *, 8);
@@ -267,10 +297,8 @@ static int update_size(AVCodecContext *avctx, int w, int h)
#undef assign
if (s->td) {
- for (i = 0; i < s->active_tile_cols; i++) {
- av_freep(&s->td[i].b_base);
- av_freep(&s->td[i].block_base);
- }
+ for (i = 0; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
}
if (s->s.h.bpp != s->last_bpp) {
@@ -292,8 +320,7 @@ static int update_block_buffers(AVCodecContext *avctx)
if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
return 0;
- av_free(td->b_base);
- av_free(td->block_base);
+ vp9_tile_data_free(td);
chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
if (s->s.frames[CUR_FRAME].uses_2pass) {
@@ -309,13 +336,16 @@ static int update_block_buffers(AVCodecContext *avctx)
td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
- } else {
- for (i = 1; i < s->active_tile_cols; i++) {
- if (s->td[i].b_base && s->td[i].block_base) {
- av_free(s->td[i].b_base);
- av_free(s->td[i].block_base);
- }
+
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
+ td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
+ if (!td->block_structure)
+ return AVERROR(ENOMEM);
}
+ } else {
+ for (i = 1; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
+
for (i = 0; i < s->active_tile_cols; i++) {
s->td[i].b_base = av_malloc(sizeof(VP9Block));
s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
@@ -327,6 +357,12 @@ static int update_block_buffers(AVCodecContext *avctx)
s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
+
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
+ s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
+ if (!s->td[i].block_structure)
+ return AVERROR(ENOMEM);
+ }
}
}
s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
@@ -351,7 +387,7 @@ static av_always_inline int inv_recenter_nonneg(int v, int m)
}
// differential forward probability updates
-static int update_prob(VP56RangeCoder *c, int p)
+static int update_prob(VPXRangeCoder *c, int p)
{
static const uint8_t inv_map_table[255] = {
7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
@@ -391,16 +427,16 @@ static int update_prob(VP56RangeCoder *c, int p)
* updates vs. the 'fine, exact' updates further down the range, which
* adds one extra dimension to this differential update model. */
- if (!vp8_rac_get(c)) {
- d = vp8_rac_get_uint(c, 4) + 0;
- } else if (!vp8_rac_get(c)) {
- d = vp8_rac_get_uint(c, 4) + 16;
- } else if (!vp8_rac_get(c)) {
- d = vp8_rac_get_uint(c, 5) + 32;
+ if (!vp89_rac_get(c)) {
+ d = vp89_rac_get_uint(c, 4) + 0;
+ } else if (!vp89_rac_get(c)) {
+ d = vp89_rac_get_uint(c, 4) + 16;
+ } else if (!vp89_rac_get(c)) {
+ d = vp89_rac_get_uint(c, 5) + 32;
} else {
- d = vp8_rac_get_uint(c, 7);
+ d = vp89_rac_get_uint(c, 7);
if (d >= 65)
- d = (d << 1) - 65 + vp8_rac_get(c);
+ d = (d << 1) - 65 + vp89_rac_get(c);
d += 64;
av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
}
@@ -510,7 +546,7 @@ static int decode_frame_header(AVCodecContext *avctx,
s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
if (s->s.h.keyframe) {
- if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+ if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
return AVERROR_INVALIDDATA;
}
@@ -526,7 +562,7 @@ static int decode_frame_header(AVCodecContext *avctx,
s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
if (s->s.h.intraonly) {
- if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
+ if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
return AVERROR_INVALIDDATA;
}
@@ -756,18 +792,15 @@ static int decode_frame_header(AVCodecContext *avctx,
s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
int n_range_coders;
- VP56RangeCoder *rc;
+ VPXRangeCoder *rc;
if (s->td) {
- for (i = 0; i < s->active_tile_cols; i++) {
- av_free(s->td[i].b_base);
- av_free(s->td[i].block_base);
- }
- av_free(s->td);
+ for (i = 0; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
+ av_freep(&s->td);
}
s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
- vp9_free_entries(avctx);
s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
s->s.h.tiling.tile_cols : 1;
vp9_alloc_entries(avctx, s->sb_rows);
@@ -776,11 +809,11 @@ static int decode_frame_header(AVCodecContext *avctx,
} else {
n_range_coders = s->s.h.tiling.tile_cols;
}
- s->td = av_mallocz_array(s->active_tile_cols, sizeof(VP9TileData) +
- n_range_coders * sizeof(VP56RangeCoder));
+ s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
+ n_range_coders * sizeof(VPXRangeCoder));
if (!s->td)
return AVERROR(ENOMEM);
- rc = (VP56RangeCoder *) &s->td[s->active_tile_cols];
+ rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
for (i = 0; i < s->active_tile_cols; i++) {
s->td[i].s = s;
s->td[i].c_b = rc;
@@ -790,6 +823,7 @@ static int decode_frame_header(AVCodecContext *avctx,
/* check reference frames */
if (!s->s.h.keyframe && !s->s.h.intraonly) {
+ int valid_ref_frame = 0;
for (i = 0; i < 3; i++) {
AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
int refw = ref->width, refh = ref->height;
@@ -803,17 +837,25 @@ static int decode_frame_header(AVCodecContext *avctx,
} else if (refw == w && refh == h) {
s->mvscale[i][0] = s->mvscale[i][1] = 0;
} else {
+ /* Check to make sure that at least one of the frames
+  * this frame references has valid dimensions. */
if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
- av_log(avctx, AV_LOG_ERROR,
+ av_log(avctx, AV_LOG_WARNING,
"Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
refw, refh, w, h);
- return AVERROR_INVALIDDATA;
+ s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
+ continue;
}
s->mvscale[i][0] = (refw << 14) / w;
s->mvscale[i][1] = (refh << 14) / h;
s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
}
+ valid_ref_frame++;
+ }
+ if (!valid_ref_frame) {
+ av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
+ return AVERROR_INVALIDDATA;
}
}
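The mvscale/mvstep values computed above are Q14 fixed-point ratios between reference and current frame dimensions, with REF_INVALID_SCALE (added to vp9dec.h further down in this diff) marking unusable references. A small self-contained sketch of that arithmetic with example dimensions (1920x1080 reference, 1280x720 current frame):

#include <stdio.h>
#include <stdint.h>

#define REF_INVALID_SCALE 0xFFFF  /* mirrors the sentinel added in vp9dec.h */

int main(void)
{
    int refw = 1920, refh = 1080;   /* reference frame dimensions (example) */
    int w    = 1280, h    = 720;    /* current frame dimensions   (example) */

    /* Same validity window as the header parser: the reference may be at
     * most 2x larger and at most 16x smaller than the current frame. */
    if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
        printf("scale = %#x (invalid reference)\n", REF_INVALID_SCALE);
        return 0;
    }

    uint16_t scale_x = (refw << 14) / w;    /* Q14 ratio, here 1.5 -> 24576 */
    uint16_t scale_y = (refh << 14) / h;
    uint8_t  step_x  = 16 * scale_x >> 14;  /* whole-pel step per 16 pixels */
    uint8_t  step_y  = 16 * scale_y >> 14;

    printf("scale = %u/%u (Q14), step = %u/%u\n",
           scale_x, scale_y, step_x, step_y);
    return 0;
}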
@@ -843,11 +885,11 @@ static int decode_frame_header(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
return AVERROR_INVALIDDATA;
}
- ret = ff_vp56_init_range_decoder(&s->c, data2, size2);
+ ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
if (ret < 0)
return ret;
- if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
+ if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
return AVERROR_INVALIDDATA;
}
@@ -859,6 +901,7 @@ static int decode_frame_header(AVCodecContext *avctx,
} else {
memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
}
+ s->td[i].nb_block_structure = 0;
}
/* FIXME is it faster to not copy here, but do it down in the fw updates
@@ -870,22 +913,22 @@ static int decode_frame_header(AVCodecContext *avctx,
if (s->s.h.lossless) {
s->s.h.txfmmode = TX_4X4;
} else {
- s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2);
+ s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
if (s->s.h.txfmmode == 3)
- s->s.h.txfmmode += vp8_rac_get(&s->c);
+ s->s.h.txfmmode += vp89_rac_get(&s->c);
if (s->s.h.txfmmode == TX_SWITCHABLE) {
for (i = 0; i < 2; i++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.tx16p[i][j] =
update_prob(&s->c, s->prob.p.tx16p[i][j]);
for (i = 0; i < 2; i++)
for (j = 0; j < 3; j++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.tx32p[i][j] =
update_prob(&s->c, s->prob.p.tx32p[i][j]);
}
@@ -894,7 +937,7 @@ static int decode_frame_header(AVCodecContext *avctx,
// coef updates
for (i = 0; i < 4; i++) {
uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
- if (vp8_rac_get(&s->c)) {
+ if (vp89_rac_get(&s->c)) {
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
for (l = 0; l < 6; l++)
@@ -904,7 +947,7 @@ static int decode_frame_header(AVCodecContext *avctx,
if (m >= 3 && l == 0) // dc only has 3 pt
break;
for (n = 0; n < 3; n++) {
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
p[n] = update_prob(&s->c, r[n]);
else
p[n] = r[n];
@@ -930,33 +973,33 @@ static int decode_frame_header(AVCodecContext *avctx,
// mode updates
for (i = 0; i < 3; i++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
if (!s->s.h.keyframe && !s->s.h.intraonly) {
for (i = 0; i < 7; i++)
for (j = 0; j < 3; j++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.mv_mode[i][j] =
update_prob(&s->c, s->prob.p.mv_mode[i][j]);
if (s->s.h.filtermode == FILTER_SWITCHABLE)
for (i = 0; i < 4; i++)
for (j = 0; j < 2; j++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.filter[i][j] =
update_prob(&s->c, s->prob.p.filter[i][j]);
for (i = 0; i < 4; i++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
if (s->s.h.allowcompinter) {
- s->s.h.comppredmode = vp8_rac_get(&s->c);
+ s->s.h.comppredmode = vp89_rac_get(&s->c);
if (s->s.h.comppredmode)
- s->s.h.comppredmode += vp8_rac_get(&s->c);
+ s->s.h.comppredmode += vp89_rac_get(&s->c);
if (s->s.h.comppredmode == PRED_SWITCHABLE)
for (i = 0; i < 5; i++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.comp[i] =
update_prob(&s->c, s->prob.p.comp[i]);
} else {
@@ -965,10 +1008,10 @@ static int decode_frame_header(AVCodecContext *avctx,
if (s->s.h.comppredmode != PRED_COMPREF) {
for (i = 0; i < 5; i++) {
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.single_ref[i][0] =
update_prob(&s->c, s->prob.p.single_ref[i][0]);
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.single_ref[i][1] =
update_prob(&s->c, s->prob.p.single_ref[i][1]);
}
@@ -976,72 +1019,72 @@ static int decode_frame_header(AVCodecContext *avctx,
if (s->s.h.comppredmode != PRED_SINGLEREF) {
for (i = 0; i < 5; i++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.comp_ref[i] =
update_prob(&s->c, s->prob.p.comp_ref[i]);
}
for (i = 0; i < 4; i++)
for (j = 0; j < 9; j++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.y_mode[i][j] =
update_prob(&s->c, s->prob.p.y_mode[i][j]);
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
for (k = 0; k < 3; k++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.partition[3 - i][j][k] =
update_prob(&s->c,
s->prob.p.partition[3 - i][j][k]);
// mv fields don't use the update_prob subexp model for some reason
for (i = 0; i < 3; i++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
- s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
+ s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
for (i = 0; i < 2; i++) {
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.mv_comp[i].sign =
- (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
for (j = 0; j < 10; j++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.mv_comp[i].classes[j] =
- (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.mv_comp[i].class0 =
- (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
for (j = 0; j < 10; j++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.mv_comp[i].bits[j] =
- (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++)
for (k = 0; k < 3; k++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.mv_comp[i].class0_fp[j][k] =
- (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
for (j = 0; j < 3; j++)
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.mv_comp[i].fp[j] =
- (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
}
if (s->s.h.highprecisionmvs) {
for (i = 0; i < 2; i++) {
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.mv_comp[i].class0_hp =
- (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
- if (vp56_rac_get_prob_branchy(&s->c, 252))
+ if (vpx_rac_get_prob_branchy(&s->c, 252))
s->prob.p.mv_comp[i].hp =
- (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+ (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
}
}
}
@@ -1064,11 +1107,11 @@ static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
int bytesperpixel = s->bytesperpixel;
if (bl == BL_8X8) {
- bp = vp8_rac_get_tree(td->c, ff_vp9_partition_tree, p);
+ bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
} else if (col + hbs < s->cols) { // FIXME why not <=?
if (row + hbs < s->rows) { // FIXME why not <=?
- bp = vp8_rac_get_tree(td->c, ff_vp9_partition_tree, p);
+ bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
switch (bp) {
case PARTITION_NONE:
ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
@@ -1100,7 +1143,7 @@ static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
default:
av_assert0(0);
}
- } else if (vp56_rac_get_prob_branchy(td->c, p[1])) {
+ } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
bp = PARTITION_SPLIT;
decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
decode_sb(td, row, col + hbs, lflvl,
@@ -1111,7 +1154,7 @@ static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
}
} else if (row + hbs < s->rows) { // FIXME why not <=?
- if (vp56_rac_get_prob_branchy(td->c, p[2])) {
+ if (vpx_rac_get_prob_branchy(td->c, p[2])) {
bp = PARTITION_SPLIT;
decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
yoff += hbs * 8 * y_stride;
@@ -1190,10 +1233,8 @@ static void free_buffers(VP9Context *s)
int i;
av_freep(&s->intra_pred_data[0]);
- for (i = 0; i < s->active_tile_cols; i++) {
- av_freep(&s->td[i].b_base);
- av_freep(&s->td[i].block_base);
- }
+ for (i = 0; i < s->active_tile_cols; i++)
+ vp9_tile_data_free(&s->td[i]);
}
static av_cold int vp9_decode_free(AVCodecContext *avctx)
@@ -1202,21 +1243,22 @@ static av_cold int vp9_decode_free(AVCodecContext *avctx)
int i;
for (i = 0; i < 3; i++) {
- if (s->s.frames[i].tf.f->buf[0])
- vp9_frame_unref(avctx, &s->s.frames[i]);
+ vp9_frame_unref(avctx, &s->s.frames[i]);
av_frame_free(&s->s.frames[i].tf.f);
}
+ av_buffer_pool_uninit(&s->frame_extradata_pool);
for (i = 0; i < 8; i++) {
- if (s->s.refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->s.refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
av_frame_free(&s->s.refs[i].f);
- if (s->next_refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->next_refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
av_frame_free(&s->next_refs[i].f);
}
free_buffers(s);
- vp9_free_entries(avctx);
+#if HAVE_THREADS
+ av_freep(&s->entries);
+ ff_pthread_free(s, vp9_context_offsets);
+#endif
av_freep(&s->td);
return 0;
}
@@ -1253,17 +1295,13 @@ static int decode_tiles(AVCodecContext *avctx,
data += 4;
size -= 4;
}
- if (tile_size > size) {
- ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
+ if (tile_size > size)
return AVERROR_INVALIDDATA;
- }
- ret = ff_vp56_init_range_decoder(&td->c_b[tile_col], data, tile_size);
+ ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
if (ret < 0)
return ret;
- if (vp56_rac_get_prob_branchy(&td->c_b[tile_col], 128)) { // marker bit
- ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
+ if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
return AVERROR_INVALIDDATA;
- }
data += tile_size;
size -= tile_size;
}
@@ -1306,7 +1344,7 @@ static int decode_tiles(AVCodecContext *avctx,
decode_sb_mem(td, row, col, lflvl_ptr,
yoff2, uvoff2, BL_64X64);
} else {
- if (vpX_rac_is_end(td->c)) {
+ if (vpx_rac_is_end(td->c)) {
return AVERROR_INVALIDDATA;
}
decode_sb(td, row, col, lflvl_ptr,
@@ -1464,7 +1502,59 @@ int loopfilter_proc(AVCodecContext *avctx)
}
#endif
-static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
+static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
+{
+ AVVideoEncParams *par;
+ unsigned int tile, nb_blocks = 0;
+
+ if (s->s.h.segmentation.enabled) {
+ for (tile = 0; tile < s->active_tile_cols; tile++)
+ nb_blocks += s->td[tile].nb_block_structure;
+ }
+
+ par = av_video_enc_params_create_side_data(frame->tf.f,
+ AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
+ if (!par)
+ return AVERROR(ENOMEM);
+
+ par->qp = s->s.h.yac_qi;
+ par->delta_qp[0][0] = s->s.h.ydc_qdelta;
+ par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
+ par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
+ par->delta_qp[1][1] = s->s.h.uvac_qdelta;
+ par->delta_qp[2][1] = s->s.h.uvac_qdelta;
+
+ if (nb_blocks) {
+ unsigned int block = 0;
+ unsigned int tile, block_tile;
+
+ for (tile = 0; tile < s->active_tile_cols; tile++) {
+ VP9TileData *td = &s->td[tile];
+
+ for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
+ AVVideoBlockParams *b = av_video_enc_params_block(par, block++);
+ unsigned int row = td->block_structure[block_tile].row;
+ unsigned int col = td->block_structure[block_tile].col;
+ uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
+
+ b->src_x = col * 8;
+ b->src_y = row * 8;
+ b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
+ b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
+
+ if (s->s.h.segmentation.feat[seg_id].q_enabled) {
+ b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
+ if (s->s.h.segmentation.absolute_vals)
+ b->delta_qp -= par->qp;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
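The parameters filled in above end up on the decoded AVFrame as AV_FRAME_DATA_VIDEO_ENC_PARAMS side data. A minimal consumer-side sketch, assuming an already-configured decoder context and a decoded frame; error handling is omitted and the helper names are illustrative:

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <libavutil/video_enc_params.h>

/* Request the side data before avcodec_open2() ... */
static void request_enc_params(AVCodecContext *avctx)
{
    avctx->export_side_data |= AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS;
}

/* ... and read it back from each decoded frame. */
static void dump_enc_params(const AVFrame *frame)
{
    AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_VIDEO_ENC_PARAMS);
    if (!sd)
        return;

    AVVideoEncParams *par = (AVVideoEncParams *)sd->data;
    printf("base qp %d, %u blocks\n", (int)par->qp, par->nb_blocks);

    for (unsigned i = 0; i < par->nb_blocks; i++) {
        const AVVideoBlockParams *b = av_video_enc_params_block(par, i);
        printf("block %dx%d at (%d,%d), delta_qp %d\n",
               b->w, b->h, b->src_x, b->src_y, (int)b->delta_qp);
    }
}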
+static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
int *got_frame, AVPacket *pkt)
{
const uint8_t *data = pkt->data;
@@ -1484,16 +1574,11 @@ static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
}
if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
return ret;
- ((AVFrame *)frame)->pts = pkt->pts;
-#if FF_API_PKT_PTS
-FF_DISABLE_DEPRECATION_WARNINGS
- ((AVFrame *)frame)->pkt_pts = pkt->pts;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
- ((AVFrame *)frame)->pkt_dts = pkt->dts;
+ frame->pts = pkt->pts;
+ frame->pkt_dts = pkt->dts;
for (i = 0; i < 8; i++) {
if (s->next_refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->next_refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
if (s->s.refs[i].f->buf[0] &&
(ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
return ret;
@@ -1533,7 +1618,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
// ref frame setup
for (i = 0; i < 8; i++) {
if (s->next_refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->next_refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
if (s->s.h.refreshrefmask & (1 << i)) {
ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
} else if (s->s.refs[i].f->buf[0]) {
@@ -1610,6 +1695,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
s->td[i].eob = s->td[i].eob_base;
s->td[i].uveob[0] = s->td[i].uveob_base[0];
s->td[i].uveob[1] = s->td[i].uveob_base[1];
+ s->td[i].error_info = 0;
}
#if HAVE_THREADS
@@ -1632,10 +1718,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
if (tile_size > size)
return AVERROR_INVALIDDATA;
- ret = ff_vp56_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
+ ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
if (ret < 0)
return ret;
- if (vp56_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
+ if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
return AVERROR_INVALIDDATA;
data += tile_size;
size -= tile_size;
@@ -1666,11 +1752,22 @@ FF_ENABLE_DEPRECATION_WARNINGS
} while (s->pass++ == 1);
ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
+ if (s->td->error_info < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
+ s->td->error_info = 0;
+ return AVERROR_INVALIDDATA;
+ }
+ if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
+ ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
+ if (ret < 0)
+ return ret;
+ }
+
finish:
// ref frame setup
for (i = 0; i < 8; i++) {
if (s->s.refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->s.refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
if (s->next_refs[i].f->buf[0] &&
(ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
return ret;
@@ -1693,52 +1790,40 @@ static void vp9_decode_flush(AVCodecContext *avctx)
for (i = 0; i < 3; i++)
vp9_frame_unref(avctx, &s->s.frames[i]);
for (i = 0; i < 8; i++)
- ff_thread_release_buffer(avctx, &s->s.refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
}
-static int init_frames(AVCodecContext *avctx)
+static av_cold int vp9_decode_init(AVCodecContext *avctx)
{
VP9Context *s = avctx->priv_data;
- int i;
+ int ret;
- for (i = 0; i < 3; i++) {
+ s->last_bpp = 0;
+ s->s.h.filter.sharpness = -1;
+
+#if HAVE_THREADS
+ if (avctx->active_thread_type & FF_THREAD_SLICE) {
+ ret = ff_pthread_init(s, vp9_context_offsets);
+ if (ret < 0)
+ return ret;
+ }
+#endif
+
+ for (int i = 0; i < 3; i++) {
s->s.frames[i].tf.f = av_frame_alloc();
- if (!s->s.frames[i].tf.f) {
- vp9_decode_free(avctx);
- av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
+ if (!s->s.frames[i].tf.f)
return AVERROR(ENOMEM);
- }
}
- for (i = 0; i < 8; i++) {
- s->s.refs[i].f = av_frame_alloc();
- s->next_refs[i].f = av_frame_alloc();
- if (!s->s.refs[i].f || !s->next_refs[i].f) {
- vp9_decode_free(avctx);
- av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
+ for (int i = 0; i < 8; i++) {
+ s->s.refs[i].f = av_frame_alloc();
+ s->next_refs[i].f = av_frame_alloc();
+ if (!s->s.refs[i].f || !s->next_refs[i].f)
return AVERROR(ENOMEM);
- }
}
-
return 0;
}
-static av_cold int vp9_decode_init(AVCodecContext *avctx)
-{
- VP9Context *s = avctx->priv_data;
-
- avctx->internal->allocate_progress = 1;
- s->last_bpp = 0;
- s->s.h.filter.sharpness = -1;
-
- return init_frames(avctx);
-}
-
#if HAVE_THREADS
-static av_cold int vp9_decode_init_thread_copy(AVCodecContext *avctx)
-{
- return init_frames(avctx);
-}
-
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
int i, ret;
@@ -1754,7 +1839,7 @@ static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecCo
}
for (i = 0; i < 8; i++) {
if (s->s.refs[i].f->buf[0])
- ff_thread_release_buffer(dst, &s->s.refs[i]);
+ ff_thread_release_ext_buffer(dst, &s->s.refs[i]);
if (ssrc->next_refs[i].f->buf[0]) {
if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
return ret;
@@ -1785,23 +1870,24 @@ static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecCo
}
#endif
-AVCodec ff_vp9_decoder = {
- .name = "vp9",
- .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_VP9,
+const FFCodec ff_vp9_decoder = {
+ .p.name = "vp9",
+ CODEC_LONG_NAME("Google VP9"),
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_VP9,
.priv_data_size = sizeof(VP9Context),
.init = vp9_decode_init,
.close = vp9_decode_free,
- .decode = vp9_decode_frame,
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
- .caps_internal = FF_CODEC_CAP_SLICE_THREAD_HAS_MF,
+ FF_CODEC_DECODE_CB(vp9_decode_frame),
+ .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
+ FF_CODEC_CAP_SLICE_THREAD_HAS_MF |
+ FF_CODEC_CAP_ALLOCATE_PROGRESS,
.flush = vp9_decode_flush,
- .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
- .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
- .profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
+ UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
+ .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
.bsfs = "vp9_superframe_split",
- .hw_configs = (const AVCodecHWConfigInternal*[]) {
+ .hw_configs = (const AVCodecHWConfigInternal *const []) {
#if CONFIG_VP9_DXVA2_HWACCEL
HWACCEL_DXVA2(vp9),
#endif
@@ -1817,6 +1903,12 @@ AVCodec ff_vp9_decoder = {
#if CONFIG_VP9_VAAPI_HWACCEL
HWACCEL_VAAPI(vp9),
#endif
+#if CONFIG_VP9_VDPAU_HWACCEL
+ HWACCEL_VDPAU(vp9),
+#endif
+#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
+ HWACCEL_VIDEOTOOLBOX(vp9),
+#endif
NULL
},
};
diff --git a/media/ffvpx/libavcodec/vp9_mc_template.c b/media/ffvpx/libavcodec/vp9_mc_template.c
index 31e692f362..e654c0e5ed 100644
--- a/media/ffvpx/libavcodec/vp9_mc_template.c
+++ b/media/ffvpx/libavcodec/vp9_mc_template.c
@@ -22,9 +22,9 @@
*/
#define ROUNDED_DIV_MVx2(a, b) \
- (VP56mv) { .x = ROUNDED_DIV(a.x + b.x, 2), .y = ROUNDED_DIV(a.y + b.y, 2) }
+ (VP9mv) { .x = ROUNDED_DIV(a.x + b.x, 2), .y = ROUNDED_DIV(a.y + b.y, 2) }
#define ROUNDED_DIV_MVx4(a, b, c, d) \
- (VP56mv) { .x = ROUNDED_DIV(a.x + b.x + c.x + d.x, 4), \
+ (VP9mv) { .x = ROUNDED_DIV(a.x + b.x + c.x + d.x, 4), \
.y = ROUNDED_DIV(a.y + b.y + c.y + d.y, 4) }
static void FN(inter_pred)(VP9TileData *td)
@@ -33,11 +33,11 @@ static void FN(inter_pred)(VP9TileData *td)
{ 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4 },
{ 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4 },
};
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
VP9Block *b = td->b;
int row = td->row, col = td->col;
- ThreadFrame *tref1 = &s->s.refs[s->s.h.refidx[b->ref[0]]], *tref2;
- AVFrame *ref1 = tref1->f, *ref2;
+ const ThreadFrame *tref1 = &s->s.refs[s->s.h.refidx[b->ref[0]]], *tref2;
+ const AVFrame *ref1 = tref1->f, *ref2;
int w1 = ref1->width, h1 = ref1->height, w2, h2;
ptrdiff_t ls_y = td->y_stride, ls_uv = td->uv_stride;
int bytesperpixel = BYTES_PER_PIXEL;
@@ -51,7 +51,7 @@ static void FN(inter_pred)(VP9TileData *td)
// y inter pred
if (b->bs > BS_8x8) {
- VP56mv uvmv;
+ VP9mv uvmv;
#if SCALED == 0
if (b->bs == BS_8x4) {
diff --git a/media/ffvpx/libavcodec/vp9_parser.c b/media/ffvpx/libavcodec/vp9_parser.c
index c957a75667..ffcb93505f 100644
--- a/media/ffvpx/libavcodec/vp9_parser.c
+++ b/media/ffvpx/libavcodec/vp9_parser.c
@@ -64,7 +64,7 @@ static int parse(AVCodecParserContext *ctx,
return size;
}
-AVCodecParser ff_vp9_parser = {
+const AVCodecParser ff_vp9_parser = {
.codec_ids = { AV_CODEC_ID_VP9 },
.parser_parse = parse,
};
diff --git a/media/ffvpx/libavcodec/vp9_superframe_split_bsf.c b/media/ffvpx/libavcodec/vp9_superframe_split_bsf.c
index 13e85c3ca4..cddd48119c 100644
--- a/media/ffvpx/libavcodec/vp9_superframe_split_bsf.c
+++ b/media/ffvpx/libavcodec/vp9_superframe_split_bsf.c
@@ -24,8 +24,8 @@
#include <stddef.h>
-#include "avcodec.h"
#include "bsf.h"
+#include "bsf_internal.h"
#include "bytestream.h"
#include "get_bits.h"
@@ -51,6 +51,9 @@ static int vp9_superframe_split_filter(AVBSFContext *ctx, AVPacket *out)
return ret;
in = s->buffer_pkt;
+ if (!in->size)
+ goto passthrough;
+
marker = in->data[in->size - 1];
if ((marker & 0xe0) == 0xc0) {
int length_size = 1 + ((marker >> 3) & 0x3);
@@ -70,7 +73,7 @@ static int vp9_superframe_split_filter(AVBSFContext *ctx, AVPacket *out)
frame_size |= bytestream2_get_byte(&bc) << (j * 8);
total_size += frame_size;
- if (frame_size < 0 || total_size > in->size - idx_size) {
+ if (frame_size <= 0 || total_size > in->size - idx_size) {
av_log(ctx, AV_LOG_ERROR,
"Invalid frame size in a superframe: %d\n", frame_size);
ret = AVERROR(EINVAL);
@@ -121,6 +124,7 @@ static int vp9_superframe_split_filter(AVBSFContext *ctx, AVPacket *out)
out->pts = AV_NOPTS_VALUE;
} else {
+passthrough:
av_packet_move_ref(out, s->buffer_pkt);
}
@@ -155,12 +159,12 @@ static void vp9_superframe_split_uninit(AVBSFContext *ctx)
av_packet_free(&s->buffer_pkt);
}
-const AVBitStreamFilter ff_vp9_superframe_split_bsf = {
- .name = "vp9_superframe_split",
+const FFBitStreamFilter ff_vp9_superframe_split_bsf = {
+ .p.name = "vp9_superframe_split",
+ .p.codec_ids = (const enum AVCodecID []){ AV_CODEC_ID_VP9, AV_CODEC_ID_NONE },
.priv_data_size = sizeof(VP9SFSplitContext),
.init = vp9_superframe_split_init,
.flush = vp9_superframe_split_flush,
.close = vp9_superframe_split_uninit,
.filter = vp9_superframe_split_filter,
- .codec_ids = (const enum AVCodecID []){ AV_CODEC_ID_VP9, AV_CODEC_ID_NONE },
};
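The marker test in this filter follows the VP9 superframe layout: an index trails the packet, consisting of a marker byte (top three bits 110, encoding the frame count and size-field width), little-endian per-frame sizes, and the same marker byte repeated. A standalone sketch of that parse, using plain pointers instead of the bytestream2 helpers; parse_superframe_index() and its test input are illustrative only:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Returns the number of sub-frames found, filling sizes[] (max 8 entries),
 * or 0 if the packet does not end in a superframe index. */
static int parse_superframe_index(const uint8_t *data, size_t size,
                                  uint32_t sizes[8])
{
    if (!size)
        return 0;

    uint8_t marker = data[size - 1];
    if ((marker & 0xe0) != 0xc0)
        return 0;                                  /* no index present */

    int length_size   = 1 + ((marker >> 3) & 0x3); /* bytes per size field */
    int nb_frames     = 1 + (marker & 0x7);
    size_t idx_size   = 2 + nb_frames * length_size;

    if (size < idx_size || data[size - idx_size] != marker)
        return 0;                       /* index must start with the marker too */

    const uint8_t *p = data + size - idx_size + 1;
    for (int i = 0; i < nb_frames; i++) {
        uint32_t frame_size = 0;
        for (int j = 0; j < length_size; j++)      /* little-endian size field */
            frame_size |= (uint32_t)*p++ << (j * 8);
        sizes[i] = frame_size;
    }
    return nb_frames;
}

int main(void)
{
    /* Two 1-byte "frames" followed by a 2-frame index with 1-byte sizes. */
    const uint8_t pkt[] = { 0xAA, 0xBB, 0xc1, 0x01, 0x01, 0xc1 };
    uint32_t sizes[8];
    printf("%d sub-frames\n", parse_superframe_index(pkt, sizeof(pkt), sizes));
    return 0;
}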
diff --git a/media/ffvpx/libavcodec/vp9block.c b/media/ffvpx/libavcodec/vp9block.c
index 1c3f7a7225..5743f048cc 100644
--- a/media/ffvpx/libavcodec/vp9block.c
+++ b/media/ffvpx/libavcodec/vp9block.c
@@ -23,13 +23,12 @@
#include "libavutil/avassert.h"
-#include "avcodec.h"
-#include "internal.h"
-#include "videodsp.h"
-#include "vp56.h"
+#include "threadframe.h"
+#include "vp89_rac.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dec.h"
+#include "vpx_rac.h"
static av_always_inline void setctx_2d(uint8_t *ptr, int w, int h,
ptrdiff_t stride, int v)
@@ -89,7 +88,7 @@ static void decode_mode(VP9TileData *td)
TX_32X32, TX_32X32, TX_32X32, TX_32X32, TX_16X16, TX_16X16,
TX_16X16, TX_8X8, TX_8X8, TX_8X8, TX_4X4, TX_4X4, TX_4X4
};
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
VP9Block *b = td->b;
int row = td->row, col = td->col, row7 = td->row7;
enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
@@ -102,10 +101,11 @@ static void decode_mode(VP9TileData *td)
b->seg_id = 0;
} else if (s->s.h.keyframe || s->s.h.intraonly) {
b->seg_id = !s->s.h.segmentation.update_map ? 0 :
- vp8_rac_get_tree(td->c, ff_vp9_segmentation_tree, s->s.h.segmentation.prob);
+ vp89_rac_get_tree(td->c, ff_vp9_segmentation_tree,
+ s->s.h.segmentation.prob);
} else if (!s->s.h.segmentation.update_map ||
(s->s.h.segmentation.temporal &&
- vp56_rac_get_prob_branchy(td->c,
+ vpx_rac_get_prob_branchy(td->c,
s->s.h.segmentation.pred_prob[s->above_segpred_ctx[col] +
td->left_segpred_ctx[row7]]))) {
if (!s->s.h.errorres && s->s.frames[REF_FRAME_SEGMAP].segmentation_map) {
@@ -128,8 +128,8 @@ static void decode_mode(VP9TileData *td)
memset(&s->above_segpred_ctx[col], 1, w4);
memset(&td->left_segpred_ctx[row7], 1, h4);
} else {
- b->seg_id = vp8_rac_get_tree(td->c, ff_vp9_segmentation_tree,
- s->s.h.segmentation.prob);
+ b->seg_id = vp89_rac_get_tree(td->c, ff_vp9_segmentation_tree,
+ s->s.h.segmentation.prob);
memset(&s->above_segpred_ctx[col], 0, w4);
memset(&td->left_segpred_ctx[row7], 0, h4);
@@ -144,7 +144,7 @@ static void decode_mode(VP9TileData *td)
s->s.h.segmentation.feat[b->seg_id].skip_enabled;
if (!b->skip) {
int c = td->left_skip_ctx[row7] + s->above_skip_ctx[col];
- b->skip = vp56_rac_get_prob(td->c, s->prob.p.skip[c]);
+ b->skip = vpx_rac_get_prob(td->c, s->prob.p.skip[c]);
td->counts.skip[c][b->skip]++;
}
@@ -162,7 +162,7 @@ static void decode_mode(VP9TileData *td)
c = have_a ? 2 * s->above_intra_ctx[col] :
have_l ? 2 * td->left_intra_ctx[row7] : 0;
}
- bit = vp56_rac_get_prob(td->c, s->prob.p.intra[c]);
+ bit = vpx_rac_get_prob(td->c, s->prob.p.intra[c]);
td->counts.intra[c][bit]++;
b->intra = !bit;
}
@@ -187,22 +187,22 @@ static void decode_mode(VP9TileData *td)
}
switch (max_tx) {
case TX_32X32:
- b->tx = vp56_rac_get_prob(td->c, s->prob.p.tx32p[c][0]);
+ b->tx = vpx_rac_get_prob(td->c, s->prob.p.tx32p[c][0]);
if (b->tx) {
- b->tx += vp56_rac_get_prob(td->c, s->prob.p.tx32p[c][1]);
+ b->tx += vpx_rac_get_prob(td->c, s->prob.p.tx32p[c][1]);
if (b->tx == 2)
- b->tx += vp56_rac_get_prob(td->c, s->prob.p.tx32p[c][2]);
+ b->tx += vpx_rac_get_prob(td->c, s->prob.p.tx32p[c][2]);
}
td->counts.tx32p[c][b->tx]++;
break;
case TX_16X16:
- b->tx = vp56_rac_get_prob(td->c, s->prob.p.tx16p[c][0]);
+ b->tx = vpx_rac_get_prob(td->c, s->prob.p.tx16p[c][0]);
if (b->tx)
- b->tx += vp56_rac_get_prob(td->c, s->prob.p.tx16p[c][1]);
+ b->tx += vpx_rac_get_prob(td->c, s->prob.p.tx16p[c][1]);
td->counts.tx16p[c][b->tx]++;
break;
case TX_8X8:
- b->tx = vp56_rac_get_prob(td->c, s->prob.p.tx8p[c]);
+ b->tx = vpx_rac_get_prob(td->c, s->prob.p.tx8p[c]);
td->counts.tx8p[c][b->tx]++;
break;
case TX_4X4:
@@ -223,11 +223,11 @@ static void decode_mode(VP9TileData *td)
// necessary, they're just there to make the code slightly
// simpler for now
b->mode[0] =
- a[0] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- ff_vp9_default_kf_ymode_probs[a[0]][l[0]]);
+ a[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[a[0]][l[0]]);
if (b->bs != BS_8x4) {
- b->mode[1] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- ff_vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
+ b->mode[1] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
l[0] =
a[1] = b->mode[1];
} else {
@@ -237,11 +237,11 @@ static void decode_mode(VP9TileData *td)
}
if (b->bs != BS_4x8) {
b->mode[2] =
- a[0] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- ff_vp9_default_kf_ymode_probs[a[0]][l[1]]);
+ a[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[a[0]][l[1]]);
if (b->bs != BS_8x4) {
- b->mode[3] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- ff_vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
+ b->mode[3] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
l[1] =
a[1] = b->mode[3];
} else {
@@ -256,8 +256,8 @@ static void decode_mode(VP9TileData *td)
b->mode[3] = b->mode[1];
}
} else {
- b->mode[0] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- ff_vp9_default_kf_ymode_probs[*a][*l]);
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_ymode_probs[*a][*l]);
b->mode[3] =
b->mode[2] =
b->mode[1] = b->mode[0];
@@ -265,28 +265,28 @@ static void decode_mode(VP9TileData *td)
memset(a, b->mode[0], ff_vp9_bwh_tab[0][b->bs][0]);
memset(l, b->mode[0], ff_vp9_bwh_tab[0][b->bs][1]);
}
- b->uvmode = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- ff_vp9_default_kf_uvmode_probs[b->mode[3]]);
+ b->uvmode = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ ff_vp9_default_kf_uvmode_probs[b->mode[3]]);
} else if (b->intra) {
b->comp = 0;
if (b->bs > BS_8x8) {
- b->mode[0] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- s->prob.p.y_mode[0]);
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[0]);
td->counts.y_mode[0][b->mode[0]]++;
if (b->bs != BS_8x4) {
- b->mode[1] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- s->prob.p.y_mode[0]);
+ b->mode[1] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[0]);
td->counts.y_mode[0][b->mode[1]]++;
} else {
b->mode[1] = b->mode[0];
}
if (b->bs != BS_4x8) {
- b->mode[2] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- s->prob.p.y_mode[0]);
+ b->mode[2] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[0]);
td->counts.y_mode[0][b->mode[2]]++;
if (b->bs != BS_8x4) {
- b->mode[3] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- s->prob.p.y_mode[0]);
+ b->mode[3] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[0]);
td->counts.y_mode[0][b->mode[3]]++;
} else {
b->mode[3] = b->mode[2];
@@ -301,15 +301,15 @@ static void decode_mode(VP9TileData *td)
};
int sz = size_group[b->bs];
- b->mode[0] = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- s->prob.p.y_mode[sz]);
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.y_mode[sz]);
b->mode[1] =
b->mode[2] =
b->mode[3] = b->mode[0];
td->counts.y_mode[sz][b->mode[3]]++;
}
- b->uvmode = vp8_rac_get_tree(td->c, ff_vp9_intramode_tree,
- s->prob.p.uv_mode[b->mode[3]]);
+ b->uvmode = vp89_rac_get_tree(td->c, ff_vp9_intramode_tree,
+ s->prob.p.uv_mode[b->mode[3]]);
td->counts.uv_mode[b->mode[3]][b->uvmode]++;
} else {
static const uint8_t inter_mode_ctx_lut[14][14] = {
@@ -367,7 +367,7 @@ static void decode_mode(VP9TileData *td)
} else {
c = 1;
}
- b->comp = vp56_rac_get_prob(td->c, s->prob.p.comp[c]);
+ b->comp = vpx_rac_get_prob(td->c, s->prob.p.comp[c]);
td->counts.comp[c][b->comp]++;
}
@@ -439,7 +439,7 @@ static void decode_mode(VP9TileData *td)
} else {
c = 2;
}
- bit = vp56_rac_get_prob(td->c, s->prob.p.comp_ref[c]);
+ bit = vpx_rac_get_prob(td->c, s->prob.p.comp_ref[c]);
b->ref[var_idx] = s->s.h.varcompref[bit];
td->counts.comp_ref[c][bit]++;
} else /* single reference */ {
@@ -479,7 +479,7 @@ static void decode_mode(VP9TileData *td)
} else {
c = 2;
}
- bit = vp56_rac_get_prob(td->c, s->prob.p.single_ref[c][0]);
+ bit = vpx_rac_get_prob(td->c, s->prob.p.single_ref[c][0]);
td->counts.single_ref[c][0][bit]++;
if (!bit) {
b->ref[0] = 0;
@@ -566,7 +566,7 @@ static void decode_mode(VP9TileData *td)
} else {
c = 2;
}
- bit = vp56_rac_get_prob(td->c, s->prob.p.single_ref[c][1]);
+ bit = vpx_rac_get_prob(td->c, s->prob.p.single_ref[c][1]);
td->counts.single_ref[c][1][bit]++;
b->ref[0] = 1 + bit;
}
@@ -589,8 +589,8 @@ static void decode_mode(VP9TileData *td)
int c = inter_mode_ctx_lut[s->above_mode_ctx[col + off[b->bs]]]
[td->left_mode_ctx[row7 + off[b->bs]]];
- b->mode[0] = vp8_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
- s->prob.p.mv_mode[c]);
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
b->mode[1] =
b->mode[2] =
b->mode[3] = b->mode[0];
@@ -614,8 +614,8 @@ static void decode_mode(VP9TileData *td)
c = 3;
}
- filter_id = vp8_rac_get_tree(td->c, ff_vp9_filter_tree,
- s->prob.p.filter[c]);
+ filter_id = vp89_rac_get_tree(td->c, ff_vp9_filter_tree,
+ s->prob.p.filter[c]);
td->counts.filter[c][filter_id]++;
b->filter = ff_vp9_filter_lut[filter_id];
} else {
@@ -625,14 +625,14 @@ static void decode_mode(VP9TileData *td)
if (b->bs > BS_8x8) {
int c = inter_mode_ctx_lut[s->above_mode_ctx[col]][td->left_mode_ctx[row7]];
- b->mode[0] = vp8_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
- s->prob.p.mv_mode[c]);
+ b->mode[0] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
td->counts.mv_mode[c][b->mode[0] - 10]++;
ff_vp9_fill_mv(td, b->mv[0], b->mode[0], 0);
if (b->bs != BS_8x4) {
- b->mode[1] = vp8_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
- s->prob.p.mv_mode[c]);
+ b->mode[1] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
td->counts.mv_mode[c][b->mode[1] - 10]++;
ff_vp9_fill_mv(td, b->mv[1], b->mode[1], 1);
} else {
@@ -642,14 +642,14 @@ static void decode_mode(VP9TileData *td)
}
if (b->bs != BS_4x8) {
- b->mode[2] = vp8_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
- s->prob.p.mv_mode[c]);
+ b->mode[2] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
td->counts.mv_mode[c][b->mode[2] - 10]++;
ff_vp9_fill_mv(td, b->mv[2], b->mode[2], 2);
if (b->bs != BS_8x4) {
- b->mode[3] = vp8_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
- s->prob.p.mv_mode[c]);
+ b->mode[3] = vp89_rac_get_tree(td->c, ff_vp9_inter_mode_tree,
+ s->prob.p.mv_mode[c]);
td->counts.mv_mode[c][b->mode[3] - 10]++;
ff_vp9_fill_mv(td, b->mv[3], b->mode[3], 3);
} else {
@@ -802,11 +802,11 @@ static void decode_mode(VP9TileData *td)
// FIXME merge cnt/eob arguments?
static av_always_inline int
-decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
+decode_coeffs_b_generic(VPXRangeCoder *c, int16_t *coef, int n_coeffs,
int is_tx32x32, int is8bitsperpixel, int bpp, unsigned (*cnt)[6][3],
- unsigned (*eob)[6][2], uint8_t (*p)[6][11],
+ unsigned (*eob)[6][2], const uint8_t (*p)[6][11],
int nnz, const int16_t *scan, const int16_t (*nb)[2],
- const int16_t *band_counts, int16_t *qmul)
+ const int16_t *band_counts, const int16_t *qmul)
{
int i = 0, band = 0, band_left = band_counts[band];
const uint8_t *tp = p[0][nnz];
@@ -815,13 +815,13 @@ decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
do {
int val, rc;
- val = vp56_rac_get_prob_branchy(c, tp[0]); // eob
+ val = vpx_rac_get_prob_branchy(c, tp[0]); // eob
eob[band][nnz][val]++;
if (!val)
break;
skip_eob:
- if (!vp56_rac_get_prob_branchy(c, tp[1])) { // zero
+ if (!vpx_rac_get_prob_branchy(c, tp[1])) { // zero
cnt[band][nnz][0]++;
if (!--band_left)
band_left = band_counts[++band];
@@ -834,70 +834,70 @@ skip_eob:
}
rc = scan[i];
- if (!vp56_rac_get_prob_branchy(c, tp[2])) { // one
+ if (!vpx_rac_get_prob_branchy(c, tp[2])) { // one
cnt[band][nnz][1]++;
val = 1;
cache[rc] = 1;
} else {
cnt[band][nnz][2]++;
- if (!vp56_rac_get_prob_branchy(c, tp[3])) { // 2, 3, 4
- if (!vp56_rac_get_prob_branchy(c, tp[4])) {
+ if (!vpx_rac_get_prob_branchy(c, tp[3])) { // 2, 3, 4
+ if (!vpx_rac_get_prob_branchy(c, tp[4])) {
cache[rc] = val = 2;
} else {
- val = 3 + vp56_rac_get_prob(c, tp[5]);
+ val = 3 + vpx_rac_get_prob(c, tp[5]);
cache[rc] = 3;
}
- } else if (!vp56_rac_get_prob_branchy(c, tp[6])) { // cat1/2
+ } else if (!vpx_rac_get_prob_branchy(c, tp[6])) { // cat1/2
cache[rc] = 4;
- if (!vp56_rac_get_prob_branchy(c, tp[7])) {
- val = vp56_rac_get_prob(c, 159) + 5;
+ if (!vpx_rac_get_prob_branchy(c, tp[7])) {
+ val = vpx_rac_get_prob(c, 159) + 5;
} else {
- val = (vp56_rac_get_prob(c, 165) << 1) + 7;
- val += vp56_rac_get_prob(c, 145);
+ val = (vpx_rac_get_prob(c, 165) << 1) + 7;
+ val += vpx_rac_get_prob(c, 145);
}
} else { // cat 3-6
cache[rc] = 5;
- if (!vp56_rac_get_prob_branchy(c, tp[8])) {
- if (!vp56_rac_get_prob_branchy(c, tp[9])) {
- val = 11 + (vp56_rac_get_prob(c, 173) << 2);
- val += (vp56_rac_get_prob(c, 148) << 1);
- val += vp56_rac_get_prob(c, 140);
+ if (!vpx_rac_get_prob_branchy(c, tp[8])) {
+ if (!vpx_rac_get_prob_branchy(c, tp[9])) {
+ val = 11 + (vpx_rac_get_prob(c, 173) << 2);
+ val += (vpx_rac_get_prob(c, 148) << 1);
+ val += vpx_rac_get_prob(c, 140);
} else {
- val = 19 + (vp56_rac_get_prob(c, 176) << 3);
- val += (vp56_rac_get_prob(c, 155) << 2);
- val += (vp56_rac_get_prob(c, 140) << 1);
- val += vp56_rac_get_prob(c, 135);
+ val = 19 + (vpx_rac_get_prob(c, 176) << 3);
+ val += (vpx_rac_get_prob(c, 155) << 2);
+ val += (vpx_rac_get_prob(c, 140) << 1);
+ val += vpx_rac_get_prob(c, 135);
}
- } else if (!vp56_rac_get_prob_branchy(c, tp[10])) {
- val = (vp56_rac_get_prob(c, 180) << 4) + 35;
- val += (vp56_rac_get_prob(c, 157) << 3);
- val += (vp56_rac_get_prob(c, 141) << 2);
- val += (vp56_rac_get_prob(c, 134) << 1);
- val += vp56_rac_get_prob(c, 130);
+ } else if (!vpx_rac_get_prob_branchy(c, tp[10])) {
+ val = (vpx_rac_get_prob(c, 180) << 4) + 35;
+ val += (vpx_rac_get_prob(c, 157) << 3);
+ val += (vpx_rac_get_prob(c, 141) << 2);
+ val += (vpx_rac_get_prob(c, 134) << 1);
+ val += vpx_rac_get_prob(c, 130);
} else {
val = 67;
if (!is8bitsperpixel) {
if (bpp == 12) {
- val += vp56_rac_get_prob(c, 255) << 17;
- val += vp56_rac_get_prob(c, 255) << 16;
+ val += vpx_rac_get_prob(c, 255) << 17;
+ val += vpx_rac_get_prob(c, 255) << 16;
}
- val += (vp56_rac_get_prob(c, 255) << 15);
- val += (vp56_rac_get_prob(c, 255) << 14);
+ val += (vpx_rac_get_prob(c, 255) << 15);
+ val += (vpx_rac_get_prob(c, 255) << 14);
}
- val += (vp56_rac_get_prob(c, 254) << 13);
- val += (vp56_rac_get_prob(c, 254) << 12);
- val += (vp56_rac_get_prob(c, 254) << 11);
- val += (vp56_rac_get_prob(c, 252) << 10);
- val += (vp56_rac_get_prob(c, 249) << 9);
- val += (vp56_rac_get_prob(c, 243) << 8);
- val += (vp56_rac_get_prob(c, 230) << 7);
- val += (vp56_rac_get_prob(c, 196) << 6);
- val += (vp56_rac_get_prob(c, 177) << 5);
- val += (vp56_rac_get_prob(c, 153) << 4);
- val += (vp56_rac_get_prob(c, 140) << 3);
- val += (vp56_rac_get_prob(c, 133) << 2);
- val += (vp56_rac_get_prob(c, 130) << 1);
- val += vp56_rac_get_prob(c, 129);
+ val += (vpx_rac_get_prob(c, 254) << 13);
+ val += (vpx_rac_get_prob(c, 254) << 12);
+ val += (vpx_rac_get_prob(c, 254) << 11);
+ val += (vpx_rac_get_prob(c, 252) << 10);
+ val += (vpx_rac_get_prob(c, 249) << 9);
+ val += (vpx_rac_get_prob(c, 243) << 8);
+ val += (vpx_rac_get_prob(c, 230) << 7);
+ val += (vpx_rac_get_prob(c, 196) << 6);
+ val += (vpx_rac_get_prob(c, 177) << 5);
+ val += (vpx_rac_get_prob(c, 153) << 4);
+ val += (vpx_rac_get_prob(c, 140) << 3);
+ val += (vpx_rac_get_prob(c, 133) << 2);
+ val += (vpx_rac_get_prob(c, 130) << 1);
+ val += vpx_rac_get_prob(c, 129);
}
}
}
@@ -911,9 +911,9 @@ skip_eob:
if (!--band_left)
band_left = band_counts[++band];
if (is_tx32x32)
- STORE_COEF(coef, rc, (int)((vp8_rac_get(c) ? -val : val) * (unsigned)qmul[!!i]) / 2);
+ STORE_COEF(coef, rc, (int)((vp89_rac_get(c) ? -val : val) * (unsigned)qmul[!!i]) / 2);
else
- STORE_COEF(coef, rc, (vp8_rac_get(c) ? -val : val) * (unsigned)qmul[!!i]);
+ STORE_COEF(coef, rc, (vp89_rac_get(c) ? -val : val) * (unsigned)qmul[!!i]);
nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
tp = p[band][nnz];
} while (++i < n_coeffs);
@@ -923,9 +923,9 @@ skip_eob:
static int decode_coeffs_b_8bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
- uint8_t (*p)[6][11], int nnz, const int16_t *scan,
+ const uint8_t (*p)[6][11], int nnz, const int16_t *scan,
const int16_t (*nb)[2], const int16_t *band_counts,
- int16_t *qmul)
+ const int16_t *qmul)
{
return decode_coeffs_b_generic(td->c, coef, n_coeffs, 0, 1, 8, cnt, eob, p,
nnz, scan, nb, band_counts, qmul);
@@ -933,9 +933,9 @@ static int decode_coeffs_b_8bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
static int decode_coeffs_b32_8bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
- uint8_t (*p)[6][11], int nnz, const int16_t *scan,
+ const uint8_t (*p)[6][11], int nnz, const int16_t *scan,
const int16_t (*nb)[2], const int16_t *band_counts,
- int16_t *qmul)
+ const int16_t *qmul)
{
return decode_coeffs_b_generic(td->c, coef, n_coeffs, 1, 1, 8, cnt, eob, p,
nnz, scan, nb, band_counts, qmul);
@@ -943,9 +943,9 @@ static int decode_coeffs_b32_8bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
static int decode_coeffs_b_16bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
- uint8_t (*p)[6][11], int nnz, const int16_t *scan,
+ const uint8_t (*p)[6][11], int nnz, const int16_t *scan,
const int16_t (*nb)[2], const int16_t *band_counts,
- int16_t *qmul)
+ const int16_t *qmul)
{
return decode_coeffs_b_generic(td->c, coef, n_coeffs, 0, 0, td->s->s.h.bpp, cnt, eob, p,
nnz, scan, nb, band_counts, qmul);
@@ -953,9 +953,9 @@ static int decode_coeffs_b_16bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
static int decode_coeffs_b32_16bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
- uint8_t (*p)[6][11], int nnz, const int16_t *scan,
+ const uint8_t (*p)[6][11], int nnz, const int16_t *scan,
const int16_t (*nb)[2], const int16_t *band_counts,
- int16_t *qmul)
+ const int16_t *qmul)
{
return decode_coeffs_b_generic(td->c, coef, n_coeffs, 1, 0, td->s->s.h.bpp, cnt, eob, p,
nnz, scan, nb, band_counts, qmul);
@@ -963,17 +963,17 @@ static int decode_coeffs_b32_16bpp(VP9TileData *td, int16_t *coef, int n_coeffs,
static av_always_inline int decode_coeffs(VP9TileData *td, int is8bitsperpixel)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
VP9Block *b = td->b;
int row = td->row, col = td->col;
- uint8_t (*p)[6][11] = s->prob.coef[b->tx][0 /* y */][!b->intra];
+ const uint8_t (*p)[6][11] = s->prob.coef[b->tx][0 /* y */][!b->intra];
unsigned (*c)[6][3] = td->counts.coef[b->tx][0 /* y */][!b->intra];
unsigned (*e)[6][2] = td->counts.eob[b->tx][0 /* y */][!b->intra];
int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1;
int end_x = FFMIN(2 * (s->cols - col), w4);
int end_y = FFMIN(2 * (s->rows - row), h4);
int n, pl, x, y, ret;
- int16_t (*qmul)[2] = s->s.h.segmentation.feat[b->seg_id].qmul;
+ const int16_t (*qmul)[2] = s->s.h.segmentation.feat[b->seg_id].qmul;
int tx = 4 * s->s.h.lossless + b->tx;
const int16_t * const *yscans = ff_vp9_scans[tx];
const int16_t (* const * ynbs)[2] = ff_vp9_scans_nb[tx];
@@ -1264,7 +1264,7 @@ void ff_vp9_decode_block(VP9TileData *td, int row, int col,
VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
enum BlockLevel bl, enum BlockPartition bp)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
VP9Block *b = td->b;
enum BlockSize bs = bl * 3 + bp;
int bytesperpixel = s->bytesperpixel;
@@ -1290,6 +1290,14 @@ void ff_vp9_decode_block(VP9TileData *td, int row, int col,
b->uvtx = b->tx - ((s->ss_h && w4 * 2 == (1 << b->tx)) ||
(s->ss_v && h4 * 2 == (1 << b->tx)));
+ if (td->block_structure) {
+ td->block_structure[td->nb_block_structure].row = row;
+ td->block_structure[td->nb_block_structure].col = col;
+ td->block_structure[td->nb_block_structure].block_size_idx_x = av_log2(w4);
+ td->block_structure[td->nb_block_structure].block_size_idx_y = av_log2(h4);
+ td->nb_block_structure++;
+ }
+
if (!b->skip) {
int has_coeffs;
diff --git a/media/ffvpx/libavcodec/vp9dec.h b/media/ffvpx/libavcodec/vp9dec.h
index 66573edc79..de7aba0458 100644
--- a/media/ffvpx/libavcodec/vp9dec.h
+++ b/media/ffvpx/libavcodec/vp9dec.h
@@ -29,12 +29,18 @@
#include <stdatomic.h>
#include "libavutil/buffer.h"
+#include "libavutil/mem_internal.h"
#include "libavutil/thread.h"
#include "libavutil/internal.h"
+#include "get_bits.h"
+#include "videodsp.h"
#include "vp9.h"
#include "vp9dsp.h"
#include "vp9shared.h"
+#include "vpx_rac.h"
+
+#define REF_INVALID_SCALE 0xFFFF
enum MVJoint {
MV_JOINT_ZERO,
@@ -79,7 +85,7 @@ typedef struct VP9Filter {
typedef struct VP9Block {
uint8_t seg_id, intra, comp, ref[2], mode[4], uvmode, skip;
enum FilterMode filter;
- VP56mv mv[4 /* b_idx */][2 /* ref */];
+ VP9mv mv[4 /* b_idx */][2 /* ref */];
enum BlockSize bs;
enum TxfmMode tx, uvtx;
enum BlockLevel bl;
@@ -95,13 +101,14 @@ typedef struct VP9Context {
VP9DSPContext dsp;
VideoDSPContext vdsp;
GetBitContext gb;
- VP56RangeCoder c;
+ VPXRangeCoder c;
int pass, active_tile_cols;
#if HAVE_THREADS
pthread_mutex_t progress_mutex;
pthread_cond_t progress_cond;
atomic_int *entries;
+ unsigned pthread_init_cnt;
#endif
uint8_t ss_h, ss_v;
@@ -142,7 +149,7 @@ typedef struct VP9Context {
uint8_t *above_comp_ctx; // 1bit
uint8_t *above_ref_ctx; // 2bit
uint8_t *above_filter_ctx;
- VP56mv (*above_mv_ctx)[2];
+ VP9mv (*above_mv_ctx)[2];
// whole-frame cache
uint8_t *intra_pred_data[3];
@@ -152,14 +159,16 @@ typedef struct VP9Context {
int block_alloc_using_2pass;
uint16_t mvscale[3][2];
uint8_t mvstep[3][2];
+
+ // frame specific buffer pools
+ AVBufferPool *frame_extradata_pool;
+ int frame_extradata_pool_size;
} VP9Context;
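frame_extradata_pool is an AVBufferPool that the decoder recreates whenever the required per-frame allocation size changes (tracked in frame_extradata_pool_size). A minimal sketch of that general pool pattern, independent of the VP9 code paths; the ExtradataAlloc struct and get_extradata() helper are illustrative:

#include <string.h>
#include <libavutil/buffer.h>

typedef struct ExtradataAlloc {
    AVBufferPool *pool;
    int           pool_size;
} ExtradataAlloc;

/* Get a zeroed buffer of `size` bytes, recreating the pool if `size` grew. */
static AVBufferRef *get_extradata(ExtradataAlloc *ea, int size)
{
    if (!ea->pool || ea->pool_size < size) {
        av_buffer_pool_uninit(&ea->pool);
        ea->pool = av_buffer_pool_init(size, NULL);
        if (!ea->pool)
            return NULL;
        ea->pool_size = size;
    }

    AVBufferRef *buf = av_buffer_pool_get(ea->pool);
    if (buf)
        memset(buf->data, 0, size);
    return buf;
}

/* Buffers go back to the pool on av_buffer_unref(); the pool itself is
 * released with av_buffer_pool_uninit() once all buffers are returned. */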
struct VP9TileData {
- //VP9Context should be const, but because of the threading API(generates
- //a lot of warnings) it's not.
- VP9Context *s;
- VP56RangeCoder *c_b;
- VP56RangeCoder *c;
+ const VP9Context *s;
+ VPXRangeCoder *c_b;
+ VPXRangeCoder *c;
int row, row7, col, col7;
uint8_t *dst[3];
ptrdiff_t y_stride, uv_stride;
@@ -201,7 +210,7 @@ struct VP9TileData {
// contextual (left) cache
DECLARE_ALIGNED(16, uint8_t, left_y_nnz_ctx)[16];
DECLARE_ALIGNED(16, uint8_t, left_mode_ctx)[16];
- DECLARE_ALIGNED(16, VP56mv, left_mv_ctx)[16][2];
+ DECLARE_ALIGNED(16, VP9mv, left_mv_ctx)[16][2];
DECLARE_ALIGNED(16, uint8_t, left_uv_nnz_ctx)[2][16];
DECLARE_ALIGNED(8, uint8_t, left_partition_ctx)[8];
DECLARE_ALIGNED(8, uint8_t, left_skip_ctx)[8];
@@ -217,9 +226,19 @@ struct VP9TileData {
struct { int x, y; } min_mv, max_mv;
int16_t *block_base, *block, *uvblock_base[2], *uvblock[2];
uint8_t *eob_base, *uveob_base[2], *eob, *uveob[2];
+
+ // per-tile decode error status, checked after all tiles have run
+ int error_info;
+ struct {
+ unsigned int row:13;
+ unsigned int col:13;
+ unsigned int block_size_idx_x:2;
+ unsigned int block_size_idx_y:2;
+ } *block_structure;
+ unsigned int nb_block_structure;
};
-void ff_vp9_fill_mv(VP9TileData *td, VP56mv *mv, int mode, int sb);
+void ff_vp9_fill_mv(VP9TileData *td, VP9mv *mv, int mode, int sb);
void ff_vp9_adapt_probs(VP9Context *s);
diff --git a/media/ffvpx/libavcodec/vp9dsp.c b/media/ffvpx/libavcodec/vp9dsp.c
index b8fa3be36d..8de952343a 100644
--- a/media/ffvpx/libavcodec/vp9dsp.c
+++ b/media/ffvpx/libavcodec/vp9dsp.c
@@ -21,8 +21,12 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
+
+#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
-#include "libavutil/common.h"
+#include "libavutil/mem_internal.h"
+
#include "vp9dsp.h"
const DECLARE_ALIGNED(16, int16_t, ff_vp9_subpel_filters)[3][16][8] = {
@@ -92,13 +96,15 @@ av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
ff_vp9dsp_init_12(dsp);
}
- #if ARCH_AARCH64 == 1
- ff_vp9dsp_init_aarch64(dsp, bpp);
- #elif ARCH_ARM == 1
- ff_vp9dsp_init_arm(dsp, bpp);
- #elif ARCH_X86 == 1
- ff_vp9dsp_init_x86(dsp, bpp, bitexact);
- #elif ARCH_MIPS == 1
- ff_vp9dsp_init_mips(dsp, bpp);
- #endif
+#if ARCH_AARCH64 == 1
+ ff_vp9dsp_init_aarch64(dsp, bpp);
+#elif ARCH_ARM == 1
+ ff_vp9dsp_init_arm(dsp, bpp);
+#elif ARCH_X86 == 1
+ ff_vp9dsp_init_x86(dsp, bpp, bitexact);
+#elif ARCH_MIPS == 1
+ ff_vp9dsp_init_mips(dsp, bpp);
+#elif ARCH_LOONGARCH == 1
+ ff_vp9dsp_init_loongarch(dsp, bpp);
+#endif
}
diff --git a/media/ffvpx/libavcodec/vp9dsp.h b/media/ffvpx/libavcodec/vp9dsp.h
index e2256316a8..be0ac0b181 100644
--- a/media/ffvpx/libavcodec/vp9dsp.h
+++ b/media/ffvpx/libavcodec/vp9dsp.h
@@ -28,6 +28,7 @@
#include <stdint.h>
#include "libavcodec/vp9.h"
+#include "libavutil/attributes_internal.h"
typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
const uint8_t *ref, ptrdiff_t ref_stride,
@@ -120,7 +121,7 @@ typedef struct VP9DSPContext {
vp9_scaled_mc_func smc[5][N_FILTERS][2];
} VP9DSPContext;
-extern const int16_t ff_vp9_subpel_filters[3][16][8];
+extern const int16_t attribute_visibility_hidden ff_vp9_subpel_filters[3][16][8];
void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact);
@@ -132,5 +133,6 @@ void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp);
void ff_vp9dsp_init_arm(VP9DSPContext *dsp, int bpp);
void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact);
void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp);
+void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp);
#endif /* AVCODEC_VP9DSP_H */
diff --git a/media/ffvpx/libavcodec/vp9mvs.c b/media/ffvpx/libavcodec/vp9mvs.c
index 88db1c341c..b93d878d6f 100644
--- a/media/ffvpx/libavcodec/vp9mvs.c
+++ b/media/ffvpx/libavcodec/vp9mvs.c
@@ -21,13 +21,13 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "internal.h"
-#include "vp56.h"
-#include "vp9.h"
+#include "threadframe.h"
+#include "vp89_rac.h"
#include "vp9data.h"
#include "vp9dec.h"
+#include "vpx_rac.h"
-static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
+static av_always_inline void clamp_mv(VP9mv *dst, const VP9mv *src,
VP9TileData *td)
{
dst->x = av_clip(src->x, td->min_mv.x, td->max_mv.x);
@@ -35,7 +35,7 @@ static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
}
static void find_ref_mvs(VP9TileData *td,
- VP56mv *pmv, int ref, int z, int idx, int sb)
+ VP9mv *pmv, int ref, int z, int idx, int sb)
{
static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
[BS_64x64] = { { 3, -1 }, { -1, 3 }, { 4, -1 }, { -1, 4 },
@@ -65,7 +65,7 @@ static void find_ref_mvs(VP9TileData *td,
[BS_4x4] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
{ -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
};
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
VP9Block *b = td->b;
int row = td->row, col = td->col, row7 = td->row7;
const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
@@ -99,7 +99,7 @@ static void find_ref_mvs(VP9TileData *td,
#define RETURN_MV(mv) \
do { \
if (sb > 0) { \
- VP56mv tmp; \
+ VP9mv tmp; \
uint32_t m; \
av_assert2(idx == 1); \
av_assert2(mem != INVALID_MV); \
@@ -185,7 +185,7 @@ static void find_ref_mvs(VP9TileData *td,
#define RETURN_SCALE_MV(mv, scale) \
do { \
if (scale) { \
- VP56mv mv_temp = { -mv.x, -mv.y }; \
+ VP9mv mv_temp = { -mv.x, -mv.y }; \
RETURN_MV(mv_temp); \
} else { \
RETURN_MV(mv); \
@@ -235,10 +235,10 @@ static void find_ref_mvs(VP9TileData *td,
static av_always_inline int read_mv_component(VP9TileData *td, int idx, int hp)
{
- VP9Context *s = td->s;
- int bit, sign = vp56_rac_get_prob(td->c, s->prob.p.mv_comp[idx].sign);
- int n, c = vp8_rac_get_tree(td->c, ff_vp9_mv_class_tree,
- s->prob.p.mv_comp[idx].classes);
+ const VP9Context *s = td->s;
+ int bit, sign = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].sign);
+ int n, c = vp89_rac_get_tree(td->c, ff_vp9_mv_class_tree,
+ s->prob.p.mv_comp[idx].classes);
td->counts.mv_comp[idx].sign[sign]++;
td->counts.mv_comp[idx].classes[c]++;
@@ -246,17 +246,17 @@ static av_always_inline int read_mv_component(VP9TileData *td, int idx, int hp)
int m;
for (n = 0, m = 0; m < c; m++) {
- bit = vp56_rac_get_prob(td->c, s->prob.p.mv_comp[idx].bits[m]);
+ bit = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].bits[m]);
n |= bit << m;
td->counts.mv_comp[idx].bits[m][bit]++;
}
n <<= 3;
- bit = vp8_rac_get_tree(td->c, ff_vp9_mv_fp_tree,
- s->prob.p.mv_comp[idx].fp);
+ bit = vp89_rac_get_tree(td->c, ff_vp9_mv_fp_tree,
+ s->prob.p.mv_comp[idx].fp);
n |= bit << 1;
td->counts.mv_comp[idx].fp[bit]++;
if (hp) {
- bit = vp56_rac_get_prob(td->c, s->prob.p.mv_comp[idx].hp);
+ bit = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].hp);
td->counts.mv_comp[idx].hp[bit]++;
n |= bit;
} else {
@@ -267,14 +267,14 @@ static av_always_inline int read_mv_component(VP9TileData *td, int idx, int hp)
}
n += 8 << c;
} else {
- n = vp56_rac_get_prob(td->c, s->prob.p.mv_comp[idx].class0);
+ n = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].class0);
td->counts.mv_comp[idx].class0[n]++;
- bit = vp8_rac_get_tree(td->c, ff_vp9_mv_fp_tree,
- s->prob.p.mv_comp[idx].class0_fp[n]);
+ bit = vp89_rac_get_tree(td->c, ff_vp9_mv_fp_tree,
+ s->prob.p.mv_comp[idx].class0_fp[n]);
td->counts.mv_comp[idx].class0_fp[n][bit]++;
n = (n << 3) | (bit << 1);
if (hp) {
- bit = vp56_rac_get_prob(td->c, s->prob.p.mv_comp[idx].class0_hp);
+ bit = vpx_rac_get_prob(td->c, s->prob.p.mv_comp[idx].class0_hp);
td->counts.mv_comp[idx].class0_hp[bit]++;
n |= bit;
} else {
@@ -288,9 +288,9 @@ static av_always_inline int read_mv_component(VP9TileData *td, int idx, int hp)
return sign ? -(n + 1) : (n + 1);
}
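read_mv_component() above assembles a motion-vector component in 1/8-pel units from the class, integer, fractional (fp) and optional high-precision (hp) fields; the elided else branches are assumed to force the low bit to 1, matching upstream behaviour. A sketch of just that assembly, with the range-coder reads replaced by hypothetical pre-decoded values:

#include <stdio.h>

/* Hypothetical pre-decoded fields: cls (0..10), the cls extra integer bits,
 * the class-0 integer bit, the 2-bit fraction and the high-precision bit. */
static int mv_component_magnitude(int cls, int int_bits, int class0_bit,
                                  int fp, int hp_enabled, int hp_bit)
{
    int n;

    if (cls) {
        n  = int_bits;          /* cls extra integer bits, LSB first       */
        n <<= 3;                /* make room for the 1/8-pel fraction      */
        n |= fp << 1;           /* 2-bit fraction in 1/4-pel steps         */
        n |= hp_enabled ? hp_bit : 1;
        n += 8 << cls;          /* offset of this magnitude class          */
    } else {
        n  = (class0_bit << 3) | (fp << 1);
        n |= hp_enabled ? hp_bit : 1;
    }
    return n + 1;               /* caller applies the sign, as in the hunk */
}

int main(void)
{
    /* class 0, integer bit set, fp = 1, hp disabled: n = 11, magnitude 12,
     * i.e. 1.5 pel in 1/8-pel units. */
    printf("%d\n", mv_component_magnitude(0, 0, 1, 1, 0, 0));
    return 0;
}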
-void ff_vp9_fill_mv(VP9TileData *td, VP56mv *mv, int mode, int sb)
+void ff_vp9_fill_mv(VP9TileData *td, VP9mv *mv, int mode, int sb)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
VP9Block *b = td->b;
if (mode == ZEROMV) {
@@ -319,8 +319,8 @@ void ff_vp9_fill_mv(VP9TileData *td, VP56mv *mv, int mode, int sb)
}
}
if (mode == NEWMV) {
- enum MVJoint j = vp8_rac_get_tree(td->c, ff_vp9_mv_joint_tree,
- s->prob.p.mv_joint);
+ enum MVJoint j = vp89_rac_get_tree(td->c, ff_vp9_mv_joint_tree,
+ s->prob.p.mv_joint);
td->counts.mv_joint[j]++;
if (j >= MV_JOINT_V)
@@ -350,8 +350,8 @@ void ff_vp9_fill_mv(VP9TileData *td, VP56mv *mv, int mode, int sb)
}
}
if (mode == NEWMV) {
- enum MVJoint j = vp8_rac_get_tree(td->c, ff_vp9_mv_joint_tree,
- s->prob.p.mv_joint);
+ enum MVJoint j = vp89_rac_get_tree(td->c, ff_vp9_mv_joint_tree,
+ s->prob.p.mv_joint);
td->counts.mv_joint[j]++;
if (j >= MV_JOINT_V)
diff --git a/media/ffvpx/libavcodec/vp9prob.c b/media/ffvpx/libavcodec/vp9prob.c
index fb295b482d..69a5180770 100644
--- a/media/ffvpx/libavcodec/vp9prob.c
+++ b/media/ffvpx/libavcodec/vp9prob.c
@@ -21,9 +21,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "vp56.h"
#include "vp9.h"
-#include "vp9data.h"
#include "vp9dec.h"
static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
diff --git a/media/ffvpx/libavcodec/vp9recon.c b/media/ffvpx/libavcodec/vp9recon.c
index 49bb04e1f4..073c04b47d 100644
--- a/media/ffvpx/libavcodec/vp9recon.c
+++ b/media/ffvpx/libavcodec/vp9recon.c
@@ -22,9 +22,9 @@
*/
#include "libavutil/avassert.h"
+#include "libavutil/mem_internal.h"
-#include "avcodec.h"
-#include "internal.h"
+#include "threadframe.h"
#include "videodsp.h"
#include "vp9data.h"
#include "vp9dec.h"
@@ -36,7 +36,7 @@ static av_always_inline int check_intra_mode(VP9TileData *td, int mode, uint8_t
int row, int y, enum TxfmMode tx,
int p, int ss_h, int ss_v, int bytesperpixel)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
int have_top = row > 0 || y > 0;
int have_left = col > td->tile_col_start || x > 0;
int have_right = x < w - 1;
@@ -218,7 +218,7 @@ static av_always_inline int check_intra_mode(VP9TileData *td, int mode, uint8_t
static av_always_inline void intra_recon(VP9TileData *td, ptrdiff_t y_off,
ptrdiff_t uv_off, int bytesperpixel)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
VP9Block *b = td->b;
int row = td->row, col = td->col;
int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
@@ -295,14 +295,14 @@ void ff_vp9_intra_recon_16bpp(VP9TileData *td, ptrdiff_t y_off, ptrdiff_t uv_off
intra_recon(td, y_off, uv_off, 2);
}
-static av_always_inline void mc_luma_unscaled(VP9TileData *td, vp9_mc_func (*mc)[2],
+static av_always_inline void mc_luma_unscaled(VP9TileData *td, const vp9_mc_func (*mc)[2],
uint8_t *dst, ptrdiff_t dst_stride,
const uint8_t *ref, ptrdiff_t ref_stride,
- ThreadFrame *ref_frame,
- ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
+ const ThreadFrame *ref_frame,
+ ptrdiff_t y, ptrdiff_t x, const VP9mv *mv,
int bw, int bh, int w, int h, int bytesperpixel)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
int mx = mv->x, my = mv->y, th;
y += my >> 3;
@@ -331,16 +331,16 @@ static av_always_inline void mc_luma_unscaled(VP9TileData *td, vp9_mc_func (*mc)
mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
}
-static av_always_inline void mc_chroma_unscaled(VP9TileData *td, vp9_mc_func (*mc)[2],
+static av_always_inline void mc_chroma_unscaled(VP9TileData *td, const vp9_mc_func (*mc)[2],
uint8_t *dst_u, uint8_t *dst_v,
ptrdiff_t dst_stride,
const uint8_t *ref_u, ptrdiff_t src_stride_u,
const uint8_t *ref_v, ptrdiff_t src_stride_v,
- ThreadFrame *ref_frame,
- ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
+ const ThreadFrame *ref_frame,
+ ptrdiff_t y, ptrdiff_t x, const VP9mv *mv,
int bw, int bh, int w, int h, int bytesperpixel)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
int mx = mv->x * (1 << !s->ss_h), my = mv->y * (1 << !s->ss_v), th;
y += my >> 4;
@@ -404,16 +404,16 @@ static av_always_inline void mc_chroma_unscaled(VP9TileData *td, vp9_mc_func (*m
#undef SCALED
static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
- vp9_mc_func (*mc)[2],
+ const vp9_mc_func (*mc)[2],
uint8_t *dst, ptrdiff_t dst_stride,
const uint8_t *ref, ptrdiff_t ref_stride,
- ThreadFrame *ref_frame,
- ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
+ const ThreadFrame *ref_frame,
+ ptrdiff_t y, ptrdiff_t x, const VP9mv *in_mv,
int px, int py, int pw, int ph,
int bw, int bh, int w, int h, int bytesperpixel,
const uint16_t *scale, const uint8_t *step)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
mc_luma_unscaled(td, mc, dst, dst_stride, ref, ref_stride, ref_frame,
@@ -423,7 +423,7 @@ static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func
int mx, my;
int refbw_m1, refbh_m1;
int th;
- VP56mv mv;
+ VP9mv mv;
mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
@@ -462,18 +462,18 @@ static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func
}
static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_func smc,
- vp9_mc_func (*mc)[2],
+ const vp9_mc_func (*mc)[2],
uint8_t *dst_u, uint8_t *dst_v,
ptrdiff_t dst_stride,
const uint8_t *ref_u, ptrdiff_t src_stride_u,
const uint8_t *ref_v, ptrdiff_t src_stride_v,
- ThreadFrame *ref_frame,
- ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
+ const ThreadFrame *ref_frame,
+ ptrdiff_t y, ptrdiff_t x, const VP9mv *in_mv,
int px, int py, int pw, int ph,
int bw, int bh, int w, int h, int bytesperpixel,
const uint16_t *scale, const uint8_t *step)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
mc_chroma_unscaled(td, mc, dst_u, dst_v, dst_stride, ref_u, src_stride_u,
@@ -483,7 +483,7 @@ static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_fun
int mx, my;
int refbw_m1, refbh_m1;
int th;
- VP56mv mv;
+ VP9mv mv;
if (s->ss_h) {
// BUG https://code.google.com/p/webm/issues/detail?id=820
@@ -568,10 +568,20 @@ static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_fun
static av_always_inline void inter_recon(VP9TileData *td, int bytesperpixel)
{
- VP9Context *s = td->s;
+ const VP9Context *s = td->s;
VP9Block *b = td->b;
int row = td->row, col = td->col;
+ if (s->mvscale[b->ref[0]][0] == REF_INVALID_SCALE ||
+ (b->comp && s->mvscale[b->ref[1]][0] == REF_INVALID_SCALE)) {
+ if (!s->td->error_info) {
+ s->td->error_info = AVERROR_INVALIDDATA;
+ av_log(NULL, AV_LOG_ERROR, "Bitstream not supported, "
+ "reference frame has invalid dimensions\n");
+ }
+ return;
+ }
+
if (s->mvscale[b->ref[0]][0] || (b->comp && s->mvscale[b->ref[1]][0])) {
if (bytesperpixel == 1) {
inter_pred_scaled_8bpp(td);
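(Editor's note, not part of the patch: a worked example of the chroma motion-vector arithmetic visible in mc_chroma_unscaled() above, assuming 4:2:0 content, i.e. ss_h = ss_v = 1. Luma vectors are in 1/8-pel units, so after the `mv->y * (1 << !s->ss_v)` scaling the same value addresses chroma in 1/16 of a chroma pel: for mv->y = 20 (2.5 luma pels), my = 20 * (1 << !1) = 20, and `y += my >> 4` advances the source by one whole chroma row, leaving a 4/16 chroma-pel fraction for the subpel filter. With ss_v = 0 the vector is doubled first, so the integer step comes out the same.)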
diff --git a/media/ffvpx/libavcodec/vp9shared.h b/media/ffvpx/libavcodec/vp9shared.h
index 54726df742..543a496df8 100644
--- a/media/ffvpx/libavcodec/vp9shared.h
+++ b/media/ffvpx/libavcodec/vp9shared.h
@@ -27,9 +27,10 @@
#include <stddef.h>
#include <stdint.h>
+#include "libavutil/mem_internal.h"
+
#include "vp9.h"
-#include "thread.h"
-#include "vp56.h"
+#include "threadframe.h"
enum BlockPartition {
PARTITION_NONE, // [ ] <-.
@@ -51,8 +52,13 @@ enum CompPredMode {
PRED_SWITCHABLE,
};
+typedef struct VP9mv {
+ DECLARE_ALIGNED(4, int16_t, x);
+ int16_t y;
+} VP9mv;
+
typedef struct VP9mvrefPair {
- VP56mv mv[2];
+ VP9mv mv[2];
int8_t ref[2];
} VP9mvrefPair;
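(Editor's note, not part of the patch: VP9mv is a codec-local replacement for the shared VP56mv, and the DECLARE_ALIGNED(4, ...) on the first member presumably exists so an x/y pair can be moved as one aligned 32-bit word. A minimal sketch of that assumption, not code from the tree:)

    VP9mv mv = { .x = -12, .y = 7 };
    uint32_t word;
    memcpy(&word, &mv, sizeof(word));   /* whole vector as a single 32-bit load/store */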
diff --git a/media/ffvpx/libavcodec/vp56rac.c b/media/ffvpx/libavcodec/vpx_rac.c
index 64fb6a99b4..cf02e9a19c 100644
--- a/media/ffvpx/libavcodec/vp56rac.c
+++ b/media/ffvpx/libavcodec/vpx_rac.c
@@ -19,10 +19,12 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavutil/common.h"
-#include "vp56.h"
+#include <stdint.h>
+#include "libavutil/error.h"
+#include "bytestream.h"
+#include "vpx_rac.h"
-const uint8_t ff_vp56_norm_shift[256]= {
+const uint8_t ff_vpx_norm_shift[256]= {
8,7,6,6,5,5,5,5,4,4,4,4,4,4,4,4,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
@@ -37,7 +39,7 @@ const uint8_t ff_vp56_norm_shift[256]= {
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
-int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
+int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
{
c->high = 255;
c->bits = -16;
diff --git a/media/ffvpx/libavcodec/vpx_rac.h b/media/ffvpx/libavcodec/vpx_rac.h
new file mode 100644
index 0000000000..b158cc0754
--- /dev/null
+++ b/media/ffvpx/libavcodec/vpx_rac.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Common VP5-VP9 range decoder stuff
+ */
+
+#ifndef AVCODEC_VPX_RAC_H
+#define AVCODEC_VPX_RAC_H
+
+#include <stdint.h>
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "bytestream.h"
+
+typedef struct VPXRangeCoder {
+ int high;
+ int bits; /* stored negated (i.e. negative "bits" is a positive number of
+ bits left) in order to eliminate a negate in cache refilling */
+ const uint8_t *buffer;
+ const uint8_t *end;
+ unsigned int code_word;
+ int end_reached;
+} VPXRangeCoder;
+
+extern const uint8_t ff_vpx_norm_shift[256];
+int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size);
+
+/**
+ * returns 1 if the end of the stream has been reached, 0 otherwise.
+ */
+static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
+{
+ if (c->end <= c->buffer && c->bits >= 0)
+ c->end_reached ++;
+ return c->end_reached > 10;
+}
+
+static av_always_inline unsigned int vpx_rac_renorm(VPXRangeCoder *c)
+{
+ int shift = ff_vpx_norm_shift[c->high];
+ int bits = c->bits;
+ unsigned int code_word = c->code_word;
+
+ c->high <<= shift;
+ code_word <<= shift;
+ bits += shift;
+ if(bits >= 0 && c->buffer < c->end) {
+ code_word |= bytestream_get_be16(&c->buffer) << bits;
+ bits -= 16;
+ }
+ c->bits = bits;
+ return code_word;
+}
+
+#if ARCH_ARM
+#include "arm/vpx_arith.h"
+#elif ARCH_X86
+#include "x86/vpx_arith.h"
+#endif
+
+#ifndef vpx_rac_get_prob
+#define vpx_rac_get_prob vpx_rac_get_prob
+static av_always_inline int vpx_rac_get_prob(VPXRangeCoder *c, uint8_t prob)
+{
+ unsigned int code_word = vpx_rac_renorm(c);
+ unsigned int low = 1 + (((c->high - 1) * prob) >> 8);
+ unsigned int low_shift = low << 16;
+ int bit = code_word >= low_shift;
+
+ c->high = bit ? c->high - low : low;
+ c->code_word = bit ? code_word - low_shift : code_word;
+
+ return bit;
+}
+#endif
+
+#ifndef vpx_rac_get_prob_branchy
+// branchy variant, to be used where there's a branch based on the bit decoded
+static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
+{
+ unsigned long code_word = vpx_rac_renorm(c);
+ unsigned low = 1 + (((c->high - 1) * prob) >> 8);
+ unsigned low_shift = low << 16;
+
+ if (code_word >= low_shift) {
+ c->high -= low;
+ c->code_word = code_word - low_shift;
+ return 1;
+ }
+
+ c->high = low;
+ c->code_word = code_word;
+ return 0;
+}
+#endif
+
+static av_always_inline int vpx_rac_get(VPXRangeCoder *c)
+{
+ unsigned int code_word = vpx_rac_renorm(c);
+ /* equiprobable */
+ int low = (c->high + 1) >> 1;
+ unsigned int low_shift = low << 16;
+ int bit = code_word >= low_shift;
+ if (bit) {
+ c->high -= low;
+ code_word -= low_shift;
+ } else {
+ c->high = low;
+ }
+
+ c->code_word = code_word;
+ return bit;
+}
+
+#endif /* AVCODEC_VPX_RAC_H */
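(Editor's note, not part of the patch: a minimal usage sketch of the renamed range-decoder API declared above; buf/buf_size and the probability value 128 are arbitrary example inputs.)

    VPXRangeCoder c;
    int ret = ff_vpx_init_range_decoder(&c, buf, buf_size);
    if (ret < 0)
        return ret;
    while (!vpx_rac_is_end(&c)) {
        int b = vpx_rac_get_prob(&c, 128); /* prob/256 is roughly the chance of reading a 0 */
        int e = vpx_rac_get(&c);           /* equiprobable bit */
        /* ... consume the decoded bits ... */
    }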
diff --git a/media/ffvpx/libavcodec/x86/constants.c b/media/ffvpx/libavcodec/x86/constants.c
index 4bfb78cc36..bc7f2b17b8 100644
--- a/media/ffvpx/libavcodec/x86/constants.c
+++ b/media/ffvpx/libavcodec/x86/constants.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavutil/mem.h"
+#include "libavutil/mem_internal.h"
#include "libavutil/x86/asm.h" // for xmm_reg
#include "constants.h"
@@ -34,7 +34,6 @@ DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8) = { 0x0008000800080008ULL, 0x000
DECLARE_ASM_ALIGNED(16, const xmm_reg, ff_pw_9) = { 0x0009000900090009ULL, 0x0009000900090009ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pw_15) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16) = { 0x0010001000100010ULL, 0x0010001000100010ULL };
-DECLARE_ALIGNED(16, const xmm_reg, ff_pw_17) = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ASM_ALIGNED(16, const xmm_reg, ff_pw_18) = { 0x0012001200120012ULL, 0x0012001200120012ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_20) = { 0x0014001400140014ULL, 0x0014001400140014ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32) = { 0x0020002000200020ULL, 0x0020002000200020ULL };
diff --git a/media/ffvpx/libavcodec/x86/fft.asm b/media/ffvpx/libavcodec/x86/fft.asm
index a671e8f48e..34c3fc9a0f 100644
--- a/media/ffvpx/libavcodec/x86/fft.asm
+++ b/media/ffvpx/libavcodec/x86/fft.asm
@@ -1,5 +1,5 @@
;******************************************************************************
-;* FFT transform with SSE/3DNow optimizations
+;* FFT transform with SSE/AVX optimizations
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2011 Vitor Sessak
;*
@@ -92,29 +92,6 @@ cextern cos_ %+ i
SECTION .text
-%macro T2_3DNOW 4 ; z0, z1, mem0, mem1
- mova %1, %3
- mova %2, %1
- pfadd %1, %4
- pfsub %2, %4
-%endmacro
-
-%macro T4_3DNOW 6 ; z0, z1, z2, z3, tmp0, tmp1
- mova %5, %3
- pfsub %3, %4
- pfadd %5, %4 ; {t6,t5}
- pxor %3, [ps_m1p1] ; {t8,t7}
- mova %6, %1
- movd [r0+12], %3
- punpckhdq %3, [r0+8]
- pfadd %1, %5 ; {r0,i0}
- pfsub %6, %5 ; {r2,i2}
- mova %4, %2
- pfadd %2, %3 ; {r1,i1}
- pfsub %4, %3 ; {r3,i3}
- SWAP %3, %6
-%endmacro
-
; in: %1 = {r0,i0,r2,i2,r4,i4,r6,i6}
; %2 = {r1,i1,r3,i3,r5,i5,r7,i7}
; %3, %4, %5 tmp
@@ -199,7 +176,7 @@ SECTION .text
vextractf128 %4 %+ H(%5), %3, 0
vextractf128 %4(%5 + 1), %2, 1
vextractf128 %4 %+ H(%5 + 1), %3, 1
-%elif cpuflag(sse) || cpuflag(3dnow)
+%elif cpuflag(sse)
mova %3, %2
unpcklps %2, %1
unpckhps %3, %1
@@ -310,12 +287,6 @@ IF%1 mova Z(1), m5
%endif
%endmacro
-%macro PUNPCK 3
- mova %3, %1
- punpckldq %1, %2
- punpckhdq %3, %2
-%endmacro
-
%define Z(x) [r0+mmsize*x]
%define Z2(x) [r0+mmsize*x]
%define ZH(x) [r0+mmsize*x+mmsize/2]
@@ -462,68 +433,6 @@ fft16_sse:
ret
-%macro FFT48_3DNOW 0
-align 16
-fft4 %+ SUFFIX:
- T2_3DNOW m0, m1, Z(0), Z(1)
- mova m2, Z(2)
- mova m3, Z(3)
- T4_3DNOW m0, m1, m2, m3, m4, m5
- PUNPCK m0, m1, m4
- PUNPCK m2, m3, m5
- mova Z(0), m0
- mova Z(1), m4
- mova Z(2), m2
- mova Z(3), m5
- ret
-
-align 16
-fft8 %+ SUFFIX:
- T2_3DNOW m0, m1, Z(0), Z(1)
- mova m2, Z(2)
- mova m3, Z(3)
- T4_3DNOW m0, m1, m2, m3, m4, m5
- mova Z(0), m0
- mova Z(2), m2
- T2_3DNOW m4, m5, Z(4), Z(5)
- T2_3DNOW m6, m7, Z2(6), Z2(7)
- PSWAPD m0, m5
- PSWAPD m2, m7
- pxor m0, [ps_m1p1]
- pxor m2, [ps_m1p1]
- pfsub m5, m0
- pfadd m7, m2
- pfmul m5, [ps_root2]
- pfmul m7, [ps_root2]
- T4_3DNOW m1, m3, m5, m7, m0, m2
- mova Z(5), m5
- mova Z2(7), m7
- mova m0, Z(0)
- mova m2, Z(2)
- T4_3DNOW m0, m2, m4, m6, m5, m7
- PUNPCK m0, m1, m5
- PUNPCK m2, m3, m7
- mova Z(0), m0
- mova Z(1), m5
- mova Z(2), m2
- mova Z(3), m7
- PUNPCK m4, Z(5), m5
- PUNPCK m6, Z2(7), m7
- mova Z(4), m4
- mova Z(5), m5
- mova Z2(6), m6
- mova Z2(7), m7
- ret
-%endmacro
-
-%if ARCH_X86_32
-INIT_MMX 3dnowext
-FFT48_3DNOW
-
-INIT_MMX 3dnow
-FFT48_3DNOW
-%endif
-
%define Z(x) [zcq + o1q*(x&6) + mmsize*(x&1)]
%define Z2(x) [zcq + o3q + mmsize*(x&1)]
%define ZH(x) [zcq + o1q*(x&6) + mmsize*(x&1) + mmsize/2]
@@ -566,7 +475,7 @@ cglobal fft_calc, 2,5,8
mov r0, r1
mov r1, r3
FFT_DISPATCH _interleave %+ SUFFIX, r1
- REP_RET
+ RET
%endif
@@ -575,7 +484,7 @@ INIT_XMM sse
DECL_PASS pass_sse, PASS_BIG 1
DECL_PASS pass_interleave_sse, PASS_BIG 0
-%macro FFT_CALC_FUNC 0
+INIT_XMM sse
cglobal fft_calc, 2,5,8
mov r3d, [r0 + FFTContext.nbits]
PUSH r1
@@ -592,36 +501,16 @@ cglobal fft_calc, 2,5,8
shl r2, cl
sub r4, r2
.loop:
-%if mmsize == 8
- PSWAPD m0, [r4 + r2 + 4]
- mova [r4 + r2 + 4], m0
-%else
movaps xmm0, [r4 + r2]
movaps xmm1, xmm0
unpcklps xmm0, [r4 + r2 + 16]
unpckhps xmm1, [r4 + r2 + 16]
movaps [r4 + r2], xmm0
movaps [r4 + r2 + 16], xmm1
-%endif
add r2, mmsize*2
jl .loop
.end:
-%if cpuflag(3dnow)
- femms
RET
-%else
- REP_RET
-%endif
-%endmacro
-
-%if ARCH_X86_32
-INIT_MMX 3dnow
-FFT_CALC_FUNC
-INIT_MMX 3dnowext
-FFT_CALC_FUNC
-%endif
-INIT_XMM sse
-FFT_CALC_FUNC
cglobal fft_permute, 2,7,1
mov r4, [r0 + FFTContext.revtab]
@@ -654,9 +543,9 @@ cglobal fft_permute, 2,7,1
movaps [r1 + r2 + 16], xmm1
add r2, 32
jl .loopcopy
- REP_RET
+ RET
-%macro IMDCT_CALC_FUNC 0
+INIT_XMM sse
cglobal imdct_calc, 3,5,3
mov r3d, [r0 + FFTContext.mdctsize]
mov r4, [r0 + FFTContext.imdcthalf]
@@ -684,52 +573,17 @@ cglobal imdct_calc, 3,5,3
neg r2
mova m2, [ps_neg]
.loop:
-%if mmsize == 8
- PSWAPD m0, [r1 + r3]
- PSWAPD m1, [r0 + r2]
- pxor m0, m2
-%else
mova m0, [r1 + r3]
mova m1, [r0 + r2]
shufps m0, m0, 0x1b
shufps m1, m1, 0x1b
xorps m0, m2
-%endif
mova [r0 + r3], m1
mova [r1 + r2], m0
sub r3, mmsize
add r2, mmsize
jl .loop
-%if cpuflag(3dnow)
- femms
RET
-%else
- REP_RET
-%endif
-%endmacro
-
-%if ARCH_X86_32
-INIT_MMX 3dnow
-IMDCT_CALC_FUNC
-INIT_MMX 3dnowext
-IMDCT_CALC_FUNC
-%endif
-
-INIT_XMM sse
-IMDCT_CALC_FUNC
-
-%if ARCH_X86_32
-INIT_MMX 3dnow
-%define mulps pfmul
-%define addps pfadd
-%define subps pfsub
-%define unpcklps punpckldq
-%define unpckhps punpckhdq
-DECL_PASS pass_3dnow, PASS_SMALL 1, [wq], [wq+o1q]
-DECL_PASS pass_interleave_3dnow, PASS_BIG 0
-%define pass_3dnowext pass_3dnow
-%define pass_interleave_3dnowext pass_interleave_3dnow
-%endif
%ifdef PIC
%define SECTION_REL - $$
@@ -785,14 +639,6 @@ DECL_FFT 6, _interleave
INIT_XMM sse
DECL_FFT 5
DECL_FFT 5, _interleave
-%if ARCH_X86_32
-INIT_MMX 3dnow
-DECL_FFT 4
-DECL_FFT 4, _interleave
-INIT_MMX 3dnowext
-DECL_FFT 4
-DECL_FFT 4, _interleave
-%endif
INIT_XMM sse
%undef mulps
@@ -802,37 +648,6 @@ INIT_XMM sse
%undef unpckhps
%macro PREROTATER 5 ;-2*k, 2*k, input+n4, tcos+n8, tsin+n8
-%if mmsize == 8 ; j*2+2-n4, n4-2-j*2, input+n4, tcos+n8, tsin+n8
- PSWAPD m0, [%3+%2*4]
- movq m2, [%3+%1*4-8]
- movq m3, m0
- punpckldq m0, m2
- punpckhdq m2, m3
- movd m1, [%4+%1*2-4] ; tcos[j]
- movd m3, [%4+%2*2] ; tcos[n4-j-1]
- punpckldq m1, [%5+%1*2-4] ; tsin[j]
- punpckldq m3, [%5+%2*2] ; tsin[n4-j-1]
-
- mova m4, m0
- PSWAPD m5, m1
- pfmul m0, m1
- pfmul m4, m5
- mova m6, m2
- PSWAPD m5, m3
- pfmul m2, m3
- pfmul m6, m5
-%if cpuflag(3dnowext)
- pfpnacc m0, m4
- pfpnacc m2, m6
-%else
- SBUTTERFLY dq, 0, 4, 1
- SBUTTERFLY dq, 2, 6, 3
- pxor m4, m7
- pxor m6, m7
- pfadd m0, m4
- pfadd m2, m6
-%endif
-%else
movaps xmm0, [%3+%2*4]
movaps xmm1, [%3+%1*4-0x10]
movaps xmm2, xmm0
@@ -853,29 +668,15 @@ INIT_XMM sse
movaps xmm0, xmm1
unpcklps xmm1, xmm2
unpckhps xmm0, xmm2
-%endif
%endmacro
%macro CMUL 6 ;j, xmm0, xmm1, 3, 4, 5
-%if cpuflag(sse)
mulps m6, %3, [%5+%1]
mulps m7, %2, [%5+%1]
mulps %2, %2, [%6+%1]
mulps %3, %3, [%6+%1]
subps %2, %2, m6
addps %3, %3, m7
-%elif cpuflag(3dnow)
- mova m6, [%1+%2*2]
- mova %3, [%1+%2*2+8]
- mova %4, m6
- mova m7, %3
- pfmul m6, [%5+%2]
- pfmul %3, [%6+%2]
- pfmul %4, [%6+%2]
- pfmul m7, [%5+%2]
- pfsub %3, m6
- pfadd %4, m7
-%endif
%endmacro
%macro POSROTATESHUF 5 ;j, k, z+n8, tcos+n8, tsin+n8
@@ -909,7 +710,7 @@ INIT_XMM sse
sub %2, 0x20
add %1, 0x20
jl .post
-%elif cpuflag(sse)
+%else
movaps xmm1, [%3+%1*2]
movaps xmm0, [%3+%1*2+0x10]
CMUL %1, xmm0, xmm1, %3, %4, %5
@@ -931,24 +732,6 @@ INIT_XMM sse
sub %2, 0x10
add %1, 0x10
jl .post
-%elif cpuflag(3dnow)
- CMUL %3, %1, m0, m1, %4, %5
- CMUL %3, %2, m2, m3, %4, %5
- movd [%3+%1*2+ 0], m0
- movd [%3+%2*2+12], m1
- movd [%3+%2*2+ 0], m2
- movd [%3+%1*2+12], m3
- psrlq m0, 32
- psrlq m1, 32
- psrlq m2, 32
- psrlq m3, 32
- movd [%3+%1*2+ 8], m0
- movd [%3+%2*2+ 4], m1
- movd [%3+%2*2+ 8], m2
- movd [%3+%1*2+ 4], m3
- sub %2, 8
- add %1, 8
- jl .post
%endif
%endmacro
@@ -981,39 +764,21 @@ cglobal imdct_half, 3,12,8; FFTContext *s, FFTSample *output, const FFTSample *i
push rrevtab
%endif
-%if mmsize == 8
- sub r3, 2
-%else
sub r3, 4
-%endif
-%if ARCH_X86_64 || mmsize == 8
+%if ARCH_X86_64
xor r4, r4
sub r4, r3
%endif
-%if notcpuflag(3dnowext) && mmsize == 8
- movd m7, [ps_neg]
-%endif
.pre:
%if ARCH_X86_64 == 0
;unspill
-%if mmsize != 8
xor r4, r4
sub r4, r3
-%endif
mov rtcos, [esp+8]
mov rtsin, [esp+4]
%endif
PREROTATER r4, r3, r2, rtcos, rtsin
-%if mmsize == 8
- mov r6, [esp] ; rrevtab = ptr+n8
- movzx r5, word [rrevtab+r4-2] ; rrevtab[j]
- movzx r6, word [rrevtab+r3] ; rrevtab[n4-j-1]
- mova [r1+r5*8], m0
- mova [r1+r6*8], m2
- add r4, 2
- sub r3, 2
-%else
%if ARCH_X86_64
movzx r5, word [rrevtab+r4-4]
movzx r6, word [rrevtab+r4-2]
@@ -1036,7 +801,6 @@ cglobal imdct_half, 3,12,8; FFTContext *s, FFTSample *output, const FFTSample *i
movhps [r1+r4*8], xmm1
%endif
sub r3, 4
-%endif
jns .pre
mov r5, r0
@@ -1062,22 +826,11 @@ cglobal imdct_half, 3,12,8; FFTContext *s, FFTSample *output, const FFTSample *i
%if ARCH_X86_64 == 0
add esp, 12
%endif
-%if mmsize == 8
- femms
-%endif
RET
%endmacro
DECL_IMDCT
-%if ARCH_X86_32
-INIT_MMX 3dnow
-DECL_IMDCT
-
-INIT_MMX 3dnowext
-DECL_IMDCT
-%endif
-
INIT_YMM avx
%if HAVE_AVX_EXTERNAL
diff --git a/media/ffvpx/libavcodec/x86/fft.h b/media/ffvpx/libavcodec/x86/fft.h
index 398091eb1f..37418ec1f4 100644
--- a/media/ffvpx/libavcodec/x86/fft.h
+++ b/media/ffvpx/libavcodec/x86/fft.h
@@ -24,13 +24,7 @@
void ff_fft_permute_sse(FFTContext *s, FFTComplex *z);
void ff_fft_calc_avx(FFTContext *s, FFTComplex *z);
void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
-void ff_fft_calc_3dnow(FFTContext *s, FFTComplex *z);
-void ff_fft_calc_3dnowext(FFTContext *s, FFTComplex *z);
-void ff_imdct_calc_3dnow(FFTContext *s, FFTSample *output, const FFTSample *input);
-void ff_imdct_half_3dnow(FFTContext *s, FFTSample *output, const FFTSample *input);
-void ff_imdct_calc_3dnowext(FFTContext *s, FFTSample *output, const FFTSample *input);
-void ff_imdct_half_3dnowext(FFTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_calc_sse(FFTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_half_sse(FFTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_half_avx(FFTContext *s, FFTSample *output, const FFTSample *input);
diff --git a/media/ffvpx/libavcodec/x86/fft_init.c b/media/ffvpx/libavcodec/x86/fft_init.c
index 928f1dcda7..df79d57dc7 100644
--- a/media/ffvpx/libavcodec/x86/fft_init.c
+++ b/media/ffvpx/libavcodec/x86/fft_init.c
@@ -31,20 +31,6 @@ av_cold void ff_fft_init_x86(FFTContext *s)
if (s->nbits > 16)
return;
-#if ARCH_X86_32
- if (EXTERNAL_AMD3DNOW(cpu_flags)) {
- s->imdct_calc = ff_imdct_calc_3dnow;
- s->imdct_half = ff_imdct_half_3dnow;
- s->fft_calc = ff_fft_calc_3dnow;
- }
-
- if (EXTERNAL_AMD3DNOWEXT(cpu_flags)) {
- s->imdct_calc = ff_imdct_calc_3dnowext;
- s->imdct_half = ff_imdct_half_3dnowext;
- s->fft_calc = ff_fft_calc_3dnowext;
- }
-#endif /* ARCH_X86_32 */
-
if (EXTERNAL_SSE(cpu_flags)) {
s->imdct_calc = ff_imdct_calc_sse;
s->imdct_half = ff_imdct_half_sse;
diff --git a/media/ffvpx/libavcodec/x86/flacdsp.asm b/media/ffvpx/libavcodec/x86/flacdsp.asm
index 7138611526..44416e4dfd 100644
--- a/media/ffvpx/libavcodec/x86/flacdsp.asm
+++ b/media/ffvpx/libavcodec/x86/flacdsp.asm
@@ -23,6 +23,10 @@
%include "libavutil/x86/x86util.asm"
+SECTION_RODATA
+
+vector: db 0,1,4,5,8,9,12,13,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,0,1,4,5,8,9,12,13,
+
SECTION .text
%macro PMACSDQL 5
@@ -75,7 +79,7 @@ ALIGN 16
movd [decodedq+4], m1
jg .loop_sample
.ret:
- REP_RET
+ RET
%endmacro
%if HAVE_XOP_EXTERNAL
@@ -89,6 +93,9 @@ LPC_32 sse4
;----------------------------------------------------------------------------------
%macro FLAC_DECORRELATE_16 3-4
cglobal flac_decorrelate_%1_16, 2, 4, 4, out, in0, in1, len
+%ifidn %1, indep2
+ VBROADCASTI128 m2, [vector]
+%endif
%if ARCH_X86_32
mov lend, lenm
%endif
@@ -112,15 +119,21 @@ align 16
%endif
%ifnidn %1, indep2
p%4d m2, m0, m1
+ packssdw m%2, m%2
+ packssdw m%3, m%3
+ punpcklwd m%2, m%3
+ psllw m%2, m3
+%else
+ pslld m%2, m3
+ pslld m%3, m3
+ pshufb m%2, m%2, m2
+ pshufb m%3, m%3, m2
+ punpcklwd m%2, m%3
%endif
- packssdw m%2, m%2
- packssdw m%3, m%3
- punpcklwd m%2, m%3
- psllw m%2, m3
mova [outq + lenq], m%2
add lenq, 16
jl .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -164,7 +177,7 @@ align 16
add outq, mmsize*2
sub lend, mmsize/4
jg .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -289,10 +302,10 @@ align 16
add outq, mmsize*REPCOUNT
sub lend, mmsize/4
jg .loop
- REP_RET
+ RET
%endmacro
-INIT_XMM sse2
+INIT_XMM ssse3
FLAC_DECORRELATE_16 indep2, 0, 1 ; Reuse stereo 16bits macro
FLAC_DECORRELATE_INDEP 32, 2, 3, d
FLAC_DECORRELATE_INDEP 16, 4, 3, w
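(Editor's note, not part of the patch: a scalar model of the indep2 16-bit path, for reference only. The new SSSE3 variant shifts the full 32-bit samples first (pslld) and then picks out the low words with the pshufb mask defined at the top of the file, instead of packing with saturation and shifting afterwards; argument names follow the decorrelate prototypes in flacdsp_init.c.)

    int16_t *dst = (int16_t *)out[0];
    for (int i = 0; i < len; i++) {
        dst[2 * i]     = (int16_t)(in[0][i] << shift);  /* channel 0 */
        dst[2 * i + 1] = (int16_t)(in[1][i] << shift);  /* channel 1 */
    }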
diff --git a/media/ffvpx/libavcodec/x86/flacdsp_init.c b/media/ffvpx/libavcodec/x86/flacdsp_init.c
index 1971f81b8d..87daed7005 100644
--- a/media/ffvpx/libavcodec/x86/flacdsp_init.c
+++ b/media/ffvpx/libavcodec/x86/flacdsp_init.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/attributes.h"
#include "libavcodec/flacdsp.h"
#include "libavutil/x86/cpu.h"
#include "config.h"
@@ -27,15 +28,15 @@ void ff_flac_lpc_32_sse4(int32_t *samples, const int coeffs[32], int order,
void ff_flac_lpc_32_xop(int32_t *samples, const int coeffs[32], int order,
int qlevel, int len);
-void ff_flac_enc_lpc_16_sse4(int32_t *, const int32_t *, int, int, const int32_t *,int);
-
#define DECORRELATE_FUNCS(fmt, opt) \
void ff_flac_decorrelate_ls_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift); \
void ff_flac_decorrelate_rs_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift); \
void ff_flac_decorrelate_ms_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
- int len, int shift); \
+ int len, int shift)
+
+#define DECORRELATE_IFUNCS(fmt, opt) \
void ff_flac_decorrelate_indep2_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift); \
void ff_flac_decorrelate_indep4_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
@@ -49,39 +50,46 @@ DECORRELATE_FUNCS(16, sse2);
DECORRELATE_FUNCS(16, avx);
DECORRELATE_FUNCS(32, sse2);
DECORRELATE_FUNCS(32, avx);
+DECORRELATE_IFUNCS(16, ssse3);
+DECORRELATE_IFUNCS(16, avx);
+DECORRELATE_IFUNCS(32, ssse3);
+DECORRELATE_IFUNCS(32, avx);
-av_cold void ff_flacdsp_init_x86(FLACDSPContext *c, enum AVSampleFormat fmt, int channels,
- int bps)
+av_cold void ff_flacdsp_init_x86(FLACDSPContext *c, enum AVSampleFormat fmt, int channels)
{
#if HAVE_X86ASM
int cpu_flags = av_get_cpu_flags();
-#if CONFIG_FLAC_DECODER
if (EXTERNAL_SSE2(cpu_flags)) {
if (fmt == AV_SAMPLE_FMT_S16) {
+ c->decorrelate[1] = ff_flac_decorrelate_ls_16_sse2;
+ c->decorrelate[2] = ff_flac_decorrelate_rs_16_sse2;
+ c->decorrelate[3] = ff_flac_decorrelate_ms_16_sse2;
+ } else if (fmt == AV_SAMPLE_FMT_S32) {
+ c->decorrelate[1] = ff_flac_decorrelate_ls_32_sse2;
+ c->decorrelate[2] = ff_flac_decorrelate_rs_32_sse2;
+ c->decorrelate[3] = ff_flac_decorrelate_ms_32_sse2;
+ }
+ }
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ if (fmt == AV_SAMPLE_FMT_S16) {
if (channels == 2)
- c->decorrelate[0] = ff_flac_decorrelate_indep2_16_sse2;
+ c->decorrelate[0] = ff_flac_decorrelate_indep2_16_ssse3;
else if (channels == 4)
- c->decorrelate[0] = ff_flac_decorrelate_indep4_16_sse2;
+ c->decorrelate[0] = ff_flac_decorrelate_indep4_16_ssse3;
else if (channels == 6)
- c->decorrelate[0] = ff_flac_decorrelate_indep6_16_sse2;
+ c->decorrelate[0] = ff_flac_decorrelate_indep6_16_ssse3;
else if (ARCH_X86_64 && channels == 8)
- c->decorrelate[0] = ff_flac_decorrelate_indep8_16_sse2;
- c->decorrelate[1] = ff_flac_decorrelate_ls_16_sse2;
- c->decorrelate[2] = ff_flac_decorrelate_rs_16_sse2;
- c->decorrelate[3] = ff_flac_decorrelate_ms_16_sse2;
+ c->decorrelate[0] = ff_flac_decorrelate_indep8_16_ssse3;
} else if (fmt == AV_SAMPLE_FMT_S32) {
if (channels == 2)
- c->decorrelate[0] = ff_flac_decorrelate_indep2_32_sse2;
+ c->decorrelate[0] = ff_flac_decorrelate_indep2_32_ssse3;
else if (channels == 4)
- c->decorrelate[0] = ff_flac_decorrelate_indep4_32_sse2;
+ c->decorrelate[0] = ff_flac_decorrelate_indep4_32_ssse3;
else if (channels == 6)
- c->decorrelate[0] = ff_flac_decorrelate_indep6_32_sse2;
+ c->decorrelate[0] = ff_flac_decorrelate_indep6_32_ssse3;
else if (ARCH_X86_64 && channels == 8)
- c->decorrelate[0] = ff_flac_decorrelate_indep8_32_sse2;
- c->decorrelate[1] = ff_flac_decorrelate_ls_32_sse2;
- c->decorrelate[2] = ff_flac_decorrelate_rs_32_sse2;
- c->decorrelate[3] = ff_flac_decorrelate_ms_32_sse2;
+ c->decorrelate[0] = ff_flac_decorrelate_indep8_32_ssse3;
}
}
if (EXTERNAL_SSE4(cpu_flags)) {
@@ -103,13 +111,5 @@ av_cold void ff_flacdsp_init_x86(FLACDSPContext *c, enum AVSampleFormat fmt, int
if (EXTERNAL_XOP(cpu_flags)) {
c->lpc32 = ff_flac_lpc_32_xop;
}
-#endif
-
-#if CONFIG_FLAC_ENCODER
- if (EXTERNAL_SSE4(cpu_flags)) {
- if (CONFIG_GPL)
- c->lpc16_encode = ff_flac_enc_lpc_16_sse4;
- }
-#endif
#endif /* HAVE_X86ASM */
}
diff --git a/media/ffvpx/libavcodec/x86/h264_intrapred.asm b/media/ffvpx/libavcodec/x86/h264_intrapred.asm
index f3aa3172f0..8a38ba2bb5 100644
--- a/media/ffvpx/libavcodec/x86/h264_intrapred.asm
+++ b/media/ffvpx/libavcodec/x86/h264_intrapred.asm
@@ -42,32 +42,12 @@ SECTION .text
cextern pb_1
cextern pb_3
cextern pw_4
-cextern pw_5
cextern pw_8
-cextern pw_16
-cextern pw_17
-cextern pw_32
;-----------------------------------------------------------------------------
; void ff_pred16x16_vertical_8(uint8_t *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-INIT_MMX mmx
-cglobal pred16x16_vertical_8, 2,3
- sub r0, r1
- mov r2, 8
- movq mm0, [r0+0]
- movq mm1, [r0+8]
-.loop:
- movq [r0+r1*1+0], mm0
- movq [r0+r1*1+8], mm1
- movq [r0+r1*2+0], mm0
- movq [r0+r1*2+8], mm1
- lea r0, [r0+r1*2]
- dec r2
- jg .loop
- REP_RET
-
INIT_XMM sse
cglobal pred16x16_vertical_8, 2,3
sub r0, r1
@@ -82,7 +62,7 @@ cglobal pred16x16_vertical_8, 2,3
lea r0, [r0+r1*2]
dec r2
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_horizontal_8(uint8_t *src, ptrdiff_t stride)
@@ -115,11 +95,9 @@ cglobal pred16x16_horizontal_8, 2,3
lea r0, [r0+r1*2]
dec r2
jg .loop
- REP_RET
+ RET
%endmacro
-INIT_MMX mmx
-PRED16x16_H
INIT_MMX mmxext
PRED16x16_H
INIT_XMM ssse3
@@ -158,14 +136,6 @@ cglobal pred16x16_dc_8, 2,7
%endif
SPLATB_REG m0, r2, m1
-%if mmsize==8
- mov r3d, 8
-.loop:
- mova [r4+r1*0+0], m0
- mova [r4+r1*0+8], m0
- mova [r4+r1*1+0], m0
- mova [r4+r1*1+8], m0
-%else
mov r3d, 4
.loop:
mova [r4+r1*0], m0
@@ -173,15 +143,12 @@ cglobal pred16x16_dc_8, 2,7
lea r4, [r4+r1*2]
mova [r4+r1*0], m0
mova [r4+r1*1], m0
-%endif
lea r4, [r4+r1*2]
dec r3d
jg .loop
- REP_RET
+ RET
%endmacro
-INIT_MMX mmxext
-PRED16x16_DC
INIT_XMM sse2
PRED16x16_DC
INIT_XMM ssse3
@@ -191,47 +158,6 @@ PRED16x16_DC
; void ff_pred16x16_tm_vp8_8(uint8_t *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-%macro PRED16x16_TM 0
-cglobal pred16x16_tm_vp8_8, 2,5
- sub r0, r1
- pxor mm7, mm7
- movq mm0, [r0+0]
- movq mm2, [r0+8]
- movq mm1, mm0
- movq mm3, mm2
- punpcklbw mm0, mm7
- punpckhbw mm1, mm7
- punpcklbw mm2, mm7
- punpckhbw mm3, mm7
- movzx r3d, byte [r0-1]
- mov r4d, 16
-.loop:
- movzx r2d, byte [r0+r1-1]
- sub r2d, r3d
- movd mm4, r2d
- SPLATW mm4, mm4, 0
- movq mm5, mm4
- movq mm6, mm4
- movq mm7, mm4
- paddw mm4, mm0
- paddw mm5, mm1
- paddw mm6, mm2
- paddw mm7, mm3
- packuswb mm4, mm5
- packuswb mm6, mm7
- movq [r0+r1+0], mm4
- movq [r0+r1+8], mm6
- add r0, r1
- dec r4d
- jg .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmx
-PRED16x16_TM
-INIT_MMX mmxext
-PRED16x16_TM
-
INIT_XMM sse2
cglobal pred16x16_tm_vp8_8, 2,6,6
sub r0, r1
@@ -266,7 +192,7 @@ cglobal pred16x16_tm_vp8_8, 2,6,6
lea r0, [r0+r1*2]
dec r5d
jg .loop
- REP_RET
+ RET
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
@@ -302,7 +228,7 @@ cglobal pred16x16_tm_vp8_8, 2, 4, 5, dst, stride, stride3, iteration
lea dstq, [dstq+strideq*4]
dec iterationd
jg .loop
- REP_RET
+ RET
%endif
;-----------------------------------------------------------------------------
@@ -315,22 +241,6 @@ cglobal pred16x16_plane_%1_8, 2,9,7
neg r1 ; -stride
movh m0, [r0+r1 -1]
-%if mmsize == 8
- pxor m4, m4
- movh m1, [r0+r1 +3 ]
- movh m2, [r0+r1 +8 ]
- movh m3, [r0+r1 +12]
- punpcklbw m0, m4
- punpcklbw m1, m4
- punpcklbw m2, m4
- punpcklbw m3, m4
- pmullw m0, [pw_m8tom1 ]
- pmullw m1, [pw_m8tom1+8]
- pmullw m2, [pw_1to8 ]
- pmullw m3, [pw_1to8 +8]
- paddw m0, m2
- paddw m1, m3
-%else ; mmsize == 16
%if cpuflag(ssse3)
movhps m0, [r0+r1 +8]
pmaddubsw m0, [plane_shuf] ; H coefficients
@@ -344,21 +254,10 @@ cglobal pred16x16_plane_%1_8, 2,9,7
paddw m0, m1
%endif
movhlps m1, m0
-%endif
paddw m0, m1
-%if cpuflag(mmxext)
PSHUFLW m1, m0, 0xE
-%elif cpuflag(mmx)
- mova m1, m0
- psrlq m1, 32
-%endif
paddw m0, m1
-%if cpuflag(mmxext)
PSHUFLW m1, m0, 0x1
-%elif cpuflag(mmx)
- mova m1, m0
- psrlq m1, 16
-%endif
paddw m0, m1 ; sum of H coefficients
lea r4, [r0+r2*8-1]
@@ -500,24 +399,10 @@ cglobal pred16x16_plane_%1_8, 2,9,7
SWAP 0, 1
%endif
mova m2, m0
-%if mmsize == 8
- mova m5, m0
-%endif
pmullw m0, [pw_0to7] ; 0*H, 1*H, ..., 7*H (words)
-%if mmsize == 16
psllw m2, 3
-%else
- psllw m5, 3
- psllw m2, 2
- mova m6, m5
- paddw m6, m2
-%endif
paddw m0, m3 ; a + {0,1,2,3,4,5,6,7}*H
paddw m2, m0 ; a + {8,9,10,11,12,13,14,15}*H
-%if mmsize == 8
- paddw m5, m0 ; a + {8,9,10,11}*H
- paddw m6, m0 ; a + {12,13,14,15}*H
-%endif
mov r4, 8
.loop:
@@ -527,20 +412,8 @@ cglobal pred16x16_plane_%1_8, 2,9,7
psraw m4, 5
packuswb m3, m4
mova [r0], m3
-%if mmsize == 8
- mova m3, m5 ; b[8..11]
- mova m4, m6 ; b[12..15]
- psraw m3, 5
- psraw m4, 5
- packuswb m3, m4
- mova [r0+8], m3
-%endif
paddw m0, m1
paddw m2, m1
-%if mmsize == 8
- paddw m5, m1
- paddw m6, m1
-%endif
mova m3, m0 ; b[0..7]
mova m4, m2 ; b[8..15]
@@ -548,35 +421,15 @@ cglobal pred16x16_plane_%1_8, 2,9,7
psraw m4, 5
packuswb m3, m4
mova [r0+r2], m3
-%if mmsize == 8
- mova m3, m5 ; b[8..11]
- mova m4, m6 ; b[12..15]
- psraw m3, 5
- psraw m4, 5
- packuswb m3, m4
- mova [r0+r2+8], m3
-%endif
paddw m0, m1
paddw m2, m1
-%if mmsize == 8
- paddw m5, m1
- paddw m6, m1
-%endif
lea r0, [r0+r2*2]
dec r4
jg .loop
- REP_RET
+ RET
%endmacro
-INIT_MMX mmx
-H264_PRED16x16_PLANE h264
-H264_PRED16x16_PLANE rv40
-H264_PRED16x16_PLANE svq3
-INIT_MMX mmxext
-H264_PRED16x16_PLANE h264
-H264_PRED16x16_PLANE rv40
-H264_PRED16x16_PLANE svq3
INIT_XMM sse2
H264_PRED16x16_PLANE h264
H264_PRED16x16_PLANE rv40
@@ -596,14 +449,6 @@ cglobal pred8x8_plane_8, 2,9,7
neg r1 ; -stride
movd m0, [r0+r1 -1]
-%if mmsize == 8
- pxor m2, m2
- movh m1, [r0+r1 +4 ]
- punpcklbw m0, m2
- punpcklbw m1, m2
- pmullw m0, [pw_m4to4]
- pmullw m1, [pw_m4to4+8]
-%else ; mmsize == 16
%if cpuflag(ssse3)
movhps m0, [r0+r1 +4] ; this reads 4 bytes more than necessary
pmaddubsw m0, [plane8_shuf] ; H coefficients
@@ -615,25 +460,14 @@ cglobal pred8x8_plane_8, 2,9,7
pmullw m0, [pw_m4to4]
%endif
movhlps m1, m0
-%endif
paddw m0, m1
%if notcpuflag(ssse3)
-%if cpuflag(mmxext)
PSHUFLW m1, m0, 0xE
-%elif cpuflag(mmx)
- mova m1, m0
- psrlq m1, 32
-%endif
paddw m0, m1
%endif ; !ssse3
-%if cpuflag(mmxext)
PSHUFLW m1, m0, 0x1
-%elif cpuflag(mmx)
- mova m1, m0
- psrlq m1, 16
-%endif
paddw m0, m1 ; sum of H coefficients
lea r4, [r0+r2*4-1]
@@ -703,20 +537,12 @@ cglobal pred8x8_plane_8, 2,9,7
SPLATW m0, m0, 0 ; H
SPLATW m1, m1, 0 ; V
SPLATW m3, m3, 0 ; a
-%if mmsize == 8
- mova m2, m0
-%endif
pmullw m0, [pw_0to7] ; 0*H, 1*H, ..., 7*H (words)
paddw m0, m3 ; a + {0,1,2,3,4,5,6,7}*H
-%if mmsize == 8
- psllw m2, 2
- paddw m2, m0 ; a + {4,5,6,7}*H
-%endif
mov r4, 4
ALIGN 16
.loop:
-%if mmsize == 16
mova m3, m0 ; b[0..7]
paddw m0, m1
psraw m3, 5
@@ -726,35 +552,13 @@ ALIGN 16
packuswb m3, m4
movh [r0], m3
movhps [r0+r2], m3
-%else ; mmsize == 8
- mova m3, m0 ; b[0..3]
- mova m4, m2 ; b[4..7]
- paddw m0, m1
- paddw m2, m1
- psraw m3, 5
- psraw m4, 5
- mova m5, m0 ; V+b[0..3]
- mova m6, m2 ; V+b[4..7]
- paddw m0, m1
- paddw m2, m1
- psraw m5, 5
- psraw m6, 5
- packuswb m3, m4
- packuswb m5, m6
- mova [r0], m3
- mova [r0+r2], m5
-%endif
lea r0, [r0+r2*2]
dec r4
jg .loop
- REP_RET
+ RET
%endmacro
-INIT_MMX mmx
-H264_PRED8x8_PLANE
-INIT_MMX mmxext
-H264_PRED8x8_PLANE
INIT_XMM sse2
H264_PRED8x8_PLANE
INIT_XMM ssse3
@@ -795,11 +599,9 @@ cglobal pred8x8_horizontal_8, 2,3
lea r0, [r0+r1*2]
dec r2
jg .loop
- REP_RET
+ RET
%endmacro
-INIT_MMX mmx
-PRED8x8_H
INIT_MMX mmxext
PRED8x8_H
INIT_MMX ssse3
@@ -935,52 +737,12 @@ cglobal pred8x8_dc_rv40_8, 2,7
lea r4, [r4+r1*2]
dec r3d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred8x8_tm_vp8_8(uint8_t *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-%macro PRED8x8_TM 0
-cglobal pred8x8_tm_vp8_8, 2,6
- sub r0, r1
- pxor mm7, mm7
- movq mm0, [r0]
- movq mm1, mm0
- punpcklbw mm0, mm7
- punpckhbw mm1, mm7
- movzx r4d, byte [r0-1]
- mov r5d, 4
-.loop:
- movzx r2d, byte [r0+r1*1-1]
- movzx r3d, byte [r0+r1*2-1]
- sub r2d, r4d
- sub r3d, r4d
- movd mm2, r2d
- movd mm4, r3d
- SPLATW mm2, mm2, 0
- SPLATW mm4, mm4, 0
- movq mm3, mm2
- movq mm5, mm4
- paddw mm2, mm0
- paddw mm3, mm1
- paddw mm4, mm0
- paddw mm5, mm1
- packuswb mm2, mm3
- packuswb mm4, mm5
- movq [r0+r1*1], mm2
- movq [r0+r1*2], mm4
- lea r0, [r0+r1*2]
- dec r5d
- jg .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmx
-PRED8x8_TM
-INIT_MMX mmxext
-PRED8x8_TM
-
INIT_XMM sse2
cglobal pred8x8_tm_vp8_8, 2,6,4
sub r0, r1
@@ -1008,7 +770,7 @@ cglobal pred8x8_tm_vp8_8, 2,6,4
lea r0, [r0+r1*2]
dec r5d
jg .loop
- REP_RET
+ RET
INIT_XMM ssse3
cglobal pred8x8_tm_vp8_8, 2,3,6
@@ -1035,7 +797,7 @@ cglobal pred8x8_tm_vp8_8, 2,3,6
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
+ RET
; dest, left, right, src, tmp
; output: %1 = (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2
@@ -1337,114 +1099,6 @@ PRED8x8L_VERTICAL
; int has_topright, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-INIT_MMX mmxext
-cglobal pred8x8l_down_left_8, 4,5
- sub r0, r3
- movq mm0, [r0-8]
- movq mm3, [r0]
- movq mm1, [r0+8]
- movq mm2, mm3
- movq mm4, mm3
- PALIGNR mm2, mm0, 7, mm0
- PALIGNR mm1, mm4, 1, mm4
- test r1d, r1d
- jz .fix_lt_2
- test r2d, r2d
- jz .fix_tr_1
- jmp .do_top
-.fix_lt_2:
- movq mm5, mm3
- pxor mm5, mm2
- psllq mm5, 56
- psrlq mm5, 56
- pxor mm2, mm5
- test r2d, r2d
- jnz .do_top
-.fix_tr_1:
- movq mm5, mm3
- pxor mm5, mm1
- psrlq mm5, 56
- psllq mm5, 56
- pxor mm1, mm5
- jmp .do_top
-.fix_tr_2:
- punpckhbw mm3, mm3
- pshufw mm1, mm3, 0xFF
- jmp .do_topright
-.do_top:
- PRED4x4_LOWPASS mm4, mm2, mm1, mm3, mm5
- movq mm7, mm4
- test r2d, r2d
- jz .fix_tr_2
- movq mm0, [r0+8]
- movq mm5, mm0
- movq mm2, mm0
- movq mm4, mm0
- psrlq mm5, 56
- PALIGNR mm2, mm3, 7, mm3
- PALIGNR mm5, mm4, 1, mm4
- PRED4x4_LOWPASS mm1, mm2, mm5, mm0, mm4
-.do_topright:
- lea r1, [r0+r3*2]
- movq mm6, mm1
- psrlq mm1, 56
- movq mm4, mm1
- lea r2, [r1+r3*2]
- movq mm2, mm6
- PALIGNR mm2, mm7, 1, mm0
- movq mm3, mm6
- PALIGNR mm3, mm7, 7, mm0
- PALIGNR mm4, mm6, 1, mm0
- movq mm5, mm7
- movq mm1, mm7
- movq mm7, mm6
- lea r4, [r2+r3*2]
- psllq mm1, 8
- PRED4x4_LOWPASS mm0, mm1, mm2, mm5, mm6
- PRED4x4_LOWPASS mm1, mm3, mm4, mm7, mm6
- movq [r4+r3*2], mm1
- movq mm2, mm0
- psllq mm1, 8
- psrlq mm2, 56
- psllq mm0, 8
- por mm1, mm2
- movq [r4+r3*1], mm1
- movq mm2, mm0
- psllq mm1, 8
- psrlq mm2, 56
- psllq mm0, 8
- por mm1, mm2
- movq [r2+r3*2], mm1
- movq mm2, mm0
- psllq mm1, 8
- psrlq mm2, 56
- psllq mm0, 8
- por mm1, mm2
- movq [r2+r3*1], mm1
- movq mm2, mm0
- psllq mm1, 8
- psrlq mm2, 56
- psllq mm0, 8
- por mm1, mm2
- movq [r1+r3*2], mm1
- movq mm2, mm0
- psllq mm1, 8
- psrlq mm2, 56
- psllq mm0, 8
- por mm1, mm2
- movq [r1+r3*1], mm1
- movq mm2, mm0
- psllq mm1, 8
- psrlq mm2, 56
- psllq mm0, 8
- por mm1, mm2
- movq [r0+r3*2], mm1
- psllq mm1, 8
- psrlq mm0, 56
- por mm1, mm0
- movq [r0+r3*1], mm1
- RET
-
%macro PRED8x8L_DOWN_LEFT 0
cglobal pred8x8l_down_left_8, 4,4
sub r0, r3
@@ -1534,142 +1188,10 @@ INIT_MMX ssse3
PRED8x8L_DOWN_LEFT
;-----------------------------------------------------------------------------
-; void ff_pred8x8l_down_right_8_mmxext(uint8_t *src, int has_topleft,
-; int has_topright, ptrdiff_t stride)
+; void ff_pred8x8l_down_right_8(uint8_t *src, int has_topleft,
+; int has_topright, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-INIT_MMX mmxext
-cglobal pred8x8l_down_right_8, 4,5
- sub r0, r3
- lea r4, [r0+r3*2]
- movq mm0, [r0+r3*1-8]
- punpckhbw mm0, [r0+r3*0-8]
- movq mm1, [r4+r3*1-8]
- punpckhbw mm1, [r0+r3*2-8]
- mov r4, r0
- punpckhwd mm1, mm0
- lea r0, [r0+r3*4]
- movq mm2, [r0+r3*1-8]
- punpckhbw mm2, [r0+r3*0-8]
- lea r0, [r0+r3*2]
- movq mm3, [r0+r3*1-8]
- punpckhbw mm3, [r0+r3*0-8]
- punpckhwd mm3, mm2
- punpckhdq mm3, mm1
- lea r0, [r0+r3*2]
- movq mm0, [r0+r3*0-8]
- movq mm1, [r4]
- mov r0, r4
- movq mm4, mm3
- movq mm2, mm3
- PALIGNR mm4, mm0, 7, mm0
- PALIGNR mm1, mm2, 1, mm2
- test r1d, r1d ; top_left
- jz .fix_lt_1
-.do_left:
- movq mm0, mm4
- PRED4x4_LOWPASS mm2, mm1, mm4, mm3, mm5
- movq mm4, mm0
- movq mm7, mm2
- movq mm6, mm2
- PRED4x4_LOWPASS mm1, mm3, mm0, mm4, mm5
- psllq mm1, 56
- PALIGNR mm7, mm1, 7, mm3
- movq mm0, [r0-8]
- movq mm3, [r0]
- movq mm1, [r0+8]
- movq mm2, mm3
- movq mm4, mm3
- PALIGNR mm2, mm0, 7, mm0
- PALIGNR mm1, mm4, 1, mm4
- test r1d, r1d ; top_left
- jz .fix_lt_2
- test r2d, r2d ; top_right
- jz .fix_tr_1
-.do_top:
- PRED4x4_LOWPASS mm4, mm2, mm1, mm3, mm5
- movq mm5, mm4
- jmp .body
-.fix_lt_1:
- movq mm5, mm3
- pxor mm5, mm4
- psrlq mm5, 56
- psllq mm5, 48
- pxor mm1, mm5
- jmp .do_left
-.fix_lt_2:
- movq mm5, mm3
- pxor mm5, mm2
- psllq mm5, 56
- psrlq mm5, 56
- pxor mm2, mm5
- test r2d, r2d ; top_right
- jnz .do_top
-.fix_tr_1:
- movq mm5, mm3
- pxor mm5, mm1
- psrlq mm5, 56
- psllq mm5, 56
- pxor mm1, mm5
- jmp .do_top
-.body:
- lea r1, [r0+r3*2]
- movq mm1, mm7
- movq mm7, mm5
- movq mm5, mm6
- movq mm2, mm7
- lea r2, [r1+r3*2]
- PALIGNR mm2, mm6, 1, mm0
- movq mm3, mm7
- PALIGNR mm3, mm6, 7, mm0
- movq mm4, mm7
- lea r4, [r2+r3*2]
- psrlq mm4, 8
- PRED4x4_LOWPASS mm0, mm1, mm2, mm5, mm6
- PRED4x4_LOWPASS mm1, mm3, mm4, mm7, mm6
- movq [r4+r3*2], mm0
- movq mm2, mm1
- psrlq mm0, 8
- psllq mm2, 56
- psrlq mm1, 8
- por mm0, mm2
- movq [r4+r3*1], mm0
- movq mm2, mm1
- psrlq mm0, 8
- psllq mm2, 56
- psrlq mm1, 8
- por mm0, mm2
- movq [r2+r3*2], mm0
- movq mm2, mm1
- psrlq mm0, 8
- psllq mm2, 56
- psrlq mm1, 8
- por mm0, mm2
- movq [r2+r3*1], mm0
- movq mm2, mm1
- psrlq mm0, 8
- psllq mm2, 56
- psrlq mm1, 8
- por mm0, mm2
- movq [r1+r3*2], mm0
- movq mm2, mm1
- psrlq mm0, 8
- psllq mm2, 56
- psrlq mm1, 8
- por mm0, mm2
- movq [r1+r3*1], mm0
- movq mm2, mm1
- psrlq mm0, 8
- psllq mm2, 56
- psrlq mm1, 8
- por mm0, mm2
- movq [r0+r3*2], mm0
- psrlq mm0, 8
- psllq mm1, 56
- por mm0, mm1
- movq [r0+r3*1], mm0
- RET
-
%macro PRED8x8L_DOWN_RIGHT 0
cglobal pred8x8l_down_right_8, 4,5
sub r0, r3
@@ -1790,113 +1312,6 @@ PRED8x8L_DOWN_RIGHT
; int has_topright, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-INIT_MMX mmxext
-cglobal pred8x8l_vertical_right_8, 4,5
- sub r0, r3
- lea r4, [r0+r3*2]
- movq mm0, [r0+r3*1-8]
- punpckhbw mm0, [r0+r3*0-8]
- movq mm1, [r4+r3*1-8]
- punpckhbw mm1, [r0+r3*2-8]
- mov r4, r0
- punpckhwd mm1, mm0
- lea r0, [r0+r3*4]
- movq mm2, [r0+r3*1-8]
- punpckhbw mm2, [r0+r3*0-8]
- lea r0, [r0+r3*2]
- movq mm3, [r0+r3*1-8]
- punpckhbw mm3, [r0+r3*0-8]
- punpckhwd mm3, mm2
- punpckhdq mm3, mm1
- lea r0, [r0+r3*2]
- movq mm0, [r0+r3*0-8]
- movq mm1, [r4]
- mov r0, r4
- movq mm4, mm3
- movq mm2, mm3
- PALIGNR mm4, mm0, 7, mm0
- PALIGNR mm1, mm2, 1, mm2
- test r1d, r1d
- jz .fix_lt_1
- jmp .do_left
-.fix_lt_1:
- movq mm5, mm3
- pxor mm5, mm4
- psrlq mm5, 56
- psllq mm5, 48
- pxor mm1, mm5
- jmp .do_left
-.fix_lt_2:
- movq mm5, mm3
- pxor mm5, mm2
- psllq mm5, 56
- psrlq mm5, 56
- pxor mm2, mm5
- test r2d, r2d
- jnz .do_top
-.fix_tr_1:
- movq mm5, mm3
- pxor mm5, mm1
- psrlq mm5, 56
- psllq mm5, 56
- pxor mm1, mm5
- jmp .do_top
-.do_left:
- movq mm0, mm4
- PRED4x4_LOWPASS mm2, mm1, mm4, mm3, mm5
- movq mm7, mm2
- movq mm0, [r0-8]
- movq mm3, [r0]
- movq mm1, [r0+8]
- movq mm2, mm3
- movq mm4, mm3
- PALIGNR mm2, mm0, 7, mm0
- PALIGNR mm1, mm4, 1, mm4
- test r1d, r1d
- jz .fix_lt_2
- test r2d, r2d
- jz .fix_tr_1
-.do_top:
- PRED4x4_LOWPASS mm6, mm2, mm1, mm3, mm5
- lea r1, [r0+r3*2]
- movq mm2, mm6
- movq mm3, mm6
- PALIGNR mm3, mm7, 7, mm0
- PALIGNR mm6, mm7, 6, mm1
- movq mm4, mm3
- pavgb mm3, mm2
- lea r2, [r1+r3*2]
- PRED4x4_LOWPASS mm0, mm6, mm2, mm4, mm5
- movq [r0+r3*1], mm3
- movq [r0+r3*2], mm0
- movq mm5, mm0
- movq mm6, mm3
- movq mm1, mm7
- movq mm2, mm1
- psllq mm2, 8
- movq mm3, mm1
- psllq mm3, 16
- lea r4, [r2+r3*2]
- PRED4x4_LOWPASS mm0, mm1, mm3, mm2, mm4
- PALIGNR mm6, mm0, 7, mm2
- movq [r1+r3*1], mm6
- psllq mm0, 8
- PALIGNR mm5, mm0, 7, mm1
- movq [r1+r3*2], mm5
- psllq mm0, 8
- PALIGNR mm6, mm0, 7, mm2
- movq [r2+r3*1], mm6
- psllq mm0, 8
- PALIGNR mm5, mm0, 7, mm1
- movq [r2+r3*2], mm5
- psllq mm0, 8
- PALIGNR mm6, mm0, 7, mm2
- movq [r4+r3*1], mm6
- psllq mm0, 8
- PALIGNR mm5, mm0, 7, mm1
- movq [r4+r3*2], mm5
- RET
-
%macro PRED8x8L_VERTICAL_RIGHT 0
cglobal pred8x8l_vertical_right_8, 4,5,7
; manually spill XMM registers for Win64 because
@@ -2196,121 +1611,6 @@ PRED8x8L_HORIZONTAL_UP
; int has_topright, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-INIT_MMX mmxext
-cglobal pred8x8l_horizontal_down_8, 4,5
- sub r0, r3
- lea r4, [r0+r3*2]
- movq mm0, [r0+r3*1-8]
- punpckhbw mm0, [r0+r3*0-8]
- movq mm1, [r4+r3*1-8]
- punpckhbw mm1, [r0+r3*2-8]
- mov r4, r0
- punpckhwd mm1, mm0
- lea r0, [r0+r3*4]
- movq mm2, [r0+r3*1-8]
- punpckhbw mm2, [r0+r3*0-8]
- lea r0, [r0+r3*2]
- movq mm3, [r0+r3*1-8]
- punpckhbw mm3, [r0+r3*0-8]
- punpckhwd mm3, mm2
- punpckhdq mm3, mm1
- lea r0, [r0+r3*2]
- movq mm0, [r0+r3*0-8]
- movq mm1, [r4]
- mov r0, r4
- movq mm4, mm3
- movq mm2, mm3
- PALIGNR mm4, mm0, 7, mm0
- PALIGNR mm1, mm2, 1, mm2
- test r1d, r1d
- jnz .do_left
-.fix_lt_1:
- movq mm5, mm3
- pxor mm5, mm4
- psrlq mm5, 56
- psllq mm5, 48
- pxor mm1, mm5
- jmp .do_left
-.fix_lt_2:
- movq mm5, mm3
- pxor mm5, mm2
- psllq mm5, 56
- psrlq mm5, 56
- pxor mm2, mm5
- test r2d, r2d
- jnz .do_top
-.fix_tr_1:
- movq mm5, mm3
- pxor mm5, mm1
- psrlq mm5, 56
- psllq mm5, 56
- pxor mm1, mm5
- jmp .do_top
-.do_left:
- movq mm0, mm4
- PRED4x4_LOWPASS mm2, mm1, mm4, mm3, mm5
- movq mm4, mm0
- movq mm7, mm2
- movq mm6, mm2
- PRED4x4_LOWPASS mm1, mm3, mm0, mm4, mm5
- psllq mm1, 56
- PALIGNR mm7, mm1, 7, mm3
- movq mm0, [r0-8]
- movq mm3, [r0]
- movq mm1, [r0+8]
- movq mm2, mm3
- movq mm4, mm3
- PALIGNR mm2, mm0, 7, mm0
- PALIGNR mm1, mm4, 1, mm4
- test r1d, r1d
- jz .fix_lt_2
- test r2d, r2d
- jz .fix_tr_1
-.do_top:
- PRED4x4_LOWPASS mm4, mm2, mm1, mm3, mm5
- movq mm5, mm4
- lea r1, [r0+r3*2]
- psllq mm7, 56
- movq mm2, mm5
- movq mm3, mm6
- movq mm4, mm2
- PALIGNR mm2, mm6, 7, mm5
- PALIGNR mm6, mm7, 7, mm0
- lea r2, [r1+r3*2]
- PALIGNR mm4, mm3, 1, mm7
- movq mm5, mm3
- pavgb mm3, mm6
- PRED4x4_LOWPASS mm0, mm4, mm6, mm5, mm7
- movq mm4, mm2
- movq mm1, mm2
- lea r4, [r2+r3*2]
- psrlq mm4, 16
- psrlq mm1, 8
- PRED4x4_LOWPASS mm6, mm4, mm2, mm1, mm5
- movq mm7, mm3
- punpcklbw mm3, mm0
- punpckhbw mm7, mm0
- movq mm1, mm7
- movq mm0, mm7
- movq mm4, mm7
- movq [r4+r3*2], mm3
- PALIGNR mm7, mm3, 2, mm5
- movq [r4+r3*1], mm7
- PALIGNR mm1, mm3, 4, mm5
- movq [r2+r3*2], mm1
- PALIGNR mm0, mm3, 6, mm3
- movq [r2+r3*1], mm0
- movq mm2, mm6
- movq mm3, mm6
- movq [r1+r3*2], mm4
- PALIGNR mm6, mm4, 2, mm5
- movq [r1+r3*1], mm6
- PALIGNR mm2, mm4, 4, mm5
- movq [r0+r3*2], mm2
- PALIGNR mm3, mm4, 6, mm4
- movq [r0+r3*1], mm3
- RET
-
%macro PRED8x8L_HORIZONTAL_DOWN 0
cglobal pred8x8l_horizontal_down_8, 4,5
sub r0, r3
@@ -2476,7 +1776,7 @@ cglobal pred4x4_dc_8, 3,5
; ptrdiff_t stride)
;-----------------------------------------------------------------------------
-%macro PRED4x4_TM 0
+INIT_MMX mmxext
cglobal pred4x4_tm_vp8_8, 3,6
sub r0, r2
pxor mm7, mm7
@@ -2491,15 +1791,8 @@ cglobal pred4x4_tm_vp8_8, 3,6
sub r3d, r4d
movd mm2, r1d
movd mm4, r3d
-%if cpuflag(mmxext)
pshufw mm2, mm2, 0
pshufw mm4, mm4, 0
-%else
- punpcklwd mm2, mm2
- punpcklwd mm4, mm4
- punpckldq mm2, mm2
- punpckldq mm4, mm4
-%endif
paddw mm2, mm0
paddw mm4, mm0
packuswb mm2, mm2
@@ -2509,13 +1802,7 @@ cglobal pred4x4_tm_vp8_8, 3,6
lea r0, [r0+r2*2]
dec r5d
jg .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmx
-PRED4x4_TM
-INIT_MMX mmxext
-PRED4x4_TM
+ RET
INIT_XMM ssse3
cglobal pred4x4_tm_vp8_8, 3,3
diff --git a/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm b/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm
index 629e0a72e3..2f30807332 100644
--- a/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm
+++ b/media/ffvpx/libavcodec/x86/h264_intrapred_10bit.asm
@@ -327,19 +327,14 @@ cglobal pred8x8_horizontal_10, 2, 3
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_predict_8x8_dc_10(pixel *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
%macro MOV8 2-3
; sort of a hack, but it works
-%if mmsize==8
- movq [%1+0], %2
- movq [%1+8], %3
-%else
movdqa [%1], %2
-%endif
%endmacro
%macro PRED8x8_DC 1
@@ -348,17 +343,9 @@ cglobal pred8x8_dc_10, 2, 6
pxor m4, m4
movq m0, [r0+0]
movq m1, [r0+8]
-%if mmsize==16
punpcklwd m0, m1
movhlps m1, m0
paddw m0, m1
-%else
- pshufw m2, m0, 00001110b
- pshufw m3, m1, 00001110b
- paddw m0, m2
- paddw m1, m3
- punpcklwd m0, m1
-%endif
%1 m2, m0, 00001110b
paddw m0, m2
@@ -389,17 +376,10 @@ cglobal pred8x8_dc_10, 2, 6
paddw m0, m3
psrlw m0, 2
pavgw m0, m4 ; s0+s2, s1, s3, s1+s3
-%if mmsize==16
punpcklwd m0, m0
pshufd m3, m0, 11111010b
punpckldq m0, m0
SWAP 0,1
-%else
- pshufw m1, m0, 0x00
- pshufw m2, m0, 0x55
- pshufw m3, m0, 0xaa
- pshufw m4, m0, 0xff
-%endif
MOV8 r0+r1*1, m1, m2
MOV8 r0+r1*2, m1, m2
MOV8 r0+r5*1, m1, m2
@@ -411,8 +391,6 @@ cglobal pred8x8_dc_10, 2, 6
RET
%endmacro
-INIT_MMX mmxext
-PRED8x8_DC pshufw
INIT_XMM sse2
PRED8x8_DC pshuflw
@@ -503,14 +481,14 @@ cglobal pred8x8_plane_10, 2, 7, 7
add r0, r1
dec r2d
jg .loop
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_pred8x8l_128_dc_10(pixel *src, int has_topleft, int has_topright,
; ptrdiff_t stride)
;-----------------------------------------------------------------------------
-%macro PRED8x8L_128_DC 0
+INIT_XMM sse2
cglobal pred8x8l_128_dc_10, 4, 4
mova m0, [pw_512] ; (1<<(BIT_DEPTH-1))
lea r1, [r3*3]
@@ -524,12 +502,6 @@ cglobal pred8x8l_128_dc_10, 4, 4
MOV8 r2+r3*2, m0, m0
MOV8 r2+r1*1, m0, m0
RET
-%endmacro
-
-INIT_MMX mmxext
-PRED8x8L_128_DC
-INIT_XMM sse2
-PRED8x8L_128_DC
;-----------------------------------------------------------------------------
; void ff_pred8x8l_top_dc_10(pixel *src, int has_topleft, int has_topright,
@@ -1008,40 +980,26 @@ PRED8x8L_HORIZONTAL_UP
%macro MOV16 3-5
mova [%1+ 0], %2
mova [%1+mmsize], %3
-%if mmsize==8
- mova [%1+ 16], %4
- mova [%1+ 24], %5
-%endif
%endmacro
-%macro PRED16x16_VERTICAL 0
+INIT_XMM sse2
cglobal pred16x16_vertical_10, 2, 3
sub r0, r1
mov r2d, 8
mova m0, [r0+ 0]
mova m1, [r0+mmsize]
-%if mmsize==8
- mova m2, [r0+16]
- mova m3, [r0+24]
-%endif
.loop:
MOV16 r0+r1*1, m0, m1, m2, m3
MOV16 r0+r1*2, m0, m1, m2, m3
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PRED16x16_VERTICAL
-INIT_XMM sse2
-PRED16x16_VERTICAL
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_horizontal_10(pixel *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-%macro PRED16x16_HORIZONTAL 0
+INIT_XMM sse2
cglobal pred16x16_horizontal_10, 2, 3
mov r2d, 8
.vloop:
@@ -1054,27 +1012,17 @@ cglobal pred16x16_horizontal_10, 2, 3
lea r0, [r0+r1*2]
dec r2d
jg .vloop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PRED16x16_HORIZONTAL
-INIT_XMM sse2
-PRED16x16_HORIZONTAL
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_dc_10(pixel *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-%macro PRED16x16_DC 0
+INIT_XMM sse2
cglobal pred16x16_dc_10, 2, 6
mov r5, r0
sub r0, r1
mova m0, [r0+0]
paddw m0, [r0+mmsize]
-%if mmsize==8
- paddw m0, [r0+16]
- paddw m0, [r0+24]
-%endif
HADDW m0, m2
lea r0, [r0+r1-2]
@@ -1100,26 +1048,16 @@ cglobal pred16x16_dc_10, 2, 6
lea r5, [r5+r1*2]
dec r3d
jg .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PRED16x16_DC
-INIT_XMM sse2
-PRED16x16_DC
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_top_dc_10(pixel *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-%macro PRED16x16_TOP_DC 0
+INIT_XMM sse2
cglobal pred16x16_top_dc_10, 2, 3
sub r0, r1
mova m0, [r0+0]
paddw m0, [r0+mmsize]
-%if mmsize==8
- paddw m0, [r0+16]
- paddw m0, [r0+24]
-%endif
HADDW m0, m2
SPLATW m0, m0
@@ -1132,18 +1070,12 @@ cglobal pred16x16_top_dc_10, 2, 3
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PRED16x16_TOP_DC
-INIT_XMM sse2
-PRED16x16_TOP_DC
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_left_dc_10(pixel *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-%macro PRED16x16_LEFT_DC 0
+INIT_XMM sse2
cglobal pred16x16_left_dc_10, 2, 6
mov r5, r0
@@ -1169,18 +1101,12 @@ cglobal pred16x16_left_dc_10, 2, 6
lea r5, [r5+r1*2]
dec r3d
jg .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PRED16x16_LEFT_DC
-INIT_XMM sse2
-PRED16x16_LEFT_DC
+ RET
;-----------------------------------------------------------------------------
; void ff_pred16x16_128_dc_10(pixel *src, ptrdiff_t stride)
;-----------------------------------------------------------------------------
-%macro PRED16x16_128_DC 0
+INIT_XMM sse2
cglobal pred16x16_128_dc_10, 2,3
mova m0, [pw_512]
mov r2d, 8
@@ -1190,10 +1116,4 @@ cglobal pred16x16_128_dc_10, 2,3
lea r0, [r0+r1*2]
dec r2d
jg .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PRED16x16_128_DC
-INIT_XMM sse2
-PRED16x16_128_DC
+ RET
diff --git a/media/ffvpx/libavcodec/x86/h264_intrapred_init.c b/media/ffvpx/libavcodec/x86/h264_intrapred_init.c
index bdd5125d68..ee46927a24 100644
--- a/media/ffvpx/libavcodec/x86/h264_intrapred_init.c
+++ b/media/ffvpx/libavcodec/x86/h264_intrapred_init.c
@@ -18,10 +18,13 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <stddef.h>
+#include <stdint.h>
+#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
-#include "libavcodec/avcodec.h"
+#include "libavcodec/codec_id.h"
#include "libavcodec/h264pred.h"
#define PRED4x4(TYPE, DEPTH, OPT) \
@@ -49,7 +52,6 @@ PRED4x4(horizontal_down, 10, avx)
void ff_pred8x8_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
ptrdiff_t stride);
-PRED8x8(dc, 10, mmxext)
PRED8x8(dc, 10, sse2)
PRED8x8(top_dc, 10, sse2)
PRED8x8(plane, 10, sse2)
@@ -64,7 +66,6 @@ void ff_pred8x8l_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
PRED8x8L(dc, 10, sse2)
PRED8x8L(dc, 10, avx)
-PRED8x8L(128_dc, 10, mmxext)
PRED8x8L(128_dc, 10, sse2)
PRED8x8L(top_dc, 10, sse2)
PRED8x8L(top_dc, 10, avx)
@@ -90,42 +91,25 @@ PRED8x8L(horizontal_up, 10, avx)
void ff_pred16x16_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
ptrdiff_t stride);
-PRED16x16(dc, 10, mmxext)
PRED16x16(dc, 10, sse2)
-PRED16x16(top_dc, 10, mmxext)
PRED16x16(top_dc, 10, sse2)
-PRED16x16(128_dc, 10, mmxext)
PRED16x16(128_dc, 10, sse2)
-PRED16x16(left_dc, 10, mmxext)
PRED16x16(left_dc, 10, sse2)
-PRED16x16(vertical, 10, mmxext)
PRED16x16(vertical, 10, sse2)
-PRED16x16(horizontal, 10, mmxext)
PRED16x16(horizontal, 10, sse2)
/* 8-bit versions */
-PRED16x16(vertical, 8, mmx)
PRED16x16(vertical, 8, sse)
-PRED16x16(horizontal, 8, mmx)
PRED16x16(horizontal, 8, mmxext)
PRED16x16(horizontal, 8, ssse3)
-PRED16x16(dc, 8, mmxext)
PRED16x16(dc, 8, sse2)
PRED16x16(dc, 8, ssse3)
-PRED16x16(plane_h264, 8, mmx)
-PRED16x16(plane_h264, 8, mmxext)
PRED16x16(plane_h264, 8, sse2)
PRED16x16(plane_h264, 8, ssse3)
-PRED16x16(plane_rv40, 8, mmx)
-PRED16x16(plane_rv40, 8, mmxext)
PRED16x16(plane_rv40, 8, sse2)
PRED16x16(plane_rv40, 8, ssse3)
-PRED16x16(plane_svq3, 8, mmx)
-PRED16x16(plane_svq3, 8, mmxext)
PRED16x16(plane_svq3, 8, sse2)
PRED16x16(plane_svq3, 8, ssse3)
-PRED16x16(tm_vp8, 8, mmx)
-PRED16x16(tm_vp8, 8, mmxext)
PRED16x16(tm_vp8, 8, sse2)
PRED16x16(tm_vp8, 8, avx2)
@@ -133,15 +117,10 @@ PRED8x8(top_dc, 8, mmxext)
PRED8x8(dc_rv40, 8, mmxext)
PRED8x8(dc, 8, mmxext)
PRED8x8(vertical, 8, mmx)
-PRED8x8(horizontal, 8, mmx)
PRED8x8(horizontal, 8, mmxext)
PRED8x8(horizontal, 8, ssse3)
-PRED8x8(plane, 8, mmx)
-PRED8x8(plane, 8, mmxext)
PRED8x8(plane, 8, sse2)
PRED8x8(plane, 8, ssse3)
-PRED8x8(tm_vp8, 8, mmx)
-PRED8x8(tm_vp8, 8, mmxext)
PRED8x8(tm_vp8, 8, sse2)
PRED8x8(tm_vp8, 8, ssse3)
@@ -153,20 +132,16 @@ PRED8x8L(horizontal, 8, mmxext)
PRED8x8L(horizontal, 8, ssse3)
PRED8x8L(vertical, 8, mmxext)
PRED8x8L(vertical, 8, ssse3)
-PRED8x8L(down_left, 8, mmxext)
PRED8x8L(down_left, 8, sse2)
PRED8x8L(down_left, 8, ssse3)
-PRED8x8L(down_right, 8, mmxext)
PRED8x8L(down_right, 8, sse2)
PRED8x8L(down_right, 8, ssse3)
-PRED8x8L(vertical_right, 8, mmxext)
PRED8x8L(vertical_right, 8, sse2)
PRED8x8L(vertical_right, 8, ssse3)
PRED8x8L(vertical_left, 8, sse2)
PRED8x8L(vertical_left, 8, ssse3)
PRED8x8L(horizontal_up, 8, mmxext)
PRED8x8L(horizontal_up, 8, ssse3)
-PRED8x8L(horizontal_down, 8, mmxext)
PRED8x8L(horizontal_down, 8, sse2)
PRED8x8L(horizontal_down, 8, ssse3)
@@ -177,7 +152,6 @@ PRED4x4(vertical_left, 8, mmxext)
PRED4x4(vertical_right, 8, mmxext)
PRED4x4(horizontal_up, 8, mmxext)
PRED4x4(horizontal_down, 8, mmxext)
-PRED4x4(tm_vp8, 8, mmx)
PRED4x4(tm_vp8, 8, mmxext)
PRED4x4(tm_vp8, 8, ssse3)
PRED4x4(vertical_vp8, 8, mmxext)
@@ -190,44 +164,20 @@ av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
if (bit_depth == 8) {
if (EXTERNAL_MMX(cpu_flags)) {
- h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_8_mmx;
- h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_mmx;
if (chroma_format_idc <= 1) {
h->pred8x8 [VERT_PRED8x8 ] = ff_pred8x8_vertical_8_mmx;
- h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_mmx;
- }
- if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
- h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_mmx;
- h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_mmx;
- h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_mmx;
- } else {
- if (chroma_format_idc <= 1)
- h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_mmx;
- if (codec_id == AV_CODEC_ID_SVQ3) {
- if (cpu_flags & AV_CPU_FLAG_CMOV)
- h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_8_mmx;
- } else if (codec_id == AV_CODEC_ID_RV40) {
- h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_rv40_8_mmx;
- } else {
- h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_h264_8_mmx;
- }
}
}
if (EXTERNAL_MMXEXT(cpu_flags)) {
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_mmxext;
- h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_8_mmxext;
if (chroma_format_idc <= 1)
h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_mmxext;
h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_8_mmxext;
h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_8_mmxext;
h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_8_mmxext;
h->pred8x8l [VERT_PRED ] = ff_pred8x8l_vertical_8_mmxext;
- h->pred8x8l [DIAG_DOWN_RIGHT_PRED ] = ff_pred8x8l_down_right_8_mmxext;
- h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_8_mmxext;
h->pred8x8l [HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_8_mmxext;
- h->pred8x8l [DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_8_mmxext;
- h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_mmxext;
h->pred4x4 [DIAG_DOWN_RIGHT_PRED ] = ff_pred4x4_down_right_8_mmxext;
h->pred4x4 [VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_8_mmxext;
h->pred4x4 [HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_8_mmxext;
@@ -249,21 +199,9 @@ av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
}
}
if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
- h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_mmxext;
h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_8_mmxext;
- h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_mmxext;
h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_mmxext;
h->pred4x4 [VERT_PRED ] = ff_pred4x4_vertical_vp8_8_mmxext;
- } else {
- if (chroma_format_idc <= 1)
- h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_mmxext;
- if (codec_id == AV_CODEC_ID_SVQ3) {
- h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_svq3_8_mmxext;
- } else if (codec_id == AV_CODEC_ID_RV40) {
- h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_rv40_8_mmxext;
- } else {
- h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_h264_8_mmxext;
- }
}
}
@@ -334,18 +272,6 @@ av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
if (EXTERNAL_MMXEXT(cpu_flags)) {
h->pred4x4[DC_PRED ] = ff_pred4x4_dc_10_mmxext;
h->pred4x4[HOR_UP_PRED ] = ff_pred4x4_horizontal_up_10_mmxext;
-
- if (chroma_format_idc <= 1)
- h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_mmxext;
-
- h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_mmxext;
-
- h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_10_mmxext;
- h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_10_mmxext;
- h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_10_mmxext;
- h->pred16x16[LEFT_DC_PRED8x8 ] = ff_pred16x16_left_dc_10_mmxext;
- h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_mmxext;
- h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags)) {
h->pred4x4[DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_10_sse2;
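With the MMXEXT-only overrides removed, most 10-bit predictors now fall back to the C functions installed by ff_h264_pred_init() on CPUs without SSE2; the x86 init only overrides table entries when the matching instruction set is present. A minimal sketch of that dispatch pattern (the wrapper function is illustrative; the macros, table index and symbol are the real ones declared above):

    #include "libavutil/x86/cpu.h"
    #include "libavcodec/h264pred.h"

    /* illustrative only: C defaults are installed first, x86 overrides follow */
    static void example_pred_init_10bit(H264PredContext *h, int cpu_flags)
    {
        if (EXTERNAL_SSE2(cpu_flags)) {
            /* declared above via PRED16x16(dc, 10, sse2) */
            h->pred16x16[DC_PRED8x8] = ff_pred16x16_dc_10_sse2;
        }
    }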
diff --git a/media/ffvpx/libavcodec/x86/videodsp.asm b/media/ffvpx/libavcodec/x86/videodsp.asm
index e237860700..3cc07878d3 100644
--- a/media/ffvpx/libavcodec/x86/videodsp.asm
+++ b/media/ffvpx/libavcodec/x86/videodsp.asm
@@ -45,7 +45,6 @@ SECTION .text
jnz .%1_y_loop
%endmacro
-%macro vvar_fn 0
; .----. <- zero
; | | <- top is copied from first line in body of source
; |----| <- start_y
@@ -53,6 +52,7 @@ SECTION .text
; |----| <- end_y
; | | <- bottom is copied from last line in body of source
; '----' <- bh
+INIT_XMM sse
%if ARCH_X86_64
cglobal emu_edge_vvar, 7, 8, 1, dst, dst_stride, src, src_stride, \
start_y, end_y, bh, w
@@ -81,15 +81,6 @@ cglobal emu_edge_vvar, 1, 6, 1, dst, src, start_y, end_y, bh, w
V_COPY_ROW bottom, bhq ; v_copy_row(bottom, bh)
.end: ; }
RET
-%endmacro
-
-%if ARCH_X86_32
-INIT_MMX mmx
-vvar_fn
-%endif
-
-INIT_XMM sse
-vvar_fn
%macro hvar_fn 0
cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
@@ -105,11 +96,7 @@ cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
imul wd, 0x01010101 ; w *= 0x01010101
movd m0, wd
mov wq, n_wordsq ; initialize w
-%if cpuflag(sse2)
pshufd m0, m0, q0000 ; splat
-%else ; mmx
- punpckldq m0, m0 ; splat
-%endif ; mmx/sse
%endif ; avx2
.x_loop: ; do {
movu [dstq+wq*2], m0 ; write($reg, $mmsize)
@@ -123,11 +110,6 @@ cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
RET
%endmacro
-%if ARCH_X86_32
-INIT_MMX mmx
-hvar_fn
-%endif
-
INIT_XMM sse2
hvar_fn
@@ -338,9 +320,6 @@ cglobal emu_edge_vfix %+ %%n, 1, 5, 1, dst, src, start_y, end_y, bh
INIT_MMX mmx
VERTICAL_EXTEND 1, 15
-%if ARCH_X86_32
-VERTICAL_EXTEND 16, 22
-%endif
INIT_XMM sse
VERTICAL_EXTEND 16, 22
@@ -438,9 +417,6 @@ cglobal emu_edge_hfix %+ %%n, 4, 5, 1, dst, dst_stride, start_x, bh, val
INIT_MMX mmx
H_EXTEND 2, 14
-%if ARCH_X86_32
-H_EXTEND 16, 22
-%endif
INIT_XMM sse2
H_EXTEND 16, 22
@@ -450,19 +426,11 @@ INIT_XMM avx2
H_EXTEND 8, 22
%endif
-%macro PREFETCH_FN 1
+INIT_MMX mmxext
cglobal prefetch, 3, 3, 0, buf, stride, h
.loop:
- %1 [bufq]
+ prefetcht0 [bufq]
add bufq, strideq
dec hd
jg .loop
- REP_RET
-%endmacro
-
-INIT_MMX mmxext
-PREFETCH_FN prefetcht0
-%if ARCH_X86_32
-INIT_MMX 3dnow
-PREFETCH_FN prefetch
-%endif
+ RET
diff --git a/media/ffvpx/libavcodec/x86/videodsp_init.c b/media/ffvpx/libavcodec/x86/videodsp_init.c
index eeebb41547..ae9db95624 100644
--- a/media/ffvpx/libavcodec/x86/videodsp_init.c
+++ b/media/ffvpx/libavcodec/x86/videodsp_init.c
@@ -24,7 +24,6 @@
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
-#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/videodsp.h"
@@ -53,26 +52,6 @@ extern emu_edge_vfix_func ff_emu_edge_vfix12_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix13_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix14_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix15_mmx;
-extern emu_edge_vfix_func ff_emu_edge_vfix16_mmx;
-extern emu_edge_vfix_func ff_emu_edge_vfix17_mmx;
-extern emu_edge_vfix_func ff_emu_edge_vfix18_mmx;
-extern emu_edge_vfix_func ff_emu_edge_vfix19_mmx;
-extern emu_edge_vfix_func ff_emu_edge_vfix20_mmx;
-extern emu_edge_vfix_func ff_emu_edge_vfix21_mmx;
-extern emu_edge_vfix_func ff_emu_edge_vfix22_mmx;
-#if ARCH_X86_32
-static emu_edge_vfix_func * const vfixtbl_mmx[22] = {
- &ff_emu_edge_vfix1_mmx, &ff_emu_edge_vfix2_mmx, &ff_emu_edge_vfix3_mmx,
- &ff_emu_edge_vfix4_mmx, &ff_emu_edge_vfix5_mmx, &ff_emu_edge_vfix6_mmx,
- &ff_emu_edge_vfix7_mmx, &ff_emu_edge_vfix8_mmx, &ff_emu_edge_vfix9_mmx,
- &ff_emu_edge_vfix10_mmx, &ff_emu_edge_vfix11_mmx, &ff_emu_edge_vfix12_mmx,
- &ff_emu_edge_vfix13_mmx, &ff_emu_edge_vfix14_mmx, &ff_emu_edge_vfix15_mmx,
- &ff_emu_edge_vfix16_mmx, &ff_emu_edge_vfix17_mmx, &ff_emu_edge_vfix18_mmx,
- &ff_emu_edge_vfix19_mmx, &ff_emu_edge_vfix20_mmx, &ff_emu_edge_vfix21_mmx,
- &ff_emu_edge_vfix22_mmx
-};
-#endif
-extern emu_edge_vvar_func ff_emu_edge_vvar_mmx;
extern emu_edge_vfix_func ff_emu_edge_vfix16_sse;
extern emu_edge_vfix_func ff_emu_edge_vfix17_sse;
extern emu_edge_vfix_func ff_emu_edge_vfix18_sse;
@@ -104,19 +83,6 @@ extern emu_edge_hfix_func ff_emu_edge_hfix8_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix10_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix12_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix14_mmx;
-extern emu_edge_hfix_func ff_emu_edge_hfix16_mmx;
-extern emu_edge_hfix_func ff_emu_edge_hfix18_mmx;
-extern emu_edge_hfix_func ff_emu_edge_hfix20_mmx;
-extern emu_edge_hfix_func ff_emu_edge_hfix22_mmx;
-#if ARCH_X86_32
-static emu_edge_hfix_func * const hfixtbl_mmx[11] = {
- ff_emu_edge_hfix2_mmx, ff_emu_edge_hfix4_mmx, ff_emu_edge_hfix6_mmx,
- ff_emu_edge_hfix8_mmx, ff_emu_edge_hfix10_mmx, ff_emu_edge_hfix12_mmx,
- ff_emu_edge_hfix14_mmx, ff_emu_edge_hfix16_mmx, ff_emu_edge_hfix18_mmx,
- ff_emu_edge_hfix20_mmx, ff_emu_edge_hfix22_mmx
-};
-#endif
-extern emu_edge_hvar_func ff_emu_edge_hvar_mmx;
extern emu_edge_hfix_func ff_emu_edge_hfix16_sse2;
extern emu_edge_hfix_func ff_emu_edge_hfix18_sse2;
extern emu_edge_hfix_func ff_emu_edge_hfix20_sse2;
@@ -222,30 +188,6 @@ static av_always_inline void emulated_edge_mc(uint8_t *dst, const uint8_t *src,
}
}
-#if ARCH_X86_32
-static av_noinline void emulated_edge_mc_mmx(uint8_t *buf, const uint8_t *src,
- ptrdiff_t buf_stride,
- ptrdiff_t src_stride,
- int block_w, int block_h,
- int src_x, int src_y, int w, int h)
-{
- emulated_edge_mc(buf, src, buf_stride, src_stride, block_w, block_h,
- src_x, src_y, w, h, vfixtbl_mmx, &ff_emu_edge_vvar_mmx,
- hfixtbl_mmx, &ff_emu_edge_hvar_mmx);
-}
-
-static av_noinline void emulated_edge_mc_sse(uint8_t *buf, const uint8_t *src,
- ptrdiff_t buf_stride,
- ptrdiff_t src_stride,
- int block_w, int block_h,
- int src_x, int src_y, int w, int h)
-{
- emulated_edge_mc(buf, src, buf_stride, src_stride, block_w, block_h,
- src_x, src_y, w, h, vfixtbl_sse, &ff_emu_edge_vvar_sse,
- hfixtbl_mmx, &ff_emu_edge_hvar_mmx);
-}
-#endif
-
static av_noinline void emulated_edge_mc_sse2(uint8_t *buf, const uint8_t *src,
ptrdiff_t buf_stride,
ptrdiff_t src_stride,
@@ -273,30 +215,16 @@ static av_noinline void emulated_edge_mc_avx2(uint8_t *buf, const uint8_t *src,
#endif /* HAVE_AVX2_EXTERNAL */
#endif /* HAVE_X86ASM */
-void ff_prefetch_mmxext(uint8_t *buf, ptrdiff_t stride, int h);
-void ff_prefetch_3dnow(uint8_t *buf, ptrdiff_t stride, int h);
+void ff_prefetch_mmxext(const uint8_t *buf, ptrdiff_t stride, int h);
av_cold void ff_videodsp_init_x86(VideoDSPContext *ctx, int bpc)
{
#if HAVE_X86ASM
int cpu_flags = av_get_cpu_flags();
-#if ARCH_X86_32
- if (EXTERNAL_MMX(cpu_flags) && bpc <= 8) {
- ctx->emulated_edge_mc = emulated_edge_mc_mmx;
- }
- if (EXTERNAL_AMD3DNOW(cpu_flags)) {
- ctx->prefetch = ff_prefetch_3dnow;
- }
-#endif /* ARCH_X86_32 */
if (EXTERNAL_MMXEXT(cpu_flags)) {
ctx->prefetch = ff_prefetch_mmxext;
}
-#if ARCH_X86_32
- if (EXTERNAL_SSE(cpu_flags) && bpc <= 8) {
- ctx->emulated_edge_mc = emulated_edge_mc_sse;
- }
-#endif /* ARCH_X86_32 */
if (EXTERNAL_SSE2(cpu_flags) && bpc <= 8) {
ctx->emulated_edge_mc = emulated_edge_mc_sse2;
}
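With the 32-bit MMX/3DNow! paths gone, every caller reaches these entry points through the same init sequence; a hedged sketch of that flow (the wrapper function and its arguments are illustrative, while ff_videodsp_init() and the context fields are real):

    #include "libavcodec/videodsp.h"

    /* illustrative caller: the generic init installs C fallbacks, then
     * ff_videodsp_init_x86() overrides prefetch/emulated_edge_mc as above */
    static void example_videodsp_setup(VideoDSPContext *vdsp,
                                       const uint8_t *buf, ptrdiff_t stride)
    {
        ff_videodsp_init(vdsp, 8);       /* 8 bits per component */
        vdsp->prefetch(buf, stride, 4);  /* ff_prefetch_mmxext where available */
    }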
diff --git a/media/ffvpx/libavcodec/x86/vp8dsp.asm b/media/ffvpx/libavcodec/x86/vp8dsp.asm
index 75de5690a1..6ac5a7721b 100644
--- a/media/ffvpx/libavcodec/x86/vp8dsp.asm
+++ b/media/ffvpx/libavcodec/x86/vp8dsp.asm
@@ -157,7 +157,7 @@ SECTION .text
; subpel MC functions:
;
; void ff_put_vp8_epel<size>_h<htap>v<vtap>_<opt>(uint8_t *dst, ptrdiff_t deststride,
-; uint8_t *src, ptrdiff_t srcstride,
+; const uint8_t *src, ptrdiff_t srcstride,
; int height, int mx, int my);
;-------------------------------------------------------------------------------
@@ -200,7 +200,7 @@ cglobal put_vp8_epel%1_h6, 6, 6 + npicregs, 8, dst, dststride, src, srcstride, h
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, height, mx, picreg
shl mxd, 4
@@ -230,7 +230,7 @@ cglobal put_vp8_epel%1_h4, 6, 6 + npicregs, 7, dst, dststride, src, srcstride, h
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
shl myd, 4
@@ -268,7 +268,7 @@ cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picr
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picreg, my
lea myd, [myq*3]
@@ -314,7 +314,7 @@ cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picr
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
%endmacro
INIT_MMX ssse3
@@ -368,7 +368,7 @@ cglobal put_vp8_epel4_h4, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, he
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
; 4x4 block, H-only 6-tap filter
INIT_MMX mmxext
@@ -426,7 +426,7 @@ cglobal put_vp8_epel4_h6, 6, 6 + npicregs, 0, dst, dststride, src, srcstride, he
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
INIT_XMM sse2
cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, height, mx, picreg
@@ -474,7 +474,7 @@ cglobal put_vp8_epel8_h4, 6, 6 + npicregs, 10, dst, dststride, src, srcstride, h
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
INIT_XMM sse2
cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, height, mx, picreg
@@ -537,7 +537,7 @@ cglobal put_vp8_epel8_h6, 6, 6 + npicregs, 14, dst, dststride, src, srcstride, h
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
%macro FILTER_V 1
; 4x4 block, V-only 4-tap filter
@@ -590,7 +590,7 @@ cglobal put_vp8_epel%1_v4, 7, 7, 8, dst, dststride, src, srcstride, height, picr
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
; 4x4 block, V-only 6-tap filter
@@ -655,7 +655,7 @@ cglobal put_vp8_epel%1_v6, 7, 7, 8, dst, dststride, src, srcstride, height, picr
add srcq, srcstrideq
dec heightd ; next row
jg .nextrow
- REP_RET
+ RET
%endmacro
INIT_MMX mmxext
@@ -738,7 +738,7 @@ cglobal put_vp8_bilinear%1_v, 7, 7, 7, dst, dststride, src, srcstride, height, p
lea srcq, [srcq+srcstrideq*2]
sub heightd, 2
jg .nextrow
- REP_RET
+ RET
%if cpuflag(ssse3)
cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 5, dst, dststride, src, srcstride, height, mx, picreg
@@ -815,7 +815,7 @@ cglobal put_vp8_bilinear%1_h, 6, 6 + npicregs, 7, dst, dststride, src, srcstride
lea srcq, [srcq+srcstrideq*2]
sub heightd, 2
jg .nextrow
- REP_RET
+ RET
%endmacro
INIT_MMX mmxext
@@ -838,26 +838,7 @@ cglobal put_vp8_pixels8, 5, 5, 0, dst, dststride, src, srcstride, height
lea dstq, [dstq+dststrideq*2]
sub heightd, 2
jg .nextrow
- REP_RET
-
-%if ARCH_X86_32
-INIT_MMX mmx
-cglobal put_vp8_pixels16, 5, 5, 0, dst, dststride, src, srcstride, height
-.nextrow:
- movq mm0, [srcq+srcstrideq*0+0]
- movq mm1, [srcq+srcstrideq*0+8]
- movq mm2, [srcq+srcstrideq*1+0]
- movq mm3, [srcq+srcstrideq*1+8]
- lea srcq, [srcq+srcstrideq*2]
- movq [dstq+dststrideq*0+0], mm0
- movq [dstq+dststrideq*0+8], mm1
- movq [dstq+dststrideq*1+0], mm2
- movq [dstq+dststrideq*1+8], mm3
- lea dstq, [dstq+dststrideq*2]
- sub heightd, 2
- jg .nextrow
- REP_RET
-%endif
+ RET
INIT_XMM sse
cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
@@ -870,7 +851,7 @@ cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
lea dstq, [dstq+dststrideq*2]
sub heightd, 2
jg .nextrow
- REP_RET
+ RET
;-----------------------------------------------------------------------------
; void ff_vp8_idct_dc_add_<opt>(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
@@ -895,32 +876,6 @@ cglobal put_vp8_pixels16, 5, 5, 2, dst, dststride, src, srcstride, height
%4 [dst2q+strideq+%3], m5
%endmacro
-%if ARCH_X86_32
-INIT_MMX mmx
-cglobal vp8_idct_dc_add, 3, 3, 0, dst, block, stride
- ; load data
- movd m0, [blockq]
-
- ; calculate DC
- paddw m0, [pw_4]
- pxor m1, m1
- psraw m0, 3
- movd [blockq], m1
- psubw m1, m0
- packuswb m0, m0
- packuswb m1, m1
- punpcklbw m0, m0
- punpcklbw m1, m1
- punpcklwd m0, m0
- punpcklwd m1, m1
-
- ; add DC
- DEFINE_ARGS dst1, dst2, stride
- lea dst2q, [dst1q+strideq*2]
- ADD_DC m0, m1, 0, movh
- RET
-%endif
-
%macro VP8_IDCT_DC_ADD 0
cglobal vp8_idct_dc_add, 3, 3, 6, dst, block, stride
; load data
@@ -971,44 +926,6 @@ VP8_IDCT_DC_ADD
; void ff_vp8_idct_dc_add4y_<opt>(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride);
;-----------------------------------------------------------------------------
-%if ARCH_X86_32
-INIT_MMX mmx
-cglobal vp8_idct_dc_add4y, 3, 3, 0, dst, block, stride
- ; load data
- movd m0, [blockq+32*0] ; A
- movd m1, [blockq+32*2] ; C
- punpcklwd m0, [blockq+32*1] ; A B
- punpcklwd m1, [blockq+32*3] ; C D
- punpckldq m0, m1 ; A B C D
- pxor m6, m6
-
- ; calculate DC
- paddw m0, [pw_4]
- movd [blockq+32*0], m6
- movd [blockq+32*1], m6
- movd [blockq+32*2], m6
- movd [blockq+32*3], m6
- psraw m0, 3
- psubw m6, m0
- packuswb m0, m0
- packuswb m6, m6
- punpcklbw m0, m0 ; AABBCCDD
- punpcklbw m6, m6 ; AABBCCDD
- movq m1, m0
- movq m7, m6
- punpcklbw m0, m0 ; AAAABBBB
- punpckhbw m1, m1 ; CCCCDDDD
- punpcklbw m6, m6 ; AAAABBBB
- punpckhbw m7, m7 ; CCCCDDDD
-
- ; add DC
- DEFINE_ARGS dst1, dst2, stride
- lea dst2q, [dst1q+strideq*2]
- ADD_DC m0, m6, 0, mova
- ADD_DC m1, m7, 8, mova
- RET
-%endif
-
INIT_XMM sse2
cglobal vp8_idct_dc_add4y, 3, 3, 6, dst, block, stride
; load data
@@ -1117,7 +1034,7 @@ cglobal vp8_idct_dc_add4uv, 3, 3, 0, dst, block, stride
SWAP %4, %3
%endmacro
-%macro VP8_IDCT_ADD 0
+INIT_MMX sse
cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
; load block data
movq m0, [blockq+ 0]
@@ -1126,17 +1043,9 @@ cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
movq m3, [blockq+24]
movq m6, [pw_20091]
movq m7, [pw_17734]
-%if cpuflag(sse)
xorps xmm0, xmm0
movaps [blockq+ 0], xmm0
movaps [blockq+16], xmm0
-%else
- pxor m4, m4
- movq [blockq+ 0], m4
- movq [blockq+ 8], m4
- movq [blockq+16], m4
- movq [blockq+24], m4
-%endif
; actual IDCT
VP8_IDCT_TRANSFORM4x4_1D 0, 1, 2, 3, 4, 5
@@ -1153,14 +1062,6 @@ cglobal vp8_idct_add, 3, 3, 0, dst, block, stride
STORE_DIFFx2 m2, m3, m6, m7, m4, 3, dst2q, strideq
RET
-%endmacro
-
-%if ARCH_X86_32
-INIT_MMX mmx
-VP8_IDCT_ADD
-%endif
-INIT_MMX sse
-VP8_IDCT_ADD
;-----------------------------------------------------------------------------
; void ff_vp8_luma_dc_wht(int16_t block[4][4][16], int16_t dc[16])
@@ -1193,23 +1094,15 @@ VP8_IDCT_ADD
SWAP %1, %4, %3
%endmacro
-%macro VP8_DC_WHT 0
+INIT_MMX sse
cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2
movq m0, [dc1q]
movq m1, [dc1q+8]
movq m2, [dc1q+16]
movq m3, [dc1q+24]
-%if cpuflag(sse)
xorps xmm0, xmm0
movaps [dc1q+ 0], xmm0
movaps [dc1q+16], xmm0
-%else
- pxor m4, m4
- movq [dc1q+ 0], m4
- movq [dc1q+ 8], m4
- movq [dc1q+16], m4
- movq [dc1q+24], m4
-%endif
HADAMARD4_1D 0, 1, 2, 3
TRANSPOSE4x4W 0, 1, 2, 3, 4
paddw m0, [pw_3]
@@ -1221,11 +1114,3 @@ cglobal vp8_luma_dc_wht, 2, 3, 0, block, dc1, dc2
SCATTER_WHT 0, 1, 0
SCATTER_WHT 2, 3, 2
RET
-%endmacro
-
-%if ARCH_X86_32
-INIT_MMX mmx
-VP8_DC_WHT
-%endif
-INIT_MMX sse
-VP8_DC_WHT
diff --git a/media/ffvpx/libavcodec/x86/vp8dsp_init.c b/media/ffvpx/libavcodec/x86/vp8dsp_init.c
index 397b2518cb..bd20da1fc9 100644
--- a/media/ffvpx/libavcodec/x86/vp8dsp_init.c
+++ b/media/ffvpx/libavcodec/x86/vp8dsp_init.c
@@ -22,7 +22,7 @@
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
-#include "libavutil/mem.h"
+#include "libavutil/mem_internal.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp8dsp.h"
@@ -32,96 +32,93 @@
* MC functions
*/
void ff_put_vp8_epel4_h4_mmxext(uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel4_h6_mmxext(uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel4_v4_mmxext(uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel4_v6_mmxext(uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel8_h4_sse2 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel8_h6_sse2 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel8_v4_sse2 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel8_v6_sse2 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel4_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel4_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel4_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel4_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel8_h4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel8_h6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel8_v4_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_epel8_v6_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_bilinear4_h_mmxext(uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_bilinear8_h_sse2 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_bilinear4_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_bilinear8_h_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_bilinear4_v_mmxext(uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_bilinear8_v_sse2 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_bilinear4_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_bilinear8_v_ssse3 (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_pixels8_mmx (uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
- int height, int mx, int my);
-void ff_put_vp8_pixels16_mmx(uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
void ff_put_vp8_pixels16_sse(uint8_t *dst, ptrdiff_t dststride,
- uint8_t *src, ptrdiff_t srcstride,
+ const uint8_t *src, ptrdiff_t srcstride,
int height, int mx, int my);
#define TAP_W16(OPT, FILTERTYPE, TAPTYPE) \
static void ff_put_vp8_ ## FILTERTYPE ## 16_ ## TAPTYPE ## _ ## OPT( \
- uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
+ uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
ptrdiff_t srcstride, int height, int mx, int my) \
{ \
ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
@@ -140,19 +137,6 @@ static void ff_put_vp8_ ## FILTERTYPE ## 8_ ## TAPTYPE ## _ ## OPT( \
dst + 4, dststride, src + 4, srcstride, height, mx, my); \
}
-#if ARCH_X86_32
-TAP_W8 (mmxext, epel, h4)
-TAP_W8 (mmxext, epel, h6)
-TAP_W16(mmxext, epel, h6)
-TAP_W8 (mmxext, epel, v4)
-TAP_W8 (mmxext, epel, v6)
-TAP_W16(mmxext, epel, v6)
-TAP_W8 (mmxext, bilinear, h)
-TAP_W16(mmxext, bilinear, h)
-TAP_W8 (mmxext, bilinear, v)
-TAP_W16(mmxext, bilinear, v)
-#endif
-
TAP_W16(sse2, epel, h6)
TAP_W16(sse2, epel, v6)
TAP_W16(sse2, bilinear, h)
@@ -165,7 +149,7 @@ TAP_W16(ssse3, bilinear, v)
#define HVTAP(OPT, ALIGN, TAPNUMX, TAPNUMY, SIZE, MAXHEIGHT) \
static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT( \
- uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
+ uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
ptrdiff_t srcstride, int height, int mx, int my) \
{ \
LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + TAPNUMY - 1)]); \
@@ -177,16 +161,8 @@ static void ff_put_vp8_epel ## SIZE ## _h ## TAPNUMX ## v ## TAPNUMY ## _ ## OPT
dst, dststride, tmpptr, SIZE, height, mx, my); \
}
-#if ARCH_X86_32
-#define HVTAPMMX(x, y) \
-HVTAP(mmxext, 8, x, y, 4, 8) \
-HVTAP(mmxext, 8, x, y, 8, 16)
-
-HVTAP(mmxext, 8, 6, 6, 16, 16)
-#else
#define HVTAPMMX(x, y) \
HVTAP(mmxext, 8, x, y, 4, 8)
-#endif
HVTAPMMX(4, 4)
HVTAPMMX(4, 6)
@@ -210,7 +186,7 @@ HVTAP(ssse3, 16, 6, 6, 4, 8)
#define HVBILIN(OPT, ALIGN, SIZE, MAXHEIGHT) \
static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
- uint8_t *dst, ptrdiff_t dststride, uint8_t *src, \
+ uint8_t *dst, ptrdiff_t dststride, const uint8_t *src, \
ptrdiff_t srcstride, int height, int mx, int my) \
{ \
LOCAL_ALIGNED(ALIGN, uint8_t, tmp, [SIZE * (MAXHEIGHT + 2)]); \
@@ -221,31 +197,21 @@ static void ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT( \
}
HVBILIN(mmxext, 8, 4, 8)
-#if ARCH_X86_32
-HVBILIN(mmxext, 8, 8, 16)
-HVBILIN(mmxext, 8, 16, 16)
-#endif
HVBILIN(sse2, 8, 8, 16)
HVBILIN(sse2, 8, 16, 16)
HVBILIN(ssse3, 8, 4, 8)
HVBILIN(ssse3, 8, 8, 16)
HVBILIN(ssse3, 8, 16, 16)
-void ff_vp8_idct_dc_add_mmx(uint8_t *dst, int16_t block[16],
- ptrdiff_t stride);
void ff_vp8_idct_dc_add_sse2(uint8_t *dst, int16_t block[16],
ptrdiff_t stride);
void ff_vp8_idct_dc_add_sse4(uint8_t *dst, int16_t block[16],
ptrdiff_t stride);
-void ff_vp8_idct_dc_add4y_mmx(uint8_t *dst, int16_t block[4][16],
- ptrdiff_t stride);
void ff_vp8_idct_dc_add4y_sse2(uint8_t *dst, int16_t block[4][16],
ptrdiff_t stride);
void ff_vp8_idct_dc_add4uv_mmx(uint8_t *dst, int16_t block[2][16],
ptrdiff_t stride);
-void ff_vp8_luma_dc_wht_mmx(int16_t block[4][4][16], int16_t dc[16]);
void ff_vp8_luma_dc_wht_sse(int16_t block[4][4][16], int16_t dc[16]);
-void ff_vp8_idct_add_mmx(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
void ff_vp8_idct_add_sse(uint8_t *dst, int16_t block[16], ptrdiff_t stride);
#define DECLARE_LOOP_FILTER(NAME) \
@@ -284,8 +250,6 @@ void ff_vp8_h_loop_filter8uv_mbedge_ ## NAME(uint8_t *dstU, \
ptrdiff_t s, \
int e, int i, int hvt);
-DECLARE_LOOP_FILTER(mmx)
-DECLARE_LOOP_FILTER(mmxext)
DECLARE_LOOP_FILTER(sse2)
DECLARE_LOOP_FILTER(ssse3)
DECLARE_LOOP_FILTER(sse4)
@@ -322,10 +286,6 @@ av_cold void ff_vp78dsp_init_x86(VP8DSPContext *c)
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_MMX(cpu_flags)) {
-#if ARCH_X86_32
- c->put_vp8_epel_pixels_tab[0][0][0] =
- c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
-#endif
c->put_vp8_epel_pixels_tab[1][0][0] =
c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
}
@@ -335,12 +295,6 @@ av_cold void ff_vp78dsp_init_x86(VP8DSPContext *c)
if (EXTERNAL_MMXEXT(cpu_flags)) {
VP8_MC_FUNC(2, 4, mmxext);
VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
-#if ARCH_X86_32
- VP8_LUMA_MC_FUNC(0, 16, mmxext);
- VP8_MC_FUNC(1, 8, mmxext);
- VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
- VP8_BILINEAR_MC_FUNC(1, 8, mmxext);
-#endif
}
if (EXTERNAL_SSE(cpu_flags)) {
@@ -348,7 +302,7 @@ av_cold void ff_vp78dsp_init_x86(VP8DSPContext *c)
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
}
- if (EXTERNAL_SSE2(cpu_flags) || EXTERNAL_SSE2_SLOW(cpu_flags)) {
+ if (EXTERNAL_SSE2_SLOW(cpu_flags)) {
VP8_LUMA_MC_FUNC(0, 16, sse2);
VP8_MC_FUNC(1, 8, sse2);
VP8_BILINEAR_MC_FUNC(0, 16, sse2);
@@ -373,44 +327,6 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
if (EXTERNAL_MMX(cpu_flags)) {
c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
-#if ARCH_X86_32
- c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
- c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
- c->vp8_idct_add = ff_vp8_idct_add_mmx;
- c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx;
-
- c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx;
- c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx;
-
- c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx;
- c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx;
- c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx;
- c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx;
-
- c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx;
- c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx;
- c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx;
- c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx;
-#endif
- }
-
- /* note that 4-tap width=16 functions are missing because w=16
- * is only used for luma, and luma is always a copy or sixtap. */
- if (EXTERNAL_MMXEXT(cpu_flags)) {
-#if ARCH_X86_32
- c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext;
- c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext;
-
- c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext;
- c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext;
- c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext;
- c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext;
-
- c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext;
- c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext;
- c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
- c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
-#endif
}
if (EXTERNAL_SSE(cpu_flags)) {
@@ -418,7 +334,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext *c)
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
}
- if (EXTERNAL_SSE2(cpu_flags) || EXTERNAL_SSE2_SLOW(cpu_flags)) {
+ if (EXTERNAL_SSE2_SLOW(cpu_flags)) {
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2;
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
diff --git a/media/ffvpx/libavcodec/x86/vp8dsp_loopfilter.asm b/media/ffvpx/libavcodec/x86/vp8dsp_loopfilter.asm
index caeb405267..ef397efd3e 100644
--- a/media/ffvpx/libavcodec/x86/vp8dsp_loopfilter.asm
+++ b/media/ffvpx/libavcodec/x86/vp8dsp_loopfilter.asm
@@ -46,30 +46,6 @@ SECTION .text
; void ff_vp8_h/v_loop_filter_simple_<opt>(uint8_t *dst, ptrdiff_t stride, int flim);
;-----------------------------------------------------------------------------
-; macro called with 7 mm register indexes as argument, and 4 regular registers
-;
-; first 4 mm registers will carry the transposed pixel data
-; the other three are scratchspace (one would be sufficient, but this allows
-; for more spreading/pipelining and thus faster execution on OOE CPUs)
-;
-; first two regular registers are buf+4*stride and buf+5*stride
-; third is -stride, fourth is +stride
-%macro READ_8x4_INTERLEAVED 11
- ; interleave 8 (A-H) rows of 4 pixels each
- movd m%1, [%8+%10*4] ; A0-3
- movd m%5, [%9+%10*4] ; B0-3
- movd m%2, [%8+%10*2] ; C0-3
- movd m%6, [%8+%10] ; D0-3
- movd m%3, [%8] ; E0-3
- movd m%7, [%9] ; F0-3
- movd m%4, [%9+%11] ; G0-3
- punpcklbw m%1, m%5 ; A/B interleaved
- movd m%5, [%9+%11*2] ; H0-3
- punpcklbw m%2, m%6 ; C/D interleaved
- punpcklbw m%3, m%7 ; E/F interleaved
- punpcklbw m%4, m%5 ; G/H interleaved
-%endmacro
-
; macro called with 7 mm register indexes as argument, and 5 regular registers
; first 11 mean the same as READ_8x4_TRANSPOSED above
; fifth regular register is scratchspace to reach the bottom 8 rows, it
@@ -112,26 +88,6 @@ SECTION .text
punpcklbw m%4, m%5 ; G/H/O/P interleaved
%endmacro
-; write 4 mm registers of 2 dwords each
-; first four arguments are mm register indexes containing source data
-; last four are registers containing buf+4*stride, buf+5*stride,
-; -stride and +stride
-%macro WRITE_4x2D 8
- ; write out (2 dwords per register)
- movd [%5+%7*4], m%1
- movd [%5+%7*2], m%2
- movd [%5], m%3
- movd [%6+%8], m%4
- punpckhdq m%1, m%1
- punpckhdq m%2, m%2
- punpckhdq m%3, m%3
- punpckhdq m%4, m%4
- movd [%6+%7*4], m%1
- movd [%5+%7], m%2
- movd [%6], m%3
- movd [%6+%8*2], m%4
-%endmacro
-
; write 4 xmm registers of 4 dwords each
; arguments same as WRITE_2x4D, but with an extra register, so that the 5 regular
; registers contain buf+4*stride, buf+5*stride, buf+12*stride, -stride and +stride
@@ -192,42 +148,6 @@ SECTION .text
movd [%7+%9*2], m%4
%endmacro
-; write 4 or 8 words in the mmx/xmm registers as 8 lines
-; 1 and 2 are the registers to write, this can be the same (for SSE2)
-; for pre-SSE4:
-; 3 is a general-purpose register that we will clobber
-; for SSE4:
-; 3 is a pointer to the destination's 5th line
-; 4 is a pointer to the destination's 4th line
-; 5/6 is -stride and +stride
-%macro WRITE_2x4W 6
- movd %3d, %1
- punpckhdq %1, %1
- mov [%4+%5*4], %3w
- shr %3, 16
- add %4, %6
- mov [%4+%5*4], %3w
-
- movd %3d, %1
- add %4, %5
- mov [%4+%5*2], %3w
- shr %3, 16
- mov [%4+%5 ], %3w
-
- movd %3d, %2
- punpckhdq %2, %2
- mov [%4 ], %3w
- shr %3, 16
- mov [%4+%6 ], %3w
-
- movd %3d, %2
- add %4, %6
- mov [%4+%6 ], %3w
- shr %3, 16
- mov [%4+%6*2], %3w
- add %4, %5
-%endmacro
-
%macro WRITE_8W 5
%if cpuflag(sse4)
pextrw [%3+%4*4], %1, 0
@@ -269,29 +189,19 @@ SECTION .text
%macro SIMPLE_LOOPFILTER 2
cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
-%if mmsize == 8 ; mmx/mmxext
- mov cntrq, 2
-%endif
%if cpuflag(ssse3)
pxor m0, m0
%endif
SPLATB_REG m7, flim, m0 ; splat "flim" into register
; set up indexes to address 4 rows
-%if mmsize == 8
- DEFINE_ARGS dst1, mstride, stride, cntr, dst2
-%else
DEFINE_ARGS dst1, mstride, stride, dst3, dst2
-%endif
mov strideq, mstrideq
neg mstrideq
%ifidn %1, h
lea dst1q, [dst1q+4*strideq-2]
%endif
-%if mmsize == 8 ; mmx / mmxext
-.next8px:
-%endif
%ifidn %1, v
; read 4 half/full rows of pixels
mova m0, [dst1q+mstrideq*2] ; p1
@@ -301,11 +211,7 @@ cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
%else ; h
lea dst2q, [dst1q+ strideq]
-%if mmsize == 8 ; mmx/mmxext
- READ_8x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq
-%else ; sse2
READ_16x4_INTERLEAVED 0, 1, 2, 3, 4, 5, 6, dst1q, dst2q, mstrideq, strideq, dst3q
-%endif
TRANSPOSE4x4W 0, 1, 2, 3, 4
%endif
@@ -380,7 +286,6 @@ cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
inc dst1q
SBUTTERFLY bw, 6, 4, 0
-%if mmsize == 16 ; sse2
%if cpuflag(sse4)
inc dst2q
%endif
@@ -390,35 +295,11 @@ cglobal vp8_%1_loop_filter_simple, 3, %2, 8, dst, stride, flim, cntr
inc dst3q
%endif
WRITE_8W m4, dst3q, dst2q, mstrideq, strideq
-%else ; mmx/mmxext
- WRITE_2x4W m6, m4, dst2q, dst1q, mstrideq, strideq
-%endif
%endif
-%if mmsize == 8 ; mmx/mmxext
- ; next 8 pixels
-%ifidn %1, v
- add dst1q, 8 ; advance 8 cols = pixels
-%else ; h
- lea dst1q, [dst1q+strideq*8-1] ; advance 8 rows = lines
-%endif
- dec cntrq
- jg .next8px
- REP_RET
-%else ; sse2
RET
-%endif
%endmacro
-%if ARCH_X86_32
-INIT_MMX mmx
-SIMPLE_LOOPFILTER v, 4
-SIMPLE_LOOPFILTER h, 5
-INIT_MMX mmxext
-SIMPLE_LOOPFILTER v, 4
-SIMPLE_LOOPFILTER h, 5
-%endif
-
INIT_XMM sse2
SIMPLE_LOOPFILTER v, 3
SIMPLE_LOOPFILTER h, 5
@@ -485,9 +366,6 @@ cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, f
%if %2 == 8 ; chroma
DEFINE_ARGS dst1, dst8, mstride, stride, dst2
-%elif mmsize == 8
- DEFINE_ARGS dst1, mstride, stride, dst2, cntr
- mov cntrq, 2
%else
DEFINE_ARGS dst1, mstride, stride, dst2, dst8
%endif
@@ -500,9 +378,6 @@ cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, f
%endif
%endif
-%if mmsize == 8
-.next8px:
-%endif
; read
lea dst2q, [dst1q+strideq]
%ifidn %1, v
@@ -527,33 +402,7 @@ cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, f
movhps m7, [dst8q+ strideq*2]
add dst8q, mstrideq
%endif
-%elif mmsize == 8 ; mmx/mmxext (h)
- ; read 8 rows of 8px each
- movu m0, [dst1q+mstrideq*4]
- movu m1, [dst2q+mstrideq*4]
- movu m2, [dst1q+mstrideq*2]
- movu m3, [dst1q+mstrideq ]
- movu m4, [dst1q]
- movu m5, [dst2q]
- movu m6, [dst2q+ strideq ]
-
- ; 8x8 transpose
- TRANSPOSE4x4B 0, 1, 2, 3, 7
- mova m_q0backup, m1
- movu m7, [dst2q+ strideq*2]
- TRANSPOSE4x4B 4, 5, 6, 7, 1
- SBUTTERFLY dq, 0, 4, 1 ; p3/p2
- SBUTTERFLY dq, 2, 6, 1 ; q0/q1
- SBUTTERFLY dq, 3, 7, 1 ; q2/q3
- mova m1, m_q0backup
- mova m_q0backup, m2 ; store q0
- SBUTTERFLY dq, 1, 5, 2 ; p1/p0
- mova m_p0backup, m5 ; store p0
- SWAP 1, 4
- SWAP 2, 4
- SWAP 6, 3
- SWAP 5, 3
-%else ; sse2 (h)
+%else ; h
%if %2 == 16
lea dst8q, [dst1q+ strideq*8]
%endif
@@ -641,25 +490,9 @@ cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, f
psubusb m6, m5 ; q2-q1
por m6, m4 ; abs(q2-q1)
-%if notcpuflag(mmxext)
- mova m4, m_flimI
- pxor m3, m3
- psubusb m0, m4
- psubusb m1, m4
- psubusb m7, m4
- psubusb m6, m4
- pcmpeqb m0, m3 ; abs(p3-p2) <= I
- pcmpeqb m1, m3 ; abs(p2-p1) <= I
- pcmpeqb m7, m3 ; abs(q3-q2) <= I
- pcmpeqb m6, m3 ; abs(q2-q1) <= I
- pand m0, m1
- pand m7, m6
- pand m0, m7
-%else ; mmxext/sse2
pmaxub m0, m1
pmaxub m6, m7
pmaxub m0, m6
-%endif
; normal_limit and high_edge_variance for p1-p0, q1-q0
SWAP 7, 3 ; now m7 is zero
@@ -681,18 +514,8 @@ cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, f
psubusb m1, m3 ; p1-p0
psubusb m6, m2 ; p0-p1
por m1, m6 ; abs(p1-p0)
-%if notcpuflag(mmxext)
- mova m6, m1
- psubusb m1, m4
- psubusb m6, m_hevthr
- pcmpeqb m1, m7 ; abs(p1-p0) <= I
- pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
- pand m0, m1
- mova m_maskres, m6
-%else ; mmxext/sse2
pmaxub m0, m1 ; max_I
SWAP 1, 4 ; max_hev_thresh
-%endif
SWAP 6, 4 ; now m6 is I
%ifidn %1, v
@@ -712,17 +535,6 @@ cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, f
psubusb m1, m5 ; q0-q1
psubusb m7, m4 ; q1-q0
por m1, m7 ; abs(q1-q0)
-%if notcpuflag(mmxext)
- mova m7, m1
- psubusb m1, m6
- psubusb m7, m_hevthr
- pxor m6, m6
- pcmpeqb m1, m6 ; abs(q1-q0) <= I
- pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
- mova m6, m_maskres
- pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
- pand m6, m7
-%else ; mmxext/sse2
pxor m7, m7
pmaxub m0, m1
pmaxub m6, m1
@@ -730,7 +542,6 @@ cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, f
psubusb m6, m_hevthr
pcmpeqb m0, m7 ; max(abs(..)) <= I
pcmpeqb m6, m7 ; !(max(abs..) > thresh)
-%endif
%ifdef m12
SWAP 6, 12
%else
@@ -820,25 +631,12 @@ cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, f
%else
mova m6, m_maskres
%endif
-%if notcpuflag(mmxext)
- mova m7, [pb_1]
-%else ; mmxext/sse2
pxor m7, m7
-%endif
pand m0, m6
pand m1, m6
-%if notcpuflag(mmxext)
- paddusb m0, m7
- pand m1, [pb_FE]
- pandn m7, m0
- psrlq m1, 1
- psrlq m7, 1
- SWAP 0, 7
-%else ; mmxext/sse2
psubusb m1, [pb_1]
pavgb m0, m7 ; a
pavgb m1, m7 ; -a
-%endif
psubusb m5, m0
psubusb m2, m1
paddusb m5, m1 ; q1-a
@@ -863,51 +661,13 @@ cglobal vp8_%1_loop_filter16y_inner, 5, 5, 13, stack_size, dst, stride, flimE, f
; 4x8/16 transpose
TRANSPOSE4x4B 2, 3, 4, 5, 6
-%if mmsize == 8 ; mmx/mmxext (h)
- WRITE_4x2D 2, 3, 4, 5, dst1q, dst2q, mstrideq, strideq
-%else ; sse2 (h)
lea dst8q, [dst8q+mstrideq +2]
WRITE_4x4D 2, 3, 4, 5, dst1q, dst2q, dst8q, mstrideq, strideq, %2
%endif
-%endif
-%if mmsize == 8
-%if %2 == 8 ; chroma
-%ifidn %1, h
- sub dst1q, 2
-%endif
- cmp dst1q, dst8q
- mov dst1q, dst8q
- jnz .next8px
-%else
-%ifidn %1, h
- lea dst1q, [dst1q+ strideq*8-2]
-%else ; v
- add dst1q, 8
-%endif
- dec cntrq
- jg .next8px
-%endif
- REP_RET
-%else ; mmsize == 16
RET
-%endif
%endmacro
-%if ARCH_X86_32
-INIT_MMX mmx
-INNER_LOOPFILTER v, 16
-INNER_LOOPFILTER h, 16
-INNER_LOOPFILTER v, 8
-INNER_LOOPFILTER h, 8
-
-INIT_MMX mmxext
-INNER_LOOPFILTER v, 16
-INNER_LOOPFILTER h, 16
-INNER_LOOPFILTER v, 8
-INNER_LOOPFILTER h, 8
-%endif
-
INIT_XMM sse2
INNER_LOOPFILTER v, 16
INNER_LOOPFILTER h, 16
@@ -992,9 +752,6 @@ cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE,
%if %2 == 8 ; chroma
DEFINE_ARGS dst1, dst8, mstride, stride, dst2
-%elif mmsize == 8
- DEFINE_ARGS dst1, mstride, stride, dst2, cntr
- mov cntrq, 2
%else
DEFINE_ARGS dst1, mstride, stride, dst2, dst8
%endif
@@ -1007,9 +764,6 @@ cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE,
%endif
%endif
-%if mmsize == 8
-.next8px:
-%endif
; read
lea dst2q, [dst1q+ strideq ]
%ifidn %1, v
@@ -1034,33 +788,7 @@ cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE,
movhps m7, [dst8q+ strideq*2]
add dst8q, mstrideq
%endif
-%elif mmsize == 8 ; mmx/mmxext (h)
- ; read 8 rows of 8px each
- movu m0, [dst1q+mstrideq*4]
- movu m1, [dst2q+mstrideq*4]
- movu m2, [dst1q+mstrideq*2]
- movu m3, [dst1q+mstrideq ]
- movu m4, [dst1q]
- movu m5, [dst2q]
- movu m6, [dst2q+ strideq ]
-
- ; 8x8 transpose
- TRANSPOSE4x4B 0, 1, 2, 3, 7
- mova m_q0backup, m1
- movu m7, [dst2q+ strideq*2]
- TRANSPOSE4x4B 4, 5, 6, 7, 1
- SBUTTERFLY dq, 0, 4, 1 ; p3/p2
- SBUTTERFLY dq, 2, 6, 1 ; q0/q1
- SBUTTERFLY dq, 3, 7, 1 ; q2/q3
- mova m1, m_q0backup
- mova m_q0backup, m2 ; store q0
- SBUTTERFLY dq, 1, 5, 2 ; p1/p0
- mova m_p0backup, m5 ; store p0
- SWAP 1, 4
- SWAP 2, 4
- SWAP 6, 3
- SWAP 5, 3
-%else ; sse2 (h)
+%else ; h
%if %2 == 16
lea dst8q, [dst1q+ strideq*8 ]
%endif
@@ -1150,25 +878,9 @@ cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE,
psubusb m6, m5 ; q2-q1
por m6, m4 ; abs(q2-q1)
-%if notcpuflag(mmxext)
- mova m4, m_flimI
- pxor m3, m3
- psubusb m0, m4
- psubusb m1, m4
- psubusb m7, m4
- psubusb m6, m4
- pcmpeqb m0, m3 ; abs(p3-p2) <= I
- pcmpeqb m1, m3 ; abs(p2-p1) <= I
- pcmpeqb m7, m3 ; abs(q3-q2) <= I
- pcmpeqb m6, m3 ; abs(q2-q1) <= I
- pand m0, m1
- pand m7, m6
- pand m0, m7
-%else ; mmxext/sse2
pmaxub m0, m1
pmaxub m6, m7
pmaxub m0, m6
-%endif
; normal_limit and high_edge_variance for p1-p0, q1-q0
SWAP 7, 3 ; now m7 is zero
@@ -1190,18 +902,8 @@ cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE,
psubusb m1, m3 ; p1-p0
psubusb m6, m2 ; p0-p1
por m1, m6 ; abs(p1-p0)
-%if notcpuflag(mmxext)
- mova m6, m1
- psubusb m1, m4
- psubusb m6, m_hevthr
- pcmpeqb m1, m7 ; abs(p1-p0) <= I
- pcmpeqb m6, m7 ; abs(p1-p0) <= hev_thresh
- pand m0, m1
- mova m_maskres, m6
-%else ; mmxext/sse2
pmaxub m0, m1 ; max_I
SWAP 1, 4 ; max_hev_thresh
-%endif
SWAP 6, 4 ; now m6 is I
%ifidn %1, v
@@ -1221,17 +923,6 @@ cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE,
psubusb m1, m5 ; q0-q1
psubusb m7, m4 ; q1-q0
por m1, m7 ; abs(q1-q0)
-%if notcpuflag(mmxext)
- mova m7, m1
- psubusb m1, m6
- psubusb m7, m_hevthr
- pxor m6, m6
- pcmpeqb m1, m6 ; abs(q1-q0) <= I
- pcmpeqb m7, m6 ; abs(q1-q0) <= hev_thresh
- mova m6, m_maskres
- pand m0, m1 ; abs([pq][321]-[pq][210]) <= I
- pand m6, m7
-%else ; mmxext/sse2
pxor m7, m7
pmaxub m0, m1
pmaxub m6, m1
@@ -1239,7 +930,6 @@ cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE,
psubusb m6, m_hevthr
pcmpeqb m0, m7 ; max(abs(..)) <= I
pcmpeqb m6, m7 ; !(max(abs..) > thresh)
-%endif
%ifdef m12
SWAP 6, 12
%else
@@ -1510,11 +1200,6 @@ cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE,
TRANSPOSE4x4B 1, 2, 3, 4, 0
SBUTTERFLY bw, 5, 6, 0
-%if mmsize == 8 ; mmx/mmxext (h)
- WRITE_4x2D 1, 2, 3, 4, dst1q, dst2q, mstrideq, strideq
- add dst1q, 4
- WRITE_2x4W m5, m6, dst2q, dst1q, mstrideq, strideq
-%else ; sse2 (h)
lea dst8q, [dst8q+mstrideq+1]
WRITE_4x4D 1, 2, 3, 4, dst1q, dst2q, dst8q, mstrideq, strideq, %2
lea dst1q, [dst2q+mstrideq+4]
@@ -1528,45 +1213,10 @@ cglobal vp8_%1_loop_filter16y_mbedge, 5, 5, 15, stack_size, dst1, stride, flimE,
%endif
WRITE_8W m6, dst2q, dst8q, mstrideq, strideq
%endif
-%endif
-%if mmsize == 8
-%if %2 == 8 ; chroma
-%ifidn %1, h
- sub dst1q, 5
-%endif
- cmp dst1q, dst8q
- mov dst1q, dst8q
- jnz .next8px
-%else
-%ifidn %1, h
- lea dst1q, [dst1q+ strideq*8-5]
-%else ; v
- add dst1q, 8
-%endif
- dec cntrq
- jg .next8px
-%endif
- REP_RET
-%else ; mmsize == 16
RET
-%endif
%endmacro
-%if ARCH_X86_32
-INIT_MMX mmx
-MBEDGE_LOOPFILTER v, 16
-MBEDGE_LOOPFILTER h, 16
-MBEDGE_LOOPFILTER v, 8
-MBEDGE_LOOPFILTER h, 8
-
-INIT_MMX mmxext
-MBEDGE_LOOPFILTER v, 16
-MBEDGE_LOOPFILTER h, 16
-MBEDGE_LOOPFILTER v, 8
-MBEDGE_LOOPFILTER h, 8
-%endif
-
INIT_XMM sse2
MBEDGE_LOOPFILTER v, 16
MBEDGE_LOOPFILTER h, 16
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init.c b/media/ffvpx/libavcodec/x86/vp9dsp_init.c
index 837cce8508..8d11dbc348 100644
--- a/media/ffvpx/libavcodec/x86/vp9dsp_init.c
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init.c
@@ -22,7 +22,6 @@
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
-#include "libavutil/mem.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/x86/vp9dsp_init.h"
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init.h b/media/ffvpx/libavcodec/x86/vp9dsp_init.h
index e410cab3a1..fc1e0557fa 100644
--- a/media/ffvpx/libavcodec/x86/vp9dsp_init.h
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init.h
@@ -23,6 +23,9 @@
#ifndef AVCODEC_X86_VP9DSP_INIT_H
#define AVCODEC_X86_VP9DSP_INIT_H
+#include "libavutil/attributes.h"
+#include "libavutil/mem_internal.h"
+
#include "libavcodec/vp9dsp.h"
// hack to force-expand BPC
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp.c b/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp.c
index 60d10a12a3..e5afea1512 100644
--- a/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp.c
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp.c
@@ -22,7 +22,6 @@
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
-#include "libavutil/mem.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/x86/vp9dsp_init.h"
@@ -55,6 +54,8 @@ decl_ipred_fn(dl, 16, 16, avx2);
decl_ipred_fn(dl, 32, 16, avx2);
decl_ipred_fn(dr, 16, 16, avx2);
decl_ipred_fn(dr, 32, 16, avx2);
+decl_ipred_fn(vl, 16, 16, avx2);
+decl_ipred_fn(hd, 16, 16, avx2);
#define decl_ipred_dir_funcs(type) \
decl_ipred_fns(type, 16, sse2, sse2); \
@@ -140,6 +141,8 @@ av_cold void ff_vp9dsp_init_16bpp_x86(VP9DSPContext *dsp)
init_ipred_func(dl, DIAG_DOWN_LEFT, 16, 16, avx2);
init_ipred_func(dl, DIAG_DOWN_LEFT, 32, 16, avx2);
init_ipred_func(dr, DIAG_DOWN_RIGHT, 16, 16, avx2);
+ init_ipred_func(vl, VERT_LEFT, 16, 16, avx2);
+ init_ipred_func(hd, HOR_DOWN, 16, 16, avx2);
#if ARCH_X86_64
init_ipred_func(dr, DIAG_DOWN_RIGHT, 32, 16, avx2);
#endif
diff --git a/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp_template.c b/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp_template.c
index b56afc7f50..f93ea2468e 100644
--- a/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp_template.c
+++ b/media/ffvpx/libavcodec/x86/vp9dsp_init_16bpp_template.c
@@ -22,7 +22,6 @@
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
-#include "libavutil/mem.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/vp9dsp.h"
#include "libavcodec/x86/vp9dsp_init.h"
diff --git a/media/ffvpx/libavcodec/x86/vp9intrapred_16bpp.asm b/media/ffvpx/libavcodec/x86/vp9intrapred_16bpp.asm
index 32b698243a..808056a809 100644
--- a/media/ffvpx/libavcodec/x86/vp9intrapred_16bpp.asm
+++ b/media/ffvpx/libavcodec/x86/vp9intrapred_16bpp.asm
@@ -1222,6 +1222,111 @@ cglobal vp9_ipred_dr_16x16_16, 4, 5, 6, dst, stride, l, a
mova [dst3q+strideq*4], m5 ; 7
RET
+cglobal vp9_ipred_vl_16x16_16, 4, 5, 7, dst, stride, l, a
+ movifnidn aq, amp
+ mova m0, [aq] ; abcdefghijklmnop
+ vpbroadcastw xm1, [aq+30] ; pppppppp
+ vperm2i128 m2, m0, m1, q0201 ; ijklmnoppppppppp
+ vpalignr m3, m2, m0, 2 ; bcdefghijklmnopp
+ vperm2i128 m4, m3, m1, q0201 ; jklmnopppppppppp
+ vpalignr m5, m2, m0, 4 ; cdefghijklmnoppp
+ vperm2i128 m6, m5, m1, q0201 ; klmnoppppppppppp
+ LOWPASS 5, 3, 0 ; BCDEFGHIJKLMNOPP
+ LOWPASS 6, 4, 2 ; JKLMNOPPPPPPPPPP
+ pavgw m3, m0 ; abcdefghijklmnop
+ pavgw m4, m2 ; ijklmnoppppppppp
+ DEFINE_ARGS dst, stride, stride3, stride5, dst4
+ lea dst4q, [dstq+strideq*4]
+ lea stride3q, [strideq*3]
+ lea stride5q, [stride3q+strideq*2]
+
+ mova [dstq+strideq*0], m3 ; 0 abcdefghijklmnop
+ mova [dstq+strideq*1], m5 ; 1 BCDEFGHIJKLMNOPP
+ vpalignr m0, m4, m3, 2
+ vpalignr m1, m6, m5, 2
+ mova [dstq+strideq*2 ], m0 ; 2 bcdefghijklmnopp
+ mova [dstq+stride3q*1], m1 ; 3 CDEFGHIJKLMNOPPP
+ vpalignr m0, m4, m3, 4
+ vpalignr m1, m6, m5, 4
+ mova [dst4q+strideq*0], m0 ; 4 cdefghijklmnoppp
+ mova [dstq+stride5q*1], m1 ; 5 DEFGHIJKLMNOPPPP
+ vpalignr m0, m4, m3, 6
+ vpalignr m1, m6, m5, 6
+ mova [ dstq+stride3q*2], m0 ; 6 defghijklmnopppp
+ mova [dst4q+stride3q*1], m1 ; 7 EFGHIJKLMNOPPPPP
+ vpalignr m0, m4, m3, 8
+ vpalignr m1, m6, m5, 8
+ mova [ dstq+strideq*8], m0 ; 8 efghijklmnoppppp
+ mova [dst4q+stride5q*1], m1 ; 9 FGHIJKLMNOPPPPPP
+ vpalignr m0, m4, m3, 10
+ mova [dstq+stride5q*2], m0 ; 10 fghijklmnopppppp
+ vpalignr m0, m4, m3, 12
+ mova [dst4q+strideq*8], m0 ; 12 ghijklmnoppppppp
+ vpalignr m0, m4, m3, 14
+ mova [dst4q+stride5q*2], m0 ; 14 hijklmnopppppppp
+ sub dst4q, strideq
+ vpalignr m1, m6, m5, 10
+ mova [dst4q+strideq*8], m1 ; 11 GHIJKLMNOPPPPPPP
+ vpalignr m1, m6, m5, 12
+ mova [dst4q+stride5q*2], m1 ; 13 HIJKLMNOPPPPPPPP
+ vpalignr m1, m6, m5, 14
+ mova [dst4q+stride3q*4], m1 ; 15 IJKLMNOPPPPPPPPP
+ RET
+
+cglobal vp9_ipred_hd_16x16_16, 4, 5, 7, dst, stride, l, a
+ movu m0, [aq-2] ; *abcdefghijklmno
+ mova m1, [lq] ; klmnopqrstuvwxyz
+ vperm2i128 m2, m1, m0, q0201 ; stuvwxyz*abcdefg
+ vpalignr m3, m2, m1, 2 ; lmnopqrstuvwxyz*
+ vpalignr m4, m2, m1, 4 ; mnopqrstuvwxyz*a
+ LOWPASS 4, 3, 1 ; LMNOPQRSTUVWXYZ#
+ pavgw m3, m1 ; klmnopqrstuvwxyz
+ mova m1, [aq] ; abcdefghijklmnop
+ movu m2, [aq+2] ; bcdefghijklmnop.
+ LOWPASS 2, 1, 0 ; ABCDEFGHIJKLMNO.
+ vpunpcklwd m0, m3, m4 ; kLlMmNnOsTtUuVvW
+ vpunpckhwd m1, m3, m4 ; oPpQqRrSwXxYyZz#
+ vperm2i128 m3, m1, m0, q0002 ; kLlMmNnOoPpQqRrS
+ vperm2i128 m4, m0, m1, q0301 ; sTtUuVvWwXxYyZz#
+ vperm2i128 m0, m4, m2, q0201 ; wXxYyZz#ABCDEFGH
+ vperm2i128 m1, m3, m4, q0201 ; oPpQqRrSsTtUuVvW
+ DEFINE_ARGS dst, stride, stride3, stride5, dst5
+ lea stride3q, [strideq*3]
+ lea stride5q, [stride3q+strideq*2]
+ lea dst5q, [dstq+stride5q]
+
+ mova [dst5q+stride5q*2], m3 ; 15 kLlMmNnOoPpQqRrS
+ mova [dst5q+stride3q*2], m1 ; 11 oPpQqRrSsTtUuVvW
+ mova [dst5q+strideq*2], m4 ; 7 sTtUuVvWwXxYyZz#
+ mova [dstq+stride3q*1], m0 ; 3 wXxYyZz#ABCDEFGH
+ vpalignr m5, m4, m1, 4
+ mova [dstq+stride5q*2], m5 ; 10 pQqRrSsTtUuVvWwX
+ vpalignr m5, m0, m4, 4
+ vpalignr m6, m2, m0, 4
+ mova [dstq+stride3q*2], m5 ; 6 tUuVvWwXxYyZz#AB
+ mova [dstq+strideq*2], m6 ; 2 xYyZz#ABCDEFGHIJ
+ vpalignr m5, m4, m1, 8
+ mova [dst5q+strideq*4], m5 ; 9 qRrSsTtUuVvWwXxY
+ vpalignr m5, m0, m4, 8
+ vpalignr m6, m2, m0, 8
+ mova [dstq+stride5q*1], m5 ; 5 uVvWwXxYyZz#ABCD
+ mova [dstq+strideq*1], m6 ; 1 yZz#ABCDEFGHIJKL
+ vpalignr m5, m1, m3, 12
+ vpalignr m6, m4, m1, 12
+ mova [dstq+stride3q*4], m5 ; 12 nOoPpQqRrSsTtUuV
+ mova [dst5q+stride3q], m6 ; 8 rSsTtUuVvWwXxYyZ
+ vpalignr m5, m0, m4, 12
+ vpalignr m6, m2, m0, 12
+ mova [dstq+strideq*4], m5 ; 4 nOoPpQqRrSsTtUuV
+ mova [dstq+strideq*0], m6 ; 0 z#ABCDEFGHIJKLMN
+ sub dst5q, strideq
+ vpalignr m5, m1, m3, 4
+ mova [dst5q+stride5q*2], m5 ; 14 lMmNnOoPpQqRrSsT
+ sub dst5q, strideq
+ vpalignr m5, m1, m3, 8
+ mova [dst5q+stride5q*2], m5 ; 13 mNnOoPpQqRrSsTtU
+ RET
+
%if ARCH_X86_64
cglobal vp9_ipred_dr_32x32_16, 4, 7, 10, dst, stride, l, a
mova m0, [lq+mmsize*0+0] ; l[0-15]
diff --git a/media/ffvpx/libavcodec/x86/vp9mc.asm b/media/ffvpx/libavcodec/x86/vp9mc.asm
index f64161b2c2..efc4cfbef1 100644
--- a/media/ffvpx/libavcodec/x86/vp9mc.asm
+++ b/media/ffvpx/libavcodec/x86/vp9mc.asm
@@ -604,7 +604,12 @@ cglobal vp9_%1%2 %+ %%szsuf, 5, 5, %8, dst, dstride, src, sstride, h
%%pavg m0, [dstq]
%%pavg m1, [dstq+d%3]
%%pavg m2, [dstq+d%4]
+%if %2 == 4
+ %%srcfn m4, [dstq+d%5]
+ %%pavg m3, m4
+%else
%%pavg m3, [dstq+d%5]
+%endif
%if %2/mmsize == 8
%%pavg m4, [dstq+mmsize*4]
%%pavg m5, [dstq+mmsize*5]
diff --git a/media/ffvpx/libavcodec/x86/vp56_arith.h b/media/ffvpx/libavcodec/x86/vpx_arith.h
index 810cc8dcd8..d9e4c0dec4 100644
--- a/media/ffvpx/libavcodec/x86/vp56_arith.h
+++ b/media/ffvpx/libavcodec/x86/vpx_arith.h
@@ -21,14 +21,18 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef AVCODEC_X86_VP56_ARITH_H
-#define AVCODEC_X86_VP56_ARITH_H
+#ifndef AVCODEC_X86_VPX_ARITH_H
+#define AVCODEC_X86_VPX_ARITH_H
+
+#include "libavutil/x86/asm.h"
#if HAVE_INLINE_ASM && HAVE_FAST_CMOV && HAVE_6REGS
-#define vp56_rac_get_prob vp56_rac_get_prob
-static av_always_inline int vp56_rac_get_prob(VP56RangeCoder *c, uint8_t prob)
+#include "libavutil/attributes.h"
+
+#define vpx_rac_get_prob vpx_rac_get_prob
+static av_always_inline int vpx_rac_get_prob(VPXRangeCoder *c, uint8_t prob)
{
- unsigned int code_word = vp56_rac_renorm(c);
+ unsigned int code_word = vpx_rac_renorm(c);
unsigned int low = 1 + (((c->high - 1) * prob) >> 8);
unsigned int low_shift = low << 16;
int bit = 0;
@@ -48,4 +52,4 @@ static av_always_inline int vp56_rac_get_prob(VP56RangeCoder *c, uint8_t prob)
}
#endif
-#endif /* AVCODEC_X86_VP56_ARITH_H */
+#endif /* AVCODEC_X86_VPX_ARITH_H */
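For reference, the portable fallback that this inline-asm override shadows performs the same range-coder update with plain C ternaries, which the asm version pins to cmov; roughly, as a sketch rather than the verbatim FFmpeg source:

    static av_always_inline int vpx_rac_get_prob(VPXRangeCoder *c, uint8_t prob)
    {
        unsigned int code_word = vpx_rac_renorm(c);
        unsigned int low       = 1 + (((c->high - 1) * prob) >> 8);
        unsigned int low_shift = low << 16;
        int bit = code_word >= low_shift;          /* decoded symbol */

        c->high      = bit ? c->high - low : low;
        c->code_word = bit ? code_word - low_shift : code_word;
        return bit;
    }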
diff --git a/media/ffvpx/libavcodec/xiph.c b/media/ffvpx/libavcodec/xiph.c
index d072224b4a..218b0813e9 100644
--- a/media/ffvpx/libavcodec/xiph.c
+++ b/media/ffvpx/libavcodec/xiph.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <limits.h>
+#include "libavutil/error.h"
#include "libavutil/intreadwrite.h"
#include "xiph.h"
@@ -35,7 +37,7 @@ int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size,
header_start[i] = extradata;
extradata += header_len[i];
if (overall_len > extradata_size - header_len[i])
- return -1;
+ return AVERROR_INVALIDDATA;
overall_len += header_len[i];
}
} else if (extradata_size >= 3 && extradata_size < INT_MAX - 0x1ff && extradata[0] == 2) {
@@ -50,7 +52,7 @@ int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size,
header_len[i] += *extradata;
overall_len += *extradata;
if (overall_len > extradata_size)
- return -1;
+ return AVERROR_INVALIDDATA;
}
header_len[2] = extradata_size - overall_len;
header_start[0] = extradata;
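Callers can now propagate the failure directly instead of translating a bare -1; a hedged usage sketch (extradata and extradata_size are assumed to come from the caller, and 30 is the Vorbis ID-header size used by existing callers):

    const uint8_t *header_start[3];
    int header_len[3];
    int ret = avpriv_split_xiph_headers(extradata, extradata_size, 30,
                                        header_start, header_len);
    if (ret < 0)
        return ret;   /* e.g. AVERROR_INVALIDDATA on malformed extradata */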
diff --git a/media/ffvpx/libavcodec/xiph.h b/media/ffvpx/libavcodec/xiph.h
index 1741a51b65..4ab2469528 100644
--- a/media/ffvpx/libavcodec/xiph.h
+++ b/media/ffvpx/libavcodec/xiph.h
@@ -21,7 +21,7 @@
#ifndef AVCODEC_XIPH_H
#define AVCODEC_XIPH_H
-#include "libavutil/common.h"
+#include <stdint.h>
/**
* Split a single extradata buffer into the three headers that most
diff --git a/media/ffvpx/libavutil/adler32.c b/media/ffvpx/libavutil/adler32.c
index c87d5e261c..7124f18802 100644
--- a/media/ffvpx/libavutil/adler32.c
+++ b/media/ffvpx/libavutil/adler32.c
@@ -32,8 +32,8 @@
#include "config.h"
#include "adler32.h"
-#include "common.h"
#include "intreadwrite.h"
+#include "macros.h"
#define BASE 65521L /* largest prime smaller than 65536 */
@@ -41,8 +41,7 @@
#define DO4(buf) DO1(buf); DO1(buf); DO1(buf); DO1(buf);
#define DO16(buf) DO4(buf); DO4(buf); DO4(buf); DO4(buf);
-unsigned long av_adler32_update(unsigned long adler, const uint8_t * buf,
- unsigned int len)
+AVAdler av_adler32_update(AVAdler adler, const uint8_t *buf, size_t len)
{
unsigned long s1 = adler & 0xffff;
unsigned long s2 = adler >> 16;
diff --git a/media/ffvpx/libavutil/adler32.h b/media/ffvpx/libavutil/adler32.h
index a1f035b734..232d07f5fe 100644
--- a/media/ffvpx/libavutil/adler32.h
+++ b/media/ffvpx/libavutil/adler32.h
@@ -27,6 +27,7 @@
#ifndef AVUTIL_ADLER32_H
#define AVUTIL_ADLER32_H
+#include <stddef.h>
#include <stdint.h>
#include "attributes.h"
@@ -38,6 +39,8 @@
* @{
*/
+typedef uint32_t AVAdler;
+
/**
* Calculate the Adler32 checksum of a buffer.
*
@@ -50,8 +53,8 @@
* @param len size of input buffer
* @return updated checksum
*/
-unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,
- unsigned int len) av_pure;
+AVAdler av_adler32_update(AVAdler adler, const uint8_t *buf,
+ size_t len) av_pure;
/**
* @}
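[editor's note] A sketch of how a caller picks up the new types: the checksum still starts from the conventional Adler-32 seed of 1; only the accumulator type (AVAdler) and the length type (size_t) change.

#include <string.h>
#include "libavutil/adler32.h"

static AVAdler checksum_string(const char *s)
{
    /* 1 is the standard Adler-32 starting value; len is now size_t */
    return av_adler32_update(1, (const uint8_t *)s, strlen(s));
}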
diff --git a/media/ffvpx/libavutil/attributes.h b/media/ffvpx/libavutil/attributes.h
index ced108aa2c..04c615c952 100644
--- a/media/ffvpx/libavutil/attributes.h
+++ b/media/ffvpx/libavutil/attributes.h
@@ -34,6 +34,12 @@
# define AV_GCC_VERSION_AT_MOST(x,y) 0
#endif
+#ifdef __has_builtin
+# define AV_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define AV_HAS_BUILTIN(x) 0
+#endif
+
#ifndef av_always_inline
#if AV_GCC_VERSION_AT_LEAST(3,1)
# define av_always_inline __attribute__((always_inline)) inline
@@ -104,7 +110,7 @@
* scheduled for removal.
*/
#ifndef AV_NOWARN_DEPRECATED
-#if AV_GCC_VERSION_AT_LEAST(4,6)
+#if AV_GCC_VERSION_AT_LEAST(4,6) || defined(__clang__)
# define AV_NOWARN_DEPRECATED(code) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
diff --git a/media/ffvpx/libavutil/attributes_internal.h b/media/ffvpx/libavutil/attributes_internal.h
new file mode 100644
index 0000000000..3df1ee6af3
--- /dev/null
+++ b/media/ffvpx/libavutil/attributes_internal.h
@@ -0,0 +1,34 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_INTERNAL_H
+#define AVUTIL_ATTRIBUTES_INTERNAL_H
+
+#include "attributes.h"
+
+#if (AV_GCC_VERSION_AT_LEAST(4,0) || defined(__clang__)) && (defined(__ELF__) || defined(__MACH__))
+# define attribute_visibility_hidden __attribute__((visibility("hidden")))
+# define FF_VISIBILITY_PUSH_HIDDEN _Pragma("GCC visibility push(hidden)")
+# define FF_VISIBILITY_POP_HIDDEN _Pragma("GCC visibility pop")
+#else
+# define attribute_visibility_hidden
+# define FF_VISIBILITY_PUSH_HIDDEN
+# define FF_VISIBILITY_POP_HIDDEN
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_INTERNAL_H */
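[editor's note] A sketch of how an internal header might consume these macros to keep a block of private symbols out of the dynamic symbol table on ELF/Mach-O builds; the ff_example_* names are placeholders:

#include "libavutil/attributes_internal.h"

FF_VISIBILITY_PUSH_HIDDEN
/* everything declared here gets hidden visibility where supported */
int  ff_example_init(void *ctx);
void ff_example_close(void *ctx);
FF_VISIBILITY_POP_HIDDEN

/* a single symbol can also be annotated directly */
attribute_visibility_hidden extern const int ff_example_table[16];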
diff --git a/media/ffvpx/libavutil/avassert.h b/media/ffvpx/libavutil/avassert.h
index 9abeadea4a..51e462bbae 100644
--- a/media/ffvpx/libavutil/avassert.h
+++ b/media/ffvpx/libavutil/avassert.h
@@ -28,8 +28,8 @@
#define AVUTIL_AVASSERT_H
#include <stdlib.h>
-#include "avutil.h"
#include "log.h"
+#include "macros.h"
/**
* assert() equivalent, that is always enabled.
diff --git a/media/ffvpx/libavutil/avsscanf.c b/media/ffvpx/libavutil/avsscanf.c
new file mode 100644
index 0000000000..7061e6d965
--- /dev/null
+++ b/media/ffvpx/libavutil/avsscanf.c
@@ -0,0 +1,970 @@
+/*
+ * Copyright (c) 2005-2014 Rich Felker, et al.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <float.h>
+
+#include "avstring.h"
+#include "libm.h"
+
+typedef struct FFFILE {
+ size_t buf_size;
+ unsigned char *buf;
+ unsigned char *rpos, *rend;
+ unsigned char *shend;
+ ptrdiff_t shlim, shcnt;
+ void *cookie;
+ size_t (*read)(struct FFFILE *, unsigned char *, size_t);
+} FFFILE;
+
+#define SIZE_hh -2
+#define SIZE_h -1
+#define SIZE_def 0
+#define SIZE_l 1
+#define SIZE_L 2
+#define SIZE_ll 3
+
+#define shcnt(f) ((f)->shcnt + ((f)->rpos - (f)->buf))
+
+static int fftoread(FFFILE *f)
+{
+ f->rpos = f->rend = f->buf + f->buf_size;
+ return 0;
+}
+
+static size_t ffstring_read(FFFILE *f, unsigned char *buf, size_t len)
+{
+ char *src = f->cookie;
+ size_t k = len+256;
+ char *end = memchr(src, 0, k);
+
+ if (end) k = end-src;
+ if (k < len) len = k;
+ memcpy(buf, src, len);
+ f->rpos = (void *)(src+len);
+ f->rend = (void *)(src+k);
+ f->cookie = src+k;
+
+ return len;
+}
+
+static int ffuflow(FFFILE *f)
+{
+ unsigned char c;
+ if (!fftoread(f) && f->read(f, &c, 1)==1) return c;
+ return EOF;
+}
+
+static void ffshlim(FFFILE *f, ptrdiff_t lim)
+{
+ f->shlim = lim;
+ f->shcnt = f->buf - f->rpos;
+ /* If lim is nonzero, rend must be a valid pointer. */
+ if (lim && f->rend - f->rpos > lim)
+ f->shend = f->rpos + lim;
+ else
+ f->shend = f->rend;
+}
+
+static int ffshgetc(FFFILE *f)
+{
+ int c;
+ ptrdiff_t cnt = shcnt(f);
+ if (f->shlim && cnt >= f->shlim || (c=ffuflow(f)) < 0) {
+ f->shcnt = f->buf - f->rpos + cnt;
+ f->shend = 0;
+ return EOF;
+ }
+ cnt++;
+ if (f->shlim && f->rend - f->rpos > f->shlim - cnt)
+ f->shend = f->rpos + (f->shlim - cnt);
+ else
+ f->shend = f->rend;
+ f->shcnt = f->buf - f->rpos + cnt;
+ if (f->rpos[-1] != c) f->rpos[-1] = c;
+ return c;
+}
+
+#define shlim(f, lim) ffshlim((f), (lim))
+#define shgetc(f) (((f)->rpos < (f)->shend) ? *(f)->rpos++ : ffshgetc(f))
+#define shunget(f) ((f)->shend ? (void)(f)->rpos-- : (void)0)
+
+static const unsigned char table[] = { -1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1,
+ -1,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,
+ 25,26,27,28,29,30,31,32,33,34,35,-1,-1,-1,-1,-1,
+ -1,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,
+ 25,26,27,28,29,30,31,32,33,34,35,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+};
+
+static unsigned long long ffintscan(FFFILE *f, unsigned base, int pok, unsigned long long lim)
+{
+ const unsigned char *val = table+1;
+ int c, neg=0;
+ unsigned x;
+ unsigned long long y;
+ if (base > 36 || base == 1) {
+ errno = EINVAL;
+ return 0;
+ }
+ while (av_isspace((c=shgetc(f))));
+ if (c=='+' || c=='-') {
+ neg = -(c=='-');
+ c = shgetc(f);
+ }
+ if ((base == 0 || base == 16) && c=='0') {
+ c = shgetc(f);
+ if ((c|32)=='x') {
+ c = shgetc(f);
+ if (val[c]>=16) {
+ shunget(f);
+ if (pok) shunget(f);
+ else shlim(f, 0);
+ return 0;
+ }
+ base = 16;
+ } else if (base == 0) {
+ base = 8;
+ }
+ } else {
+ if (base == 0) base = 10;
+ if (val[c] >= base) {
+ shunget(f);
+ shlim(f, 0);
+ errno = EINVAL;
+ return 0;
+ }
+ }
+ if (base == 10) {
+ for (x=0; c-'0'<10U && x<=UINT_MAX/10-1; c=shgetc(f))
+ x = x*10 + (c-'0');
+ for (y=x; c-'0'<10U && y<=ULLONG_MAX/10 && 10*y<=ULLONG_MAX-(c-'0'); c=shgetc(f))
+ y = y*10 + (c-'0');
+ if (c-'0'>=10U) goto done;
+ } else if (!(base & base-1)) {
+ int bs = "\0\1\2\4\7\3\6\5"[(0x17*base)>>5&7];
+ for (x=0; val[c]<base && x<=UINT_MAX/32; c=shgetc(f))
+ x = x<<bs | val[c];
+ for (y=x; val[c]<base && y<=ULLONG_MAX>>bs; c=shgetc(f))
+ y = y<<bs | val[c];
+ } else {
+ for (x=0; val[c]<base && x<=UINT_MAX/36-1; c=shgetc(f))
+ x = x*base + val[c];
+ for (y=x; val[c]<base && y<=ULLONG_MAX/base && base*y<=ULLONG_MAX-val[c]; c=shgetc(f))
+ y = y*base + val[c];
+ }
+ if (val[c]<base) {
+ for (; val[c]<base; c=shgetc(f));
+ errno = ERANGE;
+ y = lim;
+ if (lim&1) neg = 0;
+ }
+done:
+ shunget(f);
+ if (y>=lim) {
+ if (!(lim&1) && !neg) {
+ errno = ERANGE;
+ return lim-1;
+ } else if (y>lim) {
+ errno = ERANGE;
+ return lim;
+ }
+ }
+ return (y^neg)-neg;
+}
+
+static long long scanexp(FFFILE *f, int pok)
+{
+ int c;
+ int x;
+ long long y;
+ int neg = 0;
+
+ c = shgetc(f);
+ if (c=='+' || c=='-') {
+ neg = (c=='-');
+ c = shgetc(f);
+ if (c-'0'>=10U && pok) shunget(f);
+ }
+ if (c-'0'>=10U) {
+ shunget(f);
+ return LLONG_MIN;
+ }
+ for (x=0; c-'0'<10U && x<INT_MAX/10; c = shgetc(f))
+ x = 10*x + (c-'0');
+ for (y=x; c-'0'<10U && y<LLONG_MAX/100; c = shgetc(f))
+ y = 10*y + (c-'0');
+ for (; c-'0'<10U; c = shgetc(f));
+ shunget(f);
+ return neg ? -y : y;
+}
+
+#define LD_B1B_DIG 2
+#define LD_B1B_MAX 9007199, 254740991
+#define KMAX 128
+#define MASK (KMAX-1)
+
+static double decfloat(FFFILE *f, int c, int bits, int emin, int sign, int pok)
+{
+ uint32_t x[KMAX];
+ static const uint32_t th[] = { LD_B1B_MAX };
+ int i, j, k, a, z;
+ long long lrp=0, dc=0;
+ long long e10=0;
+ int lnz = 0;
+ int gotdig = 0, gotrad = 0;
+ int rp;
+ int e2;
+ int emax = -emin-bits+3;
+ int denormal = 0;
+ double y;
+ double frac=0;
+ double bias=0;
+ static const int p10s[] = { 10, 100, 1000, 10000,
+ 100000, 1000000, 10000000, 100000000 };
+
+ j=0;
+ k=0;
+
+ /* Don't let leading zeros consume buffer space */
+ for (; c=='0'; c = shgetc(f)) gotdig=1;
+ if (c=='.') {
+ gotrad = 1;
+ for (c = shgetc(f); c=='0'; c = shgetc(f)) gotdig=1, lrp--;
+ }
+
+ x[0] = 0;
+ for (; c-'0'<10U || c=='.'; c = shgetc(f)) {
+ if (c == '.') {
+ if (gotrad) break;
+ gotrad = 1;
+ lrp = dc;
+ } else if (k < KMAX-3) {
+ dc++;
+ if (c!='0') lnz = dc;
+ if (j) x[k] = x[k]*10 + c-'0';
+ else x[k] = c-'0';
+ if (++j==9) {
+ k++;
+ j=0;
+ }
+ gotdig=1;
+ } else {
+ dc++;
+ if (c!='0') {
+ lnz = (KMAX-4)*9;
+ x[KMAX-4] |= 1;
+ }
+ }
+ }
+ if (!gotrad) lrp=dc;
+
+ if (gotdig && (c|32)=='e') {
+ e10 = scanexp(f, pok);
+ if (e10 == LLONG_MIN) {
+ if (pok) {
+ shunget(f);
+ } else {
+ shlim(f, 0);
+ return 0;
+ }
+ e10 = 0;
+ }
+ lrp += e10;
+ } else if (c>=0) {
+ shunget(f);
+ }
+ if (!gotdig) {
+ errno = EINVAL;
+ shlim(f, 0);
+ return 0;
+ }
+
+ /* Handle zero specially to avoid nasty special cases later */
+ if (!x[0]) return sign * 0.0;
+
+ /* Optimize small integers (w/no exponent) and over/under-flow */
+ if (lrp==dc && dc<10 && (bits>30 || x[0]>>bits==0))
+ return sign * (double)x[0];
+ if (lrp > -emin/2) {
+ errno = ERANGE;
+ return sign * DBL_MAX * DBL_MAX;
+ }
+ if (lrp < emin-2*DBL_MANT_DIG) {
+ errno = ERANGE;
+ return sign * DBL_MIN * DBL_MIN;
+ }
+
+ /* Align incomplete final B1B digit */
+ if (j) {
+ for (; j<9; j++) x[k]*=10;
+ k++;
+ j=0;
+ }
+
+ a = 0;
+ z = k;
+ e2 = 0;
+ rp = lrp;
+
+ /* Optimize small to mid-size integers (even in exp. notation) */
+ if (lnz<9 && lnz<=rp && rp < 18) {
+ int bitlim;
+ if (rp == 9) return sign * (double)x[0];
+ if (rp < 9) return sign * (double)x[0] / p10s[8-rp];
+ bitlim = bits-3*(int)(rp-9);
+ if (bitlim>30 || x[0]>>bitlim==0)
+ return sign * (double)x[0] * p10s[rp-10];
+ }
+
+ /* Drop trailing zeros */
+ for (; !x[z-1]; z--);
+
+ /* Align radix point to B1B digit boundary */
+ if (rp % 9) {
+ int rpm9 = rp>=0 ? rp%9 : rp%9+9;
+ int p10 = p10s[8-rpm9];
+ uint32_t carry = 0;
+ for (k=a; k!=z; k++) {
+ uint32_t tmp = x[k] % p10;
+ x[k] = x[k]/p10 + carry;
+ carry = 1000000000/p10 * tmp;
+ if (k==a && !x[k]) {
+ a = (a+1 & MASK);
+ rp -= 9;
+ }
+ }
+ if (carry) x[z++] = carry;
+ rp += 9-rpm9;
+ }
+
+ /* Upscale until desired number of bits are left of radix point */
+ while (rp < 9*LD_B1B_DIG || (rp == 9*LD_B1B_DIG && x[a]<th[0])) {
+ uint32_t carry = 0;
+ e2 -= 29;
+ for (k=(z-1 & MASK); ; k=(k-1 & MASK)) {
+ uint64_t tmp = ((uint64_t)x[k] << 29) + carry;
+ if (tmp > 1000000000) {
+ carry = tmp / 1000000000;
+ x[k] = tmp % 1000000000;
+ } else {
+ carry = 0;
+ x[k] = tmp;
+ }
+ if (k==(z-1 & MASK) && k!=a && !x[k]) z = k;
+ if (k==a) break;
+ }
+ if (carry) {
+ rp += 9;
+ a = (a-1 & MASK);
+ if (a == z) {
+ z = (z-1 & MASK);
+ x[z-1 & MASK] |= x[z];
+ }
+ x[a] = carry;
+ }
+ }
+
+ /* Downscale until exactly number of bits are left of radix point */
+ for (;;) {
+ uint32_t carry = 0;
+ int sh = 1;
+ for (i=0; i<LD_B1B_DIG; i++) {
+ k = (a+i & MASK);
+ if (k == z || x[k] < th[i]) {
+ i=LD_B1B_DIG;
+ break;
+ }
+ if (x[a+i & MASK] > th[i]) break;
+ }
+ if (i==LD_B1B_DIG && rp==9*LD_B1B_DIG) break;
+ /* FIXME: find a way to compute optimal sh */
+ if (rp > 9+9*LD_B1B_DIG) sh = 9;
+ e2 += sh;
+ for (k=a; k!=z; k=(k+1 & MASK)) {
+ uint32_t tmp = x[k] & (1<<sh)-1;
+ x[k] = (x[k]>>sh) + carry;
+ carry = (1000000000>>sh) * tmp;
+ if (k==a && !x[k]) {
+ a = (a+1 & MASK);
+ i--;
+ rp -= 9;
+ }
+ }
+ if (carry) {
+ if ((z+1 & MASK) != a) {
+ x[z] = carry;
+ z = (z+1 & MASK);
+ } else x[z-1 & MASK] |= 1;
+ }
+ }
+
+ /* Assemble desired bits into floating point variable */
+ for (y=i=0; i<LD_B1B_DIG; i++) {
+ if ((a+i & MASK)==z) x[(z=(z+1 & MASK))-1] = 0;
+ y = 1000000000.0L * y + x[a+i & MASK];
+ }
+
+ y *= sign;
+
+ /* Limit precision for denormal results */
+ if (bits > DBL_MANT_DIG+e2-emin) {
+ bits = DBL_MANT_DIG+e2-emin;
+ if (bits<0) bits=0;
+ denormal = 1;
+ }
+
+ /* Calculate bias term to force rounding, move out lower bits */
+ if (bits < DBL_MANT_DIG) {
+ bias = copysign(scalbn(1, 2*DBL_MANT_DIG-bits-1), y);
+ frac = fmod(y, scalbn(1, DBL_MANT_DIG-bits));
+ y -= frac;
+ y += bias;
+ }
+
+ /* Process tail of decimal input so it can affect rounding */
+ if ((a+i & MASK) != z) {
+ uint32_t t = x[a+i & MASK];
+ if (t < 500000000 && (t || (a+i+1 & MASK) != z))
+ frac += 0.25*sign;
+ else if (t > 500000000)
+ frac += 0.75*sign;
+ else if (t == 500000000) {
+ if ((a+i+1 & MASK) == z)
+ frac += 0.5*sign;
+ else
+ frac += 0.75*sign;
+ }
+ if (DBL_MANT_DIG-bits >= 2 && !fmod(frac, 1))
+ frac++;
+ }
+
+ y += frac;
+ y -= bias;
+
+ if ((e2+DBL_MANT_DIG & INT_MAX) > emax-5) {
+ if (fabs(y) >= pow(2, DBL_MANT_DIG)) {
+ if (denormal && bits==DBL_MANT_DIG+e2-emin)
+ denormal = 0;
+ y *= 0.5;
+ e2++;
+ }
+ if (e2+DBL_MANT_DIG>emax || (denormal && frac))
+ errno = ERANGE;
+ }
+
+ return scalbn(y, e2);
+}
+
+static double hexfloat(FFFILE *f, int bits, int emin, int sign, int pok)
+{
+ uint32_t x = 0;
+ double y = 0;
+ double scale = 1;
+ double bias = 0;
+ int gottail = 0, gotrad = 0, gotdig = 0;
+ long long rp = 0;
+ long long dc = 0;
+ long long e2 = 0;
+ int d;
+ int c;
+
+ c = shgetc(f);
+
+ /* Skip leading zeros */
+ for (; c=='0'; c = shgetc(f))
+ gotdig = 1;
+
+ if (c=='.') {
+ gotrad = 1;
+ c = shgetc(f);
+ /* Count zeros after the radix point before significand */
+ for (rp=0; c=='0'; c = shgetc(f), rp--) gotdig = 1;
+ }
+
+ for (; c-'0'<10U || (c|32)-'a'<6U || c=='.'; c = shgetc(f)) {
+ if (c=='.') {
+ if (gotrad) break;
+ rp = dc;
+ gotrad = 1;
+ } else {
+ gotdig = 1;
+ if (c > '9') d = (c|32)+10-'a';
+ else d = c-'0';
+ if (dc<8) {
+ x = x*16 + d;
+ } else if (dc < DBL_MANT_DIG/4+1) {
+ y += d*(scale/=16);
+ } else if (d && !gottail) {
+ y += 0.5*scale;
+ gottail = 1;
+ }
+ dc++;
+ }
+ }
+ if (!gotdig) {
+ shunget(f);
+ if (pok) {
+ shunget(f);
+ if (gotrad) shunget(f);
+ } else {
+ shlim(f, 0);
+ }
+ return sign * 0.0;
+ }
+ if (!gotrad) rp = dc;
+ while (dc<8) x *= 16, dc++;
+ if ((c|32)=='p') {
+ e2 = scanexp(f, pok);
+ if (e2 == LLONG_MIN) {
+ if (pok) {
+ shunget(f);
+ } else {
+ shlim(f, 0);
+ return 0;
+ }
+ e2 = 0;
+ }
+ } else {
+ shunget(f);
+ }
+ e2 += 4*rp - 32;
+
+ if (!x) return sign * 0.0;
+ if (e2 > -emin) {
+ errno = ERANGE;
+ return sign * DBL_MAX * DBL_MAX;
+ }
+ if (e2 < emin-2*DBL_MANT_DIG) {
+ errno = ERANGE;
+ return sign * DBL_MIN * DBL_MIN;
+ }
+
+ while (x < 0x80000000) {
+ if (y>=0.5) {
+ x += x + 1;
+ y += y - 1;
+ } else {
+ x += x;
+ y += y;
+ }
+ e2--;
+ }
+
+ if (bits > 32+e2-emin) {
+ bits = 32+e2-emin;
+ if (bits<0) bits=0;
+ }
+
+ if (bits < DBL_MANT_DIG)
+ bias = copysign(scalbn(1, 32+DBL_MANT_DIG-bits-1), sign);
+
+ if (bits<32 && y && !(x&1)) x++, y=0;
+
+ y = bias + sign*(double)x + sign*y;
+ y -= bias;
+
+ if (!y) errno = ERANGE;
+
+ return scalbn(y, e2);
+}
+
+static double fffloatscan(FFFILE *f, int prec, int pok)
+{
+ int sign = 1;
+ size_t i;
+ int bits;
+ int emin;
+ int c;
+
+ switch (prec) {
+ case 0:
+ bits = FLT_MANT_DIG;
+ emin = FLT_MIN_EXP-bits;
+ break;
+ case 1:
+ bits = DBL_MANT_DIG;
+ emin = DBL_MIN_EXP-bits;
+ break;
+ case 2:
+ bits = DBL_MANT_DIG;
+ emin = DBL_MIN_EXP-bits;
+ break;
+ default:
+ return 0;
+ }
+
+ while (av_isspace((c = shgetc(f))));
+
+ if (c=='+' || c=='-') {
+ sign -= 2*(c=='-');
+ c = shgetc(f);
+ }
+
+ for (i=0; i<8 && (c|32)=="infinity"[i]; i++)
+ if (i<7) c = shgetc(f);
+ if (i==3 || i==8 || (i>3 && pok)) {
+ if (i!=8) {
+ shunget(f);
+ if (pok) for (; i>3; i--) shunget(f);
+ }
+ return sign * INFINITY;
+ }
+ if (!i) for (i=0; i<3 && (c|32)=="nan"[i]; i++)
+ if (i<2) c = shgetc(f);
+ if (i==3) {
+ if (shgetc(f) != '(') {
+ shunget(f);
+ return NAN;
+ }
+ for (i=1; ; i++) {
+ c = shgetc(f);
+ if (c-'0'<10U || c-'A'<26U || c-'a'<26U || c=='_')
+ continue;
+ if (c==')') return NAN;
+ shunget(f);
+ if (!pok) {
+ errno = EINVAL;
+ shlim(f, 0);
+ return 0;
+ }
+ while (i--) shunget(f);
+ return NAN;
+ }
+ return NAN;
+ }
+
+ if (i) {
+ shunget(f);
+ errno = EINVAL;
+ shlim(f, 0);
+ return 0;
+ }
+
+ if (c=='0') {
+ c = shgetc(f);
+ if ((c|32) == 'x')
+ return hexfloat(f, bits, emin, sign, pok);
+ shunget(f);
+ c = '0';
+ }
+
+ return decfloat(f, c, bits, emin, sign, pok);
+}
+
+static void *arg_n(va_list ap, unsigned int n)
+{
+ void *p;
+ unsigned int i;
+ va_list ap2;
+ va_copy(ap2, ap);
+ for (i=n; i>1; i--) va_arg(ap2, void *);
+ p = va_arg(ap2, void *);
+ va_end(ap2);
+ return p;
+}
+
+static void store_int(void *dest, int size, unsigned long long i)
+{
+ if (!dest) return;
+ switch (size) {
+ case SIZE_hh:
+ *(char *)dest = i;
+ break;
+ case SIZE_h:
+ *(short *)dest = i;
+ break;
+ case SIZE_def:
+ *(int *)dest = i;
+ break;
+ case SIZE_l:
+ *(long *)dest = i;
+ break;
+ case SIZE_ll:
+ *(long long *)dest = i;
+ break;
+ }
+}
+
+static int ff_vfscanf(FFFILE *f, const char *fmt, va_list ap)
+{
+ int width;
+ int size;
+ int base;
+ const unsigned char *p;
+ int c, t;
+ char *s;
+ void *dest=NULL;
+ int invert;
+ int matches=0;
+ unsigned long long x;
+ double y;
+ ptrdiff_t pos = 0;
+ unsigned char scanset[257];
+ size_t i;
+
+ for (p=(const unsigned char *)fmt; *p; p++) {
+
+ if (av_isspace(*p)) {
+ while (av_isspace(p[1])) p++;
+ shlim(f, 0);
+ while (av_isspace(shgetc(f)));
+ shunget(f);
+ pos += shcnt(f);
+ continue;
+ }
+ if (*p != '%' || p[1] == '%') {
+ shlim(f, 0);
+ if (*p == '%') {
+ p++;
+ while (av_isspace((c=shgetc(f))));
+ } else {
+ c = shgetc(f);
+ }
+ if (c!=*p) {
+ shunget(f);
+ if (c<0) goto input_fail;
+ goto match_fail;
+ }
+ pos += shcnt(f);
+ continue;
+ }
+
+ p++;
+ if (*p=='*') {
+ dest = 0; p++;
+ } else if (av_isdigit(*p) && p[1]=='$') {
+ dest = arg_n(ap, *p-'0'); p+=2;
+ } else {
+ dest = va_arg(ap, void *);
+ }
+
+ for (width=0; av_isdigit(*p); p++) {
+ width = 10*width + *p - '0';
+ }
+
+ if (*p=='m') {
+ s = 0;
+ p++;
+ }
+
+ size = SIZE_def;
+ switch (*p++) {
+ case 'h':
+ if (*p == 'h') p++, size = SIZE_hh;
+ else size = SIZE_h;
+ break;
+ case 'l':
+ if (*p == 'l') p++, size = SIZE_ll;
+ else size = SIZE_l;
+ break;
+ case 'j':
+ size = SIZE_ll;
+ break;
+ case 'z':
+ case 't':
+ size = SIZE_l;
+ break;
+ case 'L':
+ size = SIZE_L;
+ break;
+ case 'd': case 'i': case 'o': case 'u': case 'x':
+ case 'a': case 'e': case 'f': case 'g':
+ case 'A': case 'E': case 'F': case 'G': case 'X':
+ case 's': case 'c': case '[':
+ case 'S': case 'C':
+ case 'p': case 'n':
+ p--;
+ break;
+ default:
+ goto fmt_fail;
+ }
+
+ t = *p;
+
+ /* C or S */
+ if ((t&0x2f) == 3) {
+ t |= 32;
+ size = SIZE_l;
+ }
+
+ switch (t) {
+ case 'c':
+ if (width < 1) width = 1;
+ case '[':
+ break;
+ case 'n':
+ store_int(dest, size, pos);
+ /* do not increment match count, etc! */
+ continue;
+ default:
+ shlim(f, 0);
+ while (av_isspace(shgetc(f)));
+ shunget(f);
+ pos += shcnt(f);
+ }
+
+ shlim(f, width);
+ if (shgetc(f) < 0) goto input_fail;
+ shunget(f);
+
+ switch (t) {
+ case 's':
+ case 'c':
+ case '[':
+ if (t == 'c' || t == 's') {
+ memset(scanset, -1, sizeof scanset);
+ scanset[0] = 0;
+ if (t == 's') {
+ scanset[1 + '\t'] = 0;
+ scanset[1 + '\n'] = 0;
+ scanset[1 + '\v'] = 0;
+ scanset[1 + '\f'] = 0;
+ scanset[1 + '\r'] = 0;
+ scanset[1 + ' ' ] = 0;
+ }
+ } else {
+ if (*++p == '^') p++, invert = 1;
+ else invert = 0;
+ memset(scanset, invert, sizeof scanset);
+ scanset[0] = 0;
+ if (*p == '-') p++, scanset[1+'-'] = 1-invert;
+ else if (*p == ']') p++, scanset[1+']'] = 1-invert;
+ for (; *p != ']'; p++) {
+ if (!*p) goto fmt_fail;
+ if (*p=='-' && p[1] && p[1] != ']')
+ for (c=p++[-1]; c<*p; c++)
+ scanset[1+c] = 1-invert;
+ scanset[1+*p] = 1-invert;
+ }
+ }
+ s = 0;
+ i = 0;
+ if ((s = dest)) {
+ while (scanset[(c=shgetc(f))+1])
+ s[i++] = c;
+ } else {
+ while (scanset[(c=shgetc(f))+1]);
+ }
+ shunget(f);
+ if (!shcnt(f)) goto match_fail;
+ if (t == 'c' && shcnt(f) != width) goto match_fail;
+ if (t != 'c') {
+ if (s) s[i] = 0;
+ }
+ break;
+ case 'p':
+ case 'X':
+ case 'x':
+ base = 16;
+ goto int_common;
+ case 'o':
+ base = 8;
+ goto int_common;
+ case 'd':
+ case 'u':
+ base = 10;
+ goto int_common;
+ case 'i':
+ base = 0;
+int_common:
+ x = ffintscan(f, base, 0, ULLONG_MAX);
+ if (!shcnt(f))
+ goto match_fail;
+ if (t=='p' && dest)
+ *(void **)dest = (void *)(uintptr_t)x;
+ else
+ store_int(dest, size, x);
+ break;
+ case 'a': case 'A':
+ case 'e': case 'E':
+ case 'f': case 'F':
+ case 'g': case 'G':
+ y = fffloatscan(f, size, 0);
+ if (!shcnt(f))
+ goto match_fail;
+ if (dest) {
+ switch (size) {
+ case SIZE_def:
+ *(float *)dest = y;
+ break;
+ case SIZE_l:
+ *(double *)dest = y;
+ break;
+ case SIZE_L:
+ *(double *)dest = y;
+ break;
+ }
+ }
+ break;
+ }
+
+ pos += shcnt(f);
+ if (dest) matches++;
+ }
+ if (0) {
+fmt_fail:
+input_fail:
+ if (!matches) matches--;
+ }
+match_fail:
+ return matches;
+}
+
+static int ff_vsscanf(const char *s, const char *fmt, va_list ap)
+{
+ FFFILE f = {
+ .buf = (void *)s, .cookie = (void *)s,
+ .read = ffstring_read,
+ };
+
+ return ff_vfscanf(&f, fmt, ap);
+}
+
+int av_sscanf(const char *string, const char *format, ...)
+{
+ int ret;
+ va_list ap;
+ va_start(ap, format);
+ ret = ff_vsscanf(string, format, ap);
+ va_end(ap);
+ return ret;
+}
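[editor's note] av_sscanf() gives a locale-independent sscanf: the musl-derived float parsing above never consults the C locale, which matters when parsing option strings such as frame rates or aspect ratios. A small usage sketch:

#include <stdio.h>
#include "libavutil/avstring.h"

int main(void)
{
    int num, den;
    double gamma;

    /* av_sscanf returns the number of conversions matched */
    if (av_sscanf("16:9", "%d:%d", &num, &den) != 2)
        return 1;

    /* '.' is always the radix character, regardless of the process locale */
    if (av_sscanf("2.2", "%lf", &gamma) != 1)
        return 1;

    printf("%d/%d gamma=%f\n", num, den, gamma);
    return 0;
}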
diff --git a/media/ffvpx/libavutil/avstring.c b/media/ffvpx/libavutil/avstring.c
index 4c068f5bc5..e460b5be7f 100644
--- a/media/ffvpx/libavutil/avstring.c
+++ b/media/ffvpx/libavutil/avstring.c
@@ -19,17 +19,20 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <limits.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "config.h"
-#include "common.h"
#include "mem.h"
#include "avassert.h"
#include "avstring.h"
#include "bprint.h"
+#include "error.h"
+#include "macros.h"
+#include "version.h"
int av_strstart(const char *str, const char *pfx, const char **ptr)
{
@@ -136,14 +139,6 @@ end:
return p;
}
-char *av_d2str(double d)
-{
- char *str = av_malloc(16);
- if (str)
- snprintf(str, 16, "%f", d);
- return str;
-}
-
#define WHITESPACES " \n\t\r"
char *av_get_token(const char **buf, const char *term)
@@ -257,12 +252,18 @@ char *av_strireplace(const char *str, const char *from, const char *to)
const char *av_basename(const char *path)
{
- char *p = strrchr(path, '/');
-
+ char *p;
#if HAVE_DOS_PATHS
- char *q = strrchr(path, '\\');
- char *d = strchr(path, ':');
+ char *q, *d;
+#endif
+ if (!path || *path == '\0')
+ return ".";
+
+ p = strrchr(path, '/');
+#if HAVE_DOS_PATHS
+ q = strrchr(path, '\\');
+ d = strchr(path, ':');
p = FFMAX3(p, q, d);
#endif
@@ -274,11 +275,11 @@ const char *av_basename(const char *path)
const char *av_dirname(char *path)
{
- char *p = strrchr(path, '/');
+ char *p = path ? strrchr(path, '/') : NULL;
#if HAVE_DOS_PATHS
- char *q = strrchr(path, '\\');
- char *d = strchr(path, ':');
+ char *q = path ? strrchr(path, '\\') : NULL;
+ char *d = path ? strchr(path, ':') : NULL;
d = d ? d + 1 : d;
@@ -328,17 +329,18 @@ int av_escape(char **dst, const char *src, const char *special_chars,
enum AVEscapeMode mode, int flags)
{
AVBPrint dstbuf;
+ int ret;
- av_bprint_init(&dstbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
+ av_bprint_init(&dstbuf, 1, INT_MAX); /* (int)dstbuf.len must be >= 0 */
av_bprint_escape(&dstbuf, src, special_chars, mode, flags);
if (!av_bprint_is_complete(&dstbuf)) {
av_bprint_finalize(&dstbuf, NULL);
return AVERROR(ENOMEM);
- } else {
- av_bprint_finalize(&dstbuf, dst);
- return dstbuf.len;
}
+ if ((ret = av_bprint_finalize(&dstbuf, dst)) < 0)
+ return ret;
+ return dstbuf.len;
}
int av_match_name(const char *name, const char *names)
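[editor's note] With av_d2str() removed, callers can get the same result from av_asprintf(); and av_basename()/av_dirname() now tolerate NULL or empty paths, returning ".". A hedged migration sketch:

#include <stdio.h>
#include "libavutil/avstring.h"
#include "libavutil/mem.h"

int main(void)
{
    /* former av_d2str(d) is just a formatted allocation */
    char *str = av_asprintf("%f", 3.14);
    if (!str)
        return 1;

    /* NULL and "" now return "." instead of dereferencing a bad pointer */
    printf("%s %s %s\n", str, av_basename(NULL), av_basename(""));

    av_free(str);
    return 0;
}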
diff --git a/media/ffvpx/libavutil/avstring.h b/media/ffvpx/libavutil/avstring.h
index 37dd4e2da0..e260263763 100644
--- a/media/ffvpx/libavutil/avstring.h
+++ b/media/ffvpx/libavutil/avstring.h
@@ -24,6 +24,7 @@
#include <stddef.h>
#include <stdint.h>
#include "attributes.h"
+#include "version.h"
/**
* @addtogroup lavu_string
@@ -134,6 +135,7 @@ size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_forma
/**
* Get the count of continuous non zero chars starting from the beginning.
*
+ * @param s the string whose length to count
* @param len maximum number of characters to check in the string, that
* is the maximum value which is returned by the function
*/
@@ -156,11 +158,6 @@ static inline size_t av_strnlen(const char *s, size_t len)
char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2);
/**
- * Convert a number to an av_malloced string.
- */
-char *av_d2str(double d);
-
-/**
* Unescape the given string until a non escaped terminating char,
* and return the token corresponding to the unescaped string.
*
@@ -274,16 +271,21 @@ char *av_strireplace(const char *str, const char *from, const char *to);
/**
* Thread safe basename.
- * @param path the path, on DOS both \ and / are considered separators.
+ * @param path the string to parse, on DOS both \ and / are considered separators.
* @return pointer to the basename substring.
+ * If path does not contain a slash, the function returns a copy of path.
+ * If path is a NULL pointer or points to an empty string, a pointer
+ * to a string "." is returned.
*/
const char *av_basename(const char *path);
/**
* Thread safe dirname.
- * @param path the path, on DOS both \ and / are considered separators.
- * @return the path with the separator replaced by the string terminator or ".".
- * @note the function may change the input string.
+ * @param path the string to parse, on DOS both \ and / are considered separators.
+ * @return A pointer to a string that's the parent directory of path.
+ * If path is a NULL pointer or points to an empty string, a pointer
+ * to a string "." is returned.
+ * @note the function may modify the contents of the path, so copies should be passed.
*/
const char *av_dirname(char *path);
@@ -314,6 +316,7 @@ enum AVEscapeMode {
AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode.
AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping.
AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping.
+ AV_ESCAPE_MODE_XML, ///< Use XML non-markup character data escaping.
};
/**
@@ -334,6 +337,19 @@ enum AVEscapeMode {
#define AV_ESCAPE_FLAG_STRICT (1 << 1)
/**
+ * Within AV_ESCAPE_MODE_XML, additionally escape single quotes for single
+ * quoted attributes.
+ */
+#define AV_ESCAPE_FLAG_XML_SINGLE_QUOTES (1 << 2)
+
+/**
+ * Within AV_ESCAPE_MODE_XML, additionally escape double quotes for double
+ * quoted attributes.
+ */
+#define AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES (1 << 3)
+
+
+/**
* Escape string in src, and put the escaped string in an allocated
* string in *dst, which must be freed with av_free().
*
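[editor's note] A sketch of the new XML escaping mode for attribute values; AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES additionally turns double quotes into &quot; so the result can sit inside a double-quoted attribute:

#include <stdio.h>
#include "libavutil/avstring.h"
#include "libavutil/mem.h"

int main(void)
{
    char *escaped = NULL;
    int ret = av_escape(&escaped, "a \"quoted\" <tag> & more", NULL,
                        AV_ESCAPE_MODE_XML,
                        AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES);
    if (ret < 0)
        return 1;
    /* -> a &quot;quoted&quot; &lt;tag&gt; &amp; more */
    printf("%s\n", escaped);
    av_free(escaped);
    return 0;
}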
diff --git a/media/ffvpx/libavutil/avutil.h b/media/ffvpx/libavutil/avutil.h
index 4d633156d1..64b68bdbd3 100644
--- a/media/ffvpx/libavutil/avutil.h
+++ b/media/ffvpx/libavutil/avutil.h
@@ -331,12 +331,18 @@ unsigned av_int_list_length_for_size(unsigned elsize,
#define av_int_list_length(list, term) \
av_int_list_length_for_size(sizeof(*(list)), list, term)
+#if FF_API_AV_FOPEN_UTF8
/**
* Open a file using a UTF-8 filename.
* The API of this function matches POSIX fopen(), errors are returned through
* errno.
+ * @deprecated Avoid using it, as on Windows, the FILE* allocated by this
+ * function may be allocated with a different CRT than the caller
+ * who uses the FILE*. No replacement provided in public API.
*/
+attribute_deprecated
FILE *av_fopen_utf8(const char *path, const char *mode);
+#endif
/**
* Return the fractional representation of the internal time base.
diff --git a/media/ffvpx/libavutil/base64.c b/media/ffvpx/libavutil/base64.c
index 25ae8c411c..3e66f4fcbe 100644
--- a/media/ffvpx/libavutil/base64.c
+++ b/media/ffvpx/libavutil/base64.c
@@ -24,10 +24,12 @@
* @author Ryan Martell <rdm4@martellventures.com> (with lots of Michael)
*/
-#include "common.h"
+#include <limits.h>
+#include <stddef.h>
+
#include "base64.h"
+#include "error.h"
#include "intreadwrite.h"
-#include "timer.h"
/* ---------------- private code */
static const uint8_t map2[256] =
@@ -79,12 +81,16 @@ static const uint8_t map2[256] =
int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
{
uint8_t *dst = out;
- uint8_t *end = out + out_size;
+ uint8_t *end;
// no sign extension
const uint8_t *in = in_str;
unsigned bits = 0xff;
unsigned v;
+ if (!out)
+ goto validity_check;
+
+ end = out + out_size;
while (end - dst > 3) {
BASE64_DEC_STEP(0);
BASE64_DEC_STEP(1);
@@ -108,6 +114,7 @@ int av_base64_decode(uint8_t *out, const char *in_str, int out_size)
*dst++ = v;
in += 4;
}
+validity_check:
while (1) {
BASE64_DEC_STEP(0);
in++;
@@ -126,7 +133,7 @@ out2:
*dst++ = v >> 4;
out1:
out0:
- return bits & 1 ? AVERROR_INVALIDDATA : dst - out;
+ return bits & 1 ? AVERROR_INVALIDDATA : out ? dst - out : 0;
}
/*****************************************************************************
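[editor's note] The NULL-output path above turns av_base64_decode() into a validity check: pass a NULL buffer first, then allocate and decode. A sketch using the existing AV_BASE64_DECODE_SIZE upper-bound macro from base64.h:

#include <string.h>
#include "libavutil/base64.h"
#include "libavutil/mem.h"

static uint8_t *decode_b64(const char *in, int *out_len)
{
    uint8_t *buf;
    int size, ret;

    /* with out == NULL only the syntax is checked: 0 or an error returns */
    if (av_base64_decode(NULL, in, 0) < 0)
        return NULL;

    size = AV_BASE64_DECODE_SIZE(strlen(in));
    buf  = av_malloc(size);
    if (!buf)
        return NULL;

    ret = av_base64_decode(buf, in, size);
    if (ret < 0) {
        av_free(buf);
        return NULL;
    }
    *out_len = ret;
    return buf;
}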
diff --git a/media/ffvpx/libavutil/bprint.c b/media/ffvpx/libavutil/bprint.c
index 2f059c5ba6..5b540ebc9e 100644
--- a/media/ffvpx/libavutil/bprint.c
+++ b/media/ffvpx/libavutil/bprint.c
@@ -18,16 +18,16 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
-#include "avassert.h"
#include "avstring.h"
#include "bprint.h"
-#include "common.h"
#include "compat/va_copy.h"
#include "error.h"
+#include "macros.h"
#include "mem.h"
#define av_bprint_room(buf) ((buf)->size - FFMIN((buf)->len, (buf)->size))
@@ -245,10 +245,8 @@ int av_bprint_finalize(AVBPrint *buf, char **ret_str)
str = buf->str;
buf->str = NULL;
} else {
- str = av_malloc(real_size);
- if (str)
- memcpy(str, buf->str, real_size);
- else
+ str = av_memdup(buf->str, real_size);
+ if (!str)
ret = AVERROR(ENOMEM);
}
*ret_str = str;
@@ -283,6 +281,35 @@ void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_cha
av_bprint_chars(dstbuf, '\'', 1);
break;
+ case AV_ESCAPE_MODE_XML:
+ /* escape XML non-markup character data as per 2.4 by default: */
+ /* [^<&]* - ([^<&]* ']]>' [^<&]*) */
+
+ /* additionally, given one of the AV_ESCAPE_FLAG_XML_* flags, */
+ /* escape those specific characters as required. */
+ for (; *src; src++) {
+ switch (*src) {
+ case '&' : av_bprintf(dstbuf, "%s", "&amp;"); break;
+ case '<' : av_bprintf(dstbuf, "%s", "&lt;"); break;
+ case '>' : av_bprintf(dstbuf, "%s", "&gt;"); break;
+ case '\'':
+ if (!(flags & AV_ESCAPE_FLAG_XML_SINGLE_QUOTES))
+ goto XML_DEFAULT_HANDLING;
+
+ av_bprintf(dstbuf, "%s", "&apos;");
+ break;
+ case '"' :
+ if (!(flags & AV_ESCAPE_FLAG_XML_DOUBLE_QUOTES))
+ goto XML_DEFAULT_HANDLING;
+
+ av_bprintf(dstbuf, "%s", "&quot;");
+ break;
+XML_DEFAULT_HANDLING:
+ default: av_bprint_chars(dstbuf, *src, 1);
+ }
+ }
+ break;
+
/* case AV_ESCAPE_MODE_BACKSLASH or unknown mode */
default:
/* \-escape characters */
diff --git a/media/ffvpx/libavutil/bprint.h b/media/ffvpx/libavutil/bprint.h
index c09b1ac1e1..f27d30f723 100644
--- a/media/ffvpx/libavutil/bprint.h
+++ b/media/ffvpx/libavutil/bprint.h
@@ -18,6 +18,12 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/**
+ * @file
+ * @ingroup lavu_avbprint
+ * AVBPrint public header
+ */
+
#ifndef AVUTIL_BPRINT_H
#define AVUTIL_BPRINT_H
@@ -27,6 +33,14 @@
#include "avstring.h"
/**
+ * @defgroup lavu_avbprint AVBPrint
+ * @ingroup lavu_data
+ *
+ * A buffer to print data progressively
+ * @{
+ */
+
+/**
* Define a structure with extra padding to a fixed size
* This helps ensuring binary compatibility with future versions.
*/
@@ -48,14 +62,14 @@ typedef struct name { \
* Small buffers are kept in the structure itself, and thus require no
* memory allocation at all (unless the contents of the buffer is needed
* after the structure goes out of scope). This is almost as lightweight as
- * declaring a local "char buf[512]".
+ * declaring a local `char buf[512]`.
*
* The length of the string can go beyond the allocated size: the buffer is
* then truncated, but the functions still keep account of the actual total
* length.
*
- * In other words, buf->len can be greater than buf->size and records the
- * total length of what would have been to the buffer if there had been
+ * In other words, AVBPrint.len can be greater than AVBPrint.size and records
+ * the total length of what would have been to the buffer if there had been
* enough memory.
*
* Append operations do not need to be tested for failure: if a memory
@@ -63,20 +77,17 @@ typedef struct name { \
* is still updated. This situation can be tested with
* av_bprint_is_complete().
*
- * The size_max field determines several possible behaviours:
- *
- * size_max = -1 (= UINT_MAX) or any large value will let the buffer be
- * reallocated as necessary, with an amortized linear cost.
- *
- * size_max = 0 prevents writing anything to the buffer: only the total
- * length is computed. The write operations can then possibly be repeated in
- * a buffer with exactly the necessary size
- * (using size_init = size_max = len + 1).
- *
- * size_max = 1 is automatically replaced by the exact size available in the
- * structure itself, thus ensuring no dynamic memory allocation. The
- * internal buffer is large enough to hold a reasonable paragraph of text,
- * such as the current paragraph.
+ * The AVBPrint.size_max field determines several possible behaviours:
+ * - `size_max = -1` (= `UINT_MAX`) or any large value will let the buffer be
+ * reallocated as necessary, with an amortized linear cost.
+ * - `size_max = 0` prevents writing anything to the buffer: only the total
+ * length is computed. The write operations can then possibly be repeated in
+ * a buffer with exactly the necessary size
+ * (using `size_init = size_max = len + 1`).
+ * - `size_max = 1` is automatically replaced by the exact size available in the
+ * structure itself, thus ensuring no dynamic memory allocation. The
+ * internal buffer is large enough to hold a reasonable paragraph of text,
+ * such as the current paragraph.
*/
FF_PAD_STRUCTURE(AVBPrint, 1024,
@@ -88,12 +99,31 @@ FF_PAD_STRUCTURE(AVBPrint, 1024,
)
/**
+ * @name Max size special values
* Convenience macros for special values for av_bprint_init() size_max
* parameter.
+ * @{
+ */
+
+/**
+ * Buffer will be reallocated as necessary, with an amortized linear cost.
*/
#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1)
+/**
+ * Use the exact size available in the AVBPrint structure itself.
+ *
+ * Thus ensuring no dynamic memory allocation. The internal buffer is large
+ * enough to hold a reasonable paragraph of text, such as the current paragraph.
+ */
#define AV_BPRINT_SIZE_AUTOMATIC 1
+/**
+ * Do not write anything to the buffer, only calculate the total length.
+ *
+ * The write operations can then possibly be repeated in a buffer with
+ * exactly the necessary size (using `size_init = size_max = AVBPrint.len + 1`).
+ */
#define AV_BPRINT_SIZE_COUNT_ONLY 0
+/** @} */
/**
* Init a print buffer.
@@ -101,12 +131,12 @@ FF_PAD_STRUCTURE(AVBPrint, 1024,
* @param buf buffer to init
* @param size_init initial size (including the final 0)
* @param size_max maximum size;
- * 0 means do not write anything, just count the length;
- * 1 is replaced by the maximum value for automatic storage;
- * any large value means that the internal buffer will be
- * reallocated as needed up to that limit; -1 is converted to
- * UINT_MAX, the largest limit possible.
- * Check also AV_BPRINT_SIZE_* macros.
+ * - `0` means do not write anything, just count the length
+ * - `1` is replaced by the maximum value for automatic storage
+ * any large value means that the internal buffer will be
+ * reallocated as needed up to that limit
+ * - `-1` is converted to `UINT_MAX`, the largest limit possible.
+ * Check also `AV_BPRINT_SIZE_*` macros.
*/
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max);
@@ -216,4 +246,6 @@ int av_bprint_finalize(AVBPrint *buf, char **ret_str);
void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars,
enum AVEscapeMode mode, int flags);
+/** @} */
+
#endif /* AVUTIL_BPRINT_H */
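[editor's note] A sketch of the two-pass pattern the AV_BPRINT_SIZE_COUNT_ONLY documentation above describes: measure first with a count-only buffer, then print into an exactly sized one and hand ownership to the caller.

#include <stdio.h>
#include "libavutil/bprint.h"
#include "libavutil/mem.h"

int main(void)
{
    AVBPrint bp;
    char *out = NULL;

    /* pass 1: nothing is written, only bp.len is accumulated */
    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_COUNT_ONLY);
    av_bprintf(&bp, "frame=%d pts=%d", 42, 12345);

    /* pass 2: size_init = size_max = len + 1, as documented above */
    av_bprint_init(&bp, bp.len + 1, bp.len + 1);
    av_bprintf(&bp, "frame=%d pts=%d", 42, 12345);
    if (!av_bprint_is_complete(&bp) || av_bprint_finalize(&bp, &out) < 0)
        return 1;

    printf("%s\n", out);
    av_free(out);
    return 0;
}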
diff --git a/media/ffvpx/libavutil/bswap.h b/media/ffvpx/libavutil/bswap.h
index 91cb79538d..4840ab433f 100644
--- a/media/ffvpx/libavutil/bswap.h
+++ b/media/ffvpx/libavutil/bswap.h
@@ -40,6 +40,8 @@
# include "arm/bswap.h"
#elif ARCH_AVR32
# include "avr32/bswap.h"
+#elif ARCH_RISCV
+# include "riscv/bswap.h"
#elif ARCH_SH4
# include "sh4/bswap.h"
#elif ARCH_X86
diff --git a/media/ffvpx/libavutil/buffer.c b/media/ffvpx/libavutil/buffer.c
index 8d1aa5fa84..e4562a79b1 100644
--- a/media/ffvpx/libavutil/buffer.c
+++ b/media/ffvpx/libavutil/buffer.c
@@ -20,21 +20,17 @@
#include <stdint.h>
#include <string.h>
+#include "avassert.h"
#include "buffer_internal.h"
#include "common.h"
#include "mem.h"
#include "thread.h"
-AVBufferRef *av_buffer_create(uint8_t *data, int size,
- void (*free)(void *opaque, uint8_t *data),
- void *opaque, int flags)
+static AVBufferRef *buffer_create(AVBuffer *buf, uint8_t *data, size_t size,
+ void (*free)(void *opaque, uint8_t *data),
+ void *opaque, int flags)
{
AVBufferRef *ref = NULL;
- AVBuffer *buf = NULL;
-
- buf = av_mallocz(sizeof(*buf));
- if (!buf)
- return NULL;
buf->data = data;
buf->size = size;
@@ -43,14 +39,11 @@ AVBufferRef *av_buffer_create(uint8_t *data, int size,
atomic_init(&buf->refcount, 1);
- if (flags & AV_BUFFER_FLAG_READONLY)
- buf->flags |= BUFFER_FLAG_READONLY;
+ buf->flags = flags;
ref = av_mallocz(sizeof(*ref));
- if (!ref) {
- av_freep(&buf);
+ if (!ref)
return NULL;
- }
ref->buffer = buf;
ref->data = data;
@@ -59,12 +52,29 @@ AVBufferRef *av_buffer_create(uint8_t *data, int size,
return ref;
}
+AVBufferRef *av_buffer_create(uint8_t *data, size_t size,
+ void (*free)(void *opaque, uint8_t *data),
+ void *opaque, int flags)
+{
+ AVBufferRef *ret;
+ AVBuffer *buf = av_mallocz(sizeof(*buf));
+ if (!buf)
+ return NULL;
+
+ ret = buffer_create(buf, data, size, free, opaque, flags);
+ if (!ret) {
+ av_free(buf);
+ return NULL;
+ }
+ return ret;
+}
+
void av_buffer_default_free(void *opaque, uint8_t *data)
{
av_free(data);
}
-AVBufferRef *av_buffer_alloc(int size)
+AVBufferRef *av_buffer_alloc(size_t size)
{
AVBufferRef *ret = NULL;
uint8_t *data = NULL;
@@ -80,7 +90,7 @@ AVBufferRef *av_buffer_alloc(int size)
return ret;
}
-AVBufferRef *av_buffer_allocz(int size)
+AVBufferRef *av_buffer_allocz(size_t size)
{
AVBufferRef *ret = av_buffer_alloc(size);
if (!ret)
@@ -90,7 +100,7 @@ AVBufferRef *av_buffer_allocz(int size)
return ret;
}
-AVBufferRef *av_buffer_ref(AVBufferRef *buf)
+AVBufferRef *av_buffer_ref(const AVBufferRef *buf)
{
AVBufferRef *ret = av_mallocz(sizeof(*ret));
@@ -116,9 +126,13 @@ static void buffer_replace(AVBufferRef **dst, AVBufferRef **src)
} else
av_freep(dst);
- if (atomic_fetch_add_explicit(&b->refcount, -1, memory_order_acq_rel) == 1) {
+ if (atomic_fetch_sub_explicit(&b->refcount, 1, memory_order_acq_rel) == 1) {
+ /* b->free below might already free the structure containing *b,
+ * so we have to read the flag now to avoid use-after-free. */
+ int free_avbuffer = !(b->flags_internal & BUFFER_FLAG_NO_FREE);
b->free(b->opaque, b->data);
- av_freep(&b);
+ if (free_avbuffer)
+ av_free(b);
}
}
@@ -166,10 +180,11 @@ int av_buffer_make_writable(AVBufferRef **pbuf)
return 0;
}
-int av_buffer_realloc(AVBufferRef **pbuf, int size)
+int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
{
AVBufferRef *buf = *pbuf;
uint8_t *tmp;
+ int ret;
if (!buf) {
/* allocate a new buffer with av_realloc(), so it will be reallocatable
@@ -184,21 +199,21 @@ int av_buffer_realloc(AVBufferRef **pbuf, int size)
return AVERROR(ENOMEM);
}
- buf->buffer->flags |= BUFFER_FLAG_REALLOCATABLE;
+ buf->buffer->flags_internal |= BUFFER_FLAG_REALLOCATABLE;
*pbuf = buf;
return 0;
} else if (buf->size == size)
return 0;
- if (!(buf->buffer->flags & BUFFER_FLAG_REALLOCATABLE) ||
+ if (!(buf->buffer->flags_internal & BUFFER_FLAG_REALLOCATABLE) ||
!av_buffer_is_writable(buf) || buf->data != buf->buffer->data) {
/* cannot realloc, allocate a new reallocable buffer and copy data */
AVBufferRef *new = NULL;
- av_buffer_realloc(&new, size);
- if (!new)
- return AVERROR(ENOMEM);
+ ret = av_buffer_realloc(&new, size);
+ if (ret < 0)
+ return ret;
memcpy(new->data, buf->data, FFMIN(size, buf->size));
@@ -215,8 +230,34 @@ int av_buffer_realloc(AVBufferRef **pbuf, int size)
return 0;
}
-AVBufferPool *av_buffer_pool_init2(int size, void *opaque,
- AVBufferRef* (*alloc)(void *opaque, int size),
+int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
+{
+ AVBufferRef *dst = *pdst;
+ AVBufferRef *tmp;
+
+ if (!src) {
+ av_buffer_unref(pdst);
+ return 0;
+ }
+
+ if (dst && dst->buffer == src->buffer) {
+ /* make sure the data pointers match */
+ dst->data = src->data;
+ dst->size = src->size;
+ return 0;
+ }
+
+ tmp = av_buffer_ref(src);
+ if (!tmp)
+ return AVERROR(ENOMEM);
+
+ av_buffer_unref(pdst);
+ *pdst = tmp;
+ return 0;
+}
+
+AVBufferPool *av_buffer_pool_init2(size_t size, void *opaque,
+ AVBufferRef* (*alloc)(void *opaque, size_t size),
void (*pool_free)(void *opaque))
{
AVBufferPool *pool = av_mallocz(sizeof(*pool));
@@ -228,6 +269,7 @@ AVBufferPool *av_buffer_pool_init2(int size, void *opaque,
pool->size = size;
pool->opaque = opaque;
pool->alloc2 = alloc;
+ pool->alloc = av_buffer_alloc; // fallback
pool->pool_free = pool_free;
atomic_init(&pool->refcount, 1);
@@ -235,7 +277,7 @@ AVBufferPool *av_buffer_pool_init2(int size, void *opaque,
return pool;
}
-AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size))
+AVBufferPool *av_buffer_pool_init(size_t size, AVBufferRef* (*alloc)(size_t size))
{
AVBufferPool *pool = av_mallocz(sizeof(*pool));
if (!pool)
@@ -251,11 +293,7 @@ AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size))
return pool;
}
-/*
- * This function gets called when the pool has been uninited and
- * all the buffers returned to it.
- */
-static void buffer_pool_free(AVBufferPool *pool)
+static void buffer_pool_flush(AVBufferPool *pool)
{
while (pool->pool) {
BufferPoolEntry *buf = pool->pool;
@@ -264,6 +302,15 @@ static void buffer_pool_free(AVBufferPool *pool)
buf->free(buf->opaque, buf->data);
av_freep(&buf);
}
+}
+
+/*
+ * This function gets called when the pool has been uninited and
+ * all the buffers returned to it.
+ */
+static void buffer_pool_free(AVBufferPool *pool)
+{
+ buffer_pool_flush(pool);
ff_mutex_destroy(&pool->mutex);
if (pool->pool_free)
@@ -281,7 +328,11 @@ void av_buffer_pool_uninit(AVBufferPool **ppool)
pool = *ppool;
*ppool = NULL;
- if (atomic_fetch_add_explicit(&pool->refcount, -1, memory_order_acq_rel) == 1)
+ ff_mutex_lock(&pool->mutex);
+ buffer_pool_flush(pool);
+ ff_mutex_unlock(&pool->mutex);
+
+ if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
buffer_pool_free(pool);
}
@@ -290,15 +341,12 @@ static void pool_release_buffer(void *opaque, uint8_t *data)
BufferPoolEntry *buf = opaque;
AVBufferPool *pool = buf->pool;
- if(CONFIG_MEMORY_POISONING)
- memset(buf->data, FF_MEMORY_POISON, pool->size);
-
ff_mutex_lock(&pool->mutex);
buf->next = pool->pool;
pool->pool = buf;
ff_mutex_unlock(&pool->mutex);
- if (atomic_fetch_add_explicit(&pool->refcount, -1, memory_order_acq_rel) == 1)
+ if (atomic_fetch_sub_explicit(&pool->refcount, 1, memory_order_acq_rel) == 1)
buffer_pool_free(pool);
}
@@ -309,6 +357,8 @@ static AVBufferRef *pool_alloc_buffer(AVBufferPool *pool)
BufferPoolEntry *buf;
AVBufferRef *ret;
+ av_assert0(pool->alloc || pool->alloc2);
+
ret = pool->alloc2 ? pool->alloc2(pool->opaque, pool->size) :
pool->alloc(pool->size);
if (!ret)
@@ -339,11 +389,13 @@ AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
ff_mutex_lock(&pool->mutex);
buf = pool->pool;
if (buf) {
- ret = av_buffer_create(buf->data, pool->size, pool_release_buffer,
- buf, 0);
+ memset(&buf->buffer, 0, sizeof(buf->buffer));
+ ret = buffer_create(&buf->buffer, buf->data, pool->size,
+ pool_release_buffer, buf, 0);
if (ret) {
pool->pool = buf->next;
buf->next = NULL;
+ buf->buffer.flags_internal |= BUFFER_FLAG_NO_FREE;
}
} else {
ret = pool_alloc_buffer(pool);
@@ -355,3 +407,10 @@ AVBufferRef *av_buffer_pool_get(AVBufferPool *pool)
return ret;
}
+
+void *av_buffer_pool_buffer_get_opaque(const AVBufferRef *ref)
+{
+ BufferPoolEntry *buf = ref->buffer->opaque;
+ av_assert0(buf);
+ return buf->opaque;
+}
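[editor's note] A sketch of the new opaque accessor with a custom pool allocator: the opaque passed to av_buffer_create() by the allocator is what av_buffer_pool_buffer_get_opaque() later returns, since the reference's own opaque is taken over by the pool bookkeeping. The my_* names are placeholders.

#include <stdlib.h>
#include "libavutil/buffer.h"
#include "libavutil/mem.h"

struct my_tag { int id; };   /* placeholder per-buffer metadata */

static void my_free(void *opaque, uint8_t *data)
{
    free(opaque);            /* the tag */
    av_free(data);
}

static AVBufferRef *my_alloc(void *pool_opaque, size_t size)
{
    struct my_tag *tag  = malloc(sizeof(*tag));
    uint8_t       *data = av_malloc(size);
    AVBufferRef   *ref;

    if (!tag || !data) {
        free(tag);
        av_free(data);
        return NULL;
    }
    tag->id = 42;
    /* this opaque is what the pool hands back per buffer */
    ref = av_buffer_create(data, size, my_free, tag, 0);
    if (!ref)
        my_free(tag, data);
    return ref;
}

static int demo_pool(void)
{
    AVBufferPool *pool = av_buffer_pool_init2(4096, NULL, my_alloc, NULL);
    AVBufferRef  *buf;

    if (!pool)
        return -1;
    buf = av_buffer_pool_get(pool);
    if (buf) {
        struct my_tag *tag = av_buffer_pool_buffer_get_opaque(buf);
        (void)tag;           /* tag->id survives pool reuse of the buffer */
        av_buffer_unref(&buf);
    }
    av_buffer_pool_uninit(&pool);
    return 0;
}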
diff --git a/media/ffvpx/libavutil/buffer.h b/media/ffvpx/libavutil/buffer.h
index 73b6bd0b14..e1ef5b7f07 100644
--- a/media/ffvpx/libavutil/buffer.h
+++ b/media/ffvpx/libavutil/buffer.h
@@ -25,6 +25,7 @@
#ifndef AVUTIL_BUFFER_H
#define AVUTIL_BUFFER_H
+#include <stddef.h>
#include <stdint.h>
/**
@@ -90,7 +91,7 @@ typedef struct AVBufferRef {
/**
* Size of data in bytes.
*/
- int size;
+ size_t size;
} AVBufferRef;
/**
@@ -98,13 +99,13 @@ typedef struct AVBufferRef {
*
* @return an AVBufferRef of given size or NULL when out of memory
*/
-AVBufferRef *av_buffer_alloc(int size);
+AVBufferRef *av_buffer_alloc(size_t size);
/**
* Same as av_buffer_alloc(), except the returned buffer will be initialized
* to zero.
*/
-AVBufferRef *av_buffer_allocz(int size);
+AVBufferRef *av_buffer_allocz(size_t size);
/**
* Always treat the buffer as read-only, even when it has only one
@@ -127,7 +128,7 @@ AVBufferRef *av_buffer_allocz(int size);
*
* @return an AVBufferRef referring to data on success, NULL on failure.
*/
-AVBufferRef *av_buffer_create(uint8_t *data, int size,
+AVBufferRef *av_buffer_create(uint8_t *data, size_t size,
void (*free)(void *opaque, uint8_t *data),
void *opaque, int flags);
@@ -144,7 +145,7 @@ void av_buffer_default_free(void *opaque, uint8_t *data);
* @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on
* failure.
*/
-AVBufferRef *av_buffer_ref(AVBufferRef *buf);
+AVBufferRef *av_buffer_ref(const AVBufferRef *buf);
/**
* Free a given reference and automatically free the buffer if there are no more
@@ -195,7 +196,23 @@ int av_buffer_make_writable(AVBufferRef **buf);
* reference to it (i.e. the one passed to this function). In all other cases
* a new buffer is allocated and the data is copied.
*/
-int av_buffer_realloc(AVBufferRef **buf, int size);
+int av_buffer_realloc(AVBufferRef **buf, size_t size);
+
+/**
+ * Ensure dst refers to the same data as src.
+ *
+ * When *dst is already equivalent to src, do nothing. Otherwise unreference dst
+ * and replace it with a new reference to src.
+ *
+ * @param dst Pointer to either a valid buffer reference or NULL. On success,
+ * this will point to a buffer reference equivalent to src. On
+ * failure, dst will be left untouched.
+ * @param src A buffer reference to replace dst with. May be NULL, then this
+ * function is equivalent to av_buffer_unref(dst).
+ * @return 0 on success
+ * AVERROR(ENOMEM) on memory allocation failure.
+ */
+int av_buffer_replace(AVBufferRef **dst, const AVBufferRef *src);
/**
* @}
@@ -246,7 +263,7 @@ typedef struct AVBufferPool AVBufferPool;
* (av_buffer_alloc()).
* @return newly created buffer pool on success, NULL on error.
*/
-AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));
+AVBufferPool *av_buffer_pool_init(size_t size, AVBufferRef* (*alloc)(size_t size));
/**
* Allocate and initialize a buffer pool with a more complex allocator.
@@ -254,16 +271,17 @@ AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));
* @param size size of each buffer in this pool
* @param opaque arbitrary user data used by the allocator
* @param alloc a function that will be used to allocate new buffers when the
- * pool is empty.
+ * pool is empty. May be NULL, then the default allocator will be
+ * used (av_buffer_alloc()).
* @param pool_free a function that will be called immediately before the pool
* is freed. I.e. after av_buffer_pool_uninit() is called
* by the caller and all the frames are returned to the pool
* and freed. It is intended to uninitialize the user opaque
- * data.
+ * data. May be NULL.
* @return newly created buffer pool on success, NULL on error.
*/
-AVBufferPool *av_buffer_pool_init2(int size, void *opaque,
- AVBufferRef* (*alloc)(void *opaque, int size),
+AVBufferPool *av_buffer_pool_init2(size_t size, void *opaque,
+ AVBufferRef* (*alloc)(void *opaque, size_t size),
void (*pool_free)(void *opaque));
/**
@@ -285,6 +303,19 @@ void av_buffer_pool_uninit(AVBufferPool **pool);
AVBufferRef *av_buffer_pool_get(AVBufferPool *pool);
/**
+ * Query the original opaque parameter of an allocated buffer in the pool.
+ *
+ * @param ref a buffer reference to a buffer returned by av_buffer_pool_get.
+ * @return the opaque parameter set by the buffer allocator function of the
+ * buffer pool.
+ *
+ * @note the opaque parameter of ref is used by the buffer pool implementation,
+ * therefore you have to use this function to access the original opaque
+ * parameter of an allocated buffer.
+ */
+void *av_buffer_pool_buffer_get_opaque(const AVBufferRef *ref);
+
+/**
* @}
*/
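[editor's note] A sketch of a typical av_buffer_replace() call site, which collapses the old unref-then-ref pattern into one call and treats a NULL source as a plain unref:

#include "libavutil/buffer.h"
#include "libavutil/error.h"

static int demo_replace(void)
{
    AVBufferRef *a = av_buffer_alloc(64);
    AVBufferRef *b = NULL;
    int ret;

    if (!a)
        return AVERROR(ENOMEM);

    /* b gets a reference to a's buffer (was: av_buffer_unref + av_buffer_ref) */
    ret = av_buffer_replace(&b, a);
    if (ret < 0) {
        av_buffer_unref(&a);
        return ret;
    }

    /* passing NULL as the source is equivalent to av_buffer_unref(&b) */
    av_buffer_replace(&b, NULL);
    av_buffer_unref(&a);
    return 0;
}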
diff --git a/media/ffvpx/libavutil/buffer_internal.h b/media/ffvpx/libavutil/buffer_internal.h
index 54b67047e5..adb916aaa2 100644
--- a/media/ffvpx/libavutil/buffer_internal.h
+++ b/media/ffvpx/libavutil/buffer_internal.h
@@ -26,17 +26,18 @@
#include "thread.h"
/**
- * The buffer is always treated as read-only.
+ * The buffer was av_realloc()ed, so it is reallocatable.
*/
-#define BUFFER_FLAG_READONLY (1 << 0)
+#define BUFFER_FLAG_REALLOCATABLE (1 << 0)
/**
- * The buffer was av_realloc()ed, so it is reallocatable.
+ * The AVBuffer structure is part of a larger structure
+ * and should not be freed.
*/
-#define BUFFER_FLAG_REALLOCATABLE (1 << 1)
+#define BUFFER_FLAG_NO_FREE (1 << 1)
struct AVBuffer {
uint8_t *data; /**< data described by this buffer */
- int size; /**< size of data in bytes */
+ size_t size; /**< size of data in bytes */
/**
* number of existing AVBufferRef instances referring to this buffer
@@ -54,9 +55,14 @@ struct AVBuffer {
void *opaque;
/**
- * A combination of BUFFER_FLAG_*
+ * A combination of AV_BUFFER_FLAG_*
*/
int flags;
+
+ /**
+ * A combination of BUFFER_FLAG_*
+ */
+ int flags_internal;
};
typedef struct BufferPoolEntry {
@@ -71,6 +77,12 @@ typedef struct BufferPoolEntry {
AVBufferPool *pool;
struct BufferPoolEntry *next;
+
+ /*
+ * An AVBuffer structure to (re)use as AVBuffer for subsequent uses
+ * of this BufferPoolEntry.
+ */
+ AVBuffer buffer;
} BufferPoolEntry;
struct AVBufferPool {
@@ -88,10 +100,10 @@ struct AVBufferPool {
*/
atomic_uint refcount;
- int size;
+ size_t size;
void *opaque;
- AVBufferRef* (*alloc)(int size);
- AVBufferRef* (*alloc2)(void *opaque, int size);
+ AVBufferRef* (*alloc)(size_t size);
+ AVBufferRef* (*alloc2)(void *opaque, size_t size);
void (*pool_free)(void *opaque);
};
diff --git a/media/ffvpx/libavutil/channel_layout.c b/media/ffvpx/libavutil/channel_layout.c
index 3bd5ee29b7..e2f7512254 100644
--- a/media/ffvpx/libavutil/channel_layout.c
+++ b/media/ffvpx/libavutil/channel_layout.c
@@ -24,12 +24,19 @@
*/
#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
-#include "avstring.h"
-#include "avutil.h"
+#include "avassert.h"
#include "channel_layout.h"
#include "bprint.h"
#include "common.h"
+#include "error.h"
+#include "macros.h"
+#include "opt.h"
+
+#define CHAN_IS_AMBI(x) ((x) >= AV_CHAN_AMBISONIC_BASE &&\
+ (x) <= AV_CHAN_AMBISONIC_END)
struct channel_name {
const char *name;
@@ -37,75 +44,168 @@ struct channel_name {
};
static const struct channel_name channel_names[] = {
- [0] = { "FL", "front left" },
- [1] = { "FR", "front right" },
- [2] = { "FC", "front center" },
- [3] = { "LFE", "low frequency" },
- [4] = { "BL", "back left" },
- [5] = { "BR", "back right" },
- [6] = { "FLC", "front left-of-center" },
- [7] = { "FRC", "front right-of-center" },
- [8] = { "BC", "back center" },
- [9] = { "SL", "side left" },
- [10] = { "SR", "side right" },
- [11] = { "TC", "top center" },
- [12] = { "TFL", "top front left" },
- [13] = { "TFC", "top front center" },
- [14] = { "TFR", "top front right" },
- [15] = { "TBL", "top back left" },
- [16] = { "TBC", "top back center" },
- [17] = { "TBR", "top back right" },
- [29] = { "DL", "downmix left" },
- [30] = { "DR", "downmix right" },
- [31] = { "WL", "wide left" },
- [32] = { "WR", "wide right" },
- [33] = { "SDL", "surround direct left" },
- [34] = { "SDR", "surround direct right" },
- [35] = { "LFE2", "low frequency 2" },
+ [AV_CHAN_FRONT_LEFT ] = { "FL", "front left" },
+ [AV_CHAN_FRONT_RIGHT ] = { "FR", "front right" },
+ [AV_CHAN_FRONT_CENTER ] = { "FC", "front center" },
+ [AV_CHAN_LOW_FREQUENCY ] = { "LFE", "low frequency" },
+ [AV_CHAN_BACK_LEFT ] = { "BL", "back left" },
+ [AV_CHAN_BACK_RIGHT ] = { "BR", "back right" },
+ [AV_CHAN_FRONT_LEFT_OF_CENTER ] = { "FLC", "front left-of-center" },
+ [AV_CHAN_FRONT_RIGHT_OF_CENTER] = { "FRC", "front right-of-center" },
+ [AV_CHAN_BACK_CENTER ] = { "BC", "back center" },
+ [AV_CHAN_SIDE_LEFT ] = { "SL", "side left" },
+ [AV_CHAN_SIDE_RIGHT ] = { "SR", "side right" },
+ [AV_CHAN_TOP_CENTER ] = { "TC", "top center" },
+ [AV_CHAN_TOP_FRONT_LEFT ] = { "TFL", "top front left" },
+ [AV_CHAN_TOP_FRONT_CENTER ] = { "TFC", "top front center" },
+ [AV_CHAN_TOP_FRONT_RIGHT ] = { "TFR", "top front right" },
+ [AV_CHAN_TOP_BACK_LEFT ] = { "TBL", "top back left" },
+ [AV_CHAN_TOP_BACK_CENTER ] = { "TBC", "top back center" },
+ [AV_CHAN_TOP_BACK_RIGHT ] = { "TBR", "top back right" },
+ [AV_CHAN_STEREO_LEFT ] = { "DL", "downmix left" },
+ [AV_CHAN_STEREO_RIGHT ] = { "DR", "downmix right" },
+ [AV_CHAN_WIDE_LEFT ] = { "WL", "wide left" },
+ [AV_CHAN_WIDE_RIGHT ] = { "WR", "wide right" },
+ [AV_CHAN_SURROUND_DIRECT_LEFT ] = { "SDL", "surround direct left" },
+ [AV_CHAN_SURROUND_DIRECT_RIGHT] = { "SDR", "surround direct right" },
+ [AV_CHAN_LOW_FREQUENCY_2 ] = { "LFE2", "low frequency 2" },
+ [AV_CHAN_TOP_SIDE_LEFT ] = { "TSL", "top side left" },
+ [AV_CHAN_TOP_SIDE_RIGHT ] = { "TSR", "top side right" },
+ [AV_CHAN_BOTTOM_FRONT_CENTER ] = { "BFC", "bottom front center" },
+ [AV_CHAN_BOTTOM_FRONT_LEFT ] = { "BFL", "bottom front left" },
+ [AV_CHAN_BOTTOM_FRONT_RIGHT ] = { "BFR", "bottom front right" },
};
-static const char *get_channel_name(int channel_id)
+static const char *get_channel_name(enum AVChannel channel_id)
{
- if (channel_id < 0 || channel_id >= FF_ARRAY_ELEMS(channel_names))
+ if ((unsigned) channel_id >= FF_ARRAY_ELEMS(channel_names) ||
+ !channel_names[channel_id].name)
return NULL;
return channel_names[channel_id].name;
}
-static const struct {
+void av_channel_name_bprint(AVBPrint *bp, enum AVChannel channel_id)
+{
+ if (channel_id >= AV_CHAN_AMBISONIC_BASE &&
+ channel_id <= AV_CHAN_AMBISONIC_END)
+ av_bprintf(bp, "AMBI%d", channel_id - AV_CHAN_AMBISONIC_BASE);
+ else if ((unsigned)channel_id < FF_ARRAY_ELEMS(channel_names) &&
+ channel_names[channel_id].name)
+ av_bprintf(bp, "%s", channel_names[channel_id].name);
+ else if (channel_id == AV_CHAN_NONE)
+ av_bprintf(bp, "NONE");
+ else
+ av_bprintf(bp, "USR%d", channel_id);
+}
+
+int av_channel_name(char *buf, size_t buf_size, enum AVChannel channel_id)
+{
+ AVBPrint bp;
+
+ if (!buf && buf_size)
+ return AVERROR(EINVAL);
+
+ av_bprint_init_for_buffer(&bp, buf, buf_size);
+ av_channel_name_bprint(&bp, channel_id);
+
+ return bp.len;
+}
+
+void av_channel_description_bprint(AVBPrint *bp, enum AVChannel channel_id)
+{
+ if (channel_id >= AV_CHAN_AMBISONIC_BASE &&
+ channel_id <= AV_CHAN_AMBISONIC_END)
+ av_bprintf(bp, "ambisonic ACN %d", channel_id - AV_CHAN_AMBISONIC_BASE);
+ else if ((unsigned)channel_id < FF_ARRAY_ELEMS(channel_names) &&
+ channel_names[channel_id].description)
+ av_bprintf(bp, "%s", channel_names[channel_id].description);
+ else if (channel_id == AV_CHAN_NONE)
+ av_bprintf(bp, "none");
+ else
+ av_bprintf(bp, "user %d", channel_id);
+}
+
+int av_channel_description(char *buf, size_t buf_size, enum AVChannel channel_id)
+{
+ AVBPrint bp;
+
+ if (!buf && buf_size)
+ return AVERROR(EINVAL);
+
+ av_bprint_init_for_buffer(&bp, buf, buf_size);
+ av_channel_description_bprint(&bp, channel_id);
+
+ return bp.len;
+}
+
+enum AVChannel av_channel_from_string(const char *str)
+{
+ int i;
+ char *endptr = (char *)str;
+ enum AVChannel id = AV_CHAN_NONE;
+
+ if (!strncmp(str, "AMBI", 4)) {
+ i = strtol(str + 4, NULL, 0);
+ if (i < 0 || i > AV_CHAN_AMBISONIC_END - AV_CHAN_AMBISONIC_BASE)
+ return AV_CHAN_NONE;
+ return AV_CHAN_AMBISONIC_BASE + i;
+ }
+
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++) {
+ if (channel_names[i].name && !strcmp(str, channel_names[i].name))
+ return i;
+ }
+ if (!strncmp(str, "USR", 3)) {
+ const char *p = str + 3;
+ id = strtol(p, &endptr, 0);
+ }
+ if (id >= 0 && !*endptr)
+ return id;
+
+ return AV_CHAN_NONE;
+}
+
+struct channel_layout_name {
const char *name;
- int nb_channels;
- uint64_t layout;
-} channel_layout_map[] = {
- { "mono", 1, AV_CH_LAYOUT_MONO },
- { "stereo", 2, AV_CH_LAYOUT_STEREO },
- { "2.1", 3, AV_CH_LAYOUT_2POINT1 },
- { "3.0", 3, AV_CH_LAYOUT_SURROUND },
- { "3.0(back)", 3, AV_CH_LAYOUT_2_1 },
- { "4.0", 4, AV_CH_LAYOUT_4POINT0 },
- { "quad", 4, AV_CH_LAYOUT_QUAD },
- { "quad(side)", 4, AV_CH_LAYOUT_2_2 },
- { "3.1", 4, AV_CH_LAYOUT_3POINT1 },
- { "5.0", 5, AV_CH_LAYOUT_5POINT0_BACK },
- { "5.0(side)", 5, AV_CH_LAYOUT_5POINT0 },
- { "4.1", 5, AV_CH_LAYOUT_4POINT1 },
- { "5.1", 6, AV_CH_LAYOUT_5POINT1_BACK },
- { "5.1(side)", 6, AV_CH_LAYOUT_5POINT1 },
- { "6.0", 6, AV_CH_LAYOUT_6POINT0 },
- { "6.0(front)", 6, AV_CH_LAYOUT_6POINT0_FRONT },
- { "hexagonal", 6, AV_CH_LAYOUT_HEXAGONAL },
- { "6.1", 7, AV_CH_LAYOUT_6POINT1 },
- { "6.1(back)", 7, AV_CH_LAYOUT_6POINT1_BACK },
- { "6.1(front)", 7, AV_CH_LAYOUT_6POINT1_FRONT },
- { "7.0", 7, AV_CH_LAYOUT_7POINT0 },
- { "7.0(front)", 7, AV_CH_LAYOUT_7POINT0_FRONT },
- { "7.1", 8, AV_CH_LAYOUT_7POINT1 },
- { "7.1(wide)", 8, AV_CH_LAYOUT_7POINT1_WIDE_BACK },
- { "7.1(wide-side)", 8, AV_CH_LAYOUT_7POINT1_WIDE },
- { "octagonal", 8, AV_CH_LAYOUT_OCTAGONAL },
- { "hexadecagonal", 16, AV_CH_LAYOUT_HEXADECAGONAL },
- { "downmix", 2, AV_CH_LAYOUT_STEREO_DOWNMIX, },
+ AVChannelLayout layout;
};
+static const struct channel_layout_name channel_layout_map[] = {
+ { "mono", AV_CHANNEL_LAYOUT_MONO },
+ { "stereo", AV_CHANNEL_LAYOUT_STEREO },
+ { "2.1", AV_CHANNEL_LAYOUT_2POINT1 },
+ { "3.0", AV_CHANNEL_LAYOUT_SURROUND },
+ { "3.0(back)", AV_CHANNEL_LAYOUT_2_1 },
+ { "4.0", AV_CHANNEL_LAYOUT_4POINT0 },
+ { "quad", AV_CHANNEL_LAYOUT_QUAD },
+ { "quad(side)", AV_CHANNEL_LAYOUT_2_2 },
+ { "3.1", AV_CHANNEL_LAYOUT_3POINT1 },
+ { "5.0", AV_CHANNEL_LAYOUT_5POINT0_BACK },
+ { "5.0(side)", AV_CHANNEL_LAYOUT_5POINT0 },
+ { "4.1", AV_CHANNEL_LAYOUT_4POINT1 },
+ { "5.1", AV_CHANNEL_LAYOUT_5POINT1_BACK },
+ { "5.1(side)", AV_CHANNEL_LAYOUT_5POINT1 },
+ { "6.0", AV_CHANNEL_LAYOUT_6POINT0 },
+ { "6.0(front)", AV_CHANNEL_LAYOUT_6POINT0_FRONT },
+ { "hexagonal", AV_CHANNEL_LAYOUT_HEXAGONAL },
+ { "6.1", AV_CHANNEL_LAYOUT_6POINT1 },
+ { "6.1(back)", AV_CHANNEL_LAYOUT_6POINT1_BACK },
+ { "6.1(front)", AV_CHANNEL_LAYOUT_6POINT1_FRONT },
+ { "7.0", AV_CHANNEL_LAYOUT_7POINT0 },
+ { "7.0(front)", AV_CHANNEL_LAYOUT_7POINT0_FRONT },
+ { "7.1", AV_CHANNEL_LAYOUT_7POINT1 },
+ { "7.1(wide)", AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK },
+ { "7.1(wide-side)", AV_CHANNEL_LAYOUT_7POINT1_WIDE },
+ { "7.1(top)", AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK },
+ { "octagonal", AV_CHANNEL_LAYOUT_OCTAGONAL },
+ { "cube", AV_CHANNEL_LAYOUT_CUBE },
+ { "hexadecagonal", AV_CHANNEL_LAYOUT_HEXADECAGONAL },
+ { "downmix", AV_CHANNEL_LAYOUT_STEREO_DOWNMIX, },
+ { "22.2", AV_CHANNEL_LAYOUT_22POINT2, },
+};
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
static uint64_t get_channel_layout_single(const char *name, int name_len)
{
int i;
@@ -115,7 +215,7 @@ static uint64_t get_channel_layout_single(const char *name, int name_len)
for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++) {
if (strlen(channel_layout_map[i].name) == name_len &&
!memcmp(channel_layout_map[i].name, name, name_len))
- return channel_layout_map[i].layout;
+ return channel_layout_map[i].layout.u.mask;
}
for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++)
if (channel_names[i].name &&
@@ -183,8 +283,8 @@ void av_bprint_channel_layout(struct AVBPrint *bp,
nb_channels = av_get_channel_layout_nb_channels(channel_layout);
for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++)
- if (nb_channels == channel_layout_map[i].nb_channels &&
- channel_layout == channel_layout_map[i].layout) {
+ if (nb_channels == channel_layout_map[i].layout.nb_channels &&
+ channel_layout == channel_layout_map[i].layout.u.mask) {
av_bprintf(bp, "%s", channel_layout_map[i].name);
return;
}
@@ -225,8 +325,8 @@ int av_get_channel_layout_nb_channels(uint64_t channel_layout)
int64_t av_get_default_channel_layout(int nb_channels) {
int i;
for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++)
- if (nb_channels == channel_layout_map[i].nb_channels)
- return channel_layout_map[i].layout;
+ if (nb_channels == channel_layout_map[i].layout.nb_channels)
+ return channel_layout_map[i].layout.u.mask;
return 0;
}
@@ -281,7 +381,626 @@ int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
{
if (index >= FF_ARRAY_ELEMS(channel_layout_map))
return AVERROR_EOF;
- if (layout) *layout = channel_layout_map[index].layout;
+ if (layout) *layout = channel_layout_map[index].layout.u.mask;
if (name) *name = channel_layout_map[index].name;
return 0;
}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+int av_channel_layout_from_mask(AVChannelLayout *channel_layout,
+ uint64_t mask)
+{
+ if (!mask)
+ return AVERROR(EINVAL);
+
+ channel_layout->order = AV_CHANNEL_ORDER_NATIVE;
+ channel_layout->nb_channels = av_popcount64(mask);
+ channel_layout->u.mask = mask;
+
+ return 0;
+}
+
+int av_channel_layout_from_string(AVChannelLayout *channel_layout,
+ const char *str)
+{
+ int i;
+ int channels = 0, nb_channels = 0, native = 1;
+ enum AVChannel highest_channel = AV_CHAN_NONE;
+ const char *dup;
+ char *chlist, *end;
+ uint64_t mask = 0;
+
+ /* channel layout names */
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++) {
+ if (channel_layout_map[i].name && !strcmp(str, channel_layout_map[i].name)) {
+ *channel_layout = channel_layout_map[i].layout;
+ return 0;
+ }
+ }
+
+ /* ambisonic */
+ if (!strncmp(str, "ambisonic ", 10)) {
+ const char *p = str + 10;
+ char *endptr;
+ AVChannelLayout extra = {0};
+ int order;
+
+ order = strtol(p, &endptr, 0);
+ if (order < 0 || order + 1 > INT_MAX / (order + 1) ||
+ (*endptr && *endptr != '+'))
+ return AVERROR(EINVAL);
+
+ channel_layout->order = AV_CHANNEL_ORDER_AMBISONIC;
+ channel_layout->nb_channels = (order + 1) * (order + 1);
+
+ if (*endptr) {
+ int ret = av_channel_layout_from_string(&extra, endptr + 1);
+ if (ret < 0)
+ return ret;
+ if (extra.nb_channels >= INT_MAX - channel_layout->nb_channels) {
+ av_channel_layout_uninit(&extra);
+ return AVERROR(EINVAL);
+ }
+
+ if (extra.order == AV_CHANNEL_ORDER_NATIVE) {
+ channel_layout->u.mask = extra.u.mask;
+ } else {
+ channel_layout->order = AV_CHANNEL_ORDER_CUSTOM;
+ channel_layout->u.map =
+ av_calloc(channel_layout->nb_channels + extra.nb_channels,
+ sizeof(*channel_layout->u.map));
+ if (!channel_layout->u.map) {
+ av_channel_layout_uninit(&extra);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < channel_layout->nb_channels; i++)
+ channel_layout->u.map[i].id = AV_CHAN_AMBISONIC_BASE + i;
+ for (i = 0; i < extra.nb_channels; i++) {
+ enum AVChannel ch = av_channel_layout_channel_from_index(&extra, i);
+ if (CHAN_IS_AMBI(ch)) {
+ av_channel_layout_uninit(&extra);
+ return AVERROR(EINVAL);
+ }
+ channel_layout->u.map[channel_layout->nb_channels + i].id = ch;
+ if (extra.order == AV_CHANNEL_ORDER_CUSTOM &&
+ extra.u.map[i].name[0])
+ av_strlcpy(channel_layout->u.map[channel_layout->nb_channels + i].name,
+ extra.u.map[i].name,
+ sizeof(channel_layout->u.map[channel_layout->nb_channels + i].name));
+ }
+ }
+ channel_layout->nb_channels += extra.nb_channels;
+ av_channel_layout_uninit(&extra);
+ }
+
+ return 0;
+ }
+
+ chlist = av_strdup(str);
+ if (!chlist)
+ return AVERROR(ENOMEM);
+
+ /* channel names */
+ av_sscanf(str, "%d channels (%[^)]", &nb_channels, chlist);
+ end = strchr(str, ')');
+
+ dup = chlist;
+ while (*dup) {
+ char *channel, *chname;
+ int ret = av_opt_get_key_value(&dup, "@", "+", AV_OPT_FLAG_IMPLICIT_KEY, &channel, &chname);
+ if (ret < 0) {
+ av_free(chlist);
+ return ret;
+ }
+ if (*dup)
+ dup++; // skip separator
+ if (channel && !*channel)
+ av_freep(&channel);
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++) {
+ if (channel_names[i].name && !strcmp(channel ? channel : chname, channel_names[i].name)) {
+ if (channel || i < highest_channel || mask & (1ULL << i))
+ native = 0; // Not a native layout, use a custom one
+ highest_channel = i;
+ mask |= 1ULL << i;
+ break;
+ }
+ }
+
+ if (!channel && i >= FF_ARRAY_ELEMS(channel_names)) {
+ char *endptr = chname;
+ enum AVChannel id = AV_CHAN_NONE;
+
+ if (!strncmp(chname, "USR", 3)) {
+ const char *p = chname + 3;
+ id = strtol(p, &endptr, 0);
+ }
+ if (id < 0 || *endptr) {
+ native = 0; // Unknown channel name
+ channels = 0;
+ mask = 0;
+ av_free(chname);
+ break;
+ }
+ if (id > 63)
+ native = 0; // Not a native layout, use a custom one
+ else {
+ if (id < highest_channel || mask & (1ULL << id))
+ native = 0; // Not a native layout, use a custom one
+ highest_channel = id;
+ mask |= 1ULL << id;
+ }
+ }
+ channels++;
+ av_free(channel);
+ av_free(chname);
+ }
+
+ if (mask && native) {
+ av_free(chlist);
+ if (nb_channels && ((nb_channels != channels) || (!end || *++end)))
+ return AVERROR(EINVAL);
+ av_channel_layout_from_mask(channel_layout, mask);
+ return 0;
+ }
+
+ /* custom layout of channel names */
+ if (channels && !native) {
+ int idx = 0;
+
+ if (nb_channels && ((nb_channels != channels) || (!end || *++end))) {
+ av_free(chlist);
+ return AVERROR(EINVAL);
+ }
+
+ channel_layout->u.map = av_calloc(channels, sizeof(*channel_layout->u.map));
+ if (!channel_layout->u.map) {
+ av_free(chlist);
+ return AVERROR(ENOMEM);
+ }
+
+ channel_layout->order = AV_CHANNEL_ORDER_CUSTOM;
+ channel_layout->nb_channels = channels;
+
+ dup = chlist;
+ while (*dup) {
+ char *channel, *chname;
+ int ret = av_opt_get_key_value(&dup, "@", "+", AV_OPT_FLAG_IMPLICIT_KEY, &channel, &chname);
+ if (ret < 0) {
+ av_freep(&channel_layout->u.map);
+ av_free(chlist);
+ return ret;
+ }
+ if (*dup)
+ dup++; // skip separator
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_names); i++) {
+ if (channel_names[i].name && !strcmp(channel ? channel : chname, channel_names[i].name)) {
+ channel_layout->u.map[idx].id = i;
+ if (channel)
+ av_strlcpy(channel_layout->u.map[idx].name, chname, sizeof(channel_layout->u.map[idx].name));
+ idx++;
+ break;
+ }
+ }
+ if (i >= FF_ARRAY_ELEMS(channel_names)) {
+ const char *p = (channel ? channel : chname) + 3;
+ channel_layout->u.map[idx].id = strtol(p, NULL, 0);
+ if (channel)
+ av_strlcpy(channel_layout->u.map[idx].name, chname, sizeof(channel_layout->u.map[idx].name));
+ idx++;
+ }
+ av_free(channel);
+ av_free(chname);
+ }
+ av_free(chlist);
+
+ return 0;
+ }
+ av_freep(&chlist);
+
+ errno = 0;
+ mask = strtoull(str, &end, 0);
+
+ /* channel layout mask */
+ if (!errno && !*end && !strchr(str, '-') && mask) {
+ av_channel_layout_from_mask(channel_layout, mask);
+ return 0;
+ }
+
+ errno = 0;
+ channels = strtol(str, &end, 10);
+
+ /* number of channels */
+ if (!errno && !strcmp(end, "c") && channels > 0) {
+ av_channel_layout_default(channel_layout, channels);
+ if (channel_layout->order == AV_CHANNEL_ORDER_NATIVE)
+ return 0;
+ }
+
+ /* number of unordered channels */
+ if (!errno && (!strcmp(end, "C") || !strcmp(end, " channels"))
+ && channels > 0) {
+ channel_layout->order = AV_CHANNEL_ORDER_UNSPEC;
+ channel_layout->nb_channels = channels;
+ return 0;
+ }
+
+ return AVERROR(EINVAL);
+}
+
+void av_channel_layout_uninit(AVChannelLayout *channel_layout)
+{
+ if (channel_layout->order == AV_CHANNEL_ORDER_CUSTOM)
+ av_freep(&channel_layout->u.map);
+ memset(channel_layout, 0, sizeof(*channel_layout));
+}
+
+int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
+{
+ av_channel_layout_uninit(dst);
+ *dst = *src;
+ if (src->order == AV_CHANNEL_ORDER_CUSTOM) {
+ dst->u.map = av_malloc_array(src->nb_channels, sizeof(*dst->u.map));
+ if (!dst->u.map)
+ return AVERROR(ENOMEM);
+ memcpy(dst->u.map, src->u.map, src->nb_channels * sizeof(*src->u.map));
+ }
+ return 0;
+}
+
+/**
+ * If the layout is n-th order standard-order ambisonic, with optional
+ * extra non-diegetic channels at the end, return the order.
+ * Return a negative error code otherwise.
+ */
+static int ambisonic_order(const AVChannelLayout *channel_layout)
+{
+ int i, highest_ambi, order;
+
+ highest_ambi = -1;
+ if (channel_layout->order == AV_CHANNEL_ORDER_AMBISONIC)
+ highest_ambi = channel_layout->nb_channels - av_popcount64(channel_layout->u.mask) - 1;
+ else {
+ const AVChannelCustom *map = channel_layout->u.map;
+ av_assert0(channel_layout->order == AV_CHANNEL_ORDER_CUSTOM);
+
+ for (i = 0; i < channel_layout->nb_channels; i++) {
+ int is_ambi = CHAN_IS_AMBI(map[i].id);
+
+ /* ambisonic following non-ambisonic */
+ if (i > 0 && is_ambi && !CHAN_IS_AMBI(map[i - 1].id))
+ return AVERROR(EINVAL);
+
+ /* non-default ordering */
+ if (is_ambi && map[i].id - AV_CHAN_AMBISONIC_BASE != i)
+ return AVERROR(EINVAL);
+
+ if (CHAN_IS_AMBI(map[i].id))
+ highest_ambi = i;
+ }
+ }
+ /* no ambisonic channels */
+ if (highest_ambi < 0)
+ return AVERROR(EINVAL);
+
+ order = floor(sqrt(highest_ambi));
+ /* incomplete order - some harmonics are missing */
+ if ((order + 1) * (order + 1) != highest_ambi + 1)
+ return AVERROR(EINVAL);
+
+ return order;
+}
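As a quick worked example of this check (illustrative only): a first-order stream has its highest ambisonic channel at index 3, so order = floor(sqrt(3)) = 1 and (1 + 1)^2 = 4 = 3 + 1, which is accepted; a stream whose highest ambisonic index is 5 gives order = floor(sqrt(5)) = 2, but (2 + 1)^2 = 9 != 6, so harmonics are missing and AVERROR(EINVAL) is returned.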
+
+/**
+ * If the custom layout is n-th order standard-order ambisonic, with optional
+ * extra non-diegetic channels at the end, write its string description in bp.
+ * Return a negative error code otherwise.
+ */
+static int try_describe_ambisonic(AVBPrint *bp, const AVChannelLayout *channel_layout)
+{
+ int nb_ambi_channels;
+ int order = ambisonic_order(channel_layout);
+ if (order < 0)
+ return order;
+
+ av_bprintf(bp, "ambisonic %d", order);
+
+ /* extra channels present */
+ nb_ambi_channels = (order + 1) * (order + 1);
+ if (nb_ambi_channels < channel_layout->nb_channels) {
+ AVChannelLayout extra = { 0 };
+
+ if (channel_layout->order == AV_CHANNEL_ORDER_AMBISONIC) {
+ extra.order = AV_CHANNEL_ORDER_NATIVE;
+ extra.nb_channels = av_popcount64(channel_layout->u.mask);
+ extra.u.mask = channel_layout->u.mask;
+ } else {
+ extra.order = AV_CHANNEL_ORDER_CUSTOM;
+ extra.nb_channels = channel_layout->nb_channels - nb_ambi_channels;
+ extra.u.map = channel_layout->u.map + nb_ambi_channels;
+ }
+
+ av_bprint_chars(bp, '+', 1);
+ av_channel_layout_describe_bprint(&extra, bp);
+ /* Not calling uninit here on extra because we don't own the u.map pointer */
+ }
+
+ return 0;
+}
+
+int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout,
+ AVBPrint *bp)
+{
+ int i;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_NATIVE:
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++)
+ if (channel_layout->u.mask == channel_layout_map[i].layout.u.mask) {
+ av_bprintf(bp, "%s", channel_layout_map[i].name);
+ return 0;
+ }
+ // fall-through
+ case AV_CHANNEL_ORDER_CUSTOM:
+ if (channel_layout->order == AV_CHANNEL_ORDER_CUSTOM) {
+ int res = try_describe_ambisonic(bp, channel_layout);
+ if (res >= 0)
+ return 0;
+ }
+ if (channel_layout->nb_channels)
+ av_bprintf(bp, "%d channels (", channel_layout->nb_channels);
+ for (i = 0; i < channel_layout->nb_channels; i++) {
+ enum AVChannel ch = av_channel_layout_channel_from_index(channel_layout, i);
+
+ if (i)
+ av_bprintf(bp, "+");
+ av_channel_name_bprint(bp, ch);
+ if (channel_layout->order == AV_CHANNEL_ORDER_CUSTOM &&
+ channel_layout->u.map[i].name[0])
+ av_bprintf(bp, "@%s", channel_layout->u.map[i].name);
+ }
+ if (channel_layout->nb_channels) {
+ av_bprintf(bp, ")");
+ return 0;
+ }
+ // fall-through
+ case AV_CHANNEL_ORDER_UNSPEC:
+ av_bprintf(bp, "%d channels", channel_layout->nb_channels);
+ return 0;
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ return try_describe_ambisonic(bp, channel_layout);
+ default:
+ return AVERROR(EINVAL);
+ }
+}
+
+int av_channel_layout_describe(const AVChannelLayout *channel_layout,
+ char *buf, size_t buf_size)
+{
+ AVBPrint bp;
+ int ret;
+
+ if (!buf && buf_size)
+ return AVERROR(EINVAL);
+
+ av_bprint_init_for_buffer(&bp, buf, buf_size);
+ ret = av_channel_layout_describe_bprint(channel_layout, &bp);
+ if (ret < 0)
+ return ret;
+
+ return bp.len;
+}
+
+enum AVChannel
+av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout,
+ unsigned int idx)
+{
+ int i;
+
+ if (idx >= channel_layout->nb_channels)
+ return AV_CHAN_NONE;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_CUSTOM:
+ return channel_layout->u.map[idx].id;
+ case AV_CHANNEL_ORDER_AMBISONIC: {
+ int ambi_channels = channel_layout->nb_channels - av_popcount64(channel_layout->u.mask);
+ if (idx < ambi_channels)
+ return AV_CHAN_AMBISONIC_BASE + idx;
+ idx -= ambi_channels;
+ }
+ // fall-through
+ case AV_CHANNEL_ORDER_NATIVE:
+ for (i = 0; i < 64; i++) {
+ if ((1ULL << i) & channel_layout->u.mask && !idx--)
+ return i;
+ }
+ default:
+ return AV_CHAN_NONE;
+ }
+}
+
+enum AVChannel
+av_channel_layout_channel_from_string(const AVChannelLayout *channel_layout,
+ const char *str)
+{
+ int index = av_channel_layout_index_from_string(channel_layout, str);
+
+ if (index < 0)
+ return AV_CHAN_NONE;
+
+ return av_channel_layout_channel_from_index(channel_layout, index);
+}
+
+int av_channel_layout_index_from_channel(const AVChannelLayout *channel_layout,
+ enum AVChannel channel)
+{
+ int i;
+
+ if (channel == AV_CHAN_NONE)
+ return AVERROR(EINVAL);
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_CUSTOM:
+ for (i = 0; i < channel_layout->nb_channels; i++)
+ if (channel_layout->u.map[i].id == channel)
+ return i;
+ return AVERROR(EINVAL);
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ case AV_CHANNEL_ORDER_NATIVE: {
+ uint64_t mask = channel_layout->u.mask;
+ int ambi_channels = channel_layout->nb_channels - av_popcount64(mask);
+ if (channel_layout->order == AV_CHANNEL_ORDER_AMBISONIC &&
+ channel >= AV_CHAN_AMBISONIC_BASE) {
+ if (channel - AV_CHAN_AMBISONIC_BASE >= ambi_channels)
+ return AVERROR(EINVAL);
+ return channel - AV_CHAN_AMBISONIC_BASE;
+ }
+ if ((unsigned)channel > 63 || !(mask & (1ULL << channel)))
+ return AVERROR(EINVAL);
+ mask &= (1ULL << channel) - 1;
+ return av_popcount64(mask) + ambi_channels;
+ }
+ default:
+ return AVERROR(EINVAL);
+ }
+}
+
+int av_channel_layout_index_from_string(const AVChannelLayout *channel_layout,
+ const char *str)
+{
+ char *chname;
+ enum AVChannel ch = AV_CHAN_NONE;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_CUSTOM:
+ chname = strstr(str, "@");
+ if (chname) {
+ char buf[16];
+ chname++;
+ av_strlcpy(buf, str, FFMIN(sizeof(buf), chname - str));
+ if (!*chname)
+ chname = NULL;
+ ch = av_channel_from_string(buf);
+ if (ch == AV_CHAN_NONE && *buf)
+ return AVERROR(EINVAL);
+ }
+ for (int i = 0; chname && i < channel_layout->nb_channels; i++) {
+ if (!strcmp(chname, channel_layout->u.map[i].name) &&
+ (ch == AV_CHAN_NONE || ch == channel_layout->u.map[i].id))
+ return i;
+ }
+ // fall-through
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ case AV_CHANNEL_ORDER_NATIVE:
+ ch = av_channel_from_string(str);
+ if (ch == AV_CHAN_NONE)
+ return AVERROR(EINVAL);
+ return av_channel_layout_index_from_channel(channel_layout, ch);
+ }
+
+ return AVERROR(EINVAL);
+}
+
+int av_channel_layout_check(const AVChannelLayout *channel_layout)
+{
+ if (channel_layout->nb_channels <= 0)
+ return 0;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_NATIVE:
+ return av_popcount64(channel_layout->u.mask) == channel_layout->nb_channels;
+ case AV_CHANNEL_ORDER_CUSTOM:
+ if (!channel_layout->u.map)
+ return 0;
+ for (int i = 0; i < channel_layout->nb_channels; i++) {
+ if (channel_layout->u.map[i].id == AV_CHAN_NONE)
+ return 0;
+ }
+ return 1;
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ /* If non-diegetic channels are present, ensure they are taken into account */
+ return av_popcount64(channel_layout->u.mask) < channel_layout->nb_channels;
+ case AV_CHANNEL_ORDER_UNSPEC:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
+{
+ int i;
+
+ /* different channel counts -> not equal */
+ if (chl->nb_channels != chl1->nb_channels)
+ return 1;
+
+ /* if only one is unspecified -> not equal */
+ if ((chl->order == AV_CHANNEL_ORDER_UNSPEC) !=
+ (chl1->order == AV_CHANNEL_ORDER_UNSPEC))
+ return 1;
+ /* both are unspecified -> equal */
+ else if (chl->order == AV_CHANNEL_ORDER_UNSPEC)
+ return 0;
+
+ /* can compare masks directly */
+ if ((chl->order == AV_CHANNEL_ORDER_NATIVE ||
+ chl->order == AV_CHANNEL_ORDER_AMBISONIC) &&
+ chl->order == chl1->order)
+ return chl->u.mask != chl1->u.mask;
+
+ /* compare channel by channel */
+ for (i = 0; i < chl->nb_channels; i++)
+ if (av_channel_layout_channel_from_index(chl, i) !=
+ av_channel_layout_channel_from_index(chl1, i))
+ return 1;
+ return 0;
+}
+
+void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(channel_layout_map); i++)
+ if (nb_channels == channel_layout_map[i].layout.nb_channels) {
+ *ch_layout = channel_layout_map[i].layout;
+ return;
+ }
+
+ ch_layout->order = AV_CHANNEL_ORDER_UNSPEC;
+ ch_layout->nb_channels = nb_channels;
+}
+
+const AVChannelLayout *av_channel_layout_standard(void **opaque)
+{
+ uintptr_t i = (uintptr_t)*opaque;
+ const AVChannelLayout *ch_layout = NULL;
+
+ if (i < FF_ARRAY_ELEMS(channel_layout_map)) {
+ ch_layout = &channel_layout_map[i].layout;
+ *opaque = (void*)(i + 1);
+ }
+
+ return ch_layout;
+}
+
+uint64_t av_channel_layout_subset(const AVChannelLayout *channel_layout,
+ uint64_t mask)
+{
+ uint64_t ret = 0;
+ int i;
+
+ switch (channel_layout->order) {
+ case AV_CHANNEL_ORDER_NATIVE:
+ case AV_CHANNEL_ORDER_AMBISONIC:
+ return channel_layout->u.mask & mask;
+ case AV_CHANNEL_ORDER_CUSTOM:
+ for (i = 0; i < 64; i++)
+ if (mask & (1ULL << i) && av_channel_layout_index_from_channel(channel_layout, i) >= 0)
+ ret |= (1ULL << i);
+ break;
+ }
+
+ return ret;
+}
diff --git a/media/ffvpx/libavutil/channel_layout.h b/media/ffvpx/libavutil/channel_layout.h
index 50bb8f03c5..f345415c55 100644
--- a/media/ffvpx/libavutil/channel_layout.h
+++ b/media/ffvpx/libavutil/channel_layout.h
@@ -23,17 +23,132 @@
#define AVUTIL_CHANNEL_LAYOUT_H
#include <stdint.h>
+#include <stdlib.h>
+
+#include "version.h"
+#include "attributes.h"
/**
* @file
- * audio channel layout utility functions
+ * @ingroup lavu_audio_channels
+ * Public libavutil channel layout APIs header.
*/
+
/**
- * @addtogroup lavu_audio
+ * @defgroup lavu_audio_channels Audio channels
+ * @ingroup lavu_audio
+ *
+ * Audio channel layout utility functions
+ *
* @{
*/
+enum AVChannel {
+ /** Invalid channel index. */
+ AV_CHAN_NONE = -1,
+ AV_CHAN_FRONT_LEFT,
+ AV_CHAN_FRONT_RIGHT,
+ AV_CHAN_FRONT_CENTER,
+ AV_CHAN_LOW_FREQUENCY,
+ AV_CHAN_BACK_LEFT,
+ AV_CHAN_BACK_RIGHT,
+ AV_CHAN_FRONT_LEFT_OF_CENTER,
+ AV_CHAN_FRONT_RIGHT_OF_CENTER,
+ AV_CHAN_BACK_CENTER,
+ AV_CHAN_SIDE_LEFT,
+ AV_CHAN_SIDE_RIGHT,
+ AV_CHAN_TOP_CENTER,
+ AV_CHAN_TOP_FRONT_LEFT,
+ AV_CHAN_TOP_FRONT_CENTER,
+ AV_CHAN_TOP_FRONT_RIGHT,
+ AV_CHAN_TOP_BACK_LEFT,
+ AV_CHAN_TOP_BACK_CENTER,
+ AV_CHAN_TOP_BACK_RIGHT,
+ /** Stereo downmix. */
+ AV_CHAN_STEREO_LEFT = 29,
+ /** See above. */
+ AV_CHAN_STEREO_RIGHT,
+ AV_CHAN_WIDE_LEFT,
+ AV_CHAN_WIDE_RIGHT,
+ AV_CHAN_SURROUND_DIRECT_LEFT,
+ AV_CHAN_SURROUND_DIRECT_RIGHT,
+ AV_CHAN_LOW_FREQUENCY_2,
+ AV_CHAN_TOP_SIDE_LEFT,
+ AV_CHAN_TOP_SIDE_RIGHT,
+ AV_CHAN_BOTTOM_FRONT_CENTER,
+ AV_CHAN_BOTTOM_FRONT_LEFT,
+ AV_CHAN_BOTTOM_FRONT_RIGHT,
+
+ /** Channel is empty and can be safely skipped. */
+ AV_CHAN_UNUSED = 0x200,
+
+ /** Channel contains data, but its position is unknown. */
+ AV_CHAN_UNKNOWN = 0x300,
+
+ /**
+ * Channels in the range between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END represent Ambisonic components using the ACN system.
+ *
+ * Given a channel id `<i>` between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END (inclusive), the ACN index of the channel `<n>` is
+ * `<n> = <i> - AV_CHAN_AMBISONIC_BASE`.
+ *
+ * @note these values are only used for AV_CHANNEL_ORDER_CUSTOM channel
+ * orderings; the AV_CHANNEL_ORDER_AMBISONIC ordering orders the channels
+ * implicitly by their position in the stream.
+ */
+ AV_CHAN_AMBISONIC_BASE = 0x400,
+ // leave space for 1024 ids, which correspond to maximum order-32 harmonics,
+ // which should be enough for the foreseeable use cases
+ AV_CHAN_AMBISONIC_END = 0x7ff,
+};
+
+enum AVChannelOrder {
+ /**
+ * Only the channel count is specified, without any further information
+ * about the channel order.
+ */
+ AV_CHANNEL_ORDER_UNSPEC,
+ /**
+ * The native channel order, i.e. the channels are in the same order in
+ * which they are defined in the AVChannel enum. This supports up to 63
+ * different channels.
+ */
+ AV_CHANNEL_ORDER_NATIVE,
+ /**
+ * The channel order does not correspond to any other predefined order and
+ * is stored as an explicit map. For example, this could be used to support
+ * layouts with 64 or more channels, or with empty/skipped (AV_CHAN_UNUSED)
+ * channels at arbitrary positions.
+ */
+ AV_CHANNEL_ORDER_CUSTOM,
+ /**
+ * The audio is represented as the decomposition of the sound field into
+ * spherical harmonics. Each channel corresponds to a single expansion
+ * component. Channels are ordered according to ACN (Ambisonic Channel
+ * Number).
+ *
+ * The channel with the index n in the stream contains the spherical
+ * harmonic of degree l and order m given by
+ * @code{.unparsed}
+ * l = floor(sqrt(n)),
+ * m = n - l * (l + 1).
+ * @endcode
+ *
+ * Conversely given a spherical harmonic of degree l and order m, the
+ * corresponding channel index n is given by
+ * @code{.unparsed}
+ * n = l * (l + 1) + m.
+ * @endcode
+ *
+ * Normalization is assumed to be SN3D (Schmidt Semi-Normalization)
+ * as defined in AmbiX format, section 2.1.
+ */
+ AV_CHANNEL_ORDER_AMBISONIC,
+};
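A minimal sketch of the ACN mapping documented above (illustrative only; the helper names are invented for this example and are not part of the header):

#include <math.h>

/* ACN index n for the spherical harmonic of degree l and order m. */
static inline int acn_from_degree_order(int l, int m)
{
    return l * (l + 1) + m;
}

/* Recover degree l and order m from an ACN index n. */
static inline void degree_order_from_acn(int n, int *l, int *m)
{
    *l = (int)floor(sqrt(n));
    *m = n - *l * (*l + 1);
}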
+
+
/**
* @defgroup channel_masks Audio channel masks
*
@@ -46,36 +161,46 @@
*
* @{
*/
-#define AV_CH_FRONT_LEFT 0x00000001
-#define AV_CH_FRONT_RIGHT 0x00000002
-#define AV_CH_FRONT_CENTER 0x00000004
-#define AV_CH_LOW_FREQUENCY 0x00000008
-#define AV_CH_BACK_LEFT 0x00000010
-#define AV_CH_BACK_RIGHT 0x00000020
-#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040
-#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080
-#define AV_CH_BACK_CENTER 0x00000100
-#define AV_CH_SIDE_LEFT 0x00000200
-#define AV_CH_SIDE_RIGHT 0x00000400
-#define AV_CH_TOP_CENTER 0x00000800
-#define AV_CH_TOP_FRONT_LEFT 0x00001000
-#define AV_CH_TOP_FRONT_CENTER 0x00002000
-#define AV_CH_TOP_FRONT_RIGHT 0x00004000
-#define AV_CH_TOP_BACK_LEFT 0x00008000
-#define AV_CH_TOP_BACK_CENTER 0x00010000
-#define AV_CH_TOP_BACK_RIGHT 0x00020000
-#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
-#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.
-#define AV_CH_WIDE_LEFT 0x0000000080000000ULL
-#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL
-#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL
-#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL
-#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL
+#define AV_CH_FRONT_LEFT (1ULL << AV_CHAN_FRONT_LEFT )
+#define AV_CH_FRONT_RIGHT (1ULL << AV_CHAN_FRONT_RIGHT )
+#define AV_CH_FRONT_CENTER (1ULL << AV_CHAN_FRONT_CENTER )
+#define AV_CH_LOW_FREQUENCY (1ULL << AV_CHAN_LOW_FREQUENCY )
+#define AV_CH_BACK_LEFT (1ULL << AV_CHAN_BACK_LEFT )
+#define AV_CH_BACK_RIGHT (1ULL << AV_CHAN_BACK_RIGHT )
+#define AV_CH_FRONT_LEFT_OF_CENTER (1ULL << AV_CHAN_FRONT_LEFT_OF_CENTER )
+#define AV_CH_FRONT_RIGHT_OF_CENTER (1ULL << AV_CHAN_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_BACK_CENTER (1ULL << AV_CHAN_BACK_CENTER )
+#define AV_CH_SIDE_LEFT (1ULL << AV_CHAN_SIDE_LEFT )
+#define AV_CH_SIDE_RIGHT (1ULL << AV_CHAN_SIDE_RIGHT )
+#define AV_CH_TOP_CENTER (1ULL << AV_CHAN_TOP_CENTER )
+#define AV_CH_TOP_FRONT_LEFT (1ULL << AV_CHAN_TOP_FRONT_LEFT )
+#define AV_CH_TOP_FRONT_CENTER (1ULL << AV_CHAN_TOP_FRONT_CENTER )
+#define AV_CH_TOP_FRONT_RIGHT (1ULL << AV_CHAN_TOP_FRONT_RIGHT )
+#define AV_CH_TOP_BACK_LEFT (1ULL << AV_CHAN_TOP_BACK_LEFT )
+#define AV_CH_TOP_BACK_CENTER (1ULL << AV_CHAN_TOP_BACK_CENTER )
+#define AV_CH_TOP_BACK_RIGHT (1ULL << AV_CHAN_TOP_BACK_RIGHT )
+#define AV_CH_STEREO_LEFT (1ULL << AV_CHAN_STEREO_LEFT )
+#define AV_CH_STEREO_RIGHT (1ULL << AV_CHAN_STEREO_RIGHT )
+#define AV_CH_WIDE_LEFT (1ULL << AV_CHAN_WIDE_LEFT )
+#define AV_CH_WIDE_RIGHT (1ULL << AV_CHAN_WIDE_RIGHT )
+#define AV_CH_SURROUND_DIRECT_LEFT (1ULL << AV_CHAN_SURROUND_DIRECT_LEFT )
+#define AV_CH_SURROUND_DIRECT_RIGHT (1ULL << AV_CHAN_SURROUND_DIRECT_RIGHT)
+#define AV_CH_LOW_FREQUENCY_2 (1ULL << AV_CHAN_LOW_FREQUENCY_2 )
+#define AV_CH_TOP_SIDE_LEFT (1ULL << AV_CHAN_TOP_SIDE_LEFT )
+#define AV_CH_TOP_SIDE_RIGHT (1ULL << AV_CHAN_TOP_SIDE_RIGHT )
+#define AV_CH_BOTTOM_FRONT_CENTER (1ULL << AV_CHAN_BOTTOM_FRONT_CENTER )
+#define AV_CH_BOTTOM_FRONT_LEFT (1ULL << AV_CHAN_BOTTOM_FRONT_LEFT )
+#define AV_CH_BOTTOM_FRONT_RIGHT (1ULL << AV_CHAN_BOTTOM_FRONT_RIGHT )
+#if FF_API_OLD_CHANNEL_LAYOUT
/** Channel mask value used for AVCodecContext.request_channel_layout
to indicate that the user requests the channel order of the decoder output
- to be the native codec channel order. */
+ to be the native codec channel order.
+ @deprecated channel order is now indicated in a special field in
+ AVChannelLayout
+ */
#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
+#endif
/**
* @}
@@ -107,9 +232,12 @@
#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_TOP_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_CUBE (AV_CH_LAYOUT_QUAD|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT)
#define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
+#define AV_CH_LAYOUT_22POINT2 (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER|AV_CH_BACK_CENTER|AV_CH_LOW_FREQUENCY_2|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_CENTER|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_SIDE_LEFT|AV_CH_TOP_SIDE_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_BOTTOM_FRONT_CENTER|AV_CH_BOTTOM_FRONT_LEFT|AV_CH_BOTTOM_FRONT_RIGHT)
enum AVMatrixEncoding {
AV_MATRIX_ENCODING_NONE,
@@ -123,6 +251,164 @@ enum AVMatrixEncoding {
};
/**
+ * @}
+ */
+
+/**
+ * An AVChannelCustom defines a single channel within a custom order layout.
+ *
+ * Unlike most structures in FFmpeg, sizeof(AVChannelCustom) is a part of the
+ * public ABI.
+ *
+ * No new fields may be added to it without a major version bump.
+ */
+typedef struct AVChannelCustom {
+ enum AVChannel id;
+ char name[16];
+ void *opaque;
+} AVChannelCustom;
+
+/**
+ * An AVChannelLayout holds information about the channel layout of audio data.
+ *
+ * A channel layout here is defined as a set of channels ordered in a specific
+ * way (unless the channel order is AV_CHANNEL_ORDER_UNSPEC, in which case an
+ * AVChannelLayout carries only the channel count).
+ * All orders may be treated as if they were AV_CHANNEL_ORDER_UNSPEC by
+ * ignoring everything but the channel count, as long as av_channel_layout_check()
+ * considers them valid.
+ *
+ * Unlike most structures in FFmpeg, sizeof(AVChannelLayout) is a part of the
+ * public ABI and may be used by the caller. E.g. it may be allocated on stack
+ * or embedded in caller-defined structs.
+ *
+ * AVChannelLayout can be initialized as follows:
+ * - default initialization with {0}, followed by setting all used fields
+ * correctly;
+ * - by assigning one of the predefined AV_CHANNEL_LAYOUT_* initializers;
+ * - with a constructor function, such as av_channel_layout_default(),
+ * av_channel_layout_from_mask() or av_channel_layout_from_string().
+ *
+ * The channel layout must be uninitialized with av_channel_layout_uninit().
+ *
+ * Copying an AVChannelLayout via assignment is forbidden;
+ * av_channel_layout_copy() must be used instead (and its return value should
+ * be checked).
+ *
+ * No new fields may be added to it without a major version bump, except for
+ * new elements of the union fitting in sizeof(uint64_t).
+ */
+typedef struct AVChannelLayout {
+ /**
+ * Channel order used in this layout.
+ * This is a mandatory field.
+ */
+ enum AVChannelOrder order;
+
+ /**
+ * Number of channels in this layout. Mandatory field.
+ */
+ int nb_channels;
+
+ /**
+ * Details about which channels are present in this layout.
+ * For AV_CHANNEL_ORDER_UNSPEC, this field is undefined and must not be
+ * used.
+ */
+ union {
+ /**
+ * This member must be used for AV_CHANNEL_ORDER_NATIVE, and may be used
+ * for AV_CHANNEL_ORDER_AMBISONIC to signal non-diegetic channels.
+ * It is a bitmask, where the position of each set bit means that the
+ * AVChannel with the corresponding value is present.
+ *
+ * I.e. when (mask & (1ULL << AV_CHAN_FOO)) is non-zero, then AV_CHAN_FOO
+ * is present in the layout. Otherwise it is not present.
+ *
+ * @note when a channel layout using a bitmask is constructed or
+ * modified manually (i.e. not using any of the av_channel_layout_*
+ * functions), the code doing it must ensure that the number of set bits
+ * is equal to nb_channels.
+ */
+ uint64_t mask;
+ /**
+ * This member must be used when the channel order is
+ * AV_CHANNEL_ORDER_CUSTOM. It is a nb_channels-sized array, with each
+ * element signalling the presence of the AVChannel with the
+ * corresponding value in map[i].id.
+ *
+ * I.e. when map[i].id is equal to AV_CHAN_FOO, then AV_CH_FOO is the
+ * i-th channel in the audio data.
+ *
+ * When map[i].id is in the range between AV_CHAN_AMBISONIC_BASE and
+ * AV_CHAN_AMBISONIC_END (inclusive), the channel contains an ambisonic
+ * component with ACN index (as defined above)
+ * n = map[i].id - AV_CHAN_AMBISONIC_BASE.
+ *
+ * map[i].name may be filled with a 0-terminated string, in which case
+ * it will be used for the purpose of identifying the channel with the
+ * convenience functions below. Otherwise it must be zeroed.
+ */
+ AVChannelCustom *map;
+ } u;
+
+ /**
+ * For some private data of the user.
+ */
+ void *opaque;
+} AVChannelLayout;
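A minimal lifecycle sketch following the rules above (illustrative only, not part of this header):

#include <libavutil/channel_layout.h>

static int copy_stereo_layout(AVChannelLayout *copy)
{
    AVChannelLayout layout = { 0 };              /* default initialization on the stack */
    int ret;

    av_channel_layout_from_mask(&layout, AV_CH_LAYOUT_STEREO); /* constructor function */
    ret = av_channel_layout_copy(copy, &layout); /* never copy by plain assignment */
    av_channel_layout_uninit(&layout);           /* always uninit when done */
    return ret;                                  /* 0 on success or AVERROR(ENOMEM) */
}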
+
+#define AV_CHANNEL_LAYOUT_MASK(nb, m) \
+ { .order = AV_CHANNEL_ORDER_NATIVE, .nb_channels = (nb), .u = { .mask = (m) }}
+
+/**
+ * @name Common pre-defined channel layouts
+ * @{
+ */
+#define AV_CHANNEL_LAYOUT_MONO AV_CHANNEL_LAYOUT_MASK(1, AV_CH_LAYOUT_MONO)
+#define AV_CHANNEL_LAYOUT_STEREO AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO)
+#define AV_CHANNEL_LAYOUT_2POINT1 AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2POINT1)
+#define AV_CHANNEL_LAYOUT_2_1 AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_2_1)
+#define AV_CHANNEL_LAYOUT_SURROUND AV_CHANNEL_LAYOUT_MASK(3, AV_CH_LAYOUT_SURROUND)
+#define AV_CHANNEL_LAYOUT_3POINT1 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_3POINT1)
+#define AV_CHANNEL_LAYOUT_4POINT0 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_4POINT0)
+#define AV_CHANNEL_LAYOUT_4POINT1 AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_4POINT1)
+#define AV_CHANNEL_LAYOUT_2_2 AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_2_2)
+#define AV_CHANNEL_LAYOUT_QUAD AV_CHANNEL_LAYOUT_MASK(4, AV_CH_LAYOUT_QUAD)
+#define AV_CHANNEL_LAYOUT_5POINT0 AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0)
+#define AV_CHANNEL_LAYOUT_5POINT1 AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1)
+#define AV_CHANNEL_LAYOUT_5POINT0_BACK AV_CHANNEL_LAYOUT_MASK(5, AV_CH_LAYOUT_5POINT0_BACK)
+#define AV_CHANNEL_LAYOUT_5POINT1_BACK AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_5POINT1_BACK)
+#define AV_CHANNEL_LAYOUT_6POINT0 AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0)
+#define AV_CHANNEL_LAYOUT_6POINT0_FRONT AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_6POINT0_FRONT)
+#define AV_CHANNEL_LAYOUT_HEXAGONAL AV_CHANNEL_LAYOUT_MASK(6, AV_CH_LAYOUT_HEXAGONAL)
+#define AV_CHANNEL_LAYOUT_6POINT1 AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1)
+#define AV_CHANNEL_LAYOUT_6POINT1_BACK AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_BACK)
+#define AV_CHANNEL_LAYOUT_6POINT1_FRONT AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_6POINT1_FRONT)
+#define AV_CHANNEL_LAYOUT_7POINT0 AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0)
+#define AV_CHANNEL_LAYOUT_7POINT0_FRONT AV_CHANNEL_LAYOUT_MASK(7, AV_CH_LAYOUT_7POINT0_FRONT)
+#define AV_CHANNEL_LAYOUT_7POINT1 AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1)
+#define AV_CHANNEL_LAYOUT_7POINT1_WIDE AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE)
+#define AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_WIDE_BACK)
+#define AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_7POINT1_TOP_BACK)
+#define AV_CHANNEL_LAYOUT_OCTAGONAL AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_OCTAGONAL)
+#define AV_CHANNEL_LAYOUT_CUBE AV_CHANNEL_LAYOUT_MASK(8, AV_CH_LAYOUT_CUBE)
+#define AV_CHANNEL_LAYOUT_HEXADECAGONAL AV_CHANNEL_LAYOUT_MASK(16, AV_CH_LAYOUT_HEXADECAGONAL)
+#define AV_CHANNEL_LAYOUT_STEREO_DOWNMIX AV_CHANNEL_LAYOUT_MASK(2, AV_CH_LAYOUT_STEREO_DOWNMIX)
+#define AV_CHANNEL_LAYOUT_22POINT2 AV_CHANNEL_LAYOUT_MASK(24, AV_CH_LAYOUT_22POINT2)
+#define AV_CHANNEL_LAYOUT_AMBISONIC_FIRST_ORDER \
+ { .order = AV_CHANNEL_ORDER_AMBISONIC, .nb_channels = 4, .u = { .mask = 0 }}
+/** @} */
+
+struct AVBPrint;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+/**
+ * @name Deprecated Functions
+ * @{
+ */
+
+/**
* Return a channel layout id that matches name, or 0 if no match is found.
*
* name can be one or several of the following notations,
@@ -138,7 +424,10 @@ enum AVMatrixEncoding {
* AV_CH_* macros).
*
* Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7"
+ *
+ * @deprecated use av_channel_layout_from_string()
*/
+attribute_deprecated
uint64_t av_get_channel_layout(const char *name);
/**
@@ -152,7 +441,9 @@ uint64_t av_get_channel_layout(const char *name);
* @param[out] nb_channels number of channels
*
* @return 0 on success, AVERROR(EINVAL) if the parsing fails.
+ * @deprecated use av_channel_layout_from_string()
*/
+attribute_deprecated
int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels);
/**
@@ -161,47 +452,66 @@ int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, i
*
* @param buf put here the string containing the channel layout
* @param buf_size size in bytes of the buffer
+ * @param nb_channels number of channels
+ * @param channel_layout channel layout bitset
+ * @deprecated use av_channel_layout_describe()
*/
+attribute_deprecated
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
-struct AVBPrint;
/**
* Append a description of a channel layout to a bprint buffer.
+ * @deprecated use av_channel_layout_describe()
*/
+attribute_deprecated
void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout);
/**
* Return the number of channels in the channel layout.
+ * @deprecated use AVChannelLayout.nb_channels
*/
+attribute_deprecated
int av_get_channel_layout_nb_channels(uint64_t channel_layout);
/**
* Return default channel layout for a given number of channels.
+ *
+ * @deprecated use av_channel_layout_default()
*/
+attribute_deprecated
int64_t av_get_default_channel_layout(int nb_channels);
/**
* Get the index of a channel in channel_layout.
*
+ * @param channel_layout channel layout bitset
* @param channel a channel layout describing exactly one channel which must be
* present in channel_layout.
*
* @return index of channel in channel_layout on success, a negative AVERROR
* on error.
+ *
+ * @deprecated use av_channel_layout_index_from_channel()
*/
+attribute_deprecated
int av_get_channel_layout_channel_index(uint64_t channel_layout,
uint64_t channel);
/**
* Get the channel with the given index in channel_layout.
+ * @deprecated use av_channel_layout_channel_from_index()
*/
+attribute_deprecated
uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);
/**
* Get the name of a given channel.
*
* @return channel name on success, NULL on error.
+ *
+ * @deprecated use av_channel_name()
*/
+attribute_deprecated
const char *av_get_channel_name(uint64_t channel);
/**
@@ -209,7 +519,9 @@ const char *av_get_channel_name(uint64_t channel);
*
* @param channel a channel layout with a single channel
* @return channel description on success, NULL on error
+ * @deprecated use av_channel_description()
*/
+attribute_deprecated
const char *av_get_channel_description(uint64_t channel);
/**
@@ -220,12 +532,251 @@ const char *av_get_channel_description(uint64_t channel);
* @param[out] name name of the layout
* @return 0 if the layout exists,
* <0 if index is beyond the limits
+ * @deprecated use av_channel_layout_standard()
*/
+attribute_deprecated
int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
const char **name);
-
/**
* @}
+ */
+#endif
+
+/**
+ * Get a human readable string in an abbreviated form describing a given channel.
+ * This is the inverse function of @ref av_channel_from_string().
+ *
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @param channel the AVChannel whose name to get
+ * @return number of bytes needed to hold the output string, or a negative AVERROR
+ * on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_name(char *buf, size_t buf_size, enum AVChannel channel);
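A hedged usage sketch of the buffer/return-value contract described above (fragment; assumes this header is included):

char name[16];
int needed = av_channel_name(name, sizeof(name), AV_CHAN_FRONT_LEFT);
if (needed < 0) {
    /* a negative AVERROR code */
} else if ((size_t)needed > sizeof(name)) {
    /* per the documentation above, the output was truncated */
}
/* for AV_CHAN_FRONT_LEFT, name holds "FL" */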
+
+/**
+ * bprint variant of av_channel_name().
+ *
+ * @note the string will be appended to the bprint buffer.
+ */
+void av_channel_name_bprint(struct AVBPrint *bp, enum AVChannel channel_id);
+
+/**
+ * Get a human readable string describing a given channel.
+ *
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @param channel the AVChannel whose description to get
+ * @return number of bytes needed to hold the output string, or a negative AVERROR
+ * on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_description(char *buf, size_t buf_size, enum AVChannel channel);
+
+/**
+ * bprint variant of av_channel_description().
+ *
+ * @note the string will be appended to the bprint buffer.
+ */
+void av_channel_description_bprint(struct AVBPrint *bp, enum AVChannel channel_id);
+
+/**
+ * This is the inverse function of @ref av_channel_name().
+ *
+ * @return the channel with the given name,
+ * or AV_CHAN_NONE when name does not identify a known channel
+ */
+enum AVChannel av_channel_from_string(const char *name);
+
+/**
+ * Initialize a native channel layout from a bitmask indicating which channels
+ * are present.
+ *
+ * @param channel_layout the layout structure to be initialized
+ * @param mask bitmask describing the channel layout
+ *
+ * @return 0 on success
+ * AVERROR(EINVAL) for invalid mask values
+ */
+int av_channel_layout_from_mask(AVChannelLayout *channel_layout, uint64_t mask);
+
+/**
+ * Initialize a channel layout from a given string description.
+ * The input string can be represented by:
+ * - the formal channel layout name (returned by av_channel_layout_describe())
+ * - single or multiple channel names (returned by av_channel_name(), e.g. "FL",
+ * or concatenated with "+", each optionally containing a custom name after
+ * a "@", e.g. "FL@Left+FR@Right+LFE")
+ * - a decimal or hexadecimal value of a native channel layout (e.g. "4" or "0x4")
+ * - the number of channels with default layout (e.g. "4c")
+ * - the number of unordered channels (e.g. "4C" or "4 channels")
+ * - the ambisonic order followed by optional non-diegetic channels (e.g.
+ * "ambisonic 2+stereo")
+ *
+ * @param channel_layout the layout structure to be initialized
+ * @param str string describing the channel layout
+ * @return 0 if the channel layout was detected, a negative AVERROR value otherwise
+ */
+int av_channel_layout_from_string(AVChannelLayout *channel_layout,
+ const char *str);
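A sketch of the accepted forms (illustrative only):

#include <libavutil/channel_layout.h>

static void parse_examples(void)
{
    AVChannelLayout layout = { 0 };

    if (!av_channel_layout_from_string(&layout, "5.1(side)")) {
        /* native order, 6 channels */
        av_channel_layout_uninit(&layout);
    }
    if (!av_channel_layout_from_string(&layout, "FL@Left+FR@Right+LFE")) {
        /* custom order, 3 channels with per-channel names */
        av_channel_layout_uninit(&layout);
    }
}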
+
+/**
+ * Get the default channel layout for a given number of channels.
+ *
+ * @param ch_layout the layout structure to be initialized
+ * @param nb_channels number of channels
+ */
+void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels);
+
+/**
+ * Iterate over all standard channel layouts.
+ *
+ * @param opaque a pointer where libavutil will store the iteration state. Must
+ * point to NULL to start the iteration.
+ *
+ * @return the standard channel layout or NULL when the iteration is
+ * finished
+ */
+const AVChannelLayout *av_channel_layout_standard(void **opaque);
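A minimal iteration sketch (fragment; assumes this header is included):

void *iter = NULL;
const AVChannelLayout *std;

while ((std = av_channel_layout_standard(&iter))) {
    /* std points to the next predefined layout; do not modify or uninit it */
}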
+
+/**
+ * Free any allocated data in the channel layout and reset the channel
+ * count to 0.
+ *
+ * @param channel_layout the layout structure to be uninitialized
+ */
+void av_channel_layout_uninit(AVChannelLayout *channel_layout);
+
+/**
+ * Make a copy of a channel layout. This differs from just assigning src to dst
+ * in that it allocates and copies the map for AV_CHANNEL_ORDER_CUSTOM.
+ *
+ * @note the destination channel_layout will always be uninitialized before the copy.
+ *
+ * @param dst destination channel layout
+ * @param src source channel layout
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src);
+
+/**
+ * Get a human-readable string describing the channel layout properties.
+ * The string will be in the same format that is accepted by
+ * @ref av_channel_layout_from_string(), so that the same channel layout
+ * can be rebuilt (except for opaque pointers).
+ *
+ * @param channel_layout channel layout to be described
+ * @param buf pre-allocated buffer where to put the generated string
+ * @param buf_size size in bytes of the buffer.
+ * @return number of bytes needed to hold the output string, or a negative AVERROR
+ * on failure. If the returned value is bigger than buf_size, then the
+ * string was truncated.
+ */
+int av_channel_layout_describe(const AVChannelLayout *channel_layout,
+ char *buf, size_t buf_size);
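A round-trip sketch (fragment; assumes this header is included); the expected name follows the layout-name table in channel_layout.c earlier in this patch:

AVChannelLayout layout = AV_CHANNEL_LAYOUT_5POINT1;
char buf[64];

if (av_channel_layout_describe(&layout, buf, sizeof(buf)) >= 0) {
    /* buf holds "5.1(side)", which av_channel_layout_from_string() accepts again */
}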
+
+/**
+ * bprint variant of av_channel_layout_describe().
+ *
+ * @note the string will be appended to the bprint buffer.
+ * @return 0 on success, or a negative AVERROR value on failure.
+ */
+int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout,
+ struct AVBPrint *bp);
+
+/**
+ * Get the channel with the given index in a channel layout.
+ *
+ * @param channel_layout input channel layout
+ * @param idx index of the channel
+ * @return channel with the index idx in channel_layout on success or
+ * AV_CHAN_NONE on failure (if idx is not valid or the channel order is
+ * unspecified)
+ */
+enum AVChannel
+av_channel_layout_channel_from_index(const AVChannelLayout *channel_layout, unsigned int idx);
+
+/**
+ * Get the index of a given channel in a channel layout. In case multiple
+ * channels are found, only the first match will be returned.
+ *
+ * @param channel_layout input channel layout
+ * @param channel the channel whose index to obtain
+ * @return index of channel in channel_layout on success or a negative number if
+ * channel is not present in channel_layout.
+ */
+int av_channel_layout_index_from_channel(const AVChannelLayout *channel_layout,
+ enum AVChannel channel);
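A sketch of the index/channel mapping for a native layout (fragment; assumes this header is included):

AVChannelLayout layout = AV_CHANNEL_LAYOUT_5POINT1;

int idx = av_channel_layout_index_from_channel(&layout, AV_CHAN_SIDE_LEFT);
/* idx == 4 for the native 5.1(side) mask (FL, FR, FC, LFE precede SL);
 * a channel not present in the mask yields a negative AVERROR */

enum AVChannel ch = av_channel_layout_channel_from_index(&layout, idx);
/* ch == AV_CHAN_SIDE_LEFT */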
+
+/**
+ * Get the index in a channel layout of a channel described by the given string.
+ * In case multiple channels are found, only the first match will be returned.
+ *
+ * This function accepts channel names in the same format as
+ * @ref av_channel_from_string().
+ *
+ * @param channel_layout input channel layout
+ * @param name string describing the channel whose index to obtain
+ * @return the index of the channel described by the given string, or a negative AVERROR
+ * value.
+ */
+int av_channel_layout_index_from_string(const AVChannelLayout *channel_layout,
+ const char *name);
+
+/**
+ * Get a channel described by the given string.
+ *
+ * This function accepts channel names in the same format as
+ * @ref av_channel_from_string().
+ *
+ * @param channel_layout input channel layout
+ * @param name string describing the channel to obtain
+ * @return a channel described by the given string in channel_layout on success
+ * or AV_CHAN_NONE on failure (if the string is not valid or the channel
+ * order is unspecified)
+ */
+enum AVChannel
+av_channel_layout_channel_from_string(const AVChannelLayout *channel_layout,
+ const char *name);
+
+/**
+ * Find out what channels from a given set are present in a channel layout,
+ * without regard for their positions.
+ *
+ * @param channel_layout input channel layout
+ * @param mask a combination of AV_CH_* representing a set of channels
+ * @return a bitfield representing all the channels from mask that are present
+ * in channel_layout
+ */
+uint64_t av_channel_layout_subset(const AVChannelLayout *channel_layout,
+ uint64_t mask);
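A short sketch (fragment; assumes this header is included):

AVChannelLayout layout = AV_CHANNEL_LAYOUT_5POINT1;

uint64_t present = av_channel_layout_subset(&layout,
                                            AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT |
                                            AV_CH_BACK_CENTER);
/* present == (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT); AV_CH_BACK_CENTER is absent */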
+
+/**
+ * Check whether a channel layout is valid, i.e. can possibly describe audio
+ * data.
+ *
+ * @param channel_layout input channel layout
+ * @return 1 if channel_layout is valid, 0 otherwise.
+ */
+int av_channel_layout_check(const AVChannelLayout *channel_layout);
+
+/**
+ * Check whether two channel layouts are semantically the same, i.e. the same
+ * channels are present at the same positions in both.
+ *
+ * If one of the channel layouts is AV_CHANNEL_ORDER_UNSPEC, while the other is
+ * not, they are considered to be unequal. If both are AV_CHANNEL_ORDER_UNSPEC,
+ * they are considered equal iff the channel counts are the same in both.
+ *
+ * @param chl input channel layout
+ * @param chl1 input channel layout
+ * @return 0 if chl and chl1 are equal, 1 if they are not equal. A negative
+ * AVERROR code if one or both are invalid.
+ */
+int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1);
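A sketch of the comparison rules above (fragment; assumes this header is included):

AVChannelLayout a = AV_CHANNEL_LAYOUT_STEREO;
AVChannelLayout b = AV_CHANNEL_LAYOUT_STEREO;
AVChannelLayout c = { .order = AV_CHANNEL_ORDER_UNSPEC, .nb_channels = 2 };

av_channel_layout_compare(&a, &b);   /* 0: same channels at the same positions */
av_channel_layout_compare(&a, &c);   /* 1: only one side is AV_CHANNEL_ORDER_UNSPEC */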
+
+/**
* @}
*/
diff --git a/media/ffvpx/libavutil/common.h b/media/ffvpx/libavutil/common.h
index bad43e426e..fd1404be6c 100644
--- a/media/ffvpx/libavutil/common.h
+++ b/media/ffvpx/libavutil/common.h
@@ -41,19 +41,11 @@
#include "attributes.h"
#include "macros.h"
-#include "version.h"
-#include "libavutil/avconfig.h"
-
-#if AV_HAVE_BIGENDIAN
-# define AV_NE(be, le) (be)
-#else
-# define AV_NE(be, le) (le)
-#endif
//rounded division & shift
#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
/* assume b>0 */
-#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+#define ROUNDED_DIV(a,b) (((a)>=0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
/* Fast a/(1<<b) rounded toward +inf. Assume a>=0 and b>=0 */
#define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
: ((a) + (1<<(b)) - 1) >> (b))
@@ -81,23 +73,13 @@
#define FFNABS(a) ((a) <= 0 ? (a) : (-(a)))
/**
- * Comparator.
- * For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0
- * if x == y. This is useful for instance in a qsort comparator callback.
- * Furthermore, compilers are able to optimize this to branchless code, and
- * there is no risk of overflow with signed types.
- * As with many macros, this evaluates its argument multiple times, it thus
- * must not have a side-effect.
+ * Unsigned absolute value.
+ * This takes the absolute value of a signed int and returns it as an unsigned.
+ * This also works with INT_MIN, which would otherwise not be representable.
+ * As with many macros, this evaluates its argument twice.
*/
-#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y)))
-
-#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
-#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
-#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
-#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
-
-#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
-#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#define FFABSU(a) ((a) <= 0 ? -(unsigned)(a) : (unsigned)(a))
+#define FFABS64U(a) ((a) <= 0 ? -(uint64_t)(a) : (uint64_t)(a))
/* misc math functions */
@@ -106,8 +88,72 @@
# include "intmath.h"
#endif
-/* Pull in unguarded fallback defines at the end of this file. */
-#include "common.h"
+#ifndef av_ceil_log2
+# define av_ceil_log2 av_ceil_log2_c
+#endif
+#ifndef av_clip
+# define av_clip av_clip_c
+#endif
+#ifndef av_clip64
+# define av_clip64 av_clip64_c
+#endif
+#ifndef av_clip_uint8
+# define av_clip_uint8 av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+# define av_clip_int8 av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+# define av_clip_uint16 av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+# define av_clip_int16 av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+# define av_clipl_int32 av_clipl_int32_c
+#endif
+#ifndef av_clip_intp2
+# define av_clip_intp2 av_clip_intp2_c
+#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
+#ifndef av_mod_uintp2
+# define av_mod_uintp2 av_mod_uintp2_c
+#endif
+#ifndef av_sat_add32
+# define av_sat_add32 av_sat_add32_c
+#endif
+#ifndef av_sat_dadd32
+# define av_sat_dadd32 av_sat_dadd32_c
+#endif
+#ifndef av_sat_sub32
+# define av_sat_sub32 av_sat_sub32_c
+#endif
+#ifndef av_sat_dsub32
+# define av_sat_dsub32 av_sat_dsub32_c
+#endif
+#ifndef av_sat_add64
+# define av_sat_add64 av_sat_add64_c
+#endif
+#ifndef av_sat_sub64
+# define av_sat_sub64 av_sat_sub64_c
+#endif
+#ifndef av_clipf
+# define av_clipf av_clipf_c
+#endif
+#ifndef av_clipd
+# define av_clipd av_clipd_c
+#endif
+#ifndef av_popcount
+# define av_popcount av_popcount_c
+#endif
+#ifndef av_popcount64
+# define av_popcount64 av_popcount64_c
+#endif
+#ifndef av_parity
+# define av_parity av_parity_c
+#endif
#ifndef av_log2
av_const int av_log2(unsigned v);
@@ -240,7 +286,7 @@ static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
*/
static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a, unsigned p)
{
- return a & ((1 << p) - 1);
+ return a & ((1U << p) - 1);
}
/**
@@ -292,7 +338,48 @@ static av_always_inline int av_sat_dsub32_c(int a, int b)
}
/**
+ * Add two signed 64-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int64_t av_sat_add64_c(int64_t a, int64_t b) {
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5,1)) || AV_HAS_BUILTIN(__builtin_add_overflow)
+ int64_t tmp;
+ return !__builtin_add_overflow(a, b, &tmp) ? tmp : (tmp < 0 ? INT64_MAX : INT64_MIN);
+#else
+ int64_t s = a+(uint64_t)b;
+ if ((int64_t)(a^b | ~s^b) >= 0)
+ return INT64_MAX ^ (b >> 63);
+ return s;
+#endif
+}
+
+/**
+ * Subtract two signed 64-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return difference with signed saturation
+ */
+static av_always_inline int64_t av_sat_sub64_c(int64_t a, int64_t b) {
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5,1)) || AV_HAS_BUILTIN(__builtin_sub_overflow)
+ int64_t tmp;
+ return !__builtin_sub_overflow(a, b, &tmp) ? tmp : (tmp < 0 ? INT64_MAX : INT64_MIN);
+#else
+ if (b <= 0 && a >= INT64_MAX + b)
+ return INT64_MAX;
+ if (b >= 0 && a <= INT64_MIN + b)
+ return INT64_MIN;
+ return a - b;
+#endif
+}
+
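For illustration, a caller of the new saturating 64-bit helpers might look like this (a minimal sketch, assuming the av_sat_*64 macros above are in scope):

    #include <assert.h>
    #include <stdint.h>

    void sat64_demo(void)
    {
        /* plain int64_t arithmetic would overflow here; the helpers clamp instead */
        assert(av_sat_add64(INT64_MAX, 1) == INT64_MAX);
        assert(av_sat_sub64(INT64_MIN, 1) == INT64_MIN);
    }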
+/**
* Clip a float value into the amin-amax range.
+ * If a is NaN or -inf, amin will be returned.
+ * If a is +inf, amax will be returned.
* @param a value to clip
* @param amin minimum value of the clip range
* @param amax maximum value of the clip range
@@ -303,13 +390,13 @@ static av_always_inline av_const float av_clipf_c(float a, float amin, float ama
#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort();
#endif
- if (a < amin) return amin;
- else if (a > amax) return amax;
- else return a;
+ return FFMIN(FFMAX(a, amin), amax);
}
/**
* Clip a double value into the amin-amax range.
+ * If a is NaN or -inf, amin will be returned.
+ * If a is +inf, amax will be returned.
* @param a value to clip
* @param amin minimum value of the clip range
* @param amax maximum value of the clip range
@@ -320,9 +407,7 @@ static av_always_inline av_const double av_clipd_c(double a, double amin, double
#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort();
#endif
- if (a < amin) return amin;
- else if (a > amax) return amax;
- else return a;
+ return FFMIN(FFMAX(a, amin), amax);
}
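The branchless FFMIN/FFMAX form is what produces the NaN/infinity behaviour documented above: a NaN compares false against amin in FFMAX and is replaced by it. A short sketch (assumes <math.h>):

    float lo = av_clipf(NAN,      0.0f, 1.0f);  /* 0.0f: NaN and -INFINITY clamp to amin */
    float hi = av_clipf(INFINITY, 0.0f, 1.0f);  /* 1.0f: +INFINITY clamps to amax */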
/** Compute ceil(log2(x)).
@@ -363,9 +448,6 @@ static av_always_inline av_const int av_parity_c(uint32_t v)
return av_popcount(v) & 1;
}
-#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
-#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
-
/**
* Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
*
@@ -373,7 +455,9 @@ static av_always_inline av_const int av_parity_c(uint32_t v)
* @param GET_BYTE Expression reading one byte from the input.
* Evaluated up to 7 times (4 for the currently
* assigned Unicode range). With a memory buffer
- * input, this could be *ptr++.
+ * input, this could be *ptr++, or, if reading should stop
+ * at the end of a NUL-terminated string,
+ * *ptr ? *ptr++ : 0
* @param ERROR Expression to be evaluated on invalid input,
* typically a goto statement.
*
@@ -387,11 +471,11 @@ static av_always_inline av_const int av_parity_c(uint32_t v)
{\
uint32_t top = (val & 128) >> 1;\
if ((val & 0xc0) == 0x80 || val >= 0xFE)\
- ERROR\
+ {ERROR}\
while (val & top) {\
- int tmp= (GET_BYTE) - 128;\
+ unsigned int tmp = (GET_BYTE) - 128;\
if(tmp>>6)\
- ERROR\
+ {ERROR}\
val= (val<<6) + tmp;\
top <<= 5;\
}\
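A usage sketch of GET_UTF8 with the NUL-terminated GET_BYTE form suggested in the comment above (illustrative only; the error expression simply bails out):

    #include <stdint.h>

    static uint32_t first_codepoint(const uint8_t *ptr)
    {
        uint32_t cp;
        GET_UTF8(cp, *ptr ? *ptr++ : 0, return 0;)
        return cp;  /* e.g. "\xC3\xA9" (U+00E9) yields 0xE9 */
    }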
@@ -408,13 +492,13 @@ static av_always_inline av_const int av_parity_c(uint32_t v)
* typically a goto statement.
*/
#define GET_UTF16(val, GET_16BIT, ERROR)\
- val = GET_16BIT;\
+ val = (GET_16BIT);\
{\
unsigned int hi = val - 0xD800;\
if (hi < 0x800) {\
- val = GET_16BIT - 0xDC00;\
+ val = (GET_16BIT) - 0xDC00;\
if (val > 0x3FFU || hi > 0x3FFU)\
- ERROR\
+ {ERROR}\
val += (hi<<10) + 0x10000;\
}\
}\
@@ -492,69 +576,3 @@ static av_always_inline av_const int av_parity_c(uint32_t v)
#endif /* HAVE_AV_CONFIG_H */
#endif /* AVUTIL_COMMON_H */
-
-/*
- * The following definitions are outside the multiple inclusion guard
- * to ensure they are immediately available in intmath.h.
- */
-
-#ifndef av_ceil_log2
-# define av_ceil_log2 av_ceil_log2_c
-#endif
-#ifndef av_clip
-# define av_clip av_clip_c
-#endif
-#ifndef av_clip64
-# define av_clip64 av_clip64_c
-#endif
-#ifndef av_clip_uint8
-# define av_clip_uint8 av_clip_uint8_c
-#endif
-#ifndef av_clip_int8
-# define av_clip_int8 av_clip_int8_c
-#endif
-#ifndef av_clip_uint16
-# define av_clip_uint16 av_clip_uint16_c
-#endif
-#ifndef av_clip_int16
-# define av_clip_int16 av_clip_int16_c
-#endif
-#ifndef av_clipl_int32
-# define av_clipl_int32 av_clipl_int32_c
-#endif
-#ifndef av_clip_intp2
-# define av_clip_intp2 av_clip_intp2_c
-#endif
-#ifndef av_clip_uintp2
-# define av_clip_uintp2 av_clip_uintp2_c
-#endif
-#ifndef av_mod_uintp2
-# define av_mod_uintp2 av_mod_uintp2_c
-#endif
-#ifndef av_sat_add32
-# define av_sat_add32 av_sat_add32_c
-#endif
-#ifndef av_sat_dadd32
-# define av_sat_dadd32 av_sat_dadd32_c
-#endif
-#ifndef av_sat_sub32
-# define av_sat_sub32 av_sat_sub32_c
-#endif
-#ifndef av_sat_dsub32
-# define av_sat_dsub32 av_sat_dsub32_c
-#endif
-#ifndef av_clipf
-# define av_clipf av_clipf_c
-#endif
-#ifndef av_clipd
-# define av_clipd av_clipd_c
-#endif
-#ifndef av_popcount
-# define av_popcount av_popcount_c
-#endif
-#ifndef av_popcount64
-# define av_popcount64 av_popcount64_c
-#endif
-#ifndef av_parity
-# define av_parity av_parity_c
-#endif
diff --git a/media/ffvpx/libavutil/cpu.c b/media/ffvpx/libavutil/cpu.c
index 2cf1a38297..4b881e5adc 100644
--- a/media/ffvpx/libavutil/cpu.c
+++ b/media/ffvpx/libavutil/cpu.c
@@ -16,6 +16,15 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
+
+#if HAVE_SCHED_GETAFFINITY
+#ifndef _GNU_SOURCE
+# define _GNU_SOURCE
+#endif
+#include <sched.h>
+#endif
+
#include <stddef.h>
#include <stdint.h>
#include <stdatomic.h>
@@ -23,16 +32,9 @@
#include "attributes.h"
#include "cpu.h"
#include "cpu_internal.h"
-#include "config.h"
#include "opt.h"
#include "common.h"
-#if HAVE_SCHED_GETAFFINITY
-#ifndef _GNU_SOURCE
-# define _GNU_SOURCE
-#endif
-#include <sched.h>
-#endif
#if HAVE_GETPROCESSAFFINITYMASK || HAVE_WINRT
#include <windows.h>
#endif
@@ -48,41 +50,48 @@
#endif
static atomic_int cpu_flags = ATOMIC_VAR_INIT(-1);
+static atomic_int cpu_count = ATOMIC_VAR_INIT(-1);
static int get_cpu_flags(void)
{
- #if ARCH_AARCH64 == 1
- return ff_get_cpu_flags_aarch64();
- #elif ARCH_ARM == 1
- return ff_get_cpu_flags_arm();
- #elif ARCH_PPC == 1
- return ff_get_cpu_flags_ppc();
- #elif ARCH_X86 == 1
- return ff_get_cpu_flags_x86();
- #endif
+#if ARCH_MIPS == 1
+ return ff_get_cpu_flags_mips();
+#elif ARCH_AARCH64 == 1
+ return ff_get_cpu_flags_aarch64();
+#elif ARCH_ARM == 1
+ return ff_get_cpu_flags_arm();
+#elif ARCH_PPC == 1
+ return ff_get_cpu_flags_ppc();
+#elif ARCH_RISCV == 1
+ return ff_get_cpu_flags_riscv();
+#elif ARCH_X86 == 1
+ return ff_get_cpu_flags_x86();
+#elif ARCH_LOONGARCH == 1
+ return ff_get_cpu_flags_loongarch();
+#endif
return 0;
}
void av_force_cpu_flags(int arg){
#if ARCH_X86 == 1
- if ((arg & (AV_CPU_FLAG_3DNOW |
- AV_CPU_FLAG_3DNOWEXT |
- AV_CPU_FLAG_MMXEXT |
- AV_CPU_FLAG_SSE |
- AV_CPU_FLAG_SSE2 |
- AV_CPU_FLAG_SSE2SLOW |
- AV_CPU_FLAG_SSE3 |
- AV_CPU_FLAG_SSE3SLOW |
- AV_CPU_FLAG_SSSE3 |
- AV_CPU_FLAG_SSE4 |
- AV_CPU_FLAG_SSE42 |
- AV_CPU_FLAG_AVX |
- AV_CPU_FLAG_AVXSLOW |
- AV_CPU_FLAG_XOP |
- AV_CPU_FLAG_FMA3 |
- AV_CPU_FLAG_FMA4 |
- AV_CPU_FLAG_AVX2 |
- AV_CPU_FLAG_AVX512 ))
+ if ((arg & ( AV_CPU_FLAG_3DNOW |
+ AV_CPU_FLAG_3DNOWEXT |
+ AV_CPU_FLAG_MMXEXT |
+ AV_CPU_FLAG_SSE |
+ AV_CPU_FLAG_SSE2 |
+ AV_CPU_FLAG_SSE2SLOW |
+ AV_CPU_FLAG_SSE3 |
+ AV_CPU_FLAG_SSE3SLOW |
+ AV_CPU_FLAG_SSSE3 |
+ AV_CPU_FLAG_SSE4 |
+ AV_CPU_FLAG_SSE42 |
+ AV_CPU_FLAG_AVX |
+ AV_CPU_FLAG_AVXSLOW |
+ AV_CPU_FLAG_XOP |
+ AV_CPU_FLAG_FMA3 |
+ AV_CPU_FLAG_FMA4 |
+ AV_CPU_FLAG_AVX2 |
+ AV_CPU_FLAG_AVX512 ))
&& !(arg & AV_CPU_FLAG_MMX)) {
av_log(NULL, AV_LOG_WARNING, "MMX implied by specified flags\n");
arg |= AV_CPU_FLAG_MMX;
@@ -102,94 +111,6 @@ int av_get_cpu_flags(void)
return flags;
}
-void av_set_cpu_flags_mask(int mask)
-{
- atomic_store_explicit(&cpu_flags, get_cpu_flags() & mask,
- memory_order_relaxed);
-}
-
-int av_parse_cpu_flags(const char *s)
-{
-#define CPUFLAG_MMXEXT (AV_CPU_FLAG_MMX | AV_CPU_FLAG_MMXEXT | AV_CPU_FLAG_CMOV)
-#define CPUFLAG_3DNOW (AV_CPU_FLAG_3DNOW | AV_CPU_FLAG_MMX)
-#define CPUFLAG_3DNOWEXT (AV_CPU_FLAG_3DNOWEXT | CPUFLAG_3DNOW)
-#define CPUFLAG_SSE (AV_CPU_FLAG_SSE | CPUFLAG_MMXEXT)
-#define CPUFLAG_SSE2 (AV_CPU_FLAG_SSE2 | CPUFLAG_SSE)
-#define CPUFLAG_SSE2SLOW (AV_CPU_FLAG_SSE2SLOW | CPUFLAG_SSE2)
-#define CPUFLAG_SSE3 (AV_CPU_FLAG_SSE3 | CPUFLAG_SSE2)
-#define CPUFLAG_SSE3SLOW (AV_CPU_FLAG_SSE3SLOW | CPUFLAG_SSE3)
-#define CPUFLAG_SSSE3 (AV_CPU_FLAG_SSSE3 | CPUFLAG_SSE3)
-#define CPUFLAG_SSE4 (AV_CPU_FLAG_SSE4 | CPUFLAG_SSSE3)
-#define CPUFLAG_SSE42 (AV_CPU_FLAG_SSE42 | CPUFLAG_SSE4)
-#define CPUFLAG_AVX (AV_CPU_FLAG_AVX | CPUFLAG_SSE42)
-#define CPUFLAG_AVXSLOW (AV_CPU_FLAG_AVXSLOW | CPUFLAG_AVX)
-#define CPUFLAG_XOP (AV_CPU_FLAG_XOP | CPUFLAG_AVX)
-#define CPUFLAG_FMA3 (AV_CPU_FLAG_FMA3 | CPUFLAG_AVX)
-#define CPUFLAG_FMA4 (AV_CPU_FLAG_FMA4 | CPUFLAG_AVX)
-#define CPUFLAG_AVX2 (AV_CPU_FLAG_AVX2 | CPUFLAG_AVX)
-#define CPUFLAG_BMI2 (AV_CPU_FLAG_BMI2 | AV_CPU_FLAG_BMI1)
-#define CPUFLAG_AESNI (AV_CPU_FLAG_AESNI | CPUFLAG_SSE42)
-#define CPUFLAG_AVX512 (AV_CPU_FLAG_AVX512 | CPUFLAG_AVX2)
- static const AVOption cpuflags_opts[] = {
- { "flags" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
-#if ARCH_PPC
- { "altivec" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ALTIVEC }, .unit = "flags" },
-#elif ARCH_X86
- { "mmx" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMX }, .unit = "flags" },
- { "mmxext" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_MMXEXT }, .unit = "flags" },
- { "sse" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE }, .unit = "flags" },
- { "sse2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE2 }, .unit = "flags" },
- { "sse2slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE2SLOW }, .unit = "flags" },
- { "sse3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE3 }, .unit = "flags" },
- { "sse3slow", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE3SLOW }, .unit = "flags" },
- { "ssse3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSSE3 }, .unit = "flags" },
- { "atom" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ATOM }, .unit = "flags" },
- { "sse4.1" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE4 }, .unit = "flags" },
- { "sse4.2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_SSE42 }, .unit = "flags" },
- { "avx" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVX }, .unit = "flags" },
- { "avxslow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVXSLOW }, .unit = "flags" },
- { "xop" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_XOP }, .unit = "flags" },
- { "fma3" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_FMA3 }, .unit = "flags" },
- { "fma4" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_FMA4 }, .unit = "flags" },
- { "avx2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVX2 }, .unit = "flags" },
- { "bmi1" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_BMI1 }, .unit = "flags" },
- { "bmi2" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_BMI2 }, .unit = "flags" },
- { "3dnow" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_3DNOW }, .unit = "flags" },
- { "3dnowext", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_3DNOWEXT }, .unit = "flags" },
- { "cmov", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV }, .unit = "flags" },
- { "aesni" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AESNI }, .unit = "flags" },
- { "avx512" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CPUFLAG_AVX512 }, .unit = "flags" },
-#elif ARCH_ARM
- { "armv5te", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV5TE }, .unit = "flags" },
- { "armv6", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6 }, .unit = "flags" },
- { "armv6t2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV6T2 }, .unit = "flags" },
- { "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" },
- { "vfp_vm", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP_VM }, .unit = "flags" },
- { "vfpv3", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFPV3 }, .unit = "flags" },
- { "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" },
-#elif ARCH_AARCH64
- { "armv8", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8 }, .unit = "flags" },
- { "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" },
- { "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" },
-#endif
- { NULL },
- };
- static const AVClass class = {
- .class_name = "cpuflags",
- .item_name = av_default_item_name,
- .option = cpuflags_opts,
- .version = LIBAVUTIL_VERSION_INT,
- };
-
- int flags = 0, ret;
- const AVClass *pclass = &class;
-
- if ((ret = av_opt_eval_flags(&pclass, &cpuflags_opts[0], s, &flags)) < 0)
- return ret;
-
- return flags & INT_MAX;
-}
-
int av_parse_cpu_caps(unsigned *flags, const char *s)
{
static const AVOption cpuflags_opts[] = {
@@ -222,6 +143,8 @@ int av_parse_cpu_caps(unsigned *flags, const char *s)
{ "cmov", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_CMOV }, .unit = "flags" },
{ "aesni", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AESNI }, .unit = "flags" },
{ "avx512" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX512 }, .unit = "flags" },
+ { "avx512icl", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_AVX512ICL }, .unit = "flags" },
+ { "slowgather", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_SLOW_GATHER }, .unit = "flags" },
#define CPU_FLAG_P2 AV_CPU_FLAG_CMOV | AV_CPU_FLAG_MMX
#define CPU_FLAG_P3 CPU_FLAG_P2 | AV_CPU_FLAG_MMX2 | AV_CPU_FLAG_SSE
@@ -252,6 +175,21 @@ int av_parse_cpu_caps(unsigned *flags, const char *s)
{ "armv8", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_ARMV8 }, .unit = "flags" },
{ "neon", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_NEON }, .unit = "flags" },
{ "vfp", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_VFP }, .unit = "flags" },
+#elif ARCH_MIPS
+ { "mmi", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MMI }, .unit = "flags" },
+ { "msa", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_MSA }, .unit = "flags" },
+#elif ARCH_LOONGARCH
+ { "lsx", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_LSX }, .unit = "flags" },
+ { "lasx", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_LASX }, .unit = "flags" },
+#elif ARCH_RISCV
+ { "rvi", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVI }, .unit = "flags" },
+ { "rvf", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVF }, .unit = "flags" },
+ { "rvd", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVD }, .unit = "flags" },
+ { "rvv-i32", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_I32 }, .unit = "flags" },
+ { "rvv-f32", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_F32 }, .unit = "flags" },
+ { "rvv-i64", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_I64 }, .unit = "flags" },
+ { "rvv", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVV_F64 }, .unit = "flags" },
+ { "rvb-basic",NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_CPU_FLAG_RVB_BASIC }, .unit = "flags" },
#endif
{ NULL },
};
@@ -268,9 +206,10 @@ int av_parse_cpu_caps(unsigned *flags, const char *s)
int av_cpu_count(void)
{
- static volatile int printed;
+ static atomic_int printed = ATOMIC_VAR_INIT(0);
int nb_cpus = 1;
+ int count = 0;
#if HAVE_WINRT
SYSTEM_INFO sysinfo;
#endif
@@ -285,6 +224,12 @@ int av_cpu_count(void)
DWORD_PTR proc_aff, sys_aff;
if (GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
nb_cpus = av_popcount64(proc_aff);
+#elif HAVE_SYSCTL && defined(HW_NCPUONLINE)
+ int mib[2] = { CTL_HW, HW_NCPUONLINE };
+ size_t len = sizeof(nb_cpus);
+
+ if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
+ nb_cpus = 0;
#elif HAVE_SYSCTL && defined(HW_NCPU)
int mib[2] = { CTL_HW, HW_NCPU };
size_t len = sizeof(nb_cpus);
@@ -300,25 +245,39 @@ int av_cpu_count(void)
nb_cpus = sysinfo.dwNumberOfProcessors;
#endif
- if (!printed) {
+ if (!atomic_exchange_explicit(&printed, 1, memory_order_relaxed))
av_log(NULL, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus);
- printed = 1;
+
+ count = atomic_load_explicit(&cpu_count, memory_order_relaxed);
+
+ if (count > 0) {
+ nb_cpus = count;
+ av_log(NULL, AV_LOG_DEBUG, "overriding to %d logical cores\n", nb_cpus);
}
return nb_cpus;
}
+void av_cpu_force_count(int count)
+{
+ atomic_store_explicit(&cpu_count, count, memory_order_relaxed);
+}
+
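Illustrative use of the new override, for example to cap thread-count decisions during testing (sketch only):

    av_cpu_force_count(4);      /* av_cpu_count() now reports 4 */
    int n = av_cpu_count();
    av_cpu_force_count(0);      /* a count < 1 re-enables detection */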
size_t av_cpu_max_align(void)
{
- #if ARCH_AARCH64 == 1
- return ff_get_cpu_max_align_aarch64();
- #elif ARCH_ARM == 1
- return ff_get_cpu_max_align_arm();
- #elif ARCH_PPC == 1
- return ff_get_cpu_max_align_ppc();
- #elif ARCH_X86 == 1
- return ff_get_cpu_max_align_x86();
- #endif
+#if ARCH_MIPS
+ return ff_get_cpu_max_align_mips();
+#elif ARCH_AARCH64
+ return ff_get_cpu_max_align_aarch64();
+#elif ARCH_ARM
+ return ff_get_cpu_max_align_arm();
+#elif ARCH_PPC
+ return ff_get_cpu_max_align_ppc();
+#elif ARCH_X86
+ return ff_get_cpu_max_align_x86();
+#elif ARCH_LOONGARCH
+ return ff_get_cpu_max_align_loongarch();
+#endif
return 8;
}
diff --git a/media/ffvpx/libavutil/cpu.h b/media/ffvpx/libavutil/cpu.h
index 8bb9eb606b..8fa5ea9199 100644
--- a/media/ffvpx/libavutil/cpu.h
+++ b/media/ffvpx/libavutil/cpu.h
@@ -23,8 +23,6 @@
#include <stddef.h>
-#include "attributes.h"
-
#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
/* lower 16 bits - CPU features */
@@ -56,6 +54,8 @@
#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1
#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2
#define AV_CPU_FLAG_AVX512 0x100000 ///< AVX-512 functions: requires OS support even if YMM/ZMM registers aren't used
+#define AV_CPU_FLAG_AVX512ICL 0x200000 ///< F/CD/BW/DQ/VL/VNNI/IFMA/VBMI/VBMI2/VPOPCNTDQ/BITALG/GFNI/VAES/VPCLMULQDQ
+#define AV_CPU_FLAG_SLOW_GATHER 0x2000000 ///< CPU has slow gathers.
#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06
@@ -71,6 +71,23 @@
#define AV_CPU_FLAG_VFP_VM (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in various CPUs implementations
#define AV_CPU_FLAG_SETEND (1 <<16)
+#define AV_CPU_FLAG_MMI (1 << 0)
+#define AV_CPU_FLAG_MSA (1 << 1)
+
+// LoongArch SIMD extensions
+#define AV_CPU_FLAG_LSX (1 << 0)
+#define AV_CPU_FLAG_LASX (1 << 1)
+
+// RISC-V extensions
+#define AV_CPU_FLAG_RVI (1 << 0) ///< I (full GPR bank)
+#define AV_CPU_FLAG_RVF (1 << 1) ///< F (single precision FP)
+#define AV_CPU_FLAG_RVD (1 << 2) ///< D (double precision FP)
+#define AV_CPU_FLAG_RVV_I32 (1 << 3) ///< Vectors of 8/16/32-bit ints
+#define AV_CPU_FLAG_RVV_F32 (1 << 4) ///< Vectors of floats
+#define AV_CPU_FLAG_RVV_I64 (1 << 5) ///< Vectors of 64-bit ints
+#define AV_CPU_FLAG_RVV_F64 (1 << 6) ///< Vectors of doubles
+#define AV_CPU_FLAG_RVB_BASIC (1 << 7) ///< Basic bit-manipulations
+
/**
* Return the flags which specify extensions supported by the CPU.
* The returned value is affected by av_force_cpu_flags() if that was used
@@ -86,25 +103,6 @@ int av_get_cpu_flags(void);
void av_force_cpu_flags(int flags);
/**
- * Set a mask on flags returned by av_get_cpu_flags().
- * This function is mainly useful for testing.
- * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible
- */
-attribute_deprecated void av_set_cpu_flags_mask(int mask);
-
-/**
- * Parse CPU flags from a string.
- *
- * The returned flags contain the specified flags as well as related unspecified flags.
- *
- * This function exists only for compatibility with libav.
- * Please use av_parse_cpu_caps() when possible.
- * @return a combination of AV_CPU_* flags, negative on error.
- */
-attribute_deprecated
-int av_parse_cpu_flags(const char *s);
-
-/**
* Parse CPU caps from a string and update the given AV_CPU_* flags based on that.
*
* @return negative on error.
@@ -117,6 +115,12 @@ int av_parse_cpu_caps(unsigned *flags, const char *s);
int av_cpu_count(void);
/**
+ * Override CPU count detection and force the specified count.
+ * A count < 1 disables the override and restores automatic detection.
+ */
+void av_cpu_force_count(int count);
+
+/**
* Get the maximum data alignment that may be required by FFmpeg.
*
* Note that this is affected by the build configuration and the CPU flags mask,
diff --git a/media/ffvpx/libavutil/cpu_internal.h b/media/ffvpx/libavutil/cpu_internal.h
index 37122d1c5f..634f28bac4 100644
--- a/media/ffvpx/libavutil/cpu_internal.h
+++ b/media/ffvpx/libavutil/cpu_internal.h
@@ -30,25 +30,33 @@
(HAVE_ ## cpuext ## suffix && ((flags) & AV_CPU_FLAG_ ## cpuext) && \
!((flags) & AV_CPU_FLAG_ ## slow_cpuext ## SLOW))
+#define CPUEXT_SUFFIX_SLOW(flags, suffix, cpuext) \
+ (HAVE_ ## cpuext ## suffix && \
+ ((flags) & (AV_CPU_FLAG_ ## cpuext | AV_CPU_FLAG_ ## cpuext ## SLOW)))
+
#define CPUEXT_SUFFIX_SLOW2(flags, suffix, cpuext, slow_cpuext) \
(HAVE_ ## cpuext ## suffix && ((flags) & AV_CPU_FLAG_ ## cpuext) && \
- ((flags) & AV_CPU_FLAG_ ## slow_cpuext ## SLOW))
+ ((flags) & (AV_CPU_FLAG_ ## slow_cpuext | AV_CPU_FLAG_ ## slow_cpuext ## SLOW)))
#define CPUEXT_SUFFIX_FAST(flags, suffix, cpuext) CPUEXT_SUFFIX_FAST2(flags, suffix, cpuext, cpuext)
-#define CPUEXT_SUFFIX_SLOW(flags, suffix, cpuext) CPUEXT_SUFFIX_SLOW2(flags, suffix, cpuext, cpuext)
#define CPUEXT(flags, cpuext) CPUEXT_SUFFIX(flags, , cpuext)
#define CPUEXT_FAST(flags, cpuext) CPUEXT_SUFFIX_FAST(flags, , cpuext)
#define CPUEXT_SLOW(flags, cpuext) CPUEXT_SUFFIX_SLOW(flags, , cpuext)
+int ff_get_cpu_flags_mips(void);
int ff_get_cpu_flags_aarch64(void);
int ff_get_cpu_flags_arm(void);
int ff_get_cpu_flags_ppc(void);
+int ff_get_cpu_flags_riscv(void);
int ff_get_cpu_flags_x86(void);
+int ff_get_cpu_flags_loongarch(void);
+size_t ff_get_cpu_max_align_mips(void);
size_t ff_get_cpu_max_align_aarch64(void);
size_t ff_get_cpu_max_align_arm(void);
size_t ff_get_cpu_max_align_ppc(void);
size_t ff_get_cpu_max_align_x86(void);
+size_t ff_get_cpu_max_align_loongarch(void);
#endif /* AVUTIL_CPU_INTERNAL_H */
diff --git a/media/ffvpx/libavutil/crc.c b/media/ffvpx/libavutil/crc.c
index c45ea63a62..703b56f4e0 100644
--- a/media/ffvpx/libavutil/crc.c
+++ b/media/ffvpx/libavutil/crc.c
@@ -23,8 +23,8 @@
#include "thread.h"
#include "avassert.h"
#include "bswap.h"
-#include "common.h"
#include "crc.h"
+#include "error.h"
#if CONFIG_HARDCODED_TABLES
static const AVCRC av_crc_table[AV_CRC_MAX][257] = {
diff --git a/media/ffvpx/libavutil/crc.h b/media/ffvpx/libavutil/crc.h
index 47e22b4c78..7f59812a18 100644
--- a/media/ffvpx/libavutil/crc.h
+++ b/media/ffvpx/libavutil/crc.h
@@ -30,7 +30,6 @@
#include <stdint.h>
#include <stddef.h>
#include "attributes.h"
-#include "version.h"
/**
* @defgroup lavu_crc32 CRC
@@ -85,7 +84,10 @@ const AVCRC *av_crc_get_table(AVCRCId crc_id);
/**
* Calculate the CRC of a block.
+ * @param ctx initialized AVCRC array (see av_crc_init())
* @param crc CRC of previous blocks if any or initial value for CRC
+ * @param buffer buffer whose CRC to calculate
+ * @param length length of the buffer
* @return CRC updated with the data from the given block
*
* @see av_crc_init() "le" parameter
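For reference, a typical call sequence for the parameters documented above looks like this (sketch only; buf and len are placeholders, and the initial CRC value depends on the protocol in use):

    const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE_LE);
    uint32_t crc = av_crc(table, 0, buf, len);   /* ctx, crc so far, buffer, length */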
diff --git a/media/ffvpx/libavutil/dict.c b/media/ffvpx/libavutil/dict.c
index 0ea71386e5..f673977a98 100644
--- a/media/ffvpx/libavutil/dict.c
+++ b/media/ffvpx/libavutil/dict.c
@@ -20,8 +20,10 @@
#include <string.h>
+#include "avassert.h"
#include "avstring.h"
#include "dict.h"
+#include "dict_internal.h"
#include "internal.h"
#include "mem.h"
#include "time_internal.h"
@@ -37,21 +39,35 @@ int av_dict_count(const AVDictionary *m)
return m ? m->count : 0;
}
-AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
- const AVDictionaryEntry *prev, int flags)
+const AVDictionaryEntry *av_dict_iterate(const AVDictionary *m,
+ const AVDictionaryEntry *prev)
{
- unsigned int i, j;
+ int i = 0;
if (!m)
return NULL;
if (prev)
i = prev - m->elems + 1;
- else
- i = 0;
- for (; i < m->count; i++) {
- const char *s = m->elems[i].key;
+ av_assert2(i >= 0);
+ if (i >= m->count)
+ return NULL;
+
+ return &m->elems[i];
+}
+
+AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
+ const AVDictionaryEntry *prev, int flags)
+{
+ const AVDictionaryEntry *entry = prev;
+ unsigned int j;
+
+ if (!key)
+ return NULL;
+
+ while ((entry = av_dict_iterate(m, entry))) {
+ const char *s = entry->key;
if (flags & AV_DICT_MATCH_CASE)
for (j = 0; s[j] == key[j] && key[j]; j++)
;
@@ -62,7 +78,7 @@ AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
continue;
if (s[j] && !(flags & AV_DICT_IGNORE_SUFFIX))
continue;
- return &m->elems[i];
+ return (AVDictionaryEntry *)entry;
}
return NULL;
}
@@ -72,8 +88,17 @@ int av_dict_set(AVDictionary **pm, const char *key, const char *value,
{
AVDictionary *m = *pm;
AVDictionaryEntry *tag = NULL;
- char *oldval = NULL, *copy_key = NULL, *copy_value = NULL;
+ char *copy_key = NULL, *copy_value = NULL;
+ int err;
+ if (flags & AV_DICT_DONT_STRDUP_VAL)
+ copy_value = (void *)value;
+ else if (value)
+ copy_value = av_strdup(value);
+ if (!key) {
+ err = AVERROR(EINVAL);
+ goto err_out;
+ }
if (!(flags & AV_DICT_MULTIKEY)) {
tag = av_dict_get(m, key, NULL, flags);
}
@@ -81,14 +106,10 @@ int av_dict_set(AVDictionary **pm, const char *key, const char *value,
copy_key = (void *)key;
else
copy_key = av_strdup(key);
- if (flags & AV_DICT_DONT_STRDUP_VAL)
- copy_value = (void *)value;
- else if (copy_key)
- copy_value = av_strdup(value);
if (!m)
m = *pm = av_mallocz(sizeof(*m));
- if (!m || (key && !copy_key) || (value && !copy_value))
- goto err_out;
+ if (!m || !copy_key || (value && !copy_value))
+ goto enomem;
if (tag) {
if (flags & AV_DICT_DONT_OVERWRITE) {
@@ -96,44 +117,43 @@ int av_dict_set(AVDictionary **pm, const char *key, const char *value,
av_free(copy_value);
return 0;
}
- if (flags & AV_DICT_APPEND)
- oldval = tag->value;
- else
+ if (copy_value && flags & AV_DICT_APPEND) {
+ size_t oldlen = strlen(tag->value);
+ size_t new_part_len = strlen(copy_value);
+ size_t len = oldlen + new_part_len + 1;
+ char *newval = av_realloc(tag->value, len);
+ if (!newval)
+ goto enomem;
+ memcpy(newval + oldlen, copy_value, new_part_len + 1);
+ av_freep(&copy_value);
+ copy_value = newval;
+ } else
av_free(tag->value);
av_free(tag->key);
*tag = m->elems[--m->count];
} else if (copy_value) {
- AVDictionaryEntry *tmp = av_realloc(m->elems,
- (m->count + 1) * sizeof(*m->elems));
+ AVDictionaryEntry *tmp = av_realloc_array(m->elems,
+ m->count + 1, sizeof(*m->elems));
if (!tmp)
- goto err_out;
+ goto enomem;
m->elems = tmp;
}
if (copy_value) {
m->elems[m->count].key = copy_key;
m->elems[m->count].value = copy_value;
- if (oldval && flags & AV_DICT_APPEND) {
- size_t len = strlen(oldval) + strlen(copy_value) + 1;
- char *newval = av_mallocz(len);
- if (!newval)
- goto err_out;
- av_strlcat(newval, oldval, len);
- av_freep(&oldval);
- av_strlcat(newval, copy_value, len);
- m->elems[m->count].value = newval;
- av_freep(&copy_value);
- }
m->count++;
} else {
+ if (!m->count) {
+ av_freep(&m->elems);
+ av_freep(pm);
+ }
av_freep(&copy_key);
}
- if (!m->count) {
- av_freep(&m->elems);
- av_freep(pm);
- }
return 0;
+enomem:
+ err = AVERROR(ENOMEM);
err_out:
if (m && !m->count) {
av_freep(&m->elems);
@@ -141,7 +161,7 @@ err_out:
}
av_free(copy_key);
av_free(copy_value);
- return AVERROR(ENOMEM);
+ return err;
}
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value,
@@ -216,9 +236,9 @@ void av_dict_free(AVDictionary **pm)
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
{
- AVDictionaryEntry *t = NULL;
+ const AVDictionaryEntry *t = NULL;
- while ((t = av_dict_get(src, "", t, AV_DICT_IGNORE_SUFFIX))) {
+ while ((t = av_dict_iterate(src, t))) {
int ret = av_dict_set(dst, t->key, t->value, flags);
if (ret < 0)
return ret;
@@ -230,7 +250,7 @@ int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
int av_dict_get_string(const AVDictionary *m, char **buffer,
const char key_val_sep, const char pairs_sep)
{
- AVDictionaryEntry *t = NULL;
+ const AVDictionaryEntry *t = NULL;
AVBPrint bprint;
int cnt = 0;
char special_chars[] = {pairs_sep, key_val_sep, '\0'};
@@ -245,7 +265,7 @@ int av_dict_get_string(const AVDictionary *m, char **buffer,
}
av_bprint_init(&bprint, 64, AV_BPRINT_SIZE_UNLIMITED);
- while ((t = av_dict_get(m, "", t, AV_DICT_IGNORE_SUFFIX))) {
+ while ((t = av_dict_iterate(m, t))) {
if (cnt++)
av_bprint_append_data(&bprint, &pairs_sep, 1);
av_bprint_escape(&bprint, t->key, special_chars, AV_ESCAPE_MODE_BACKSLASH, 0);
diff --git a/media/ffvpx/libavutil/dict.h b/media/ffvpx/libavutil/dict.h
index 118f1f00ed..713c9e361a 100644
--- a/media/ffvpx/libavutil/dict.h
+++ b/media/ffvpx/libavutil/dict.h
@@ -32,8 +32,6 @@
#include <stdint.h>
-#include "version.h"
-
/**
* @addtogroup lavu_dict AVDictionary
* @ingroup lavu_data
@@ -41,13 +39,15 @@
* @brief Simple key:value store
*
* @{
- * Dictionaries are used for storing key:value pairs. To create
- * an AVDictionary, simply pass an address of a NULL pointer to
- * av_dict_set(). NULL can be used as an empty dictionary wherever
- * a pointer to an AVDictionary is required.
- * Use av_dict_get() to retrieve an entry or iterate over all
- * entries and finally av_dict_free() to free the dictionary
- * and all its contents.
+ * Dictionaries are used for storing key-value pairs.
+ *
+ * - To **create an AVDictionary**, simply pass an address of a NULL
+ * pointer to av_dict_set(). NULL can be used as an empty dictionary
+ * wherever a pointer to an AVDictionary is required.
+ * - To **insert an entry**, use av_dict_set().
+ * - Use av_dict_get() to **retrieve an entry**.
+ * - To **iterate over all entries**, use av_dict_iterate().
+ * - In order to **free the dictionary and all its contents**, use av_dict_free().
*
@code
AVDictionary *d = NULL; // "create" an empty dictionary
@@ -59,13 +59,18 @@
char *v = av_strdup("value"); // you can avoid copying them like this
av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
- while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) {
- <....> // iterate over all entries in d
+ while ((t = av_dict_iterate(d, t))) {
+ <....> // iterate over all entries in d
}
av_dict_free(&d);
@endcode
*/
+/**
+ * @name AVDictionary Flags
+ * Flags that influence how keys are matched and how entries are inserted into the dictionary.
+ * @{
+ */
#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */
#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key,
ignoring the suffix of the found key string. Only relevant in av_dict_get(). */
@@ -73,10 +78,13 @@
allocated with av_malloc() or another memory allocation function. */
#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been
allocated with av_malloc() or another memory allocation function. */
-#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries.
+#define AV_DICT_DONT_OVERWRITE 16 /**< Don't overwrite existing entries. */
#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no
- delimiter is added, the strings are simply concatenated. */
+ delimiter is added, the strings are simply concatenated. */
#define AV_DICT_MULTIKEY 64 /**< Allow to store several equal keys in the dictionary */
+/**
+ * @}
+ */
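A small sketch of the AV_DICT_APPEND behaviour described above (no delimiter is inserted between the old and new value):

    AVDictionary *d = NULL;
    av_dict_set(&d, "lang", "en", 0);
    av_dict_set(&d, "lang", "-US", AV_DICT_APPEND);  /* value is now "en-US" */
    av_dict_free(&d);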
typedef struct AVDictionaryEntry {
char *key;
@@ -91,19 +99,45 @@ typedef struct AVDictionary AVDictionary;
* The returned entry key or value must not be changed, or it will
* cause undefined behavior.
*
- * To iterate through all the dictionary entries, you can set the matching key
- * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag.
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param key Matching key
+ * @param flags A collection of AV_DICT_* flags controlling how the
+ * entry is retrieved
*
- * @param prev Set to the previous matching element to find the next.
- * If set to NULL the first matching element is returned.
- * @param key matching key
- * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved
- * @return found entry or NULL in case no matching entry was found in the dictionary
+ * @return Found entry or NULL in case no matching entry was found in the dictionary
*/
AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
const AVDictionaryEntry *prev, int flags);
/**
+ * Iterate over a dictionary
+ *
+ * Iterates through all entries in the dictionary.
+ *
+ * @warning The returned AVDictionaryEntry key/value must not be changed.
+ *
+ * @warning As av_dict_set() invalidates all previous entries returned
+ * by this function, it must not be called while iterating over the dict.
+ *
+ * Typical usage:
+ * @code
+ * const AVDictionaryEntry *e = NULL;
+ * while ((e = av_dict_iterate(m, e))) {
+ * // ...
+ * }
+ * @endcode
+ *
+ * @param m The dictionary to iterate over
+ * @param prev Pointer to the previous AVDictionaryEntry, NULL initially
+ *
+ * @retval AVDictionaryEntry* The next element in the dictionary
+ * @retval NULL No more elements in the dictionary
+ */
+const AVDictionaryEntry *av_dict_iterate(const AVDictionary *m,
+ const AVDictionaryEntry *prev);
+
+/**
* Get number of entries in dictionary.
*
* @param m dictionary
@@ -117,23 +151,24 @@ int av_dict_count(const AVDictionary *m);
* Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
* these arguments will be freed on error.
*
- * Warning: Adding a new entry to a dictionary invalidates all existing entries
- * previously returned with av_dict_get.
+ * @warning Adding a new entry to a dictionary invalidates all existing entries
+ * previously returned with av_dict_get() or av_dict_iterate().
*
- * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL
- * a dictionary struct is allocated and put in *pm.
- * @param key entry key to add to *pm (will either be av_strduped or added as a new key depending on flags)
- * @param value entry value to add to *pm (will be av_strduped or added as a new key depending on flags).
- * Passing a NULL value will cause an existing entry to be deleted.
- * @return >= 0 on success otherwise an error code <0
+ * @param pm Pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key Entry key to add to *pm (will either be av_strduped or added as a new key depending on flags)
+ * @param value Entry value to add to *pm (will be av_strduped or taken over as-is, depending on flags).
+ * Passing a NULL value will cause an existing entry to be deleted.
+ *
+ * @return >= 0 on success otherwise an error code <0
*/
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
/**
- * Convenience wrapper for av_dict_set that converts the value to a string
+ * Convenience wrapper for av_dict_set() that converts the value to a string
* and stores it.
*
- * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
+ * Note: If ::AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
*/
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags);
@@ -143,14 +178,15 @@ int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags
* In case of failure, all the successfully set entries are stored in
* *pm. You may need to manually free the created dictionary.
*
- * @param key_val_sep a 0-terminated list of characters used to separate
+ * @param key_val_sep A 0-terminated list of characters used to separate
* key from value
- * @param pairs_sep a 0-terminated list of characters used to separate
+ * @param pairs_sep A 0-terminated list of characters used to separate
* two pairs from each other
- * @param flags flags to use when adding to dictionary.
- * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL
+ * @param flags Flags to use when adding to the dictionary.
+ * ::AV_DICT_DONT_STRDUP_KEY and ::AV_DICT_DONT_STRDUP_VAL
* are ignored since the key/value tokens will always
* be duplicated.
+ *
* @return 0 on success, negative AVERROR code on failure
*/
int av_dict_parse_string(AVDictionary **pm, const char *str,
@@ -159,11 +195,14 @@ int av_dict_parse_string(AVDictionary **pm, const char *str,
/**
* Copy entries from one AVDictionary struct into another.
- * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL,
- * this function will allocate a struct for you and put it in *dst
- * @param src pointer to source AVDictionary struct
- * @param flags flags to use when setting entries in *dst
- * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
+ *
+ * @note Metadata is read using the ::AV_DICT_IGNORE_SUFFIX flag
+ *
+ * @param dst Pointer to a pointer to an AVDictionary struct to copy into. If *dst is NULL,
+ * this function will allocate a struct for you and put it in *dst
+ * @param src Pointer to the source AVDictionary struct to copy items from.
+ * @param flags Flags to use when setting entries in *dst
+ *
* @return 0 on success, negative AVERROR code on failure. If dst was allocated
* by this function, callers should free the associated memory.
*/
@@ -182,13 +221,15 @@ void av_dict_free(AVDictionary **m);
* Such string may be passed back to av_dict_parse_string().
* @note String is escaped with backslashes ('\').
*
- * @param[in] m dictionary
+ * @warning Neither separator can be '\\' or '\0', and the two separators cannot be the same character.
+ *
+ * @param[in] m The dictionary
* @param[out] buffer Pointer to buffer that will be allocated with a string containing the entries.
* Buffer must be freed by the caller when it is no longer needed.
- * @param[in] key_val_sep character used to separate key from value
- * @param[in] pairs_sep character used to separate two pairs from each other
+ * @param[in] key_val_sep Character used to separate key from value
+ * @param[in] pairs_sep Character used to separate two pairs from each other
+ *
* @return >= 0 on success, negative on error
- * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same.
*/
int av_dict_get_string(const AVDictionary *m, char **buffer,
const char key_val_sep, const char pairs_sep);
diff --git a/media/ffvpx/libavutil/dict_internal.h b/media/ffvpx/libavutil/dict_internal.h
new file mode 100644
index 0000000000..6d5b0dc2b0
--- /dev/null
+++ b/media/ffvpx/libavutil/dict_internal.h
@@ -0,0 +1,37 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DICT_INTERNAL_H
+#define AVUTIL_DICT_INTERNAL_H
+
+#include <stdint.h>
+
+#include "dict.h"
+
+/**
+ * Set a dictionary value to an ISO-8601 compliant timestamp string.
+ *
+ * @param dict pointer to a pointer to a dictionary struct. If *dict is NULL
+ * a dictionary struct is allocated and put in *dict.
+ * @param key metadata key
+ * @param timestamp unix timestamp in microseconds
+ * @return <0 on error
+ */
+int avpriv_dict_set_timestamp(AVDictionary **dict, const char *key, int64_t timestamp);
+
+#endif /* AVUTIL_DICT_INTERNAL_H */
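An illustrative caller of this internal helper (the "creation_time" key and the av_gettime() source are only examples):

    AVDictionary *meta = NULL;
    int ret = avpriv_dict_set_timestamp(&meta, "creation_time", av_gettime());
    /* on success, meta now holds an ISO-8601 string such as "2024-01-01T00:00:00.000000Z" */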
diff --git a/media/ffvpx/libavutil/error.c b/media/ffvpx/libavutil/error.c
index b96304837b..938a8bc000 100644
--- a/media/ffvpx/libavutil/error.c
+++ b/media/ffvpx/libavutil/error.c
@@ -18,9 +18,12 @@
#undef _GNU_SOURCE
#define _XOPEN_SOURCE 600 /* XSI-compliant version of strerror_r */
-#include "avutil.h"
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
#include "avstring.h"
-#include "common.h"
+#include "error.h"
+#include "macros.h"
struct error_entry {
int num;
diff --git a/media/ffvpx/libavutil/error.h b/media/ffvpx/libavutil/error.h
index 71df4da353..0d3269aa6d 100644
--- a/media/ffvpx/libavutil/error.h
+++ b/media/ffvpx/libavutil/error.h
@@ -27,6 +27,8 @@
#include <errno.h>
#include <stddef.h>
+#include "macros.h"
+
/**
* @addtogroup lavu_error
*
diff --git a/media/ffvpx/libavutil/eval.c b/media/ffvpx/libavutil/eval.c
index 046a45090a..17381256fb 100644
--- a/media/ffvpx/libavutil/eval.c
+++ b/media/ffvpx/libavutil/eval.c
@@ -163,10 +163,11 @@ struct AVExpr {
e_last, e_st, e_while, e_taylor, e_root, e_floor, e_ceil, e_trunc, e_round,
e_sqrt, e_not, e_random, e_hypot, e_gcd,
e_if, e_ifnot, e_print, e_bitand, e_bitor, e_between, e_clip, e_atan2, e_lerp,
+ e_sgn,
} type;
double value; // is sign in other types
+ int const_index;
union {
- int const_index;
double (*func0)(double);
double (*func1)(void *, double);
double (*func2)(void *, double, double);
@@ -184,7 +185,7 @@ static double eval_expr(Parser *p, AVExpr *e)
{
switch (e->type) {
case e_value: return e->value;
- case e_const: return e->value * p->const_values[e->a.const_index];
+ case e_const: return e->value * p->const_values[e->const_index];
case e_func0: return e->value * e->a.func0(eval_expr(p, e->param[0]));
case e_func1: return e->value * e->a.func1(p->opaque, eval_expr(p, e->param[0]));
case e_func2: return e->value * e->a.func2(p->opaque, eval_expr(p, e->param[0]), eval_expr(p, e->param[1]));
@@ -197,6 +198,7 @@ static double eval_expr(Parser *p, AVExpr *e)
case e_ceil : return e->value * ceil (eval_expr(p, e->param[0]));
case e_trunc: return e->value * trunc(eval_expr(p, e->param[0]));
case e_round: return e->value * round(eval_expr(p, e->param[0]));
+ case e_sgn: return e->value * FFDIFFSIGN(eval_expr(p, e->param[0]), 0);
case e_sqrt: return e->value * sqrt (eval_expr(p, e->param[0]));
case e_not: return e->value * (eval_expr(p, e->param[0]) == 0);
case e_if: return e->value * (eval_expr(p, e->param[0]) ? eval_expr(p, e->param[1]) :
@@ -315,7 +317,7 @@ static double eval_expr(Parser *p, AVExpr *e)
case e_lte: return e->value * (d <= d2 ? 1.0 : 0.0);
case e_pow: return e->value * pow(d, d2);
case e_mul: return e->value * (d * d2);
- case e_div: return e->value * ((!CONFIG_FTRAPV || d2 ) ? (d / d2) : d * INFINITY);
+ case e_div: return e->value * (d2 ? (d / d2) : d * INFINITY);
case e_add: return e->value * (d + d2);
case e_last:return e->value * d2;
case e_st : return e->value * (p->var[av_clip(d, 0, VARS-1)]= d2);
@@ -365,7 +367,7 @@ static int parse_primary(AVExpr **e, Parser *p)
if (strmatch(p->s, p->const_names[i])) {
p->s+= strlen(p->const_names[i]);
d->type = e_const;
- d->a.const_index = i;
+ d->const_index = i;
*e = d;
return 0;
}
@@ -470,11 +472,13 @@ static int parse_primary(AVExpr **e, Parser *p)
else if (strmatch(next, "clip" )) d->type = e_clip;
else if (strmatch(next, "atan2" )) d->type = e_atan2;
else if (strmatch(next, "lerp" )) d->type = e_lerp;
+ else if (strmatch(next, "sgn" )) d->type = e_sgn;
else {
for (i=0; p->func1_names && p->func1_names[i]; i++) {
if (strmatch(next, p->func1_names[i])) {
d->a.func1 = p->funcs1[i];
d->type = e_func1;
+ d->const_index = i;
*e = d;
return 0;
}
@@ -484,6 +488,7 @@ static int parse_primary(AVExpr **e, Parser *p)
if (strmatch(next, p->func2_names[i])) {
d->a.func2 = p->funcs2[i];
d->type = e_func2;
+ d->const_index = i;
*e = d;
return 0;
}
@@ -657,6 +662,7 @@ static int verify_expr(AVExpr *e)
case e_sqrt:
case e_not:
case e_random:
+ case e_sgn:
return verify_expr(e->param[0]) && !e->param[1];
case e_print:
return verify_expr(e->param[0])
@@ -731,6 +737,32 @@ end:
return ret;
}
+static int expr_count(AVExpr *e, unsigned *counter, int size, int type)
+{
+ int i;
+
+ if (!e || !counter || !size)
+ return AVERROR(EINVAL);
+
+ for (i = 0; e->type != type && i < 3 && e->param[i]; i++)
+ expr_count(e->param[i], counter, size, type);
+
+ if (e->type == type && e->const_index < size)
+ counter[e->const_index]++;
+
+ return 0;
+}
+
+int av_expr_count_vars(AVExpr *e, unsigned *counter, int size)
+{
+ return expr_count(e, counter, size, e_const);
+}
+
+int av_expr_count_func(AVExpr *e, unsigned *counter, int size, int arg)
+{
+ return expr_count(e, counter, size, ((int[]){e_const, e_func1, e_func2})[arg]);
+}
+
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
{
Parser p = { 0 };
diff --git a/media/ffvpx/libavutil/eval.h b/media/ffvpx/libavutil/eval.h
index dacd22b96e..ee8cffb057 100644
--- a/media/ffvpx/libavutil/eval.h
+++ b/media/ffvpx/libavutil/eval.h
@@ -26,8 +26,6 @@
#ifndef AVUTIL_EVAL_H
#define AVUTIL_EVAL_H
-#include "avutil.h"
-
typedef struct AVExpr AVExpr;
/**
@@ -44,6 +42,7 @@ typedef struct AVExpr AVExpr;
* @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
* @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
* @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @param log_offset log level offset, can be used to silence error messages
* @param log_ctx parent logging context
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code otherwise
@@ -67,6 +66,7 @@ int av_expr_parse_and_eval(double *res, const char *s,
* @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
* @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
* @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param log_offset log level offset, can be used to silence error messages
* @param log_ctx parent logging context
* @return >= 0 in case of success, a negative value corresponding to an
* AVERROR code otherwise
@@ -80,6 +80,7 @@ int av_expr_parse(AVExpr **expr, const char *s,
/**
* Evaluate a previously parsed expression.
*
+ * @param e the AVExpr to evaluate
* @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names
* @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
* @return the value of the expression
@@ -87,6 +88,32 @@ int av_expr_parse(AVExpr **expr, const char *s,
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque);
/**
+ * Track the presence of variables and their number of occurrences in a parsed expression
+ *
+ * @param e the AVExpr to track variables in
+ * @param counter a zero-initialized array where the count of each variable will be stored
+ * @param size size of array
+ * @return 0 on success, a negative value indicates that no expression or array was passed
+ * or size was zero
+ */
+int av_expr_count_vars(AVExpr *e, unsigned *counter, int size);
+
+/**
+ * Track the presence of user provided functions and their number of occurrences
+ * in a parsed expression.
+ *
+ * @param e the AVExpr to track user provided functions in
+ * @param counter a zero-initialized array where the count of each function will be stored
+ * if you passed 5 functions with 2 arguments to av_expr_parse()
+ * then for arg=2 this will use upto 5 entries.
+ * @param size size of array
+ * @param arg number of arguments the counted functions have
+ * @return 0 on success, a negative value indicates that no expression or array was passed
+ * or size was zero
+ */
+int av_expr_count_func(AVExpr *e, unsigned *counter, int size, int arg);
+
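A sketch of how the new counting helpers can be used after parsing (the names and the expression are illustrative):

    static const char *const names[] = { "x", "y", NULL };
    AVExpr *e = NULL;
    unsigned counts[2] = { 0 };

    if (av_expr_parse(&e, "x*x + y", names, NULL, NULL, NULL, NULL, 0, NULL) >= 0) {
        av_expr_count_vars(e, counts, 2);  /* counts[0] == 2 for "x", counts[1] == 1 for "y" */
        av_expr_free(e);
    }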
+/**
* Free a parsed expression previously created with av_expr_parse().
*/
void av_expr_free(AVExpr *e);
diff --git a/media/ffvpx/libavutil/fftime.h b/media/ffvpx/libavutil/fftime.h
index dc169b064a..8f3b320e38 100644
--- a/media/ffvpx/libavutil/fftime.h
+++ b/media/ffvpx/libavutil/fftime.h
@@ -22,6 +22,7 @@
#define AVUTIL_TIME_H
#include <stdint.h>
+#include <time.h>
/**
* Get the current time in microseconds.
diff --git a/media/ffvpx/libavutil/fifo.c b/media/ffvpx/libavutil/fifo.c
index 1060aedf13..51a5af6f39 100644
--- a/media/ffvpx/libavutil/fifo.c
+++ b/media/ffvpx/libavutil/fifo.c
@@ -20,13 +20,291 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <stdint.h>
+#include <string.h>
+
#include "avassert.h"
-#include "common.h"
+#include "error.h"
#include "fifo.h"
+#include "macros.h"
+#include "mem.h"
+
+// by default the FIFO can be auto-grown to 1MB
+#define AUTO_GROW_DEFAULT_BYTES (1024 * 1024)
+
+struct AVFifo {
+ uint8_t *buffer;
+
+ size_t elem_size, nb_elems;
+ size_t offset_r, offset_w;
+ // distinguishes the ambiguous situation offset_r == offset_w
+ int is_empty;
+
+ unsigned int flags;
+ size_t auto_grow_limit;
+};
+
+AVFifo *av_fifo_alloc2(size_t nb_elems, size_t elem_size,
+ unsigned int flags)
+{
+ AVFifo *f;
+ void *buffer = NULL;
+
+ if (!elem_size)
+ return NULL;
+
+ if (nb_elems) {
+ buffer = av_realloc_array(NULL, nb_elems, elem_size);
+ if (!buffer)
+ return NULL;
+ }
+ f = av_mallocz(sizeof(*f));
+ if (!f) {
+ av_free(buffer);
+ return NULL;
+ }
+ f->buffer = buffer;
+ f->nb_elems = nb_elems;
+ f->elem_size = elem_size;
+ f->is_empty = 1;
+
+ f->flags = flags;
+ f->auto_grow_limit = FFMAX(AUTO_GROW_DEFAULT_BYTES / elem_size, 1);
+
+ return f;
+}
+
+void av_fifo_auto_grow_limit(AVFifo *f, size_t max_elems)
+{
+ f->auto_grow_limit = max_elems;
+}
+
+size_t av_fifo_elem_size(const AVFifo *f)
+{
+ return f->elem_size;
+}
+
+size_t av_fifo_can_read(const AVFifo *f)
+{
+ if (f->offset_w <= f->offset_r && !f->is_empty)
+ return f->nb_elems - f->offset_r + f->offset_w;
+ return f->offset_w - f->offset_r;
+}
+
+size_t av_fifo_can_write(const AVFifo *f)
+{
+ return f->nb_elems - av_fifo_can_read(f);
+}
+
+int av_fifo_grow2(AVFifo *f, size_t inc)
+{
+ uint8_t *tmp;
+
+ if (inc > SIZE_MAX - f->nb_elems)
+ return AVERROR(EINVAL);
+
+ tmp = av_realloc_array(f->buffer, f->nb_elems + inc, f->elem_size);
+ if (!tmp)
+ return AVERROR(ENOMEM);
+ f->buffer = tmp;
+
+ // move the data from the beginning of the ring buffer
+ // to the newly allocated space
+ if (f->offset_w <= f->offset_r && !f->is_empty) {
+ const size_t copy = FFMIN(inc, f->offset_w);
+ memcpy(tmp + f->nb_elems * f->elem_size, tmp, copy * f->elem_size);
+ if (copy < f->offset_w) {
+ memmove(tmp, tmp + copy * f->elem_size,
+ (f->offset_w - copy) * f->elem_size);
+ f->offset_w -= copy;
+ } else
+ f->offset_w = copy == inc ? 0 : f->nb_elems + copy;
+ }
+
+ f->nb_elems += inc;
+
+ return 0;
+}
+
+static int fifo_check_space(AVFifo *f, size_t to_write)
+{
+ const size_t can_write = av_fifo_can_write(f);
+ const size_t need_grow = to_write > can_write ? to_write - can_write : 0;
+ size_t can_grow;
+
+ if (!need_grow)
+ return 0;
+
+ can_grow = f->auto_grow_limit > f->nb_elems ?
+ f->auto_grow_limit - f->nb_elems : 0;
+ if ((f->flags & AV_FIFO_FLAG_AUTO_GROW) && need_grow <= can_grow) {
+ // allocate a bit more than necessary, if we can
+ const size_t inc = (need_grow < can_grow / 2 ) ? need_grow * 2 : can_grow;
+ return av_fifo_grow2(f, inc);
+ }
+
+ return AVERROR(ENOSPC);
+}
+
+static int fifo_write_common(AVFifo *f, const uint8_t *buf, size_t *nb_elems,
+ AVFifoCB read_cb, void *opaque)
+{
+ size_t to_write = *nb_elems;
+ size_t offset_w;
+ int ret = 0;
+
+ ret = fifo_check_space(f, to_write);
+ if (ret < 0)
+ return ret;
+
+ offset_w = f->offset_w;
+
+ while (to_write > 0) {
+ size_t len = FFMIN(f->nb_elems - offset_w, to_write);
+ uint8_t *wptr = f->buffer + offset_w * f->elem_size;
+
+ if (read_cb) {
+ ret = read_cb(opaque, wptr, &len);
+ if (ret < 0 || len == 0)
+ break;
+ } else {
+ memcpy(wptr, buf, len * f->elem_size);
+ buf += len * f->elem_size;
+ }
+ offset_w += len;
+ if (offset_w >= f->nb_elems)
+ offset_w = 0;
+ to_write -= len;
+ }
+ f->offset_w = offset_w;
+
+ if (*nb_elems != to_write)
+ f->is_empty = 0;
+ *nb_elems -= to_write;
+
+ return ret;
+}
+
+int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
+{
+ return fifo_write_common(f, buf, &nb_elems, NULL, NULL);
+}
+
+int av_fifo_write_from_cb(AVFifo *f, AVFifoCB read_cb,
+ void *opaque, size_t *nb_elems)
+{
+ return fifo_write_common(f, NULL, nb_elems, read_cb, opaque);
+}
+
+static int fifo_peek_common(const AVFifo *f, uint8_t *buf, size_t *nb_elems,
+ size_t offset, AVFifoCB write_cb, void *opaque)
+{
+ size_t to_read = *nb_elems;
+ size_t offset_r = f->offset_r;
+ size_t can_read = av_fifo_can_read(f);
+ int ret = 0;
+
+ if (offset > can_read || to_read > can_read - offset) {
+ *nb_elems = 0;
+ return AVERROR(EINVAL);
+ }
+
+ if (offset_r >= f->nb_elems - offset)
+ offset_r -= f->nb_elems - offset;
+ else
+ offset_r += offset;
+
+ while (to_read > 0) {
+ size_t len = FFMIN(f->nb_elems - offset_r, to_read);
+ uint8_t *rptr = f->buffer + offset_r * f->elem_size;
+
+ if (write_cb) {
+ ret = write_cb(opaque, rptr, &len);
+ if (ret < 0 || len == 0)
+ break;
+ } else {
+ memcpy(buf, rptr, len * f->elem_size);
+ buf += len * f->elem_size;
+ }
+ offset_r += len;
+ if (offset_r >= f->nb_elems)
+ offset_r = 0;
+ to_read -= len;
+ }
+
+ *nb_elems -= to_read;
+
+ return ret;
+}
+
+int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
+{
+ int ret = fifo_peek_common(f, buf, &nb_elems, 0, NULL, NULL);
+ av_fifo_drain2(f, nb_elems);
+ return ret;
+}
+
+int av_fifo_read_to_cb(AVFifo *f, AVFifoCB write_cb,
+ void *opaque, size_t *nb_elems)
+{
+ int ret = fifo_peek_common(f, NULL, nb_elems, 0, write_cb, opaque);
+ av_fifo_drain2(f, *nb_elems);
+ return ret;
+}
+
+int av_fifo_peek(AVFifo *f, void *buf, size_t nb_elems, size_t offset)
+{
+ return fifo_peek_common(f, buf, &nb_elems, offset, NULL, NULL);
+}
+
+int av_fifo_peek_to_cb(AVFifo *f, AVFifoCB write_cb, void *opaque,
+ size_t *nb_elems, size_t offset)
+{
+ return fifo_peek_common(f, NULL, nb_elems, offset, write_cb, opaque);
+}
+
+void av_fifo_drain2(AVFifo *f, size_t size)
+{
+ const size_t cur_size = av_fifo_can_read(f);
+
+ av_assert0(cur_size >= size);
+ if (cur_size == size)
+ f->is_empty = 1;
+
+ if (f->offset_r >= f->nb_elems - size)
+ f->offset_r -= f->nb_elems - size;
+ else
+ f->offset_r += size;
+}
+
+void av_fifo_reset2(AVFifo *f)
+{
+ f->offset_r = f->offset_w = 0;
+ f->is_empty = 1;
+}
+
+void av_fifo_freep2(AVFifo **f)
+{
+ if (*f) {
+ av_freep(&(*f)->buffer);
+ av_freep(f);
+ }
+}
+
+
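A minimal sketch of the new AVFifo API implemented above (error handling trimmed; sizes are in elements, not bytes):

    AVFifo *f = av_fifo_alloc2(8, sizeof(int), AV_FIFO_FLAG_AUTO_GROW);
    if (f) {
        int in = 42, out = 0;
        av_fifo_write(f, &in, 1);
        av_fifo_read(f, &out, 1);   /* out == 42 */
        av_fifo_freep2(&f);
    }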
+#if FF_API_FIFO_OLD_API
+FF_DISABLE_DEPRECATION_WARNINGS
+#define OLD_FIFO_SIZE_MAX (size_t)FFMIN3(INT_MAX, UINT32_MAX, SIZE_MAX)
-static AVFifoBuffer *fifo_alloc_common(void *buffer, size_t size)
+AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size)
{
AVFifoBuffer *f;
+ void *buffer;
+
+ if (nmemb > OLD_FIFO_SIZE_MAX / size)
+ return NULL;
+
+ buffer = av_realloc_array(NULL, nmemb, size);
if (!buffer)
return NULL;
f = av_mallocz(sizeof(AVFifoBuffer));
@@ -35,21 +313,14 @@ static AVFifoBuffer *fifo_alloc_common(void *buffer, size_t size)
return NULL;
}
f->buffer = buffer;
- f->end = f->buffer + size;
+ f->end = f->buffer + nmemb * size;
av_fifo_reset(f);
return f;
}
AVFifoBuffer *av_fifo_alloc(unsigned int size)
{
- void *buffer = av_malloc(size);
- return fifo_alloc_common(buffer, size);
-}
-
-AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size)
-{
- void *buffer = av_malloc_array(nmemb, size);
- return fifo_alloc_common(buffer, nmemb * size);
+ return av_fifo_alloc_array(size, 1);
}
void av_fifo_free(AVFifoBuffer *f)
@@ -88,18 +359,35 @@ int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
{
unsigned int old_size = f->end - f->buffer;
+ if (new_size > OLD_FIFO_SIZE_MAX)
+ return AVERROR(EINVAL);
+
if (old_size < new_size) {
- int len = av_fifo_size(f);
- AVFifoBuffer *f2 = av_fifo_alloc(new_size);
+ size_t offset_r = f->rptr - f->buffer;
+ size_t offset_w = f->wptr - f->buffer;
+ uint8_t *tmp;
- if (!f2)
+ tmp = av_realloc(f->buffer, new_size);
+ if (!tmp)
return AVERROR(ENOMEM);
- av_fifo_generic_read(f, f2->buffer, len, NULL);
- f2->wptr += len;
- f2->wndx += len;
- av_free(f->buffer);
- *f = *f2;
- av_free(f2);
+
+ // move the data from the beginning of the ring buffer
+ // to the newly allocated space
+ // the second condition distinguishes full vs empty fifo
+ if (offset_w <= offset_r && av_fifo_size(f)) {
+ const size_t copy = FFMIN(new_size - old_size, offset_w);
+ memcpy(tmp + old_size, tmp, copy);
+ if (copy < offset_w) {
+ memmove(tmp, tmp + copy , offset_w - copy);
+ offset_w -= copy;
+ } else
+ offset_w = old_size + copy;
+ }
+
+ f->buffer = tmp;
+ f->end = f->buffer + new_size;
+ f->rptr = f->buffer + offset_r;
+ f->wptr = f->buffer + offset_w;
}
return 0;
}
@@ -126,6 +414,9 @@ int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size,
uint32_t wndx= f->wndx;
uint8_t *wptr= f->wptr;
+ if (size > av_fifo_space(f))
+ return AVERROR(ENOSPC);
+
do {
int len = FFMIN(f->end - wptr, size);
if (func) {
@@ -136,7 +427,6 @@ int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size,
memcpy(wptr, src, len);
src = (uint8_t *)src + len;
}
-// Write memory barrier needed for SMP here in theory
wptr += len;
if (wptr >= f->end)
wptr = f->buffer;
@@ -152,13 +442,8 @@ int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_siz
{
uint8_t *rptr = f->rptr;
- av_assert2(offset >= 0);
-
- /*
- * *ndx are indexes modulo 2^32, they are intended to overflow,
- * to handle *ndx greater than 4gb.
- */
- av_assert2(buf_size + (unsigned)offset <= f->wndx - f->rndx);
+ if (offset < 0 || buf_size > av_fifo_size(f) - offset)
+ return AVERROR(EINVAL);
if (offset >= f->end - rptr)
rptr += offset - (f->end - f->buffer);
@@ -189,31 +474,15 @@ int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_siz
int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size,
void (*func)(void *, void *, int))
{
-// Read memory barrier needed for SMP here in theory
- uint8_t *rptr = f->rptr;
-
- do {
- int len = FFMIN(f->end - rptr, buf_size);
- if (func)
- func(dest, rptr, len);
- else {
- memcpy(dest, rptr, len);
- dest = (uint8_t *)dest + len;
- }
-// memory barrier needed for SMP here in theory
- rptr += len;
- if (rptr >= f->end)
- rptr -= f->end - f->buffer;
- buf_size -= len;
- } while (buf_size > 0);
-
- return 0;
+ return av_fifo_generic_peek_at(f, dest, 0, buf_size, func);
}
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size,
void (*func)(void *, void *, int))
{
-// Read memory barrier needed for SMP here in theory
+ if (buf_size > av_fifo_size(f))
+ return AVERROR(EINVAL);
+
do {
int len = FFMIN(f->end - f->rptr, buf_size);
if (func)
@@ -222,7 +491,6 @@ int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size,
memcpy(dest, f->rptr, len);
dest = (uint8_t *)dest + len;
}
-// memory barrier needed for SMP here in theory
av_fifo_drain(f, len);
buf_size -= len;
} while (buf_size > 0);
@@ -238,3 +506,5 @@ void av_fifo_drain(AVFifoBuffer *f, int size)
f->rptr -= f->end - f->buffer;
f->rndx += size;
}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
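
The callback-based entry points above (av_fifo_peek_to_cb() and its read/write
counterparts) all funnel through a user-supplied AVFifoCB. A minimal sketch of
such a callback, assuming an element size of 1 byte and a caller-managed output
buffer (the byte_sink struct and its fields are illustrative, not part of this
patch):

#include <stdint.h>
#include <string.h>
#include "libavutil/fifo.h"
#include "libavutil/macros.h"

struct byte_sink {
    uint8_t *dst;
    size_t   capacity; /* in elements */
    size_t   filled;
};

/* Invoked from av_fifo_read_to_cb()/av_fifo_peek_to_cb(): buf points at FIFO
 * data, *nb_elems is the maximum available; report back how much was taken. */
static int sink_cb(void *opaque, void *buf, size_t *nb_elems)
{
    struct byte_sink *s = opaque;
    size_t n = FFMIN(*nb_elems, s->capacity - s->filled);

    memcpy(s->dst + s->filled, buf, n); /* elem_size == 1 assumed */
    s->filled += n;
    *nb_elems  = n;
    return 0;
}
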
diff --git a/media/ffvpx/libavutil/fifo.h b/media/ffvpx/libavutil/fifo.h
index dc7bc6f0dd..70f9376d97 100644
--- a/media/ffvpx/libavutil/fifo.h
+++ b/media/ffvpx/libavutil/fifo.h
@@ -18,16 +18,229 @@
/**
* @file
- * a very simple circular buffer FIFO implementation
+ * @ingroup lavu_fifo
+ * A generic FIFO API
*/
#ifndef AVUTIL_FIFO_H
#define AVUTIL_FIFO_H
+#include <stddef.h>
#include <stdint.h>
-#include "avutil.h"
+
#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_fifo AVFifo
+ * @ingroup lavu_data
+ *
+ * @{
+ * A generic FIFO API
+ */
+
+typedef struct AVFifo AVFifo;
+
+/**
+ * Callback for writing or reading from a FIFO, passed to (and invoked from) the
+ * av_fifo_*_cb() functions. It may be invoked multiple times from a single
+ * av_fifo_*_cb() call and may process less data than the maximum size indicated
+ * by nb_elems.
+ *
+ * @param opaque the opaque pointer provided to the av_fifo_*_cb() function
+ * @param buf the buffer for reading or writing the data, depending on which
+ * av_fifo_*_cb function is called
+ * @param nb_elems On entry contains the maximum number of elements that can be
+ * read from / written into buf. On success, the callback should
+ * update it to contain the number of elements actually read or written.
+ *
+ * @return 0 on success, a negative error code on failure (will be returned from
+ * the invoking av_fifo_*_cb() function)
+ */
+typedef int AVFifoCB(void *opaque, void *buf, size_t *nb_elems);
+
+/**
+ * Automatically resize the FIFO on writes, so that the data fits. This
+ * automatic resizing happens up to a limit that can be modified with
+ * av_fifo_auto_grow_limit().
+ */
+#define AV_FIFO_FLAG_AUTO_GROW (1 << 0)
+
+/**
+ * Allocate and initialize an AVFifo with a given element size.
+ *
+ * @param elems initial number of elements that can be stored in the FIFO
+ * @param elem_size Size in bytes of a single element. Further operations on
+ * the returned FIFO will implicitly use this element size.
+ * @param flags a combination of AV_FIFO_FLAG_*
+ *
+ * @return newly-allocated AVFifo on success, NULL on failure
+ */
+AVFifo *av_fifo_alloc2(size_t elems, size_t elem_size,
+ unsigned int flags);
+
+/**
+ * @return Element size for FIFO operations. This element size is set at
+ * FIFO allocation and remains constant during its lifetime
+ */
+size_t av_fifo_elem_size(const AVFifo *f);
+
+/**
+ * Set the maximum size (in elements) to which the FIFO can be resized
+ * automatically. Has no effect unless AV_FIFO_FLAG_AUTO_GROW is used.
+ */
+void av_fifo_auto_grow_limit(AVFifo *f, size_t max_elems);
+
+/**
+ * @return number of elements available for reading from the given FIFO.
+ */
+size_t av_fifo_can_read(const AVFifo *f);
+
+/**
+ * @return Number of elements that can be written into the given FIFO without
+ * growing it.
+ *
+ * In other words, this number of elements or less is guaranteed to fit
+ * into the FIFO. More data may be written when the
+ * AV_FIFO_FLAG_AUTO_GROW flag was specified at FIFO creation, but this
+ * may involve memory allocation, which can fail.
+ */
+size_t av_fifo_can_write(const AVFifo *f);
+
+/**
+ * Enlarge an AVFifo.
+ *
+ * On success, the FIFO will be large enough to hold exactly
+ * inc + av_fifo_can_read() + av_fifo_can_write()
+ * elements. In case of failure, the old FIFO is kept unchanged.
+ *
+ * @param f AVFifo to resize
+ * @param inc number of elements to allocate for, in addition to the current
+ * allocated size
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_grow2(AVFifo *f, size_t inc);
+
+/**
+ * Write data into a FIFO.
+ *
+ * In case nb_elems > av_fifo_can_write(f) and the AV_FIFO_FLAG_AUTO_GROW flag
+ * was not specified at FIFO creation, nothing is written and an error
+ * is returned.
+ *
+ * This function is guaranteed to succeed if nb_elems <= av_fifo_can_write(f).
+ *
+ * @param f the FIFO buffer
+ * @param buf Data to be written. nb_elems * av_fifo_elem_size(f) bytes will be
+ * read from buf on success.
+ * @param nb_elems number of elements to write into FIFO
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems);
+
+/**
+ * Write data from a user-provided callback into a FIFO.
+ *
+ * @param f the FIFO buffer
+ * @param read_cb Callback supplying the data to the FIFO. May be called
+ * multiple times.
+ * @param opaque opaque user data to be provided to read_cb
+ * @param nb_elems Should point to the maximum number of elements that can be
+ * written. Will be updated to contain the number of elements
+ * actually written.
+ *
+ * @return non-negative number on success, a negative error code on failure
+ */
+int av_fifo_write_from_cb(AVFifo *f, AVFifoCB read_cb,
+ void *opaque, size_t *nb_elems);
+
+/**
+ * Read data from a FIFO.
+ *
+ * In case nb_elems > av_fifo_can_read(f), nothing is read and an error
+ * is returned.
+ *
+ * @param f the FIFO buffer
+ * @param buf Buffer to store the data. nb_elems * av_fifo_elem_size(f) bytes
+ * will be written into buf on success.
+ * @param nb_elems number of elements to read from FIFO
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems);
+
+/**
+ * Feed data from a FIFO into a user-provided callback.
+ *
+ * @param f the FIFO buffer
+ * @param write_cb Callback the data will be supplied to. May be called
+ * multiple times.
+ * @param opaque opaque user data to be provided to write_cb
+ * @param nb_elems Should point to the maximum number of elements that can be
+ * read. Will be updated to contain the total number of elements
+ * actually sent to the callback.
+ *
+ * @return non-negative number on success, a negative error code on failure
+ */
+int av_fifo_read_to_cb(AVFifo *f, AVFifoCB write_cb,
+ void *opaque, size_t *nb_elems);
+
+/**
+ * Read data from a FIFO without modifying FIFO state.
+ *
+ * Returns an error if an attempt is made to peek at nonexistent elements
+ * (i.e. if offset + nb_elems is larger than av_fifo_can_read(f)).
+ *
+ * @param f the FIFO buffer
+ * @param buf Buffer to store the data. nb_elems * av_fifo_elem_size(f) bytes
+ * will be written into buf.
+ * @param nb_elems number of elements to read from FIFO
+ * @param offset number of initial elements to skip.
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_peek(AVFifo *f, void *buf, size_t nb_elems, size_t offset);
+
+/**
+ * Feed data from a FIFO into a user-provided callback.
+ *
+ * @param f the FIFO buffer
+ * @param write_cb Callback the data will be supplied to. May be called
+ * multiple times.
+ * @param opaque opaque user data to be provided to write_cb
+ * @param nb_elems Should point to the maximum number of elements that can be
+ * read. Will be updated to contain the total number of elements
+ * actually sent to the callback.
+ * @param offset number of initial elements to skip; offset + *nb_elems must not
+ * be larger than av_fifo_can_read(f).
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ */
+int av_fifo_peek_to_cb(AVFifo *f, AVFifoCB write_cb, void *opaque,
+ size_t *nb_elems, size_t offset);
+/**
+ * Discard the specified amount of data from an AVFifo.
+ * @param size number of elements to discard, MUST NOT be larger than
+ * av_fifo_can_read(f)
+ */
+void av_fifo_drain2(AVFifo *f, size_t size);
+
+/**
+ * Empty the AVFifo.
+ * @param f AVFifo to reset
+ */
+void av_fifo_reset2(AVFifo *f);
+
+/**
+ * Free an AVFifo and reset pointer to NULL.
+ * @param f Pointer to an AVFifo to free. *f == NULL is allowed.
+ */
+void av_fifo_freep2(AVFifo **f);
+
+
+#if FF_API_FIFO_OLD_API
typedef struct AVFifoBuffer {
uint8_t *buffer;
uint8_t *rptr, *wptr, *end;
@@ -38,7 +251,9 @@ typedef struct AVFifoBuffer {
* Initialize an AVFifoBuffer.
* @param size of FIFO
* @return AVFifoBuffer or NULL in case of memory allocation failure
+ * @deprecated use av_fifo_alloc2()
*/
+attribute_deprecated
AVFifoBuffer *av_fifo_alloc(unsigned int size);
/**
@@ -46,25 +261,33 @@ AVFifoBuffer *av_fifo_alloc(unsigned int size);
* @param nmemb number of elements
* @param size size of the single element
* @return AVFifoBuffer or NULL in case of memory allocation failure
+ * @deprecated use av_fifo_alloc2()
*/
+attribute_deprecated
AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size);
/**
* Free an AVFifoBuffer.
* @param f AVFifoBuffer to free
+ * @deprecated use the AVFifo API with av_fifo_freep2()
*/
+attribute_deprecated
void av_fifo_free(AVFifoBuffer *f);
/**
* Free an AVFifoBuffer and reset pointer to NULL.
* @param f AVFifoBuffer to free
+ * @deprecated use the AVFifo API with av_fifo_freep2()
*/
+attribute_deprecated
void av_fifo_freep(AVFifoBuffer **f);
/**
* Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied.
* @param f AVFifoBuffer to reset
+ * @deprecated use av_fifo_reset2() with the new AVFifo-API
*/
+attribute_deprecated
void av_fifo_reset(AVFifoBuffer *f);
/**
@@ -72,7 +295,9 @@ void av_fifo_reset(AVFifoBuffer *f);
* amount of data you can read from it.
* @param f AVFifoBuffer to read from
* @return size
+ * @deprecated use av_fifo_can_read() with the new AVFifo-API
*/
+attribute_deprecated
int av_fifo_size(const AVFifoBuffer *f);
/**
@@ -80,7 +305,9 @@ int av_fifo_size(const AVFifoBuffer *f);
* amount of data you can write into it.
* @param f AVFifoBuffer to write into
* @return size
+ * @deprecated use av_fifo_can_write() with the new AVFifo-API
*/
+attribute_deprecated
int av_fifo_space(const AVFifoBuffer *f);
/**
@@ -91,7 +318,13 @@ int av_fifo_space(const AVFifoBuffer *f);
* @param buf_size number of bytes to read
* @param func generic read function
* @param dest data destination
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_peek() when func == NULL,
+ * av_fifo_peek_to_cb() otherwise
*/
+attribute_deprecated
int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int));
/**
@@ -101,7 +334,13 @@ int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_siz
* @param buf_size number of bytes to read
* @param func generic read function
* @param dest data destination
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_peek() when func == NULL,
+ * av_fifo_peek_to_cb() otherwise
*/
+attribute_deprecated
int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
/**
@@ -110,7 +349,13 @@ int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)
* @param buf_size number of bytes to read
* @param func generic read function
* @param dest data destination
+ *
+ * @return a non-negative number on success, a negative error code on failure
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_read() when func == NULL,
+ * av_fifo_read_to_cb() otherwise
*/
+attribute_deprecated
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
/**
@@ -124,8 +369,12 @@ int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)
* func must return the number of bytes written to dest_buf, or <= 0 to
* indicate no more data available to write.
* If func is NULL, src is interpreted as a simple byte array for source data.
- * @return the number of bytes written to the FIFO
+ * @return the number of bytes written to the FIFO or a negative error code on failure
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_write() when func == NULL,
+ * av_fifo_write_from_cb() otherwise
*/
+attribute_deprecated
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));
/**
@@ -135,7 +384,11 @@ int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void
* @param f AVFifoBuffer to resize
* @param size new AVFifoBuffer size in bytes
* @return <0 for failure, >=0 otherwise
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_grow2() to increase FIFO size,
+ * decreasing FIFO size is not supported
*/
+attribute_deprecated
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
/**
@@ -146,16 +399,24 @@ int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
* @param f AVFifoBuffer to resize
* @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size()
* @return <0 for failure, >=0 otherwise
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_grow2(); note that unlike
+ * this function it adds to the allocated size, rather than to the used size
*/
+attribute_deprecated
int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space);
/**
* Read and discard the specified amount of data from an AVFifoBuffer.
* @param f AVFifoBuffer to read from
* @param size amount of data to read in bytes
+ *
+ * @deprecated use the new AVFifo-API with av_fifo_drain2()
*/
+attribute_deprecated
void av_fifo_drain(AVFifoBuffer *f, int size);
+#if FF_API_FIFO_PEEK2
/**
* Return a pointer to the data stored in a FIFO buffer at a certain offset.
* The FIFO buffer is not modified.
@@ -165,7 +426,9 @@ void av_fifo_drain(AVFifoBuffer *f, int size);
* than the used buffer size or the returned pointer will
* point outside to the buffer data.
* The used buffer size can be checked with av_fifo_size().
+ * @deprecated use the new AVFifo-API with av_fifo_peek() or av_fifo_peek_to_cb()
*/
+attribute_deprecated
static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
{
uint8_t *ptr = f->rptr + offs;
@@ -175,5 +438,11 @@ static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
ptr = f->end - (f->buffer - ptr);
return ptr;
}
+#endif
+#endif
+
+/**
+ * @}
+ */
#endif /* AVUTIL_FIFO_H */
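
Putting the new header together, a minimal usage sketch of the element-based
AVFifo API (the int element type, the initial size of 8 and the use of
AV_FIFO_FLAG_AUTO_GROW are illustrative assumptions, not part of this patch):

#include "libavutil/error.h"
#include "libavutil/fifo.h"

static int fifo_demo(void)
{
    AVFifo *f = av_fifo_alloc2(8, sizeof(int), AV_FIFO_FLAG_AUTO_GROW);
    int in[4] = { 1, 2, 3, 4 }, out[4];
    int ret;

    if (!f)
        return AVERROR(ENOMEM);

    /* writes of at most av_fifo_can_write(f) elements always succeed;
     * larger writes may still succeed thanks to AUTO_GROW */
    ret = av_fifo_write(f, in, 4);
    if (ret < 0)
        goto end;

    /* inspect two elements without consuming them, then consume all four */
    ret = av_fifo_peek(f, out, 2, 0);
    if (ret < 0)
        goto end;
    ret = av_fifo_read(f, out, 4);

end:
    av_fifo_freep2(&f);
    return ret;
}

For streaming sources and sinks, av_fifo_write_from_cb() and av_fifo_read_to_cb()
take an AVFifoCB instead of a flat buffer, avoiding an intermediate copy.
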
diff --git a/media/ffvpx/libavutil/fixed_dsp.c b/media/ffvpx/libavutil/fixed_dsp.c
index f1b195f184..5ab47d55d0 100644
--- a/media/ffvpx/libavutil/fixed_dsp.c
+++ b/media/ffvpx/libavutil/fixed_dsp.c
@@ -45,6 +45,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "common.h"
#include "fixed_dsp.h"
static void vector_fmul_add_c(int *dst, const int *src0, const int *src1, const int *src2, int len){
@@ -134,7 +135,7 @@ static int scalarproduct_fixed_c(const int *v1, const int *v2, int len)
return (int)(p >> 31);
}
-static void butterflies_fixed_c(int *v1s, int *v2, int len)
+static void butterflies_fixed_c(int *av_restrict v1s, int *av_restrict v2, int len)
{
int i;
unsigned int *v1 = v1s;
@@ -161,8 +162,11 @@ AVFixedDSPContext * avpriv_alloc_fixed_dsp(int bit_exact)
fdsp->butterflies_fixed = butterflies_fixed_c;
fdsp->scalarproduct_fixed = scalarproduct_fixed_c;
- if (ARCH_X86)
- ff_fixed_dsp_init_x86(fdsp);
+#if ARCH_RISCV
+ ff_fixed_dsp_init_riscv(fdsp);
+#elif ARCH_X86
+ ff_fixed_dsp_init_x86(fdsp);
+#endif
return fdsp;
}
diff --git a/media/ffvpx/libavutil/fixed_dsp.h b/media/ffvpx/libavutil/fixed_dsp.h
index f554cb5038..1217d3a53b 100644
--- a/media/ffvpx/libavutil/fixed_dsp.h
+++ b/media/ffvpx/libavutil/fixed_dsp.h
@@ -49,8 +49,8 @@
#define AVUTIL_FIXED_DSP_H
#include <stdint.h>
+#include "config.h"
#include "attributes.h"
-#include "common.h"
#include "libavcodec/mathops.h"
typedef struct AVFixedDSPContext {
@@ -161,6 +161,7 @@ typedef struct AVFixedDSPContext {
*/
AVFixedDSPContext * avpriv_alloc_fixed_dsp(int strict);
+void ff_fixed_dsp_init_riscv(AVFixedDSPContext *fdsp);
void ff_fixed_dsp_init_x86(AVFixedDSPContext *fdsp);
/**
diff --git a/media/ffvpx/libavutil/float_dsp.c b/media/ffvpx/libavutil/float_dsp.c
index 6c30dafc56..6e7b4cd67d 100644
--- a/media/ffvpx/libavutil/float_dsp.c
+++ b/media/ffvpx/libavutil/float_dsp.c
@@ -150,16 +150,18 @@ av_cold AVFloatDSPContext *avpriv_float_dsp_alloc(int bit_exact)
fdsp->butterflies_float = butterflies_float_c;
fdsp->scalarproduct_float = avpriv_scalarproduct_float_c;
- #if ARCH_AARCH64 == 1
- ff_float_dsp_init_aarch64(fdsp);
- #elif ARCH_ARM == 1
- ff_float_dsp_init_arm(fdsp);
- #elif ARCH_PPC == 1
- ff_float_dsp_init_ppc(fdsp, bit_exact);
- #elif ARCH_X86 == 1
- ff_float_dsp_init_x86(fdsp);
- #elif ARCH_MIPS == 1
- ff_float_dsp_init_mips(fdsp);
- #endif
+#if ARCH_AARCH64 == 1
+ ff_float_dsp_init_aarch64(fdsp);
+#elif ARCH_ARM == 1
+ ff_float_dsp_init_arm(fdsp);
+#elif ARCH_PPC == 1
+ ff_float_dsp_init_ppc(fdsp, bit_exact);
+#elif ARCH_RISCV == 1
+ ff_float_dsp_init_riscv(fdsp);
+#elif ARCH_X86 == 1
+ ff_float_dsp_init_x86(fdsp);
+#elif ARCH_MIPS == 1
+ ff_float_dsp_init_mips(fdsp);
+#endif
return fdsp;
}
diff --git a/media/ffvpx/libavutil/float_dsp.h b/media/ffvpx/libavutil/float_dsp.h
index 9c664592bd..7cad9fc622 100644
--- a/media/ffvpx/libavutil/float_dsp.h
+++ b/media/ffvpx/libavutil/float_dsp.h
@@ -205,6 +205,7 @@ float avpriv_scalarproduct_float_c(const float *v1, const float *v2, int len);
void ff_float_dsp_init_aarch64(AVFloatDSPContext *fdsp);
void ff_float_dsp_init_arm(AVFloatDSPContext *fdsp);
void ff_float_dsp_init_ppc(AVFloatDSPContext *fdsp, int strict);
+void ff_float_dsp_init_riscv(AVFloatDSPContext *fdsp);
void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp);
void ff_float_dsp_init_mips(AVFloatDSPContext *fdsp);
diff --git a/media/ffvpx/libavutil/frame.c b/media/ffvpx/libavutil/frame.c
index dcf1fc3d17..9545477acc 100644
--- a/media/ffvpx/libavutil/frame.c
+++ b/media/ffvpx/libavutil/frame.c
@@ -20,139 +20,37 @@
#include "avassert.h"
#include "buffer.h"
#include "common.h"
+#include "cpu.h"
#include "dict.h"
#include "frame.h"
#include "imgutils.h"
#include "mem.h"
#include "samplefmt.h"
+#include "hwcontext.h"
-#if FF_API_FRAME_GET_SET
-MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
-MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
-MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
-MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
-MAKE_ACCESSORS(AVFrame, frame, int, channels)
-MAKE_ACCESSORS(AVFrame, frame, int, sample_rate)
-MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
-MAKE_ACCESSORS(AVFrame, frame, int, decode_error_flags)
-MAKE_ACCESSORS(AVFrame, frame, int, pkt_size)
-MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
-MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
-#endif
-
+#if FF_API_OLD_CHANNEL_LAYOUT
#define CHECK_CHANNELS_CONSISTENCY(frame) \
av_assert2(!(frame)->channel_layout || \
(frame)->channels == \
av_get_channel_layout_nb_channels((frame)->channel_layout))
-
-#if FF_API_FRAME_QP
-struct qp_properties {
- int stride;
- int type;
-};
-
-int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
-{
- struct qp_properties *p;
- AVFrameSideData *sd;
- AVBufferRef *ref;
-
-FF_DISABLE_DEPRECATION_WARNINGS
- av_buffer_unref(&f->qp_table_buf);
-
- f->qp_table_buf = buf;
- f->qscale_table = buf->data;
- f->qstride = stride;
- f->qscale_type = qp_type;
-FF_ENABLE_DEPRECATION_WARNINGS
-
- av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
- av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);
-
- ref = av_buffer_ref(buf);
- if (!av_frame_new_side_data_from_buf(f, AV_FRAME_DATA_QP_TABLE_DATA, ref)) {
- av_buffer_unref(&ref);
- return AVERROR(ENOMEM);
- }
-
- sd = av_frame_new_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES,
- sizeof(struct qp_properties));
- if (!sd)
- return AVERROR(ENOMEM);
-
- p = (struct qp_properties *)sd->data;
- p->stride = stride;
- p->type = qp_type;
-
- return 0;
-}
-
-int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
-{
- AVBufferRef *buf = NULL;
-
- *stride = 0;
- *type = 0;
-
-FF_DISABLE_DEPRECATION_WARNINGS
- if (f->qp_table_buf) {
- *stride = f->qstride;
- *type = f->qscale_type;
- buf = f->qp_table_buf;
-FF_ENABLE_DEPRECATION_WARNINGS
- } else {
- AVFrameSideData *sd;
- struct qp_properties *p;
- sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
- if (!sd)
- return NULL;
- p = (struct qp_properties *)sd->data;
- sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);
- if (!sd)
- return NULL;
- *stride = p->stride;
- *type = p->type;
- buf = sd->buf;
- }
-
- return buf ? buf->data : NULL;
-}
#endif
-const char *av_get_colorspace_name(enum AVColorSpace val)
-{
- static const char * const name[] = {
- [AVCOL_SPC_RGB] = "GBR",
- [AVCOL_SPC_BT709] = "bt709",
- [AVCOL_SPC_FCC] = "fcc",
- [AVCOL_SPC_BT470BG] = "bt470bg",
- [AVCOL_SPC_SMPTE170M] = "smpte170m",
- [AVCOL_SPC_SMPTE240M] = "smpte240m",
- [AVCOL_SPC_YCOCG] = "YCgCo",
- };
- if ((unsigned)val >= FF_ARRAY_ELEMS(name))
- return NULL;
- return name[val];
-}
-
static void get_frame_defaults(AVFrame *frame)
{
- if (frame->extended_data != frame->data)
- av_freep(&frame->extended_data);
-
memset(frame, 0, sizeof(*frame));
frame->pts =
frame->pkt_dts = AV_NOPTS_VALUE;
-#if FF_API_PKT_PTS
+ frame->best_effort_timestamp = AV_NOPTS_VALUE;
+ frame->duration = 0;
+#if FF_API_PKT_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
- frame->pkt_pts = AV_NOPTS_VALUE;
+ frame->pkt_duration = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
- frame->best_effort_timestamp = AV_NOPTS_VALUE;
- frame->pkt_duration = 0;
frame->pkt_pos = -1;
frame->pkt_size = -1;
+ frame->time_base = (AVRational){ 0, 1 };
frame->key_frame = 1;
frame->sample_aspect_ratio = (AVRational){ 0, 1 };
frame->format = -1; /* unknown */
@@ -188,12 +86,11 @@ static void wipe_side_data(AVFrame *frame)
AVFrame *av_frame_alloc(void)
{
- AVFrame *frame = av_mallocz(sizeof(*frame));
+ AVFrame *frame = av_malloc(sizeof(*frame));
if (!frame)
return NULL;
- frame->extended_data = NULL;
get_frame_defaults(frame);
return frame;
@@ -211,8 +108,10 @@ void av_frame_free(AVFrame **frame)
static int get_video_buffer(AVFrame *frame, int align)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
- int ret, i, padded_height;
+ int ret, i, padded_height, total_size;
int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
+ ptrdiff_t linesizes[4];
+ size_t sizes[4];
if (!desc)
return AVERROR(EINVAL);
@@ -237,12 +136,22 @@ static int get_video_buffer(AVFrame *frame, int align)
frame->linesize[i] = FFALIGN(frame->linesize[i], align);
}
+ for (i = 0; i < 4; i++)
+ linesizes[i] = frame->linesize[i];
+
padded_height = FFALIGN(frame->height, 32);
- if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
- NULL, frame->linesize)) < 0)
+ if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
+ padded_height, linesizes)) < 0)
return ret;
- frame->buf[0] = av_buffer_alloc(ret + 4*plane_padding);
+ total_size = 4*plane_padding;
+ for (i = 0; i < 4; i++) {
+ if (sizes[i] > INT_MAX - total_size)
+ return AVERROR(EINVAL);
+ total_size += sizes[i];
+ }
+
+ frame->buf[0] = av_buffer_alloc(total_size);
if (!frame->buf[0]) {
ret = AVERROR(ENOMEM);
goto fail;
@@ -267,18 +176,27 @@ fail:
static int get_audio_buffer(AVFrame *frame, int align)
{
- int channels;
int planar = av_sample_fmt_is_planar(frame->format);
- int planes;
+ int channels, planes;
int ret, i;
- if (!frame->channels)
- frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);
-
- channels = frame->channels;
- planes = planar ? channels : 1;
-
- CHECK_CHANNELS_CONSISTENCY(frame);
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!frame->ch_layout.nb_channels) {
+ if (frame->channel_layout) {
+ av_channel_layout_from_mask(&frame->ch_layout, frame->channel_layout);
+ } else {
+ frame->ch_layout.nb_channels = frame->channels;
+ frame->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ }
+ }
+ frame->channels = frame->ch_layout.nb_channels;
+ frame->channel_layout = frame->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
+ frame->ch_layout.u.mask : 0;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ channels = frame->ch_layout.nb_channels;
+ planes = planar ? channels : 1;
if (!frame->linesize[0]) {
ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
frame->nb_samples, frame->format,
@@ -288,9 +206,9 @@ static int get_audio_buffer(AVFrame *frame, int align)
}
if (planes > AV_NUM_DATA_POINTERS) {
- frame->extended_data = av_mallocz_array(planes,
+ frame->extended_data = av_calloc(planes,
sizeof(*frame->extended_data));
- frame->extended_buf = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
+ frame->extended_buf = av_calloc(planes - AV_NUM_DATA_POINTERS,
sizeof(*frame->extended_buf));
if (!frame->extended_data || !frame->extended_buf) {
av_freep(&frame->extended_data);
@@ -326,17 +244,24 @@ int av_frame_get_buffer(AVFrame *frame, int align)
if (frame->format < 0)
return AVERROR(EINVAL);
+FF_DISABLE_DEPRECATION_WARNINGS
if (frame->width > 0 && frame->height > 0)
return get_video_buffer(frame, align);
- else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
+ else if (frame->nb_samples > 0 &&
+ (av_channel_layout_check(&frame->ch_layout)
+#if FF_API_OLD_CHANNEL_LAYOUT
+ || frame->channel_layout || frame->channels > 0
+#endif
+ ))
return get_audio_buffer(frame, align);
+FF_ENABLE_DEPRECATION_WARNINGS
return AVERROR(EINVAL);
}
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
- int i;
+ int ret, i;
dst->key_frame = src->key_frame;
dst->pict_type = src->pict_type;
@@ -346,26 +271,35 @@ static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
dst->crop_left = src->crop_left;
dst->crop_right = src->crop_right;
dst->pts = src->pts;
+ dst->duration = src->duration;
dst->repeat_pict = src->repeat_pict;
dst->interlaced_frame = src->interlaced_frame;
dst->top_field_first = src->top_field_first;
dst->palette_has_changed = src->palette_has_changed;
dst->sample_rate = src->sample_rate;
dst->opaque = src->opaque;
-#if FF_API_PKT_PTS
-FF_DISABLE_DEPRECATION_WARNINGS
- dst->pkt_pts = src->pkt_pts;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
dst->pkt_dts = src->pkt_dts;
dst->pkt_pos = src->pkt_pos;
dst->pkt_size = src->pkt_size;
+#if FF_API_PKT_DURATION
+FF_DISABLE_DEPRECATION_WARNINGS
dst->pkt_duration = src->pkt_duration;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ dst->time_base = src->time_base;
+#if FF_API_REORDERED_OPAQUE
+FF_DISABLE_DEPRECATION_WARNINGS
dst->reordered_opaque = src->reordered_opaque;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
dst->quality = src->quality;
dst->best_effort_timestamp = src->best_effort_timestamp;
+#if FF_API_FRAME_PICTURE_NUMBER
+FF_DISABLE_DEPRECATION_WARNINGS
dst->coded_picture_number = src->coded_picture_number;
dst->display_picture_number = src->display_picture_number;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
dst->flags = src->flags;
dst->decode_error_flags = src->decode_error_flags;
dst->color_primaries = src->color_primaries;
@@ -376,12 +310,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
av_dict_copy(&dst->metadata, src->metadata, 0);
-#if FF_API_ERROR_FRAME
-FF_DISABLE_DEPRECATION_WARNINGS
- memcpy(dst->error, src->error, sizeof(dst->error));
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
for (i = 0; i < src->nb_side_data; i++) {
const AVFrameSideData *sd_src = src->side_data[i];
AVFrameSideData *sd_dst;
@@ -408,36 +336,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
}
-#if FF_API_FRAME_QP
-FF_DISABLE_DEPRECATION_WARNINGS
- dst->qscale_table = NULL;
- dst->qstride = 0;
- dst->qscale_type = 0;
- av_buffer_unref(&dst->qp_table_buf);
- if (src->qp_table_buf) {
- dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
- if (dst->qp_table_buf) {
- dst->qscale_table = dst->qp_table_buf->data;
- dst->qstride = src->qstride;
- dst->qscale_type = src->qscale_type;
- }
- }
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-
- av_buffer_unref(&dst->opaque_ref);
- av_buffer_unref(&dst->private_ref);
- if (src->opaque_ref) {
- dst->opaque_ref = av_buffer_ref(src->opaque_ref);
- if (!dst->opaque_ref)
- return AVERROR(ENOMEM);
- }
- if (src->private_ref) {
- dst->private_ref = av_buffer_ref(src->private_ref);
- if (!dst->private_ref)
- return AVERROR(ENOMEM);
- }
- return 0;
+ ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
+ ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
+ return ret;
}
int av_frame_ref(AVFrame *dst, const AVFrame *src)
@@ -445,30 +346,55 @@ int av_frame_ref(AVFrame *dst, const AVFrame *src)
int i, ret = 0;
av_assert1(dst->width == 0 && dst->height == 0);
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
av_assert1(dst->channels == 0);
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ av_assert1(dst->ch_layout.nb_channels == 0 &&
+ dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
dst->format = src->format;
dst->width = src->width;
dst->height = src->height;
+ dst->nb_samples = src->nb_samples;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
dst->channels = src->channels;
dst->channel_layout = src->channel_layout;
- dst->nb_samples = src->nb_samples;
+ if (!av_channel_layout_check(&src->ch_layout)) {
+ if (src->channel_layout)
+ av_channel_layout_from_mask(&dst->ch_layout, src->channel_layout);
+ else {
+ dst->ch_layout.nb_channels = src->channels;
+ dst->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
+ }
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
ret = frame_copy_props(dst, src, 0);
if (ret < 0)
- return ret;
+ goto fail;
+
+ // this check is needed only until FF_API_OLD_CHANNEL_LAYOUT is out
+ if (av_channel_layout_check(&src->ch_layout)) {
+ ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
+ if (ret < 0)
+ goto fail;
+ }
/* duplicate the frame data if it's not refcounted */
if (!src->buf[0]) {
- ret = av_frame_get_buffer(dst, 32);
+ ret = av_frame_get_buffer(dst, 0);
if (ret < 0)
- return ret;
+ goto fail;
ret = av_frame_copy(dst, src);
if (ret < 0)
- av_frame_unref(dst);
+ goto fail;
- return ret;
+ return 0;
}
/* ref the buffers */
@@ -483,8 +409,8 @@ int av_frame_ref(AVFrame *dst, const AVFrame *src)
}
if (src->extended_buf) {
- dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
- src->nb_extended_buf);
+ dst->extended_buf = av_calloc(src->nb_extended_buf,
+ sizeof(*dst->extended_buf));
if (!dst->extended_buf) {
ret = AVERROR(ENOMEM);
goto fail;
@@ -510,13 +436,12 @@ int av_frame_ref(AVFrame *dst, const AVFrame *src)
/* duplicate extended data */
if (src->extended_data != src->data) {
- int ch = src->channels;
+ int ch = dst->ch_layout.nb_channels;
if (!ch) {
ret = AVERROR(EINVAL);
goto fail;
}
- CHECK_CHANNELS_CONSISTENCY(src);
dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
if (!dst->extended_data) {
@@ -565,29 +490,34 @@ void av_frame_unref(AVFrame *frame)
av_buffer_unref(&frame->extended_buf[i]);
av_freep(&frame->extended_buf);
av_dict_free(&frame->metadata);
-#if FF_API_FRAME_QP
-FF_DISABLE_DEPRECATION_WARNINGS
- av_buffer_unref(&frame->qp_table_buf);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
av_buffer_unref(&frame->hw_frames_ctx);
av_buffer_unref(&frame->opaque_ref);
av_buffer_unref(&frame->private_ref);
+ if (frame->extended_data != frame->data)
+ av_freep(&frame->extended_data);
+
+ av_channel_layout_uninit(&frame->ch_layout);
+
get_frame_defaults(frame);
}
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
{
av_assert1(dst->width == 0 && dst->height == 0);
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
av_assert1(dst->channels == 0);
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ av_assert1(dst->ch_layout.nb_channels == 0 &&
+ dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
*dst = *src;
if (src->extended_data == src->data)
dst->extended_data = dst->data;
- memset(src, 0, sizeof(*src));
get_frame_defaults(src);
}
@@ -613,9 +543,6 @@ int av_frame_make_writable(AVFrame *frame)
AVFrame tmp;
int ret;
- if (!frame->buf[0])
- return AVERROR(EINVAL);
-
if (av_frame_is_writable(frame))
return 0;
@@ -623,10 +550,23 @@ int av_frame_make_writable(AVFrame *frame)
tmp.format = frame->format;
tmp.width = frame->width;
tmp.height = frame->height;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
tmp.channels = frame->channels;
tmp.channel_layout = frame->channel_layout;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
tmp.nb_samples = frame->nb_samples;
- ret = av_frame_get_buffer(&tmp, 32);
+ ret = av_channel_layout_copy(&tmp.ch_layout, &frame->ch_layout);
+ if (ret < 0) {
+ av_frame_unref(&tmp);
+ return ret;
+ }
+
+ if (frame->hw_frames_ctx)
+ ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
+ else
+ ret = av_frame_get_buffer(&tmp, 0);
if (ret < 0)
return ret;
@@ -662,10 +602,18 @@ AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
int planes, i;
if (frame->nb_samples) {
- int channels = frame->channels;
+ int channels = frame->ch_layout.nb_channels;
+
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!channels) {
+ channels = frame->channels;
+ CHECK_CHANNELS_CONSISTENCY(frame);
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
if (!channels)
return NULL;
- CHECK_CHANNELS_CONSISTENCY(frame);
planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
} else
planes = 4;
@@ -721,7 +669,7 @@ AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
enum AVFrameSideDataType type,
- int size)
+ size_t size)
{
AVFrameSideData *ret;
AVBufferRef *buf = av_buffer_alloc(size);
@@ -752,6 +700,9 @@ static int frame_copy_video(AVFrame *dst, const AVFrame *src)
dst->height < src->height)
return AVERROR(EINVAL);
+ if (src->hw_frames_ctx || dst->hw_frames_ctx)
+ return av_hwframe_transfer_data(dst, src, 0);
+
planes = av_pix_fmt_count_planes(dst->format);
for (i = 0; i < planes; i++)
if (!dst->data[i] || !src->data[i])
@@ -768,16 +719,35 @@ static int frame_copy_video(AVFrame *dst, const AVFrame *src)
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
{
int planar = av_sample_fmt_is_planar(dst->format);
- int channels = dst->channels;
+ int channels = dst->ch_layout.nb_channels;
int planes = planar ? channels : 1;
int i;
- if (dst->nb_samples != src->nb_samples ||
- dst->channels != src->channels ||
- dst->channel_layout != src->channel_layout)
- return AVERROR(EINVAL);
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
+ if (!channels || !src->ch_layout.nb_channels) {
+ if (dst->channels != src->channels ||
+ dst->channel_layout != src->channel_layout)
+ return AVERROR(EINVAL);
+ CHECK_CHANNELS_CONSISTENCY(src);
+ }
+ if (!channels) {
+ channels = dst->channels;
+ planes = planar ? channels : 1;
+ }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
- CHECK_CHANNELS_CONSISTENCY(src);
+ if (dst->nb_samples != src->nb_samples ||
+#if FF_API_OLD_CHANNEL_LAYOUT
+ (av_channel_layout_check(&dst->ch_layout) &&
+ av_channel_layout_check(&src->ch_layout) &&
+#endif
+ av_channel_layout_compare(&dst->ch_layout, &src->ch_layout))
+#if FF_API_OLD_CHANNEL_LAYOUT
+ )
+#endif
+ return AVERROR(EINVAL);
for (i = 0; i < planes; i++)
if (!dst->extended_data[i] || !src->extended_data[i])
@@ -794,10 +764,17 @@ int av_frame_copy(AVFrame *dst, const AVFrame *src)
if (dst->format != src->format || dst->format < 0)
return AVERROR(EINVAL);
+FF_DISABLE_DEPRECATION_WARNINGS
if (dst->width > 0 && dst->height > 0)
return frame_copy_video(dst, src);
- else if (dst->nb_samples > 0 && dst->channels > 0)
+ else if (dst->nb_samples > 0 &&
+ (av_channel_layout_check(&dst->ch_layout)
+#if FF_API_OLD_CHANNEL_LAYOUT
+ || dst->channels > 0
+#endif
+ ))
return frame_copy_audio(dst, src);
+FF_ENABLE_DEPRECATION_WARNINGS
return AVERROR(EINVAL);
}
@@ -806,7 +783,7 @@ void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
{
int i;
- for (i = 0; i < frame->nb_side_data; i++) {
+ for (i = frame->nb_side_data - 1; i >= 0; i--) {
AVFrameSideData *sd = frame->side_data[i];
if (sd->type == type) {
free_side_data(&frame->side_data[i]);
@@ -836,12 +813,16 @@ const char *av_frame_side_data_name(enum AVFrameSideDataType type)
case AV_FRAME_DATA_S12M_TIMECODE: return "SMPTE 12-1 timecode";
case AV_FRAME_DATA_SPHERICAL: return "Spherical Mapping";
case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile";
-#if FF_API_FRAME_QP
- case AV_FRAME_DATA_QP_TABLE_PROPERTIES: return "QP table properties";
- case AV_FRAME_DATA_QP_TABLE_DATA: return "QP table data";
-#endif
case AV_FRAME_DATA_DYNAMIC_HDR_PLUS: return "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)";
+ case AV_FRAME_DATA_DYNAMIC_HDR_VIVID: return "HDR Dynamic Metadata CUVA 005.1 2021 (Vivid)";
case AV_FRAME_DATA_REGIONS_OF_INTEREST: return "Regions Of Interest";
+ case AV_FRAME_DATA_VIDEO_ENC_PARAMS: return "Video encoding parameters";
+ case AV_FRAME_DATA_SEI_UNREGISTERED: return "H.26[45] User Data Unregistered SEI message";
+ case AV_FRAME_DATA_FILM_GRAIN_PARAMS: return "Film grain parameters";
+ case AV_FRAME_DATA_DETECTION_BBOXES: return "Bounding boxes for object detection and classification";
+ case AV_FRAME_DATA_DOVI_RPU_BUFFER: return "Dolby Vision RPU Data";
+ case AV_FRAME_DATA_DOVI_METADATA: return "Dolby Vision Metadata";
+ case AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT: return "Ambient viewing environment";
}
return NULL;
}
@@ -856,7 +837,7 @@ static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
- if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL) && i == 1) {
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
offsets[i] = 0;
break;
}
diff --git a/media/ffvpx/libavutil/frame.h b/media/ffvpx/libavutil/frame.h
index 5d3231e7bb..2580269549 100644
--- a/media/ffvpx/libavutil/frame.h
+++ b/media/ffvpx/libavutil/frame.h
@@ -30,6 +30,7 @@
#include "avutil.h"
#include "buffer.h"
+#include "channel_layout.h"
#include "dict.h"
#include "rational.h"
#include "samplefmt.h"
@@ -142,28 +143,11 @@ enum AVFrameSideDataType {
*/
AV_FRAME_DATA_ICC_PROFILE,
-#if FF_API_FRAME_QP
- /**
- * Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA.
- * The contents of this side data are undocumented and internal; use
- * av_frame_set_qp_table() and av_frame_get_qp_table() to access this in a
- * meaningful way instead.
- */
- AV_FRAME_DATA_QP_TABLE_PROPERTIES,
-
- /**
- * Raw QP table data. Its format is described by
- * AV_FRAME_DATA_QP_TABLE_PROPERTIES. Use av_frame_set_qp_table() and
- * av_frame_get_qp_table() to access this instead.
- */
- AV_FRAME_DATA_QP_TABLE_DATA,
-#endif
-
/**
* Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t
* where the first uint32_t describes how many (1-3) of the other timecodes are used.
- * The timecode format is described in the av_timecode_get_smpte_from_framenum()
- * function in libavutil/timecode.c.
+ * The timecode format is described in the documentation of the
+ * av_timecode_get_smpte_from_framenum() function in libavutil/timecode.h.
*/
AV_FRAME_DATA_S12M_TIMECODE,
@@ -179,6 +163,57 @@ enum AVFrameSideDataType {
* array element is implied by AVFrameSideData.size / AVRegionOfInterest.self_size.
*/
AV_FRAME_DATA_REGIONS_OF_INTEREST,
+
+ /**
+ * Encoding parameters for a video frame, as described by AVVideoEncParams.
+ */
+ AV_FRAME_DATA_VIDEO_ENC_PARAMS,
+
+ /**
+ * User data unregistered metadata associated with a video frame.
+ * This is the H.26[45] UDU SEI message, and shouldn't be used for any other purpose.
+ * The data is stored as uint8_t in AVFrameSideData.data which is 16 bytes of
+ * uuid_iso_iec_11578 followed by AVFrameSideData.size - 16 bytes of user_data_payload_byte.
+ */
+ AV_FRAME_DATA_SEI_UNREGISTERED,
+
+ /**
+ * Film grain parameters for a frame, described by AVFilmGrainParams.
+ * Must be present for every frame which should have film grain applied.
+ */
+ AV_FRAME_DATA_FILM_GRAIN_PARAMS,
+
+ /**
+ * Bounding boxes for object detection and classification,
+ * as described by AVDetectionBBoxHeader.
+ */
+ AV_FRAME_DATA_DETECTION_BBOXES,
+
+ /**
+ * Dolby Vision RPU raw data, suitable for passing to x265
+ * or other libraries. Array of uint8_t, with NAL emulation
+ * bytes intact.
+ */
+ AV_FRAME_DATA_DOVI_RPU_BUFFER,
+
+ /**
+ * Parsed Dolby Vision metadata, suitable for passing to a software
+ * implementation. The payload is the AVDOVIMetadata struct defined in
+ * libavutil/dovi_meta.h.
+ */
+ AV_FRAME_DATA_DOVI_METADATA,
+
+ /**
+ * HDR Vivid dynamic metadata associated with a video frame. The payload is
+ * an AVDynamicHDRVivid type and contains information for color
+ * volume transform - CUVA 005.1-2021.
+ */
+ AV_FRAME_DATA_DYNAMIC_HDR_VIVID,
+
+ /**
+ * Ambient viewing environment metadata, as defined by H.274.
+ */
+ AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT,
};
enum AVActiveFormatDescription {
@@ -201,7 +236,7 @@ enum AVActiveFormatDescription {
typedef struct AVFrameSideData {
enum AVFrameSideDataType type;
uint8_t *data;
- int size;
+ size_t size;
AVDictionary *metadata;
AVBufferRef *buf;
} AVFrameSideData;
@@ -296,21 +331,32 @@ typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8
/**
* pointer to the picture/channel planes.
- * This might be different from the first allocated byte
+ * This might be different from the first allocated byte. For video,
+ * it could even point to the end of the image data.
+ *
+ * All pointers in data and extended_data must point into one of the
+ * AVBufferRef in buf or extended_buf.
*
* Some decoders access areas outside 0,0 - width,height, please
* see avcodec_align_dimensions2(). Some filters and swscale can read
* up to 16 bytes beyond the planes, if these filters are to be used,
* then 16 extra bytes must be allocated.
*
- * NOTE: Except for hwaccel formats, pointers not needed by the format
- * MUST be set to NULL.
+ * NOTE: Pointers not needed by the format MUST be set to NULL.
+ *
+ * @attention In case of video, the data[] pointers can point to the
+ * end of image data in order to reverse line order, when used in
+ * combination with negative values in the linesize[] array.
*/
uint8_t *data[AV_NUM_DATA_POINTERS];
/**
- * For video, size in bytes of each picture line.
- * For audio, size in bytes of each plane.
+ * For video, a positive or negative value, which typically indicates
+ * the size in bytes of each picture line, but it can also be:
+ * - the negative byte size of lines for vertical flipping
+ * (with data[n] pointing to the end of the data),
+ * - a positive or negative multiple of the byte size for accessing
+ * even and odd fields of a frame (possibly flipped)
*
* For audio, only linesize[0] may be set. For planar audio, each channel
* plane must be the same size.
@@ -322,6 +368,9 @@ typedef struct AVFrame {
*
* @note The linesize may be larger than the size of usable data -- there
* may be extra padding present for performance reasons.
+ *
+ * @attention In case of video, line size values can be negative to achieve
+ * a vertically inverted iteration over image lines.
*/
int linesize[AV_NUM_DATA_POINTERS];
@@ -387,15 +436,6 @@ typedef struct AVFrame {
*/
int64_t pts;
-#if FF_API_PKT_PTS
- /**
- * PTS copied from the AVPacket that was decoded to produce this frame.
- * @deprecated use the pts field instead
- */
- attribute_deprecated
- int64_t pkt_pts;
-#endif
-
/**
* DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)
* This is also the Presentation time of this AVFrame calculated from
@@ -404,13 +444,25 @@ typedef struct AVFrame {
int64_t pkt_dts;
/**
+ * Time base for the timestamps in this frame.
+ * In the future, this field may be set on frames output by decoders or
+ * filters, but its value will by default be ignored on input to encoders
+ * or filters.
+ */
+ AVRational time_base;
+
+#if FF_API_FRAME_PICTURE_NUMBER
+ /**
* picture number in bitstream order
*/
+ attribute_deprecated
int coded_picture_number;
/**
* picture number in display order
*/
+ attribute_deprecated
int display_picture_number;
+#endif
/**
* quality (between 1 (good) and FF_LAMBDA_MAX (bad))
@@ -422,14 +474,6 @@ typedef struct AVFrame {
*/
void *opaque;
-#if FF_API_ERROR_FRAME
- /**
- * @deprecated unused
- */
- attribute_deprecated
- uint64_t error[AV_NUM_DATA_POINTERS];
-#endif
-
/**
* When decoding, this signals how much the picture must be delayed.
* extra_delay = repeat_pict / (2*fps)
@@ -451,6 +495,7 @@ typedef struct AVFrame {
*/
int palette_has_changed;
+#if FF_API_REORDERED_OPAQUE
/**
* reordered opaque 64 bits (generally an integer or a double precision float
* PTS but can be anything).
@@ -458,24 +503,32 @@ typedef struct AVFrame {
* that time,
* the decoder reorders values as needed and sets AVFrame.reordered_opaque
* to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
+ *
+ * @deprecated Use AV_CODEC_FLAG_COPY_OPAQUE instead
*/
+ attribute_deprecated
int64_t reordered_opaque;
+#endif
/**
* Sample rate of the audio data.
*/
int sample_rate;
+#if FF_API_OLD_CHANNEL_LAYOUT
/**
* Channel layout of the audio data.
+ * @deprecated use ch_layout instead
*/
+ attribute_deprecated
uint64_t channel_layout;
+#endif
/**
- * AVBuffer references backing the data for this frame. If all elements of
- * this array are NULL, then this frame is not reference counted. This array
- * must be filled contiguously -- if buf[i] is non-NULL then buf[j] must
- * also be non-NULL for all j < i.
+ * AVBuffer references backing the data for this frame. All the pointers in
+ * data and extended_data must point inside one of the buffers in buf or
+ * extended_buf. This array must be filled contiguously -- if buf[i] is
+ * non-NULL then buf[j] must also be non-NULL for all j < i.
*
* There may be at most one AVBuffer per data plane, so for video this array
* always contains all the references. For planar audio with more than
@@ -565,13 +618,18 @@ typedef struct AVFrame {
*/
int64_t pkt_pos;
+#if FF_API_PKT_DURATION
/**
* duration of the corresponding packet, expressed in
* AVStream->time_base units, 0 if unknown.
* - encoding: unused
* - decoding: Read by user.
+ *
+ * @deprecated use duration instead
*/
+ attribute_deprecated
int64_t pkt_duration;
+#endif
/**
* metadata.
@@ -593,12 +651,16 @@ typedef struct AVFrame {
#define FF_DECODE_ERROR_CONCEALMENT_ACTIVE 4
#define FF_DECODE_ERROR_DECODE_SLICES 8
+#if FF_API_OLD_CHANNEL_LAYOUT
/**
* number of audio channels, only used for audio.
* - encoding: unused
* - decoding: Read by user.
+ * @deprecated use ch_layout instead
*/
+ attribute_deprecated
int channels;
+#endif
/**
* size of the corresponding packet containing the compressed
@@ -609,24 +671,6 @@ typedef struct AVFrame {
*/
int pkt_size;
-#if FF_API_FRAME_QP
- /**
- * QP table
- */
- attribute_deprecated
- int8_t *qscale_table;
- /**
- * QP store stride
- */
- attribute_deprecated
- int qstride;
-
- attribute_deprecated
- int qscale_type;
-
- attribute_deprecated
- AVBufferRef *qp_table_buf;
-#endif
/**
* For hwaccel-format frames, this should be a reference to the
* AVHWFramesContext describing the frame.
@@ -672,70 +716,18 @@ typedef struct AVFrame {
* for the target frame's private_ref field.
*/
AVBufferRef *private_ref;
-} AVFrame;
-#if FF_API_FRAME_GET_SET
-/**
- * Accessors for some AVFrame fields. These used to be provided for ABI
- * compatibility, and do not need to be used anymore.
- */
-attribute_deprecated
-int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val);
-attribute_deprecated
-int64_t av_frame_get_pkt_duration (const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_pkt_duration (AVFrame *frame, int64_t val);
-attribute_deprecated
-int64_t av_frame_get_pkt_pos (const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_pkt_pos (AVFrame *frame, int64_t val);
-attribute_deprecated
-int64_t av_frame_get_channel_layout (const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_channel_layout (AVFrame *frame, int64_t val);
-attribute_deprecated
-int av_frame_get_channels (const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_channels (AVFrame *frame, int val);
-attribute_deprecated
-int av_frame_get_sample_rate (const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_sample_rate (AVFrame *frame, int val);
-attribute_deprecated
-AVDictionary *av_frame_get_metadata (const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_metadata (AVFrame *frame, AVDictionary *val);
-attribute_deprecated
-int av_frame_get_decode_error_flags (const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_decode_error_flags (AVFrame *frame, int val);
-attribute_deprecated
-int av_frame_get_pkt_size(const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_pkt_size(AVFrame *frame, int val);
-#if FF_API_FRAME_QP
-attribute_deprecated
-int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type);
-attribute_deprecated
-int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type);
-#endif
-attribute_deprecated
-enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val);
-attribute_deprecated
-enum AVColorRange av_frame_get_color_range(const AVFrame *frame);
-attribute_deprecated
-void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val);
-#endif
+ /**
+ * Channel layout of the audio data.
+ */
+ AVChannelLayout ch_layout;
+
+ /**
+ * Duration of the frame, in the same units as pts. 0 if unknown.
+ */
+ int64_t duration;
+} AVFrame;
-/**
- * Get the name of a colorspace.
- * @return a static string identifying the colorspace; can be NULL.
- */
-const char *av_get_colorspace_name(enum AVColorSpace val);
/**
* Allocate an AVFrame and set its fields to default values. The resulting
@@ -804,7 +796,7 @@ void av_frame_move_ref(AVFrame *dst, AVFrame *src);
* The following fields must be set on frame before calling this function:
* - format (pixel format for video, sample format for audio)
* - width and height for video
- * - nb_samples and channel_layout for audio
+ * - nb_samples and ch_layout for audio
*
* This function will fill AVFrame.data and AVFrame.buf arrays and, if
* necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
@@ -841,7 +833,8 @@ int av_frame_is_writable(AVFrame *frame);
* Ensure that the frame data is writable, avoiding data copy if possible.
*
* Do nothing if the frame is writable, allocate new buffers and copy the data
- * if it is not.
+ * if it is not. Non-refcounted frames behave as non-writable, i.e. a copy
+ * is always made.
*
* @return 0 on success, a negative AVERROR on error.
*
@@ -876,6 +869,7 @@ int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
/**
* Get the buffer reference a given data plane is stored in.
*
+ * @param frame the frame to get the plane's buffer from
* @param plane index of the data plane of interest in frame->extended_data.
*
* @return the buffer reference that contains the plane or NULL if the input
@@ -894,7 +888,7 @@ AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);
*/
AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
enum AVFrameSideDataType type,
- int size);
+ size_t size);
/**
* Add a new side data to a frame from an existing AVBufferRef
@@ -920,8 +914,7 @@ AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
enum AVFrameSideDataType type);
/**
- * If side data of the supplied type exists in the frame, free it and remove it
- * from the frame.
+ * Remove and free all side data instances of the given type.
*/
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);
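
Since av_frame_get_buffer() now keys audio allocation off ch_layout rather than
the deprecated channels/channel_layout pair, a minimal sketch of allocating a
writable audio frame under the new API (the sample format, sample count, rate
and stereo layout below are assumptions for illustration):

#include "libavutil/channel_layout.h"
#include "libavutil/frame.h"
#include "libavutil/samplefmt.h"

static AVFrame *alloc_audio_frame(void)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;

    frame->format      = AV_SAMPLE_FMT_FLTP;
    frame->nb_samples  = 1024;
    frame->sample_rate = 48000;
    /* ch_layout replaces the deprecated channels/channel_layout fields */
    av_channel_layout_default(&frame->ch_layout, 2);

    /* align == 0 lets libavutil pick a suitable alignment for the CPU */
    if (av_frame_get_buffer(frame, 0) < 0)
        av_frame_free(&frame);
    return frame;
}
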
diff --git a/media/ffvpx/libavutil/hwcontext.c b/media/ffvpx/libavutil/hwcontext.c
index f1e404ab20..3396598269 100644
--- a/media/ffvpx/libavutil/hwcontext.c
+++ b/media/ffvpx/libavutil/hwcontext.c
@@ -18,6 +18,7 @@
#include "config.h"
+#include "avassert.h"
#include "buffer.h"
#include "common.h"
#include "hwcontext.h"
@@ -59,6 +60,9 @@ static const HWContextType * const hw_table[] = {
#if CONFIG_MEDIACODEC
&ff_hwcontext_type_mediacodec,
#endif
+#if CONFIG_VULKAN
+ &ff_hwcontext_type_vulkan,
+#endif
NULL,
};
@@ -73,6 +77,7 @@ static const char *const hw_type_names[] = {
[AV_HWDEVICE_TYPE_VDPAU] = "vdpau",
[AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = "videotoolbox",
[AV_HWDEVICE_TYPE_MEDIACODEC] = "mediacodec",
+ [AV_HWDEVICE_TYPE_VULKAN] = "vulkan",
};
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
@@ -304,7 +309,7 @@ static int hwframe_pool_prealloc(AVBufferRef *ref)
AVFrame **frames;
int i, ret = 0;
- frames = av_mallocz_array(ctx->initial_pool_size, sizeof(*frames));
+ frames = av_calloc(ctx->initial_pool_size, sizeof(*frames));
if (!frames)
return AVERROR(ENOMEM);
@@ -392,10 +397,14 @@ int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ref,
static int transfer_data_alloc(AVFrame *dst, const AVFrame *src, int flags)
{
- AVHWFramesContext *ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
+ AVHWFramesContext *ctx;
AVFrame *frame_tmp;
int ret = 0;
+ if (!src->hw_frames_ctx)
+ return AVERROR(EINVAL);
+ ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
+
frame_tmp = av_frame_alloc();
if (!frame_tmp)
return AVERROR(ENOMEM);
@@ -418,7 +427,7 @@ static int transfer_data_alloc(AVFrame *dst, const AVFrame *src, int flags)
frame_tmp->width = ctx->width;
frame_tmp->height = ctx->height;
- ret = av_frame_get_buffer(frame_tmp, 32);
+ ret = av_frame_get_buffer(frame_tmp, 0);
if (ret < 0)
goto fail;
@@ -444,21 +453,54 @@ int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
if (!dst->buf[0])
return transfer_data_alloc(dst, src, flags);
- if (src->hw_frames_ctx) {
- ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
+ /*
+ * Hardware -> Hardware Transfer.
+ * Unlike Software -> Hardware or Hardware -> Software, the transfer
+ * function could be provided by either the src or dst, depending on
+ * the specific combination of hardware.
+ */
+ if (src->hw_frames_ctx && dst->hw_frames_ctx) {
+ AVHWFramesContext *src_ctx =
+ (AVHWFramesContext*)src->hw_frames_ctx->data;
+ AVHWFramesContext *dst_ctx =
+ (AVHWFramesContext*)dst->hw_frames_ctx->data;
+
+ if (src_ctx->internal->source_frames) {
+ av_log(src_ctx, AV_LOG_ERROR,
+ "A device with a derived frame context cannot be used as "
+ "the source of a HW -> HW transfer.");
+ return AVERROR(ENOSYS);
+ }
- ret = ctx->internal->hw_type->transfer_data_from(ctx, dst, src);
- if (ret < 0)
- return ret;
- } else if (dst->hw_frames_ctx) {
- ctx = (AVHWFramesContext*)dst->hw_frames_ctx->data;
+ if (dst_ctx->internal->source_frames) {
+ av_log(src_ctx, AV_LOG_ERROR,
+ "A device with a derived frame context cannot be used as "
+ "the destination of a HW -> HW transfer.");
+ return AVERROR(ENOSYS);
+ }
- ret = ctx->internal->hw_type->transfer_data_to(ctx, dst, src);
+ ret = src_ctx->internal->hw_type->transfer_data_from(src_ctx, dst, src);
+ if (ret == AVERROR(ENOSYS))
+ ret = dst_ctx->internal->hw_type->transfer_data_to(dst_ctx, dst, src);
if (ret < 0)
return ret;
- } else
- return AVERROR(ENOSYS);
+ } else {
+ if (src->hw_frames_ctx) {
+ ctx = (AVHWFramesContext*)src->hw_frames_ctx->data;
+ ret = ctx->internal->hw_type->transfer_data_from(ctx, dst, src);
+ if (ret < 0)
+ return ret;
+ } else if (dst->hw_frames_ctx) {
+ ctx = (AVHWFramesContext*)dst->hw_frames_ctx->data;
+
+ ret = ctx->internal->hw_type->transfer_data_to(ctx, dst, src);
+ if (ret < 0)
+ return ret;
+ } else {
+ return AVERROR(ENOSYS);
+ }
+ }
return 0;
}
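A minimal usage sketch (not part of the patch) of the hardware-to-hardware path added above: the caller gives both frames their own hardware backing, and av_hwframe_transfer_data() tries the source side's transfer_data_from() first, falling back to the destination side's transfer_data_to(). The helper name and error handling are illustrative only.

/* Sketch, assuming <libavutil/frame.h> and <libavutil/hwcontext.h>.
 * dst_pool wraps an AVHWFramesContext on the target device. */
static int copy_hw_to_hw(AVBufferRef *dst_pool, AVFrame *dst, const AVFrame *src)
{
    int ret;

    /* Take a surface from the destination pool; this sets dst->buf[0] and
     * dst->hw_frames_ctx, so the call below goes through the new
     * HW -> HW branch instead of transfer_data_alloc(). */
    ret = av_hwframe_get_buffer(dst_pool, dst, 0);
    if (ret < 0)
        return ret;

    return av_hwframe_transfer_data(dst, src, 0);
}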
@@ -520,6 +562,8 @@ int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
return ret;
}
+ frame->extended_data = frame->data;
+
return 0;
}
@@ -604,9 +648,10 @@ fail:
return ret;
}
-int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr,
- enum AVHWDeviceType type,
- AVBufferRef *src_ref, int flags)
+int av_hwdevice_ctx_create_derived_opts(AVBufferRef **dst_ref_ptr,
+ enum AVHWDeviceType type,
+ AVBufferRef *src_ref,
+ AVDictionary *options, int flags)
{
AVBufferRef *dst_ref = NULL, *tmp_ref;
AVHWDeviceContext *dst_ctx, *tmp_ctx;
@@ -639,6 +684,7 @@ int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr,
if (dst_ctx->internal->hw_type->device_derive) {
ret = dst_ctx->internal->hw_type->device_derive(dst_ctx,
tmp_ctx,
+ options,
flags);
if (ret == 0) {
dst_ctx->internal->source_device = av_buffer_ref(src_ref);
@@ -670,6 +716,14 @@ fail:
return ret;
}
+int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr,
+ enum AVHWDeviceType type,
+ AVBufferRef *src_ref, int flags)
+{
+ return av_hwdevice_ctx_create_derived_opts(dst_ref_ptr, type, src_ref,
+ NULL, flags);
+}
+
static void ff_hwframe_unmap(void *opaque, uint8_t *data)
{
HWMapDescriptor *hwmap = (HWMapDescriptor*)data;
@@ -739,6 +793,8 @@ fail:
int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags)
{
+ AVBufferRef *orig_dst_frames = dst->hw_frames_ctx;
+ enum AVPixelFormat orig_dst_fmt = dst->format;
AVHWFramesContext *src_frames, *dst_frames;
HWMapDescriptor *hwmap;
int ret;
@@ -775,8 +831,10 @@ int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags)
src_frames->internal->hw_type->map_from) {
ret = src_frames->internal->hw_type->map_from(src_frames,
dst, src, flags);
- if (ret != AVERROR(ENOSYS))
+ if (ret >= 0)
return ret;
+ else if (ret != AVERROR(ENOSYS))
+ goto fail;
}
}
@@ -787,12 +845,30 @@ int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags)
dst_frames->internal->hw_type->map_to) {
ret = dst_frames->internal->hw_type->map_to(dst_frames,
dst, src, flags);
- if (ret != AVERROR(ENOSYS))
+ if (ret >= 0)
return ret;
+ else if (ret != AVERROR(ENOSYS))
+ goto fail;
}
}
return AVERROR(ENOSYS);
+
+fail:
+ // if the caller provided dst frames context, it should be preserved
+ // by this function
+ av_assert0(orig_dst_frames == NULL ||
+ orig_dst_frames == dst->hw_frames_ctx);
+
+ // preserve user-provided dst frame fields, but clean
+ // anything we might have set
+ dst->hw_frames_ctx = NULL;
+ av_frame_unref(dst);
+
+ dst->hw_frames_ctx = orig_dst_frames;
+ dst->format = orig_dst_fmt;
+
+ return ret;
}
int av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx,
diff --git a/media/ffvpx/libavutil/hwcontext.h b/media/ffvpx/libavutil/hwcontext.h
index f5a4b62387..7ff08c8608 100644
--- a/media/ffvpx/libavutil/hwcontext.h
+++ b/media/ffvpx/libavutil/hwcontext.h
@@ -36,6 +36,7 @@ enum AVHWDeviceType {
AV_HWDEVICE_TYPE_DRM,
AV_HWDEVICE_TYPE_OPENCL,
AV_HWDEVICE_TYPE_MEDIACODEC,
+ AV_HWDEVICE_TYPE_VULKAN,
};
typedef struct AVHWDeviceInternal AVHWDeviceInternal;
@@ -248,7 +249,7 @@ const char *av_hwdevice_get_type_name(enum AVHWDeviceType type);
/**
* Iterate over supported device types.
*
- * @param type AV_HWDEVICE_TYPE_NONE initially, then the previous type
+ * @param prev AV_HWDEVICE_TYPE_NONE initially, then the previous type
* returned by this function in subsequent iterations.
* @return The next usable device type from enum AVHWDeviceType, or
* AV_HWDEVICE_TYPE_NONE if there are no more.
@@ -327,6 +328,26 @@ int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ctx,
enum AVHWDeviceType type,
AVBufferRef *src_ctx, int flags);
+/**
+ * Create a new device of the specified type from an existing device.
+ *
+ * This function performs the same action as av_hwdevice_ctx_create_derived,
+ * however, it is able to set options for the new device to be derived.
+ *
+ * @param dst_ctx On success, a reference to the newly-created
+ * AVHWDeviceContext.
+ * @param type The type of the new device to create.
+ * @param src_ctx A reference to an existing AVHWDeviceContext which will be
+ * used to create the new device.
+ * @param options Options for the new device to create, same format as in
+ * av_hwdevice_ctx_create.
+ * @param flags Currently unused; should be set to zero.
+ * @return Zero on success, a negative AVERROR code on failure.
+ */
+int av_hwdevice_ctx_create_derived_opts(AVBufferRef **dst_ctx,
+ enum AVHWDeviceType type,
+ AVBufferRef *src_ctx,
+ AVDictionary *options, int flags);
/**
* Allocate an AVHWFramesContext tied to a given device context.
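A short sketch (not part of the patch) of the options-aware derivation API declared above; the "debug" option key is purely hypothetical and only illustrates passing an AVDictionary.

/* Sketch, assuming <libavutil/dict.h> and <libavutil/hwcontext.h>. */
static int derive_vulkan_device(AVBufferRef *src_dev, AVBufferRef **out_dev)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "debug", "1", 0); /* hypothetical option key */
    ret = av_hwdevice_ctx_create_derived_opts(out_dev, AV_HWDEVICE_TYPE_VULKAN,
                                              src_dev, opts, 0);
    av_dict_free(&opts);
    return ret;
}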
@@ -550,6 +571,10 @@ enum {
* possible with the given arguments and hwframe setup, while other return
* values indicate that it failed somehow.
*
+ * On failure, the destination frame will be left blank, except for the
+ * hw_frames_ctx/format fields that may have been set by the caller - those will
+ * be preserved as they were.
+ *
* @param dst Destination frame, to contain the mapping.
* @param src Source frame, to be mapped.
* @param flags Some combination of AV_HWFRAME_MAP_* flags.
@@ -566,6 +591,7 @@ int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags);
*
* @param derived_frame_ctx On success, a reference to the newly created
* AVHWFramesContext.
+ * @param format The AVPixelFormat for the derived context.
* @param derived_device_ctx A reference to the device to create the new
* AVHWFramesContext on.
* @param source_frame_ctx A reference to an existing AVHWFramesContext
diff --git a/media/ffvpx/libavutil/hwcontext_internal.h b/media/ffvpx/libavutil/hwcontext_internal.h
index 77dc47ddd6..e6266494ac 100644
--- a/media/ffvpx/libavutil/hwcontext_internal.h
+++ b/media/ffvpx/libavutil/hwcontext_internal.h
@@ -67,7 +67,8 @@ typedef struct HWContextType {
int (*device_create)(AVHWDeviceContext *ctx, const char *device,
AVDictionary *opts, int flags);
int (*device_derive)(AVHWDeviceContext *dst_ctx,
- AVHWDeviceContext *src_ctx, int flags);
+ AVHWDeviceContext *src_ctx,
+ AVDictionary *opts, int flags);
int (*device_init)(AVHWDeviceContext *ctx);
void (*device_uninit)(AVHWDeviceContext *ctx);
@@ -172,5 +173,6 @@ extern const HWContextType ff_hwcontext_type_vaapi;
extern const HWContextType ff_hwcontext_type_vdpau;
extern const HWContextType ff_hwcontext_type_videotoolbox;
extern const HWContextType ff_hwcontext_type_mediacodec;
+extern const HWContextType ff_hwcontext_type_vulkan;
#endif /* AVUTIL_HWCONTEXT_INTERNAL_H */
diff --git a/media/ffvpx/libavutil/imgutils.c b/media/ffvpx/libavutil/imgutils.c
index c733cb5cf5..9ab5757cf6 100644
--- a/media/ffvpx/libavutil/imgutils.c
+++ b/media/ffvpx/libavutil/imgutils.c
@@ -108,45 +108,71 @@ int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int wi
return 0;
}
-int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
- uint8_t *ptr, const int linesizes[4])
+int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt,
+ int height, const ptrdiff_t linesizes[4])
{
- int i, total_size, size[4] = { 0 }, has_plane[4] = { 0 };
+ int i, has_plane[4] = { 0 };
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
- memset(data , 0, sizeof(data[0])*4);
+ memset(sizes , 0, sizeof(sizes[0])*4);
if (!desc || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
return AVERROR(EINVAL);
- data[0] = ptr;
- if (linesizes[0] > (INT_MAX - 1024) / height)
+ if (linesizes[0] > SIZE_MAX / height)
return AVERROR(EINVAL);
- size[0] = linesizes[0] * height;
+ sizes[0] = linesizes[0] * (size_t)height;
- if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
- desc->flags & FF_PSEUDOPAL) {
- data[1] = ptr + size[0]; /* palette is stored here as 256 32 bits words */
- return size[0] + 256 * 4;
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
+ sizes[1] = 256 * 4; /* palette is stored here as 256 32 bits words */
+ return 0;
}
for (i = 0; i < 4; i++)
has_plane[desc->comp[i].plane] = 1;
- total_size = size[0];
for (i = 1; i < 4 && has_plane[i]; i++) {
int h, s = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
- data[i] = data[i-1] + size[i-1];
h = (height + (1 << s) - 1) >> s;
- if (linesizes[i] > INT_MAX / h)
+ if (linesizes[i] > SIZE_MAX / h)
return AVERROR(EINVAL);
- size[i] = h * linesizes[i];
- if (total_size > INT_MAX - size[i])
+ sizes[i] = (size_t)h * linesizes[i];
+ }
+
+ return 0;
+}
+
+int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
+ uint8_t *ptr, const int linesizes[4])
+{
+ int i, ret;
+ ptrdiff_t linesizes1[4];
+ size_t sizes[4];
+
+ memset(data , 0, sizeof(data[0])*4);
+
+ for (i = 0; i < 4; i++)
+ linesizes1[i] = linesizes[i];
+
+ ret = av_image_fill_plane_sizes(sizes, pix_fmt, height, linesizes1);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+ for (i = 0; i < 4; i++) {
+ if (sizes[i] > INT_MAX - ret)
return AVERROR(EINVAL);
- total_size += size[i];
+ ret += sizes[i];
}
- return total_size;
+ if (!ptr)
+ return ret;
+
+ data[0] = ptr;
+ for (i = 1; i < 4 && sizes[i]; i++)
+ data[i] = data[i - 1] + sizes[i - 1];
+
+ return ret;
}
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
@@ -194,6 +220,8 @@ int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
int i, ret;
+ ptrdiff_t linesizes1[4];
+ size_t total_size, sizes[4];
uint8_t *buf;
if (!desc)
@@ -204,28 +232,36 @@ int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
if ((ret = av_image_fill_linesizes(linesizes, pix_fmt, align>7 ? FFALIGN(w, 8) : w)) < 0)
return ret;
- for (i = 0; i < 4; i++)
+ for (i = 0; i < 4; i++) {
linesizes[i] = FFALIGN(linesizes[i], align);
+ linesizes1[i] = linesizes[i];
+ }
- if ((ret = av_image_fill_pointers(pointers, pix_fmt, h, NULL, linesizes)) < 0)
+ if ((ret = av_image_fill_plane_sizes(sizes, pix_fmt, h, linesizes1)) < 0)
return ret;
- buf = av_malloc(ret + align);
+ total_size = align;
+ for (i = 0; i < 4; i++) {
+ if (total_size > SIZE_MAX - sizes[i])
+ return AVERROR(EINVAL);
+ total_size += sizes[i];
+ }
+ buf = av_malloc(total_size);
if (!buf)
return AVERROR(ENOMEM);
if ((ret = av_image_fill_pointers(pointers, pix_fmt, h, buf, linesizes)) < 0) {
av_free(buf);
return ret;
}
- if (desc->flags & AV_PIX_FMT_FLAG_PAL || (desc->flags & FF_PSEUDOPAL && pointers[1])) {
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
avpriv_set_systematic_pal2((uint32_t*)pointers[1], pix_fmt);
if (align < 4) {
av_log(NULL, AV_LOG_ERROR, "Formats with a palette require a minimum alignment of 4\n");
+ av_free(buf);
return AVERROR(EINVAL);
}
}
- if ((desc->flags & AV_PIX_FMT_FLAG_PAL ||
- desc->flags & FF_PSEUDOPAL) && pointers[1] &&
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL && pointers[1] &&
pointers[1] - pointers[0] > linesizes[0] * h) {
/* zero-initialize the padding before the palette */
memset(pointers[0] + linesizes[0] * h, 0,
@@ -320,9 +356,9 @@ static void image_copy_plane(uint8_t *dst, ptrdiff_t dst_linesize,
}
}
-static void image_copy_plane_uc_from(uint8_t *dst, ptrdiff_t dst_linesize,
- const uint8_t *src, ptrdiff_t src_linesize,
- ptrdiff_t bytewidth, int height)
+void av_image_copy_plane_uc_from(uint8_t *dst, ptrdiff_t dst_linesize,
+ const uint8_t *src, ptrdiff_t src_linesize,
+ ptrdiff_t bytewidth, int height)
{
int ret = -1;
@@ -353,8 +389,7 @@ static void image_copy(uint8_t *dst_data[4], const ptrdiff_t dst_linesizes[4],
if (!desc || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
return;
- if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
- desc->flags & FF_PSEUDOPAL) {
+ if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
copy_plane(dst_data[0], dst_linesizes[0],
src_data[0], src_linesizes[0],
width, height);
@@ -405,7 +440,7 @@ void av_image_copy_uc_from(uint8_t *dst_data[4], const ptrdiff_t dst_linesizes[4
enum AVPixelFormat pix_fmt, int width, int height)
{
image_copy(dst_data, dst_linesizes, src_data, src_linesizes, pix_fmt,
- width, height, image_copy_plane_uc_from);
+ width, height, av_image_copy_plane_uc_from);
}
int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4],
@@ -431,9 +466,10 @@ int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4],
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt,
int width, int height, int align)
{
- uint8_t *data[4];
+ int ret, i;
int linesize[4];
- int ret;
+ ptrdiff_t aligned_linesize[4];
+ size_t sizes[4];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
if (!desc)
return AVERROR(EINVAL);
@@ -442,12 +478,24 @@ int av_image_get_buffer_size(enum AVPixelFormat pix_fmt,
if (ret < 0)
return ret;
- // do not include palette for these pseudo-paletted formats
- if (desc->flags & FF_PSEUDOPAL)
- return FFALIGN(width, align) * height;
+ ret = av_image_fill_linesizes(linesize, pix_fmt, width);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 4; i++)
+ aligned_linesize[i] = FFALIGN(linesize[i], align);
+
+ ret = av_image_fill_plane_sizes(sizes, pix_fmt, height, aligned_linesize);
+ if (ret < 0)
+ return ret;
- return av_image_fill_arrays(data, linesize, NULL, pix_fmt,
- width, height, align);
+ ret = 0;
+ for (i = 0; i < 4; i++) {
+ if (sizes[i] > INT_MAX - ret)
+ return AVERROR(EINVAL);
+ ret += sizes[i];
+ }
+ return ret;
}
int av_image_copy_to_buffer(uint8_t *dst, int dst_size,
@@ -519,7 +567,6 @@ static void memset_bytes(uint8_t *dst, size_t dst_size, uint8_t *clear,
if (clear_size == 1) {
memset(dst, clear[0], dst_size);
- dst_size = 0;
} else {
if (clear_size > dst_size)
clear_size = dst_size;
diff --git a/media/ffvpx/libavutil/imgutils.h b/media/ffvpx/libavutil/imgutils.h
index 5b790ecf0a..e10ac14952 100644
--- a/media/ffvpx/libavutil/imgutils.h
+++ b/media/ffvpx/libavutil/imgutils.h
@@ -27,8 +27,10 @@
* @{
*/
-#include "avutil.h"
+#include <stddef.h>
+#include <stdint.h>
#include "pixdesc.h"
+#include "pixfmt.h"
#include "rational.h"
/**
@@ -46,6 +48,7 @@
* component in the plane with the max pixel step.
* @param max_pixstep_comps an array which is filled with the component
* for each plane which has the max pixel step. May be NULL.
+ * @param pixdesc the AVPixFmtDescriptor for the image, describing its format
*/
void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],
const AVPixFmtDescriptor *pixdesc);
@@ -63,15 +66,35 @@ int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane);
* width width.
*
* @param linesizes array to be filled with the linesize for each plane
+ * @param pix_fmt the AVPixelFormat of the image
+ * @param width width of the image in pixels
* @return >= 0 in case of success, a negative error code otherwise
*/
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width);
/**
+ * Fill plane sizes for an image with pixel format pix_fmt and height height.
+ *
+ * @param size the array to be filled with the size of each image plane
+ * @param pix_fmt the AVPixelFormat of the image
+ * @param height height of the image in pixels
+ * @param linesizes the array containing the linesize for each
+ * plane, should be filled by av_image_fill_linesizes()
+ * @return >= 0 in case of success, a negative error code otherwise
+ *
+ * @note The linesize parameters have the type ptrdiff_t here, while they are
+ * int for av_image_fill_linesizes().
+ */
+int av_image_fill_plane_sizes(size_t size[4], enum AVPixelFormat pix_fmt,
+ int height, const ptrdiff_t linesizes[4]);
+
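For illustration (not part of the patch), a sketch of computing a total buffer size with the new av_image_fill_plane_sizes(), mirroring how the reworked av_image_get_buffer_size() in imgutils.c uses it:

/* Sketch, assuming <libavutil/imgutils.h> and <libavutil/pixfmt.h>. */
static int64_t yuv420p_buffer_size(int width, int height)
{
    int linesizes[4];
    ptrdiff_t linesizes1[4];
    size_t sizes[4];
    int64_t total = 0;
    int i, ret;

    ret = av_image_fill_linesizes(linesizes, AV_PIX_FMT_YUV420P, width);
    if (ret < 0)
        return ret;

    for (i = 0; i < 4; i++)
        linesizes1[i] = linesizes[i]; /* int -> ptrdiff_t, as the new API requires */

    ret = av_image_fill_plane_sizes(sizes, AV_PIX_FMT_YUV420P, height, linesizes1);
    if (ret < 0)
        return ret;

    for (i = 0; i < 4; i++)
        total += sizes[i];
    return total;
}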
+/**
* Fill plane data pointers for an image with pixel format pix_fmt and
* height height.
*
* @param data pointers array to be filled with the pointer for each image plane
+ * @param pix_fmt the AVPixelFormat of the image
+ * @param height height of the image in pixels
* @param ptr the pointer to a buffer which will contain the image
* @param linesizes the array containing the linesize for each
* plane, should be filled by av_image_fill_linesizes()
@@ -87,6 +110,11 @@ int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int hei
* The allocated image buffer has to be freed by using
* av_freep(&pointers[0]).
*
+ * @param pointers array to be filled with the pointer for each image plane
+ * @param linesizes the array filled with the linesize for each plane
+ * @param w width of the image in pixels
+ * @param h height of the image in pixels
+ * @param pix_fmt the AVPixelFormat of the image
* @param align the value to use for buffer size alignment
* @return the size in bytes required for the image buffer, a negative
* error code in case of failure
@@ -103,18 +131,44 @@ int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
* bytewidth must be contained by both absolute values of dst_linesize
* and src_linesize, otherwise the function behavior is undefined.
*
+ * @param dst destination plane to copy to
* @param dst_linesize linesize for the image plane in dst
+ * @param src source plane to copy from
* @param src_linesize linesize for the image plane in src
+ * @param height height (number of lines) of the plane
*/
void av_image_copy_plane(uint8_t *dst, int dst_linesize,
const uint8_t *src, int src_linesize,
int bytewidth, int height);
/**
+ * Copy image data located in uncacheable (e.g. GPU mapped) memory. Where
+ * available, this function will use special functionality for reading from such
+ * memory, which may result in greatly improved performance compared to plain
+ * av_image_copy_plane().
+ *
+ * bytewidth must be contained by both absolute values of dst_linesize
+ * and src_linesize, otherwise the function behavior is undefined.
+ *
+ * @note The linesize parameters have the type ptrdiff_t here, while they are
+ * int for av_image_copy_plane().
+ * @note On x86, the linesizes currently need to be aligned to the cacheline
+ * size (i.e. 64) to get improved performance.
+ */
+void av_image_copy_plane_uc_from(uint8_t *dst, ptrdiff_t dst_linesize,
+ const uint8_t *src, ptrdiff_t src_linesize,
+ ptrdiff_t bytewidth, int height);
+
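A trivial sketch (not part of the patch) of reading one plane back from mapped GPU memory with the newly exported helper:

static void read_back_plane(uint8_t *dst, ptrdiff_t dst_linesize,
                            const uint8_t *mapped_src, ptrdiff_t src_linesize,
                            ptrdiff_t bytewidth, int height)
{
    /* Uses the accelerated uncacheable-read path where available and
     * falls back to an ordinary copy otherwise. */
    av_image_copy_plane_uc_from(dst, dst_linesize, mapped_src, src_linesize,
                                bytewidth, height);
}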
+/**
* Copy image in src_data to dst_data.
*
+ * @param dst_data destination image data buffer to copy to
* @param dst_linesizes linesizes for the image in dst_data
+ * @param src_data source image data buffer to copy from
* @param src_linesizes linesizes for the image in src_data
+ * @param pix_fmt the AVPixelFormat of the image
+ * @param width width of the image in pixels
+ * @param height height of the image in pixels
*/
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],
const uint8_t *src_data[4], const int src_linesizes[4],
diff --git a/media/ffvpx/libavutil/integer.c b/media/ffvpx/libavutil/integer.c
index 78e252fbde..ae87c467b2 100644
--- a/media/ffvpx/libavutil/integer.c
+++ b/media/ffvpx/libavutil/integer.c
@@ -25,9 +25,11 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
-#include "common.h"
+#include <string.h>
+
#include "integer.h"
#include "avassert.h"
+#include "intmath.h"
static const AVInteger zero_i;
@@ -101,8 +103,8 @@ AVInteger av_shr_i(AVInteger a, int s){
for(i=0; i<AV_INTEGER_SIZE; i++){
unsigned int index= i + (s>>4);
unsigned int v=0;
- if(index+1<AV_INTEGER_SIZE) v = a.v[index+1]<<16;
- if(index <AV_INTEGER_SIZE) v+= a.v[index ];
+ if (index + 1 < AV_INTEGER_SIZE) v = a.v[index + 1] * (1U << 16);
+ if (index < AV_INTEGER_SIZE) v |= a.v[index];
out.v[i]= v >> (s&15);
}
return out;
@@ -156,11 +158,9 @@ AVInteger av_int2i(int64_t a){
}
int64_t av_i2int(AVInteger a){
- int i;
- int64_t out=(int8_t)a.v[AV_INTEGER_SIZE-1];
+ uint64_t out = a.v[3];
- for(i= AV_INTEGER_SIZE-2; i>=0; i--){
- out = (out<<16) + a.v[i];
- }
+ for (int i = 2; i >= 0; i--)
+ out = (out << 16) | a.v[i];
return out;
}
diff --git a/media/ffvpx/libavutil/integer.h b/media/ffvpx/libavutil/integer.h
index 45f733c04c..2d9b5bb10f 100644
--- a/media/ffvpx/libavutil/integer.h
+++ b/media/ffvpx/libavutil/integer.h
@@ -29,7 +29,7 @@
#define AVUTIL_INTEGER_H
#include <stdint.h>
-#include "common.h"
+#include "attributes.h"
#define AV_INTEGER_SIZE 8
diff --git a/media/ffvpx/libavutil/internal.h b/media/ffvpx/libavutil/internal.h
index 3a5d1c7bc6..454c59aa50 100644
--- a/media/ffvpx/libavutil/internal.h
+++ b/media/ffvpx/libavutil/internal.h
@@ -37,15 +37,12 @@
#include <stdint.h>
#include <stddef.h>
#include <assert.h>
+#include <stdio.h>
#include "config.h"
#include "attributes.h"
#include "timer.h"
-#include "cpu.h"
-#include "dict.h"
#include "macros.h"
-#include "mem.h"
#include "pixfmt.h"
-#include "version.h"
#if ARCH_X86
# include "x86/emms.h"
@@ -88,14 +85,6 @@
#define FF_MEMORY_POISON 0x2a
-#define MAKE_ACCESSORS(str, name, type, field) \
- type av_##name##_get_##field(const str *s) { return s->field; } \
- void av_##name##_set_##field(str *s, type v) { s->field = v; }
-
-// Some broken preprocessors need a second expansion
-// to be forced to tokenize __VA_ARGS__
-#define E1(x) x
-
/* Check if the hard coded offset of a struct member still matches reality.
* Induce a compilation failure if not.
*/
@@ -103,84 +92,22 @@
int x_##o[offsetof(s, m) == o? 1: -1]; \
}
-#define LOCAL_ALIGNED_A(a, t, v, s, o, ...) \
- uint8_t la_##v[sizeof(t s o) + (a)]; \
- t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)
-
-#define LOCAL_ALIGNED_D(a, t, v, s, o, ...) \
- DECLARE_ALIGNED(a, t, la_##v) s o; \
- t (*v) o = la_##v
-
-#define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
-
-#if HAVE_LOCAL_ALIGNED
-# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
-#else
-# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
-#endif
-
-#if HAVE_LOCAL_ALIGNED
-# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
-#else
-# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
-#endif
-
-#if HAVE_LOCAL_ALIGNED
-# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
-#else
-# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
-#endif
-#if HAVE_LOCAL_ALIGNED
-# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
-#else
-# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
-#endif
-
-#define FF_ALLOC_OR_GOTO(ctx, p, size, label)\
-{\
- p = av_malloc(size);\
- if (!(p) && (size) != 0) {\
- av_log(ctx, AV_LOG_ERROR, "Cannot allocate memory.\n");\
- goto label;\
- }\
-}
-
-#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)\
-{\
- p = av_mallocz(size);\
- if (!(p) && (size) != 0) {\
- av_log(ctx, AV_LOG_ERROR, "Cannot allocate memory.\n");\
- goto label;\
- }\
-}
-
-#define FF_ALLOC_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)\
-{\
- p = av_malloc_array(nelem, elsize);\
- if (!p) {\
- av_log(ctx, AV_LOG_ERROR, "Cannot allocate memory.\n");\
- goto label;\
- }\
-}
-
-#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)\
-{\
- p = av_mallocz_array(nelem, elsize);\
- if (!p) {\
- av_log(ctx, AV_LOG_ERROR, "Cannot allocate memory.\n");\
- goto label;\
- }\
-}
+#define FF_ALLOC_TYPED_ARRAY(p, nelem) (p = av_malloc_array(nelem, sizeof(*p)))
+#define FF_ALLOCZ_TYPED_ARRAY(p, nelem) (p = av_calloc(nelem, sizeof(*p)))
#define FF_PTR_ADD(ptr, off) ((off) ? (ptr) + (off) : (ptr))
+/**
+ * Access a field in a structure by its offset.
+ */
+#define FF_FIELD_AT(type, off, obj) (*(type *)((char *)&(obj) + (off)))
+
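As a sketch (not part of the patch), the new FF_FIELD_AT() macro reads a struct field through a byte offset known only at run time; the struct and helper below are hypothetical:

#include <stddef.h> /* offsetof */

typedef struct Example { int flags; int level; } Example;

static int get_level(Example *e)
{
    /* Equivalent to e->level, but the offset could come from runtime data,
     * such as an AVClass offset field. */
    return FF_FIELD_AT(int, offsetof(Example, level), *e);
}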
#include "libm.h"
/**
* Return NULL if CONFIG_SMALL is true, otherwise the argument
- * without modification. Used to disable the definition of strings
- * (for example AVCodec long_names).
+ * without modification. Used to disable the definition of strings.
*/
#if CONFIG_SMALL
# define NULL_IF_CONFIG_SMALL(x) NULL
@@ -189,45 +116,6 @@
#endif
/**
- * Define a function with only the non-default version specified.
- *
- * On systems with ELF shared libraries, all symbols exported from
- * FFmpeg libraries are tagged with the name and major version of the
- * library to which they belong. If a function is moved from one
- * library to another, a wrapper must be retained in the original
- * location to preserve binary compatibility.
- *
- * Functions defined with this macro will never be used to resolve
- * symbols by the build-time linker.
- *
- * @param type return type of function
- * @param name name of function
- * @param args argument list of function
- * @param ver version tag to assign function
- */
-#if HAVE_SYMVER_ASM_LABEL
-# define FF_SYMVER(type, name, args, ver) \
- type ff_##name args __asm__ (EXTERN_PREFIX #name "@" ver); \
- type ff_##name args
-#elif HAVE_SYMVER_GNU_ASM
-# define FF_SYMVER(type, name, args, ver) \
- __asm__ (".symver ff_" #name "," EXTERN_PREFIX #name "@" ver); \
- type ff_##name args; \
- type ff_##name args
-#endif
-
-/**
- * Return NULL if a threading library has not been enabled.
- * Used to disable threading functions in AVCodec definitions
- * when not needed.
- */
-#if HAVE_THREADS
-# define ONLY_IF_THREADS_ENABLED(x) x
-#else
-# define ONLY_IF_THREADS_ENABLED(x) NULL
-#endif
-
-/**
* Log a generic warning message about a missing feature.
*
* @param[in] avc a pointer to an arbitrary struct of which the first
@@ -255,8 +143,6 @@ void avpriv_request_sample(void *avc,
#pragma comment(linker, "/include:" EXTERN_PREFIX "avpriv_snprintf")
#endif
-#define avpriv_open ff_open
-#define avpriv_tempfile ff_tempfile
#define PTRDIFF_SPECIFIER "Id"
#define SIZE_SPECIFIER "Iu"
#else
@@ -270,6 +156,12 @@ void avpriv_request_sample(void *avc,
# define ff_dlog(ctx, ...) do { if (0) av_log(ctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)
#endif
+#ifdef TRACE
+# define ff_tlog(ctx, ...) av_log(ctx, AV_LOG_TRACE, __VA_ARGS__)
+#else
+# define ff_tlog(ctx, ...) do { } while(0)
+#endif
+
// For debugging we use signed operations so overflows can be detected (by ubsan)
// For production we use unsigned so there are no undefined operations
#ifdef CHECKED
@@ -280,61 +172,6 @@ void avpriv_request_sample(void *avc,
#define SUINT32 uint32_t
#endif
-/**
- * Clip and convert a double value into the long long amin-amax range.
- * This function is needed because conversion of floating point to integers when
- * it does not fit in the integer's representation does not necessarily saturate
- * correctly (usually converted to a cvttsd2si on x86) which saturates numbers
- * > INT64_MAX to INT64_MIN. The standard marks such conversions as undefined
- * behavior, allowing this sort of mathematically bogus conversions. This provides
- * a safe alternative that is slower obviously but assures safety and better
- * mathematical behavior.
- * @param a value to clip
- * @param amin minimum value of the clip range
- * @param amax maximum value of the clip range
- * @return clipped value
- */
-static av_always_inline av_const int64_t ff_rint64_clip(double a, int64_t amin, int64_t amax)
-{
- int64_t res;
-#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
- if (amin > amax) abort();
-#endif
- // INT64_MAX+1,INT64_MIN are exactly representable as IEEE doubles
- // do range checks first
- if (a >= 9223372036854775808.0)
- return amax;
- if (a <= -9223372036854775808.0)
- return amin;
-
- // safe to call llrint and clip accordingly
- res = llrint(a);
- if (res > amax)
- return amax;
- if (res < amin)
- return amin;
- return res;
-}
-
-/**
- * A wrapper for open() setting O_CLOEXEC.
- */
-av_warn_unused_result
-int avpriv_open(const char *filename, int flags, ...);
-
-/**
- * Wrapper to work around the lack of mkstemp() on mingw.
- * Also, tries to create file in /tmp first, if possible.
- * *prefix can be a character constant; *filename will be allocated internally.
- * @return file descriptor of opened file (or negative value corresponding to an
- * AVERROR code on error)
- * and opened file name in **filename.
- * @note On very old libcs it is necessary to set a secure umask before
- * calling this, av_tempfile() can't call umask itself as it is used in
- * libraries and could interfere with the calling application.
- */
-int avpriv_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx);
-
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt);
static av_always_inline av_const int avpriv_mirror(int x, int w)
@@ -350,25 +187,4 @@ static av_always_inline av_const int avpriv_mirror(int x, int w)
return x;
}
-void ff_check_pixfmt_descriptors(void);
-
-/**
- * Set a dictionary value to an ISO-8601 compliant timestamp string.
- *
- * @param s AVFormatContext
- * @param key metadata key
- * @param timestamp unix timestamp in microseconds
- * @return <0 on error
- */
-int avpriv_dict_set_timestamp(AVDictionary **dict, const char *key, int64_t timestamp);
-
-// Helper macro for AV_PIX_FMT_FLAG_PSEUDOPAL deprecation. Code inside FFmpeg
-// should always use FF_PSEUDOPAL. Once the public API flag gets removed, all
-// code using it is dead code.
-#if FF_API_PSEUDOPAL
-#define FF_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL
-#else
-#define FF_PSEUDOPAL 0
-#endif
-
#endif /* AVUTIL_INTERNAL_H */
diff --git a/media/ffvpx/libavutil/intmath.h b/media/ffvpx/libavutil/intmath.h
index 9573109e9d..c54d23b7bf 100644
--- a/media/ffvpx/libavutil/intmath.h
+++ b/media/ffvpx/libavutil/intmath.h
@@ -28,8 +28,9 @@
#if ARCH_ARM
# include "arm/intmath.h"
-#endif
-#if ARCH_X86
+#elif ARCH_RISCV
+# include "riscv/intmath.h"
+#elif ARCH_X86
# include "x86/intmath.h"
#endif
diff --git a/media/ffvpx/libavutil/lls.c b/media/ffvpx/libavutil/lls.c
index 0560b6a79c..c1e038daf1 100644
--- a/media/ffvpx/libavutil/lls.c
+++ b/media/ffvpx/libavutil/lls.c
@@ -28,9 +28,8 @@
#include <math.h>
#include <string.h>
+#include "config.h"
#include "attributes.h"
-#include "internal.h"
-#include "version.h"
#include "lls.h"
static void update_lls(LLSModel *m, const double *var)
@@ -118,6 +117,7 @@ av_cold void avpriv_init_lls(LLSModel *m, int indep_count)
m->indep_count = indep_count;
m->update_lls = update_lls;
m->evaluate_lls = evaluate_lls;
- if (ARCH_X86)
- ff_init_lls_x86(m);
+#if ARCH_X86
+ ff_init_lls_x86(m);
+#endif
}
diff --git a/media/ffvpx/libavutil/lls.h b/media/ffvpx/libavutil/lls.h
index 1a276d537d..0709275822 100644
--- a/media/ffvpx/libavutil/lls.h
+++ b/media/ffvpx/libavutil/lls.h
@@ -24,8 +24,7 @@
#define AVUTIL_LLS_H
#include "macros.h"
-#include "mem.h"
-#include "version.h"
+#include "mem_internal.h"
#define MAX_VARS 32
#define MAX_VARS_ALIGN FFALIGN(MAX_VARS+1,4)
diff --git a/media/ffvpx/libavutil/log.c b/media/ffvpx/libavutil/log.c
index 93a156b8e4..5948e50467 100644
--- a/media/ffvpx/libavutil/log.c
+++ b/media/ffvpx/libavutil/log.c
@@ -32,9 +32,11 @@
#if HAVE_IO_H
#include <io.h>
#endif
+#include <inttypes.h>
#include <stdarg.h>
+#include <stdio.h>
#include <stdlib.h>
-#include "avutil.h"
+#include <string.h>
#include "bprint.h"
#include "common.h"
#include "internal.h"
@@ -55,7 +57,7 @@ static int av_log_level = AV_LOG_INFO;
static int flags;
#define NB_LEVELS 8
-#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
#include <windows.h>
static const uint8_t color[16 + AV_CLASS_CATEGORY_NB] = {
[AV_LOG_PANIC /8] = 12,
@@ -120,50 +122,68 @@ static const uint32_t color[16 + AV_CLASS_CATEGORY_NB] = {
#endif
static int use_color = -1;
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
+static void win_console_puts(const char *str)
+{
+ const uint8_t *q = str;
+ uint16_t line[LINE_SZ];
+
+ while (*q) {
+ uint16_t *buf = line;
+ DWORD nb_chars = 0;
+ DWORD written;
+
+ while (*q && nb_chars < LINE_SZ - 1) {
+ uint32_t ch;
+ uint16_t tmp;
+
+ GET_UTF8(ch, *q ? *q++ : 0, ch = 0xfffd; goto continue_on_invalid;)
+continue_on_invalid:
+ PUT_UTF16(ch, tmp, *buf++ = tmp; nb_chars++;)
+ }
+
+ WriteConsoleW(con, line, nb_chars, &written, NULL);
+ }
+}
+#endif
+
static void check_color_terminal(void)
{
-#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE
+ char *term = getenv("TERM");
+
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
CONSOLE_SCREEN_BUFFER_INFO con_info;
+ DWORD dummy;
con = GetStdHandle(STD_ERROR_HANDLE);
- use_color = (con != INVALID_HANDLE_VALUE) && !getenv("NO_COLOR") &&
- !getenv("AV_LOG_FORCE_NOCOLOR");
- if (use_color) {
+ if (con != INVALID_HANDLE_VALUE && !GetConsoleMode(con, &dummy))
+ con = INVALID_HANDLE_VALUE;
+ if (con != INVALID_HANDLE_VALUE) {
GetConsoleScreenBufferInfo(con, &con_info);
attr_orig = con_info.wAttributes;
background = attr_orig & 0xF0;
}
+#endif
+
+ if (getenv("AV_LOG_FORCE_NOCOLOR")) {
+ use_color = 0;
+ } else if (getenv("AV_LOG_FORCE_COLOR")) {
+ use_color = 1;
+ } else {
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
+ use_color = (con != INVALID_HANDLE_VALUE);
#elif HAVE_ISATTY
- char *term = getenv("TERM");
- use_color = !getenv("NO_COLOR") && !getenv("AV_LOG_FORCE_NOCOLOR") &&
- (getenv("TERM") && isatty(2) || getenv("AV_LOG_FORCE_COLOR"));
- if ( getenv("AV_LOG_FORCE_256COLOR")
- || (term && strstr(term, "256color")))
- use_color *= 256;
+ use_color = (term && isatty(2));
#else
- use_color = getenv("AV_LOG_FORCE_COLOR") && !getenv("NO_COLOR") &&
- !getenv("AV_LOG_FORCE_NOCOLOR");
+ use_color = 0;
#endif
+ }
+
+ if (getenv("AV_LOG_FORCE_256COLOR") || term && strstr(term, "256color"))
+ use_color *= 256;
}
-static void colored_fputs(int level, int tint, const char *str)
+static void ansi_fputs(int level, int tint, const char *str, int local_use_color)
{
- int local_use_color;
- if (!*str)
- return;
-
- if (use_color < 0)
- check_color_terminal();
-
- if (level == AV_LOG_INFO/8) local_use_color = 0;
- else local_use_color = use_color;
-
-#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE
- if (local_use_color)
- SetConsoleTextAttribute(con, background | color[level]);
- fputs(str, stderr);
- if (local_use_color)
- SetConsoleTextAttribute(con, attr_orig);
-#else
if (local_use_color == 1) {
fprintf(stderr,
"\033[%"PRIu32";3%"PRIu32"m%s\033[0m",
@@ -184,6 +204,32 @@ static void colored_fputs(int level, int tint, const char *str)
str);
} else
fputs(str, stderr);
+}
+
+static void colored_fputs(int level, int tint, const char *str)
+{
+ int local_use_color;
+ if (!*str)
+ return;
+
+ if (use_color < 0)
+ check_color_terminal();
+
+ if (level == AV_LOG_INFO/8) local_use_color = 0;
+ else local_use_color = use_color;
+
+#if defined(_WIN32) && HAVE_SETCONSOLETEXTATTRIBUTE && HAVE_GETSTDHANDLE
+ if (con != INVALID_HANDLE_VALUE) {
+ if (local_use_color)
+ SetConsoleTextAttribute(con, background | color[level]);
+ win_console_puts(str);
+ if (local_use_color)
+ SetConsoleTextAttribute(con, attr_orig);
+ } else {
+ ansi_fputs(level, tint, str, local_use_color);
+ }
+#else
+ ansi_fputs(level, tint, str, local_use_color);
#endif
}
@@ -226,6 +272,8 @@ static const char *get_level_str(int level)
return "quiet";
case AV_LOG_DEBUG:
return "debug";
+ case AV_LOG_TRACE:
+ return "trace";
case AV_LOG_VERBOSE:
return "verbose";
case AV_LOG_INFO:
@@ -360,19 +408,28 @@ static void (*av_log_callback)(void*, int, const char*, va_list) =
void av_log(void* avcl, int level, const char *fmt, ...)
{
- AVClass* avc = avcl ? *(AVClass **) avcl : NULL;
va_list vl;
va_start(vl, fmt);
- if (avc && avc->version >= (50 << 16 | 15 << 8 | 2) &&
- avc->log_level_offset_offset && level >= AV_LOG_FATAL)
- level += *(int *) (((uint8_t *) avcl) + avc->log_level_offset_offset);
av_vlog(avcl, level, fmt, vl);
va_end(vl);
}
+void av_log_once(void* avcl, int initial_level, int subsequent_level, int *state, const char *fmt, ...)
+{
+ va_list vl;
+ va_start(vl, fmt);
+ av_vlog(avcl, *state ? subsequent_level : initial_level, fmt, vl);
+ va_end(vl);
+ *state = 1;
+}
+
void av_vlog(void* avcl, int level, const char *fmt, va_list vl)
{
+ AVClass* avc = avcl ? *(AVClass **) avcl : NULL;
void (*log_callback)(void*, int, const char*, va_list) = av_log_callback;
+ if (avc && avc->version >= (50 << 16 | 15 << 8 | 2) &&
+ avc->log_level_offset_offset && level >= AV_LOG_FATAL)
+ level += *(int *) (((uint8_t *) avcl) + avc->log_level_offset_offset);
if (log_callback)
log_callback(avcl, level, fmt, vl);
}
@@ -412,7 +469,7 @@ static void missing_feature_sample(int sample, void *avc, const char *msg,
"been implemented.\n");
if (sample)
av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample "
- "of this file to ftp://upload.ffmpeg.org/incoming/ "
+ "of this file to https://streams.videolan.org/upload/ "
"and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n");
}
diff --git a/media/ffvpx/libavutil/log.h b/media/ffvpx/libavutil/log.h
index d9554e609d..ab7ceabe22 100644
--- a/media/ffvpx/libavutil/log.h
+++ b/media/ffvpx/libavutil/log.h
@@ -22,7 +22,6 @@
#define AVUTIL_LOG_H
#include <stdarg.h>
-#include "avutil.h"
#include "attributes.h"
#include "version.h"
@@ -108,21 +107,6 @@ typedef struct AVClass {
int parent_log_context_offset;
/**
- * Return next AVOptions-enabled child or NULL
- */
- void* (*child_next)(void *obj, void *prev);
-
- /**
- * Return an AVClass corresponding to the next potential
- * AVOptions-enabled child.
- *
- * The difference between child_next and this is that
- * child_next iterates over _already existing_ objects, while
- * child_class_next iterates over _all possible_ children.
- */
- const struct AVClass* (*child_class_next)(const struct AVClass *prev);
-
- /**
* Category used for visualization (like color)
* This is only set if the category is equal for all objects using this class.
* available since version (51 << 16 | 56 << 8 | 100)
@@ -140,6 +124,26 @@ typedef struct AVClass {
* available since version (52.12)
*/
int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags);
+
+ /**
+ * Return next AVOptions-enabled child or NULL
+ */
+ void* (*child_next)(void *obj, void *prev);
+
+ /**
+ * Iterate over the AVClasses corresponding to potential AVOptions-enabled
+ * children.
+ *
+ * @param iter pointer to opaque iteration state. The caller must initialize
+ * *iter to NULL before the first call.
+ * @return AVClass for the next AVOptions-enabled child or NULL if there are
+ * no more such children.
+ *
+ * @note The difference between child_next and this is that child_next
+ * iterates over _already existing_ objects, while child_class_iterate
+ * iterates over _all possible_ children.
+ */
+ const struct AVClass* (*child_class_iterate)(void **iter);
} AVClass;
/**
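A brief sketch (not part of the patch) of walking the new child_class_iterate() callback, which replaces the removed child_class_next():

static void dump_child_classes(const AVClass *parent)
{
    void *iter = NULL;
    const AVClass *child;

    if (!parent->child_class_iterate)
        return;

    while ((child = parent->child_class_iterate(&iter)))
        av_log(NULL, AV_LOG_INFO, "child class: %s\n", child->class_name);
}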
@@ -233,6 +237,27 @@ typedef struct AVClass {
*/
void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);
+/**
+ * Send the specified message to the log once with the initial_level and then with
+ * the subsequent_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct or NULL if general log.
+ * @param initial_level importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant" for the first occurrence.
+ * @param subsequent_level importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant" after the first occurrence.
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param state a variable to keep track of whether a message has already been
+ * printed; this must be initialized to 0 before the first use. The same
+ * state must not be accessed by two threads simultaneously.
+ */
+void av_log_once(void* avcl, int initial_level, int subsequent_level, int *state, const char *fmt, ...) av_printf_format(5, 6);
+
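A minimal sketch (not part of the patch) of the new av_log_once(): the first call logs at the initial level, and later calls with the same state variable are demoted. The message and context are illustrative.

static void warn_unsupported(void *ctx)
{
    static int warned; /* must start at 0; see the thread-safety note above */
    av_log_once(ctx, AV_LOG_WARNING, AV_LOG_VERBOSE, &warned,
                "Feature X is not supported; ignoring.\n");
}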
/**
* Send the specified message to the log if the level is less than or equal
diff --git a/media/ffvpx/libavutil/macros.h b/media/ffvpx/libavutil/macros.h
index 2007ee5619..2a7567c3ea 100644
--- a/media/ffvpx/libavutil/macros.h
+++ b/media/ffvpx/libavutil/macros.h
@@ -25,6 +25,36 @@
#ifndef AVUTIL_MACROS_H
#define AVUTIL_MACROS_H
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+# define AV_NE(be, le) (be)
+#else
+# define AV_NE(be, le) (le)
+#endif
+
+/**
+ * Comparator.
+ * For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0
+ * if x == y. This is useful for instance in a qsort comparator callback.
+ * Furthermore, compilers are able to optimize this to branchless code, and
+ * there is no risk of overflow with signed types.
+ * As with many macros, this evaluates its arguments multiple times; they thus
+ * must not have side effects.
+ */
+#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y)))
+
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+
+#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
+#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
+
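For illustration (not part of the patch), FFDIFFSIGN is typically used as an overflow-safe qsort comparator, as the documentation above suggests:

static int cmp_int(const void *a, const void *b)
{
    int x = *(const int *)a, y = *(const int *)b;
    return FFDIFFSIGN(x, y); /* 1, -1 or 0 without the overflow risk of x - y */
}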
/**
* @addtogroup preproc_misc Preprocessor String Macros
*
diff --git a/media/ffvpx/libavutil/mathematics.c b/media/ffvpx/libavutil/mathematics.c
index 1bf044cdf1..b878317d63 100644
--- a/media/ffvpx/libavutil/mathematics.c
+++ b/media/ffvpx/libavutil/mathematics.c
@@ -26,11 +26,11 @@
#include <stdint.h>
#include <limits.h>
+#include "avutil.h"
#include "mathematics.h"
#include "libavutil/intmath.h"
#include "libavutil/common.h"
#include "avassert.h"
-#include "version.h"
/* Stein's binary GCD algorithm:
* https://en.wikipedia.org/wiki/Binary_GCD_algorithm */
@@ -148,7 +148,7 @@ int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
{
int64_t a = tb_a.num * (int64_t)tb_b.den;
int64_t b = tb_b.num * (int64_t)tb_a.den;
- if ((FFABS(ts_a)|a|FFABS(ts_b)|b) <= INT_MAX)
+ if ((FFABS64U(ts_a)|a|FFABS64U(ts_b)|b) <= INT_MAX)
return (ts_a*a > ts_b*b) - (ts_a*a < ts_b*b);
if (av_rescale_rnd(ts_a, a, b, AV_ROUND_DOWN) < ts_b)
return -1;
@@ -198,7 +198,7 @@ int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t i
m = inc_tb.num * (int64_t)ts_tb.den;
d = inc_tb.den * (int64_t)ts_tb.num;
- if (m % d == 0)
+ if (m % d == 0 && ts <= INT64_MAX - m / d)
return ts + m / d;
if (m < d)
return ts;
@@ -206,6 +206,10 @@ int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t i
{
int64_t old = av_rescale_q(ts, ts_tb, inc_tb);
int64_t old_ts = av_rescale_q(old, inc_tb, ts_tb);
- return av_rescale_q(old + 1, inc_tb, ts_tb) + (ts - old_ts);
+
+ if (old == INT64_MAX || old == AV_NOPTS_VALUE || old_ts == AV_NOPTS_VALUE)
+ return ts;
+
+ return av_sat_add64(av_rescale_q(old + 1, inc_tb, ts_tb), ts - old_ts);
}
}
diff --git a/media/ffvpx/libavutil/mathematics.h b/media/ffvpx/libavutil/mathematics.h
index 64d4137a60..e4aff1e973 100644
--- a/media/ffvpx/libavutil/mathematics.h
+++ b/media/ffvpx/libavutil/mathematics.h
@@ -111,7 +111,8 @@ enum AVRounding {
/**
* Compute the greatest common divisor of two integer operands.
*
- * @param a,b Operands
+ * @param a Operand
+ * @param b Operand
* @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0;
* if a == 0 and b == 0, returns 0.
*/
@@ -186,7 +187,8 @@ int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
* av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11) > 0x02 % 0x20 (0x02)
* @endcode
*
- * @param a,b Operands
+ * @param a Operand
+ * @param b Operand
* @param mod Divisor; must be a power of 2
* @return
* - a negative value if `a % mod < b % mod`
diff --git a/media/ffvpx/libavutil/mem.c b/media/ffvpx/libavutil/mem.c
index 88fe09b179..36b8940a0c 100644
--- a/media/ffvpx/libavutil/mem.c
+++ b/media/ffvpx/libavutil/mem.c
@@ -31,16 +31,19 @@
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
+#include <stdatomic.h>
#include <string.h>
#if HAVE_MALLOC_H
#include <malloc.h>
#endif
+#include "attributes.h"
#include "avassert.h"
-#include "avutil.h"
-#include "common.h"
#include "dynarray.h"
+#include "error.h"
+#include "internal.h"
#include "intreadwrite.h"
+#include "macros.h"
#include "mem.h"
#ifdef MALLOC_PREFIX
@@ -59,8 +62,6 @@ void free(void *ptr);
#endif /* MALLOC_PREFIX */
-#include "mem_internal.h"
-
#define ALIGN (HAVE_AVX512 ? 64 : (HAVE_AVX ? 32 : 16))
/* NOTE: if you want to override these functions with your own
@@ -68,18 +69,35 @@ void free(void *ptr);
* dynamic libraries and remove -Wl,-Bsymbolic from the linker flags.
* Note that this will cost performance. */
-static size_t max_alloc_size= INT_MAX;
+static atomic_size_t max_alloc_size = ATOMIC_VAR_INIT(INT_MAX);
void av_max_alloc(size_t max){
- max_alloc_size = max;
+ atomic_store_explicit(&max_alloc_size, max, memory_order_relaxed);
+}
+
+static int size_mult(size_t a, size_t b, size_t *r)
+{
+ size_t t;
+
+#if (!defined(__INTEL_COMPILER) && AV_GCC_VERSION_AT_LEAST(5,1)) || AV_HAS_BUILTIN(__builtin_mul_overflow)
+ if (__builtin_mul_overflow(a, b, &t))
+ return AVERROR(EINVAL);
+#else
+ t = a * b;
+ /* Hack inspired from glibc: don't try the division if nelem and elsize
+ * are both less than sqrt(SIZE_MAX). */
+ if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b)
+ return AVERROR(EINVAL);
+#endif
+ *r = t;
+ return 0;
}
void *av_malloc(size_t size)
{
void *ptr = NULL;
- /* let's disallow possibly ambiguous cases */
- if (size > (max_alloc_size - 32))
+ if (size > atomic_load_explicit(&max_alloc_size, memory_order_relaxed))
return NULL;
#if HAVE_POSIX_MEMALIGN
@@ -134,15 +152,20 @@ void *av_malloc(size_t size)
void *av_realloc(void *ptr, size_t size)
{
- /* let's disallow possibly ambiguous cases */
- if (size > (max_alloc_size - 32))
+ void *ret;
+ if (size > atomic_load_explicit(&max_alloc_size, memory_order_relaxed))
return NULL;
#if HAVE_ALIGNED_MALLOC
- return _aligned_realloc(ptr, size + !size, ALIGN);
+ ret = _aligned_realloc(ptr, size + !size, ALIGN);
#else
- return realloc(ptr, size + !size);
+ ret = realloc(ptr, size + !size);
+#endif
+#if CONFIG_MEMORY_POISONING
+ if (ret && !ptr)
+ memset(ret, FF_MEMORY_POISON, size);
#endif
+ return ret;
}
void *av_realloc_f(void *ptr, size_t nelem, size_t elsize)
@@ -150,7 +173,7 @@ void *av_realloc_f(void *ptr, size_t nelem, size_t elsize)
size_t size;
void *r;
- if (av_size_mult(elsize, nelem, &size)) {
+ if (size_mult(elsize, nelem, &size)) {
av_free(ptr);
return NULL;
}
@@ -183,23 +206,18 @@ int av_reallocp(void *ptr, size_t size)
void *av_malloc_array(size_t nmemb, size_t size)
{
- if (!size || nmemb >= INT_MAX / size)
- return NULL;
- return av_malloc(nmemb * size);
-}
-
-void *av_mallocz_array(size_t nmemb, size_t size)
-{
- if (!size || nmemb >= INT_MAX / size)
+ size_t result;
+ if (size_mult(nmemb, size, &result) < 0)
return NULL;
- return av_mallocz(nmemb * size);
+ return av_malloc(result);
}
void *av_realloc_array(void *ptr, size_t nmemb, size_t size)
{
- if (!size || nmemb >= INT_MAX / size)
+ size_t result;
+ if (size_mult(nmemb, size, &result) < 0)
return NULL;
- return av_realloc(ptr, nmemb * size);
+ return av_realloc(ptr, result);
}
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
@@ -243,9 +261,10 @@ void *av_mallocz(size_t size)
void *av_calloc(size_t nmemb, size_t size)
{
- if (size <= 0 || nmemb >= INT_MAX / size)
+ size_t result;
+ if (size_mult(nmemb, size, &result) < 0)
return NULL;
- return av_mallocz(nmemb * size);
+ return av_mallocz(result);
}
char *av_strdup(const char *s)
@@ -475,15 +494,21 @@ void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
{
+ size_t max_size;
+
if (min_size <= *size)
return ptr;
- if (min_size > max_alloc_size - 32) {
+ max_size = atomic_load_explicit(&max_alloc_size, memory_order_relaxed);
+ /* *size is an unsigned, so the real maximum is <= UINT_MAX. */
+ max_size = FFMIN(max_size, UINT_MAX);
+
+ if (min_size > max_size) {
*size = 0;
return NULL;
}
- min_size = FFMIN(max_alloc_size - 32, FFMAX(min_size + min_size / 16 + 32, min_size));
+ min_size = FFMIN(max_size, FFMAX(min_size + min_size / 16 + 32, min_size));
ptr = av_realloc(ptr, min_size);
/* we could set this to the unmodified min_size but this is safer
@@ -497,12 +522,47 @@ void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
return ptr;
}
+static inline void fast_malloc(void *ptr, unsigned int *size, size_t min_size, int zero_realloc)
+{
+ size_t max_size;
+ void *val;
+
+ memcpy(&val, ptr, sizeof(val));
+ if (min_size <= *size) {
+ av_assert0(val || !min_size);
+ return;
+ }
+
+ max_size = atomic_load_explicit(&max_alloc_size, memory_order_relaxed);
+ /* *size is an unsigned, so the real maximum is <= UINT_MAX. */
+ max_size = FFMIN(max_size, UINT_MAX);
+
+ if (min_size > max_size) {
+ av_freep(ptr);
+ *size = 0;
+ return;
+ }
+ min_size = FFMIN(max_size, FFMAX(min_size + min_size / 16 + 32, min_size));
+ av_freep(ptr);
+ val = zero_realloc ? av_mallocz(min_size) : av_malloc(min_size);
+ memcpy(ptr, &val, sizeof(val));
+ if (!val)
+ min_size = 0;
+ *size = min_size;
+ return;
+}
+
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
{
- ff_fast_malloc(ptr, size, min_size, 0);
+ fast_malloc(ptr, size, min_size, 0);
}
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
{
- ff_fast_malloc(ptr, size, min_size, 1);
+ fast_malloc(ptr, size, min_size, 1);
+}
+
+int av_size_mult(size_t a, size_t b, size_t *r)
+{
+ return size_mult(a, b, r);
}
diff --git a/media/ffvpx/libavutil/mem.h b/media/ffvpx/libavutil/mem.h
index 5fb1a02dd9..62b4ca6e50 100644
--- a/media/ffvpx/libavutil/mem.h
+++ b/media/ffvpx/libavutil/mem.h
@@ -31,8 +31,8 @@
#include <stdint.h>
#include "attributes.h"
-#include "error.h"
#include "avutil.h"
+#include "version.h"
/**
* @addtogroup lavu_mem
@@ -49,81 +49,6 @@
* dealing with memory consistently possible on all platforms.
*
* @{
- *
- * @defgroup lavu_mem_macros Alignment Macros
- * Helper macros for declaring aligned variables.
- * @{
- */
-
-/**
- * @def DECLARE_ALIGNED(n,t,v)
- * Declare a variable that is aligned in memory.
- *
- * @code{.c}
- * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42;
- * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128];
- *
- * // The default-alignment equivalent would be
- * uint16_t aligned_int = 42;
- * uint8_t aligned_array[128];
- * @endcode
- *
- * @param n Minimum alignment in bytes
- * @param t Type of the variable (or array element)
- * @param v Name of the variable
- */
-
-/**
- * @def DECLARE_ASM_ALIGNED(n,t,v)
- * Declare an aligned variable appropriate for use in inline assembly code.
- *
- * @code{.c}
- * DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
- * @endcode
- *
- * @param n Minimum alignment in bytes
- * @param t Type of the variable (or array element)
- * @param v Name of the variable
- */
-
-/**
- * @def DECLARE_ASM_CONST(n,t,v)
- * Declare a static constant aligned variable appropriate for use in inline
- * assembly code.
- *
- * @code{.c}
- * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
- * @endcode
- *
- * @param n Minimum alignment in bytes
- * @param t Type of the variable (or array element)
- * @param v Name of the variable
- */
-
-#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
- #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
-#elif defined(__DJGPP__)
- #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v
- #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
- #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
-#elif defined(__GNUC__) || defined(__clang__)
- #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (n))) v
- #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
-#elif defined(_MSC_VER)
- #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
- #define DECLARE_ASM_ALIGNED(n,t,v) __declspec(align(n)) t v
- #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
-#else
- #define DECLARE_ALIGNED(n,t,v) t v
- #define DECLARE_ASM_ALIGNED(n,t,v) t v
- #define DECLARE_ASM_CONST(n,t,v) static const t v
-#endif
-
-/**
- * @}
*/
/**
@@ -232,20 +157,12 @@ av_alloc_size(1, 2) void *av_malloc_array(size_t nmemb, size_t size);
* @see av_mallocz()
* @see av_malloc_array()
*/
-av_alloc_size(1, 2) void *av_mallocz_array(size_t nmemb, size_t size);
-
-/**
- * Non-inlined equivalent of av_mallocz_array().
- *
- * Created for symmetry with the calloc() C function.
- */
-void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib;
+void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib av_alloc_size(1, 2);
/**
* Allocate, reallocate, or free a block of memory.
*
- * If `ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is
- * zero, free the memory block pointed to by `ptr`. Otherwise, expand or
+ * If `ptr` is `NULL` and `size` > 0, allocate a new block. Otherwise, expand or
* shrink that block of memory according to `size`.
*
* @param ptr Pointer to a memory block already allocated with
@@ -254,10 +171,11 @@ void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib;
* reallocated
*
* @return Pointer to a newly-reallocated block or `NULL` if the block
- * cannot be reallocated or the function is used to free the memory block
+ * cannot be reallocated
*
* @warning Unlike av_malloc(), the returned pointer is not guaranteed to be
- * correctly aligned.
+ * correctly aligned. The returned pointer must be freed after even
+ * if size is zero.
* @see av_fast_realloc()
* @see av_reallocp()
*/
@@ -305,8 +223,7 @@ void *av_realloc_f(void *ptr, size_t nelem, size_t elsize);
/**
* Allocate, reallocate, or free an array.
*
- * If `ptr` is `NULL` and `nmemb` > 0, allocate a new block. If
- * `nmemb` is zero, free the memory block pointed to by `ptr`.
+ * If `ptr` is `NULL` and `nmemb` > 0, allocate a new block.
*
* @param ptr Pointer to a memory block already allocated with
* av_realloc() or `NULL`
@@ -314,19 +231,19 @@ void *av_realloc_f(void *ptr, size_t nelem, size_t elsize);
* @param size Size of the single element of the array
*
* @return Pointer to a newly-reallocated block or NULL if the block
- * cannot be reallocated or the function is used to free the memory block
+ * cannot be reallocated
*
* @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
- * correctly aligned.
+ * correctly aligned. The returned pointer must be freed after even if
+ * nmemb is zero.
* @see av_reallocp_array()
*/
av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);
/**
- * Allocate, reallocate, or free an array through a pointer to a pointer.
+ * Allocate, reallocate an array through a pointer to a pointer.
*
- * If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block. If `nmemb` is
- * zero, free the memory block pointed to by `*ptr`.
+ * If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block.
*
* @param[in,out] ptr Pointer to a pointer to a memory block already
* allocated with av_realloc(), or a pointer to `NULL`.
@@ -337,7 +254,7 @@ av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size)
* @return Zero on success, an AVERROR error code on failure
*
* @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
- * correctly aligned.
+ * correctly aligned. *ptr must be freed after even if nmemb is zero.
*/
int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
@@ -662,20 +579,12 @@ void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
/**
* Multiply two `size_t` values checking for overflow.
*
- * @param[in] a,b Operands of multiplication
+ * @param[in] a Operand of multiplication
+ * @param[in] b Operand of multiplication
* @param[out] r Pointer to the result of the operation
* @return 0 on success, AVERROR(EINVAL) on overflow
*/
-static inline int av_size_mult(size_t a, size_t b, size_t *r)
-{
- size_t t = a * b;
- /* Hack inspired from glibc: don't try the division if nelem and elsize
- * are both less than sqrt(SIZE_MAX). */
- if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b)
- return AVERROR(EINVAL);
- *r = t;
- return 0;
-}
+int av_size_mult(size_t a, size_t b, size_t *r);
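A small sketch (not part of the patch) of the overflow-checked allocation pattern the now out-of-line av_size_mult() supports:

static void *alloc_table(size_t nmemb, size_t elsize)
{
    size_t total;

    if (av_size_mult(nmemb, elsize, &total) < 0)
        return NULL; /* multiplication would overflow size_t */
    return av_malloc(total);
}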
/**
* Set the maximum size that may be allocated in one block.
diff --git a/media/ffvpx/libavutil/mem_internal.h b/media/ffvpx/libavutil/mem_internal.h
index 6fdbcb016e..2448c606f1 100644
--- a/media/ffvpx/libavutil/mem_internal.h
+++ b/media/ffvpx/libavutil/mem_internal.h
@@ -21,25 +21,118 @@
#ifndef AVUTIL_MEM_INTERNAL_H
#define AVUTIL_MEM_INTERNAL_H
-#include "avassert.h"
+#include "config.h"
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "macros.h"
#include "mem.h"
+#include "version.h"
+
+/**
+ * @def DECLARE_ALIGNED(n,t,v)
+ * Declare a variable that is aligned in memory.
+ *
+ * @code{.c}
+ * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42;
+ * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128];
+ *
+ * // The default-alignment equivalent would be
+ * uint16_t aligned_int = 42;
+ * uint8_t aligned_array[128];
+ * @endcode
+ *
+ * @param n Minimum alignment in bytes
+ * @param t Type of the variable (or array element)
+ * @param v Name of the variable
+ */
+
+/**
+ * @def DECLARE_ASM_ALIGNED(n,t,v)
+ * Declare an aligned variable appropriate for use in inline assembly code.
+ *
+ * @code{.c}
+ * DECLARE_ASM_ALIGNED(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
+ * @endcode
+ *
+ * @param n Minimum alignment in bytes
+ * @param t Type of the variable (or array element)
+ * @param v Name of the variable
+ */
+
+/**
+ * @def DECLARE_ASM_CONST(n,t,v)
+ * Declare a static constant aligned variable appropriate for use in inline
+ * assembly code.
+ *
+ * @code{.c}
+ * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008);
+ * @endcode
+ *
+ * @param n Minimum alignment in bytes
+ * @param t Type of the variable (or array element)
+ * @param v Name of the variable
+ */
+
+#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
+#elif defined(__DJGPP__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v
+#elif defined(__GNUC__) || defined(__clang__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t av_used __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+ #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
+#else
+ #define DECLARE_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_CONST(n,t,v) static const t v
+#endif
+
+// Some broken preprocessors need a second expansion
+// to be forced to tokenize __VA_ARGS__
+#define E1(x) x
+
+#define LOCAL_ALIGNED_A(a, t, v, s, o, ...) \
+ uint8_t la_##v[sizeof(t s o) + (a)]; \
+ t (*v) o = (void *)FFALIGN((uintptr_t)la_##v, a)
+
+#define LOCAL_ALIGNED_D(a, t, v, s, o, ...) \
+ DECLARE_ALIGNED(a, t, la_##v) s o; \
+ t (*v) o = la_##v
+
+#define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
+#endif
-static inline int ff_fast_malloc(void *ptr, unsigned int *size, size_t min_size, int zero_realloc)
-{
- void *val;
-
- memcpy(&val, ptr, sizeof(val));
- if (min_size <= *size) {
- av_assert0(val || !min_size);
- return 0;
- }
- min_size = FFMAX(min_size + min_size / 16 + 32, min_size);
- av_freep(ptr);
- val = zero_realloc ? av_mallocz(min_size) : av_malloc(min_size);
- memcpy(ptr, &val, sizeof(val));
- if (!val)
- min_size = 0;
- *size = min_size;
- return 1;
-}
#endif /* AVUTIL_MEM_INTERNAL_H */
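A short sketch of how the LOCAL_ALIGNED_* helpers above are typically used for stack temporaries handed to SIMD code (copy_block() is an illustrative name; the header is internal to the tree):

    #include <stdint.h>
    #include <string.h>

    #include "libavutil/mem_internal.h"

    static void copy_block(int16_t *dst, const int16_t *src)
    {
        /* Either a 16-byte-aligned local array (HAVE_LOCAL_ALIGNED) or an
         * over-allocated byte buffer aligned by hand, behind one macro. */
        LOCAL_ALIGNED_16(int16_t, block, [64]);

        memcpy(block, src, 64 * sizeof(*block));
        /* ... SIMD routines requiring 16-byte alignment would run here ... */
        memcpy(dst, block, 64 * sizeof(*block));
    }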
diff --git a/media/ffvpx/libavutil/moz.build b/media/ffvpx/libavutil/moz.build
index 870934f52f..fcd2163712 100644
--- a/media/ffvpx/libavutil/moz.build
+++ b/media/ffvpx/libavutil/moz.build
@@ -11,6 +11,7 @@ if CONFIG['FFVPX_ASFLAGS']:
SharedLibrary('mozavutil')
SOURCES += [
'adler32.c',
+ 'avsscanf.c',
'avstring.c',
'base64.c',
'bprint.c',
@@ -48,6 +49,7 @@ SOURCES += [
'time.c',
'timecode.c',
'utils.c',
+ 'video_enc_params.c',
]
SYMBOLS_FILE = 'avutil.symbols'
diff --git a/media/ffvpx/libavutil/opt.c b/media/ffvpx/libavutil/opt.c
index 93d6c26c11..0908751752 100644
--- a/media/ffvpx/libavutil/opt.c
+++ b/media/ffvpx/libavutil/opt.c
@@ -39,6 +39,7 @@
#include "opt.h"
#include "samplefmt.h"
#include "bprint.h"
+#include "version.h"
#include <float.h>
@@ -71,7 +72,11 @@ static int read_number(const AVOption *o, const void *dst, double *num, int *den
case AV_OPT_TYPE_INT:
*intnum = *(int *)dst;
return 0;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
case AV_OPT_TYPE_DURATION:
case AV_OPT_TYPE_INT64:
case AV_OPT_TYPE_UINT64:
@@ -126,7 +131,11 @@ static int write_number(void *obj, const AVOption *o, void *dst, double num, int
*(int *)dst = llrint(num / den) * intnum;
break;
case AV_OPT_TYPE_DURATION:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
case AV_OPT_TYPE_INT64:{
double d = num / den;
if (intnum == 1 && d == (double)INT64_MAX) {
@@ -229,13 +238,15 @@ static int set_string(void *obj, const AVOption *o, const char *val, uint8_t **d
static int set_string_number(void *obj, void *target_obj, const AVOption *o, const char *val, void *dst)
{
int ret = 0;
- int num, den;
- char c;
- if (sscanf(val, "%d%*1[:/]%d%c", &num, &den, &c) == 2) {
- if ((ret = write_number(obj, o, dst, 1, den, num)) >= 0)
- return ret;
- ret = 0;
+ if (o->type == AV_OPT_TYPE_RATIONAL || o->type == AV_OPT_TYPE_VIDEO_RATE) {
+ int num, den;
+ char c;
+ if (sscanf(val, "%d%*1[:/]%d%c", &num, &den, &c) == 2) {
+ if ((ret = write_number(obj, o, dst, 1, den, num)) >= 0)
+ return ret;
+ ret = 0;
+ }
}
for (;;) {
@@ -254,14 +265,18 @@ static int set_string_number(void *obj, void *target_obj, const AVOption *o, con
}
{
- const AVOption *o_named = av_opt_find(target_obj, i ? buf : val, o->unit, 0, 0);
int res;
int ci = 0;
double const_values[64];
const char * const_names[64];
- if (o_named && o_named->type == AV_OPT_TYPE_CONST)
+ int search_flags = (o->flags & AV_OPT_FLAG_CHILD_CONSTS) ? AV_OPT_SEARCH_CHILDREN : 0;
+ const AVOption *o_named = av_opt_find(target_obj, i ? buf : val, o->unit, 0, search_flags);
+ if (o_named && o_named->type == AV_OPT_TYPE_CONST) {
d = DEFAULT_NUMVAL(o_named);
- else {
+ if (o_named->flags & AV_OPT_FLAG_DEPRECATED)
+ av_log(obj, AV_LOG_WARNING, "The \"%s\" option is deprecated: %s\n",
+ o_named->name, o_named->help);
+ } else {
if (o->unit) {
for (o_named = NULL; o_named = av_opt_next(target_obj, o_named); ) {
if (o_named->type == AV_OPT_TYPE_CONST &&
@@ -330,12 +345,7 @@ static int set_string_image_size(void *obj, const AVOption *o, const char *val,
static int set_string_video_rate(void *obj, const AVOption *o, const char *val, AVRational *dst)
{
- int ret;
- if (!val) {
- ret = AVERROR(EINVAL);
- } else {
- ret = av_parse_video_rate(dst, val);
- }
+ int ret = av_parse_video_rate(dst, val);
if (ret < 0)
av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as video rate\n", val);
return ret;
@@ -446,6 +456,34 @@ static int set_string_sample_fmt(void *obj, const AVOption *o, const char *val,
AV_SAMPLE_FMT_NB, av_get_sample_fmt, "sample format");
}
+static int set_string_dict(void *obj, const AVOption *o, const char *val, uint8_t **dst)
+{
+ AVDictionary *options = NULL;
+
+ if (val) {
+ int ret = av_dict_parse_string(&options, val, "=", ":", 0);
+ if (ret < 0) {
+ av_dict_free(&options);
+ return ret;
+ }
+ }
+
+ av_dict_free((AVDictionary **)dst);
+ *dst = (uint8_t *)options;
+
+ return 0;
+}
+
+static int set_string_channel_layout(void *obj, const AVOption *o,
+ const char *val, void *dst)
+{
+ AVChannelLayout *channel_layout = dst;
+ av_channel_layout_uninit(channel_layout);
+ if (!val)
+ return 0;
+ return av_channel_layout_from_string(channel_layout, val);
+}
+
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
{
int ret = 0;
@@ -453,12 +491,17 @@ int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
if (!o || !target_obj)
return AVERROR_OPTION_NOT_FOUND;
+FF_DISABLE_DEPRECATION_WARNINGS
if (!val && (o->type != AV_OPT_TYPE_STRING &&
o->type != AV_OPT_TYPE_PIXEL_FMT && o->type != AV_OPT_TYPE_SAMPLE_FMT &&
- o->type != AV_OPT_TYPE_IMAGE_SIZE && o->type != AV_OPT_TYPE_VIDEO_RATE &&
+ o->type != AV_OPT_TYPE_IMAGE_SIZE &&
o->type != AV_OPT_TYPE_DURATION && o->type != AV_OPT_TYPE_COLOR &&
- o->type != AV_OPT_TYPE_CHANNEL_LAYOUT && o->type != AV_OPT_TYPE_BOOL))
+#if FF_API_OLD_CHANNEL_LAYOUT
+ o->type != AV_OPT_TYPE_CHANNEL_LAYOUT &&
+#endif
+ o->type != AV_OPT_TYPE_BOOL))
return AVERROR(EINVAL);
+FF_ENABLE_DEPRECATION_WARNINGS
if (o->flags & AV_OPT_FLAG_READONLY)
return AVERROR(EINVAL);
@@ -514,6 +557,8 @@ int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
}
case AV_OPT_TYPE_COLOR:
return set_string_color(obj, o, val, dst);
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
if (!val || !strcmp(val, "none")) {
*(int64_t *)dst = 0;
@@ -527,6 +572,17 @@ int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
return ret;
}
break;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ case AV_OPT_TYPE_CHLAYOUT:
+ ret = set_string_channel_layout(obj, o, val, dst);
+ if (ret < 0) {
+ av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as channel layout\n", val);
+ ret = AVERROR(EINVAL);
+ }
+ return ret;
+ case AV_OPT_TYPE_DICT:
+ return set_string_dict(obj, o, val, dst);
}
av_log(obj, AV_LOG_ERROR, "Invalid option type.\n");
@@ -688,6 +744,8 @@ int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt,
return set_format(obj, name, fmt, search_flags, AV_OPT_TYPE_SAMPLE_FMT, "sample", AV_SAMPLE_FMT_NB);
}
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
int av_opt_set_channel_layout(void *obj, const char *name, int64_t cl, int search_flags)
{
void *target_obj;
@@ -703,6 +761,8 @@ int av_opt_set_channel_layout(void *obj, const char *name, int64_t cl, int searc
*(int64_t *)(((uint8_t *)target_obj) + o->offset) = cl;
return 0;
}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val,
int search_flags)
@@ -723,6 +783,22 @@ int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val,
return 0;
}
+int av_opt_set_chlayout(void *obj, const char *name,
+ const AVChannelLayout *channel_layout,
+ int search_flags)
+{
+ void *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ AVChannelLayout *dst;
+
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+
+ dst = (AVChannelLayout*)((uint8_t*)target_obj + o->offset);
+
+ return av_channel_layout_copy(dst, channel_layout);
+}
+
static void format_duration(char *buf, size_t size, int64_t d)
{
char *e;
@@ -851,10 +927,24 @@ int av_opt_get(void *obj, const char *name, int search_flags, uint8_t **out_val)
(int)((uint8_t *)dst)[0], (int)((uint8_t *)dst)[1],
(int)((uint8_t *)dst)[2], (int)((uint8_t *)dst)[3]);
break;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
i64 = *(int64_t *)dst;
ret = snprintf(buf, sizeof(buf), "0x%"PRIx64, i64);
break;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ case AV_OPT_TYPE_CHLAYOUT:
+ ret = av_channel_layout_describe(dst, buf, sizeof(buf));
+ break;
+ case AV_OPT_TYPE_DICT:
+ if (!*(AVDictionary **)dst && (search_flags & AV_OPT_ALLOW_NULL)) {
+ *out_val = NULL;
+ return 0;
+ }
+ return av_dict_get_string(*(AVDictionary **)dst, (char **)out_val, '=', ':');
default:
return AVERROR(EINVAL);
}
@@ -893,7 +983,10 @@ int av_opt_get_int(void *obj, const char *name, int search_flags, int64_t *out_v
if ((ret = get_number(obj, name, NULL, &num, &den, &intnum, search_flags)) < 0)
return ret;
- *out_val = num * intnum / den;
+ if (num == den)
+ *out_val = intnum;
+ else
+ *out_val = num * intnum / den;
return 0;
}
@@ -987,6 +1080,8 @@ int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AV
return get_format(obj, name, search_flags, out_fmt, AV_OPT_TYPE_SAMPLE_FMT, "sample");
}
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *cl)
{
void *dst, *target_obj;
@@ -1003,6 +1098,24 @@ int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int
*cl = *(int64_t *)dst;
return 0;
}
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+int av_opt_get_chlayout(void *obj, const char *name, int search_flags, AVChannelLayout *cl)
+{
+ void *dst, *target_obj;
+ const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj);
+ if (!o || !target_obj)
+ return AVERROR_OPTION_NOT_FOUND;
+ if (o->type != AV_OPT_TYPE_CHLAYOUT) {
+ av_log(obj, AV_LOG_ERROR,
+ "The value for option '%s' is not a channel layout.\n", name);
+ return AVERROR(EINVAL);
+ }
+
+ dst = ((uint8_t*)target_obj) + o->offset;
+ return av_channel_layout_copy(cl, dst);
+}
int av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val)
{
@@ -1034,6 +1147,23 @@ int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name)
return res & flag->default_val.i64;
}
+static void log_int_value(void *av_log_obj, int level, int64_t i)
+{
+ if (i == INT_MAX) {
+ av_log(av_log_obj, level, "INT_MAX");
+ } else if (i == INT_MIN) {
+ av_log(av_log_obj, level, "INT_MIN");
+ } else if (i == UINT32_MAX) {
+ av_log(av_log_obj, level, "UINT32_MAX");
+ } else if (i == INT64_MAX) {
+ av_log(av_log_obj, level, "I64_MAX");
+ } else if (i == INT64_MIN) {
+ av_log(av_log_obj, level, "I64_MIN");
+ } else {
+ av_log(av_log_obj, level, "%"PRId64, i);
+ }
+}
+
static void log_value(void *av_log_obj, int level, double d)
{
if (d == INT_MAX) {
@@ -1102,7 +1232,7 @@ static char *get_opt_flags_string(void *obj, const char *unit, int64_t value)
}
static void opt_list(void *obj, void *av_log_obj, const char *unit,
- int req_flags, int rej_flags)
+ int req_flags, int rej_flags, enum AVOptionType parent_type)
{
const AVOption *opt = NULL;
AVOptionRanges *r;
@@ -1126,7 +1256,7 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit,
av_log(av_log_obj, AV_LOG_INFO, " %-15s ", opt->name);
else
av_log(av_log_obj, AV_LOG_INFO, " %s%-17s ",
- (opt->flags & AV_OPT_FLAG_FILTERING_PARAM) ? "" : "-",
+ (opt->flags & AV_OPT_FLAG_FILTERING_PARAM) ? " " : "-",
opt->name);
switch (opt->type) {
@@ -1157,6 +1287,9 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit,
case AV_OPT_TYPE_BINARY:
av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<binary>");
break;
+ case AV_OPT_TYPE_DICT:
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<dictionary>");
+ break;
case AV_OPT_TYPE_IMAGE_SIZE:
av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<image_size>");
break;
@@ -1175,26 +1308,39 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit,
case AV_OPT_TYPE_COLOR:
av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<color>");
break;
+ case AV_OPT_TYPE_CHLAYOUT:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<channel_layout>");
break;
case AV_OPT_TYPE_BOOL:
av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "<boolean>");
break;
case AV_OPT_TYPE_CONST:
+ if (parent_type == AV_OPT_TYPE_INT)
+ av_log(av_log_obj, AV_LOG_INFO, "%-12"PRId64" ", opt->default_val.i64);
+ else
+ av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "");
+ break;
default:
av_log(av_log_obj, AV_LOG_INFO, "%-12s ", "");
break;
}
- av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_ENCODING_PARAM) ? 'E' : '.');
- av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_DECODING_PARAM) ? 'D' : '.');
- av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_FILTERING_PARAM)? 'F' : '.');
- av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_VIDEO_PARAM ) ? 'V' : '.');
- av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_AUDIO_PARAM ) ? 'A' : '.');
- av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_SUBTITLE_PARAM) ? 'S' : '.');
- av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_EXPORT) ? 'X' : '.');
- av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_READONLY) ? 'R' : '.');
- av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_BSF_PARAM) ? 'B' : '.');
+ av_log(av_log_obj, AV_LOG_INFO, "%c%c%c%c%c%c%c%c%c%c%c",
+ (opt->flags & AV_OPT_FLAG_ENCODING_PARAM) ? 'E' : '.',
+ (opt->flags & AV_OPT_FLAG_DECODING_PARAM) ? 'D' : '.',
+ (opt->flags & AV_OPT_FLAG_FILTERING_PARAM) ? 'F' : '.',
+ (opt->flags & AV_OPT_FLAG_VIDEO_PARAM) ? 'V' : '.',
+ (opt->flags & AV_OPT_FLAG_AUDIO_PARAM) ? 'A' : '.',
+ (opt->flags & AV_OPT_FLAG_SUBTITLE_PARAM) ? 'S' : '.',
+ (opt->flags & AV_OPT_FLAG_EXPORT) ? 'X' : '.',
+ (opt->flags & AV_OPT_FLAG_READONLY) ? 'R' : '.',
+ (opt->flags & AV_OPT_FLAG_BSF_PARAM) ? 'B' : '.',
+ (opt->flags & AV_OPT_FLAG_RUNTIME_PARAM) ? 'T' : '.',
+ (opt->flags & AV_OPT_FLAG_DEPRECATED) ? 'P' : '.');
if (opt->help)
av_log(av_log_obj, AV_LOG_INFO, " %s", opt->help);
@@ -1224,6 +1370,8 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit,
!((opt->type == AV_OPT_TYPE_COLOR ||
opt->type == AV_OPT_TYPE_IMAGE_SIZE ||
opt->type == AV_OPT_TYPE_STRING ||
+ opt->type == AV_OPT_TYPE_DICT ||
+ opt->type == AV_OPT_TYPE_CHLAYOUT ||
opt->type == AV_OPT_TYPE_VIDEO_RATE) &&
!opt->default_val.str)) {
av_log(av_log_obj, AV_LOG_INFO, " (default ");
@@ -1254,7 +1402,7 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit,
if (def_const)
av_log(av_log_obj, AV_LOG_INFO, "%s", def_const);
else
- log_value(av_log_obj, AV_LOG_INFO, opt->default_val.i64);
+ log_int_value(av_log_obj, AV_LOG_INFO, opt->default_val.i64);
break;
}
case AV_OPT_TYPE_DOUBLE:
@@ -1274,19 +1422,25 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit,
case AV_OPT_TYPE_COLOR:
case AV_OPT_TYPE_IMAGE_SIZE:
case AV_OPT_TYPE_STRING:
+ case AV_OPT_TYPE_DICT:
case AV_OPT_TYPE_VIDEO_RATE:
+ case AV_OPT_TYPE_CHLAYOUT:
av_log(av_log_obj, AV_LOG_INFO, "\"%s\"", opt->default_val.str);
break;
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
av_log(av_log_obj, AV_LOG_INFO, "0x%"PRIx64, opt->default_val.i64);
break;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
}
av_log(av_log_obj, AV_LOG_INFO, ")");
}
av_log(av_log_obj, AV_LOG_INFO, "\n");
if (opt->unit && opt->type != AV_OPT_TYPE_CONST)
- opt_list(obj, av_log_obj, opt->unit, req_flags, rej_flags);
+ opt_list(obj, av_log_obj, opt->unit, req_flags, rej_flags, opt->type);
}
}
@@ -1297,7 +1451,7 @@ int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags)
av_log(av_log_obj, AV_LOG_INFO, "%s AVOptions:\n", (*(AVClass **)obj)->class_name);
- opt_list(obj, av_log_obj, NULL, req_flags, rej_flags);
+ opt_list(obj, av_log_obj, NULL, req_flags, rej_flags, -1);
return 0;
}
@@ -1329,7 +1483,11 @@ void av_opt_set_defaults2(void *s, int mask, int flags)
case AV_OPT_TYPE_INT64:
case AV_OPT_TYPE_UINT64:
case AV_OPT_TYPE_DURATION:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
case AV_OPT_TYPE_PIXEL_FMT:
case AV_OPT_TYPE_SAMPLE_FMT:
write_number(s, opt, dst, 1, 1, opt->default_val.i64);
@@ -1362,9 +1520,12 @@ void av_opt_set_defaults2(void *s, int mask, int flags)
case AV_OPT_TYPE_BINARY:
set_string_binary(s, opt, opt->default_val.str, dst);
break;
+ case AV_OPT_TYPE_CHLAYOUT:
+ set_string_channel_layout(s, opt, opt->default_val.str, dst);
+ break;
case AV_OPT_TYPE_DICT:
- /* Cannot set defaults for these types */
- break;
+ set_string_dict(s, opt, opt->default_val.str, dst);
+ break;
default:
av_log(s, AV_LOG_DEBUG, "AVOption type %d of option %s not implemented yet\n",
opt->type, opt->name);
@@ -1569,6 +1730,10 @@ void av_opt_free(void *obj)
av_dict_free((AVDictionary **)(((uint8_t *)obj) + o->offset));
break;
+ case AV_OPT_TYPE_CHLAYOUT:
+ av_channel_layout_uninit((AVChannelLayout *)(((uint8_t *)obj) + o->offset));
+ break;
+
default:
break;
}
@@ -1577,27 +1742,26 @@ void av_opt_free(void *obj)
int av_opt_set_dict2(void *obj, AVDictionary **options, int search_flags)
{
- AVDictionaryEntry *t = NULL;
+ const AVDictionaryEntry *t = NULL;
AVDictionary *tmp = NULL;
- int ret = 0;
+ int ret;
if (!options)
return 0;
- while ((t = av_dict_get(*options, "", t, AV_DICT_IGNORE_SUFFIX))) {
+ while ((t = av_dict_iterate(*options, t))) {
ret = av_opt_set(obj, t->key, t->value, search_flags);
if (ret == AVERROR_OPTION_NOT_FOUND)
- ret = av_dict_set(&tmp, t->key, t->value, 0);
+ ret = av_dict_set(&tmp, t->key, t->value, AV_DICT_MULTIKEY);
if (ret < 0) {
av_log(obj, AV_LOG_ERROR, "Error setting option %s to value %s.\n", t->key, t->value);
av_dict_free(&tmp);
return ret;
}
- ret = 0;
}
av_dict_free(options);
*options = tmp;
- return ret;
+ return 0;
}
int av_opt_set_dict(void *obj, AVDictionary **options)
@@ -1627,8 +1791,9 @@ const AVOption *av_opt_find2(void *obj, const char *name, const char *unit,
if (search_flags & AV_OPT_SEARCH_CHILDREN) {
if (search_flags & AV_OPT_SEARCH_FAKE_OBJ) {
- const AVClass *child = NULL;
- while (child = av_opt_child_class_next(c, child))
+ void *iter = NULL;
+ const AVClass *child;
+ while (child = av_opt_child_class_iterate(c, &iter))
if (o = av_opt_find2(&child, name, unit, opt_flags, search_flags, NULL))
return o;
} else {
@@ -1663,10 +1828,10 @@ void *av_opt_child_next(void *obj, void *prev)
return NULL;
}
-const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev)
+const AVClass *av_opt_child_class_iterate(const AVClass *parent, void **iter)
{
- if (parent->child_class_next)
- return parent->child_class_next(prev);
+ if (parent->child_class_iterate)
+ return parent->child_class_iterate(iter);
return NULL;
}
@@ -1686,7 +1851,11 @@ static int opt_size(enum AVOptionType type)
case AV_OPT_TYPE_FLAGS:
return sizeof(int);
case AV_OPT_TYPE_DURATION:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
case AV_OPT_TYPE_INT64:
case AV_OPT_TYPE_UINT64:
return sizeof(int64_t);
@@ -1753,12 +1922,16 @@ int av_opt_copy(void *dst, const void *src)
} else if (o->type == AV_OPT_TYPE_DICT) {
AVDictionary **sdict = (AVDictionary **) field_src;
AVDictionary **ddict = (AVDictionary **) field_dst;
+ int ret2;
if (*sdict != *ddict)
av_dict_free(ddict);
*ddict = NULL;
- av_dict_copy(ddict, *sdict, 0);
- if (av_dict_count(*sdict) != av_dict_count(*ddict))
- ret = AVERROR(ENOMEM);
+ ret2 = av_dict_copy(ddict, *sdict, 0);
+ if (ret2 < 0)
+ ret = ret2;
+ } else if (o->type == AV_OPT_TYPE_CHLAYOUT) {
+ if (field_dst != field_src)
+ ret = av_channel_layout_copy(field_dst, field_src);
} else {
int size = opt_size(o->type);
if (size < 0)
@@ -1774,10 +1947,7 @@ int av_opt_query_ranges(AVOptionRanges **ranges_arg, void *obj, const char *key,
{
int ret;
const AVClass *c = *(AVClass**)obj;
- int (*callback)(AVOptionRanges **, void *obj, const char *key, int flags) = NULL;
-
- if (c->version > (52 << 16 | 11 << 8))
- callback = c->query_ranges;
+ int (*callback)(AVOptionRanges **, void *obj, const char *key, int flags) = c->query_ranges;
if (!callback)
callback = av_opt_query_ranges_default;
@@ -1825,7 +1995,11 @@ int av_opt_query_ranges_default(AVOptionRanges **ranges_arg, void *obj, const ch
case AV_OPT_TYPE_DOUBLE:
case AV_OPT_TYPE_DURATION:
case AV_OPT_TYPE_COLOR:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
break;
case AV_OPT_TYPE_STRING:
range->component_min = 0;
@@ -1905,12 +2079,24 @@ int av_opt_is_set_to_default(void *obj, const AVOption *o)
case AV_OPT_TYPE_PIXEL_FMT:
case AV_OPT_TYPE_SAMPLE_FMT:
case AV_OPT_TYPE_INT:
+#if FF_API_OLD_CHANNEL_LAYOUT
+FF_DISABLE_DEPRECATION_WARNINGS
case AV_OPT_TYPE_CHANNEL_LAYOUT:
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
case AV_OPT_TYPE_DURATION:
case AV_OPT_TYPE_INT64:
case AV_OPT_TYPE_UINT64:
read_number(o, dst, NULL, NULL, &i64);
return o->default_val.i64 == i64;
+ case AV_OPT_TYPE_CHLAYOUT: {
+ AVChannelLayout ch_layout = { 0 };
+ if (o->default_val.str) {
+ if ((ret = av_channel_layout_from_string(&ch_layout, o->default_val.str)) < 0)
+ return ret;
+ }
+ return !av_channel_layout_compare((AVChannelLayout *)dst, &ch_layout);
+ }
case AV_OPT_TYPE_STRING:
str = *(char **)dst;
if (str == o->default_val.str) //2 NULLs
@@ -1948,9 +2134,23 @@ int av_opt_is_set_to_default(void *obj, const AVOption *o)
av_free(tmp.data);
return ret;
}
- case AV_OPT_TYPE_DICT:
- /* Binary and dict have not default support yet. Any pointer is not default. */
- return !!(*(void **)dst);
+ case AV_OPT_TYPE_DICT: {
+ AVDictionary *dict1 = NULL;
+ AVDictionary *dict2 = *(AVDictionary **)dst;
+ const AVDictionaryEntry *en1 = NULL;
+ const AVDictionaryEntry *en2 = NULL;
+ ret = av_dict_parse_string(&dict1, o->default_val.str, "=", ":", 0);
+ if (ret < 0) {
+ av_dict_free(&dict1);
+ return ret;
+ }
+ do {
+ en1 = av_dict_iterate(dict1, en1);
+ en2 = av_dict_iterate(dict2, en2);
+ } while (en1 && en2 && !strcmp(en1->key, en2->key) && !strcmp(en1->value, en2->value));
+ av_dict_free(&dict1);
+ return (!en1 && !en2);
+ }
case AV_OPT_TYPE_IMAGE_SIZE:
if (!o->default_val.str || !strcmp(o->default_val.str, "none"))
w = h = 0;
@@ -2034,6 +2234,8 @@ int av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer,
av_freep(&buf);
}
}
- av_bprint_finalize(&bprint, buffer);
+ ret = av_bprint_finalize(&bprint, buffer);
+ if (ret < 0)
+ return ret;
return 0;
}
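The new AVChannelLayout plumbing above can be exercised with a small sketch like the following (the "ch_layout" option name is assumed; any AVOptions-enabled object whose class declares an AV_OPT_TYPE_CHLAYOUT option works the same way):

    #include <libavutil/channel_layout.h>
    #include <libavutil/opt.h>

    static int set_stereo(void *obj)
    {
        AVChannelLayout stereo = AV_CHANNEL_LAYOUT_STEREO;
        AVChannelLayout check  = { 0 };
        int ret;

        /* String form: parsed through av_channel_layout_from_string(). */
        ret = av_opt_set(obj, "ch_layout", "stereo", 0);
        if (ret < 0)
            return ret;

        /* Struct form: the layout is copied directly into the option. */
        ret = av_opt_set_chlayout(obj, "ch_layout", &stereo, 0);
        if (ret < 0)
            return ret;

        /* Reading back hands the caller an owned copy to uninit. */
        ret = av_opt_get_chlayout(obj, "ch_layout", 0, &check);
        if (ret >= 0)
            av_channel_layout_uninit(&check);
        return ret;
    }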
diff --git a/media/ffvpx/libavutil/opt.h b/media/ffvpx/libavutil/opt.h
index 39f4a8dda0..461b5d3b6b 100644
--- a/media/ffvpx/libavutil/opt.h
+++ b/media/ffvpx/libavutil/opt.h
@@ -29,11 +29,11 @@
#include "rational.h"
#include "avutil.h"
+#include "channel_layout.h"
#include "dict.h"
#include "log.h"
#include "pixfmt.h"
#include "samplefmt.h"
-#include "version.h"
/**
* @defgroup avoptions AVOptions
@@ -114,7 +114,7 @@
* libavcodec exports generic options, while its priv_data field exports
* codec-specific options). In such a case, it is possible to set up the
* parent struct to export a child's options. To do that, simply
- * implement AVClass.child_next() and AVClass.child_class_next() in the
+ * implement AVClass.child_next() and AVClass.child_class_iterate() in the
* parent struct's AVClass.
* Assuming that the test_struct from above now also contains a
* child_struct field:
@@ -143,23 +143,25 @@
* return t->child_struct;
* return NULL;
* }
- * const AVClass child_class_next(const AVClass *prev)
+ * const AVClass *child_class_iterate(void **iter)
* {
- * return prev ? NULL : &child_class;
+ * const AVClass *c = *iter ? NULL : &child_class;
+ * *iter = (void*)(uintptr_t)c;
+ * return c;
* }
* @endcode
- * Putting child_next() and child_class_next() as defined above into
+ * Putting child_next() and child_class_iterate() as defined above into
* test_class will now make child_struct's options accessible through
* test_struct (again, proper setup as described above needs to be done on
* child_struct right after it is created).
*
* From the above example it might not be clear why both child_next()
- * and child_class_next() are needed. The distinction is that child_next()
- * iterates over actually existing objects, while child_class_next()
+ * and child_class_iterate() are needed. The distinction is that child_next()
+ * iterates over actually existing objects, while child_class_iterate()
* iterates over all possible child classes. E.g. if an AVCodecContext
* was initialized to use a codec which has private options, then its
* child_next() will return AVCodecContext.priv_data and finish
- * iterating. OTOH child_class_next() on AVCodecContext.av_class will
+ * iterating. OTOH child_class_iterate() on AVCodecContext.av_class will
* iterate over all available codecs with private options.
*
* @subsection avoptions_implement_named_constants Named constants
@@ -194,7 +196,7 @@
* For enumerating there are basically two cases. The first is when you want to
* get all options that may potentially exist on the struct and its children
* (e.g. when constructing documentation). In that case you should call
- * av_opt_child_class_next() recursively on the parent struct's AVClass. The
+ * av_opt_child_class_iterate() recursively on the parent struct's AVClass. The
* second case is when you have an already initialized struct with all its
* children and you want to get all options that can be actually written or read
* from it. In that case you should call av_opt_child_next() recursively (and
@@ -236,8 +238,11 @@ enum AVOptionType{
AV_OPT_TYPE_VIDEO_RATE, ///< offset must point to AVRational
AV_OPT_TYPE_DURATION,
AV_OPT_TYPE_COLOR,
+#if FF_API_OLD_CHANNEL_LAYOUT
AV_OPT_TYPE_CHANNEL_LAYOUT,
+#endif
AV_OPT_TYPE_BOOL,
+ AV_OPT_TYPE_CHLAYOUT,
};
/**
@@ -288,8 +293,10 @@ typedef struct AVOption {
*/
#define AV_OPT_FLAG_READONLY 128
#define AV_OPT_FLAG_BSF_PARAM (1<<8) ///< a generic parameter which can be set by the user for bit stream filtering
+#define AV_OPT_FLAG_RUNTIME_PARAM (1<<15) ///< a generic parameter which can be set by the user at runtime
#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering
#define AV_OPT_FLAG_DEPRECATED (1<<17) ///< set if option is deprecated, users should refer to AVOption.help text for more information
+#define AV_OPT_FLAG_CHILD_CONSTS (1<<18) ///< set if option constants can also reside in child objects
//FIXME think about enc-audio, ... style flags
/**
@@ -647,10 +654,10 @@ void *av_opt_child_next(void *obj, void *prev);
/**
* Iterate over potential AVOptions-enabled children of parent.
*
- * @param prev result of a previous call to this function or NULL
+ * @param iter a pointer where iteration state is stored.
* @return AVClass corresponding to next potential child or NULL
*/
-const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev);
+const AVClass *av_opt_child_class_iterate(const AVClass *parent, void **iter);
/**
* @defgroup opt_set_funcs Option setting functions
@@ -669,6 +676,9 @@ const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *pre
* scalars or named flags separated by '+' or '-'. Prefixing a flag
* with '+' causes it to be set without affecting the other flags;
* similarly, '-' unsets a flag.
+ * If the field is of a dictionary type, it has to be a ':' separated list of
+ * key=value parameters. Values containing ':' special characters must be
+ * escaped.
* @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
* is passed here, then the option may be set on a child of obj.
*
@@ -687,7 +697,11 @@ int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_
int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags);
int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags);
int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags);
+#if FF_API_OLD_CHANNEL_LAYOUT
+attribute_deprecated
int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags);
+#endif
+int av_opt_set_chlayout(void *obj, const char *name, const AVChannelLayout *layout, int search_flags);
/**
* @note Any old dictionary present is discarded and replaced with a copy of the new one. The
* caller still owns val and is responsible for freeing it.
@@ -729,9 +743,10 @@ int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, in
/**
* @note the returned string will be av_malloc()ed and must be av_free()ed by the caller
*
- * @note if AV_OPT_ALLOW_NULL is set in search_flags in av_opt_get, and the option has
- * AV_OPT_TYPE_STRING or AV_OPT_TYPE_BINARY and is set to NULL, *out_val will be set
- * to NULL instead of an allocated empty string.
+ * @note if AV_OPT_ALLOW_NULL is set in search_flags in av_opt_get, and the
+ * option is of type AV_OPT_TYPE_STRING, AV_OPT_TYPE_BINARY or AV_OPT_TYPE_DICT
+ * and is set to NULL, *out_val will be set to NULL instead of an allocated
+ * empty string.
*/
int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val);
int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val);
@@ -741,7 +756,11 @@ int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_
int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt);
int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt);
int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val);
+#if FF_API_OLD_CHANNEL_LAYOUT
+attribute_deprecated
int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout);
+#endif
+int av_opt_get_chlayout(void *obj, const char *name, int search_flags, AVChannelLayout *layout);
/**
* @param[out] out_val The returned dictionary is a copy of the actual value and must
* be freed with av_dict_free() by the caller
@@ -783,9 +802,16 @@ int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags
/**
* Copy options from src object into dest object.
*
+ * The underlying AVClass of both src and dest must coincide. The guarantee
+ * below does not apply if this is not fulfilled.
+ *
* Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object.
* Original memory allocated for such options is freed unless both src and dest options point to the same memory.
*
+ * Even on error it is guaranteed that allocated options from src and dest
+ * no longer alias each other afterwards; in particular calling av_opt_free()
+ * on both src and dest is safe afterwards if dest has been memdup'ed from src.
+ *
* @param dest Object to copy into
* @param src Object to copy from
* @return 0 on success, negative on error
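The iterate-style child enumeration described above is typically driven like this sketch (dump_all_options() is an illustrative name; the AVClass pointer itself serves as a "fake object" for av_opt_next()):

    #include <stdio.h>

    #include <libavutil/opt.h>

    static void dump_all_options(const AVClass *c)
    {
        const AVOption *opt = NULL;
        const AVClass *child;
        void *iter = NULL;

        while ((opt = av_opt_next(&c, opt)))
            printf("%s.%s\n", c->class_name, opt->name);

        /* Recurse into every potential child class, not just instantiated
         * children, as recommended for building option listings. */
        while ((child = av_opt_child_class_iterate(c, &iter)))
            dump_all_options(child);
    }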
diff --git a/media/ffvpx/libavutil/parseutils.c b/media/ffvpx/libavutil/parseutils.c
index af90e5bf2c..c2916c458b 100644
--- a/media/ffvpx/libavutil/parseutils.c
+++ b/media/ffvpx/libavutil/parseutils.c
@@ -102,6 +102,7 @@ static const VideoSizeAbbr video_size_abbrs[] = {
{ "wsxga", 1600,1024 },
{ "wuxga", 1920,1200 },
{ "woxga", 2560,1600 },
+ { "wqhd", 2560,1440 },
{ "wqsxga", 3200,2048 },
{ "wquxga", 3840,2400 },
{ "whsxga", 6400,4096 },
@@ -111,6 +112,7 @@ static const VideoSizeAbbr video_size_abbrs[] = {
{ "hd480", 852, 480 },
{ "hd720", 1280, 720 },
{ "hd1080", 1920,1080 },
+ { "quadhd", 2560,1440 },
{ "2k", 2048,1080 }, /* Digital Cinema System Specification */
{ "2kdci", 2048,1080 },
{ "2kflat", 1998,1080 },
diff --git a/media/ffvpx/libavutil/parseutils.h b/media/ffvpx/libavutil/parseutils.h
index e66d24b76e..dad5c2775b 100644
--- a/media/ffvpx/libavutil/parseutils.h
+++ b/media/ffvpx/libavutil/parseutils.h
@@ -79,6 +79,8 @@ int av_parse_video_rate(AVRational *rate, const char *str);
/**
* Put the RGBA values that correspond to color_string in rgba_color.
*
+ * @param rgba_color 4-elements array of uint8_t values, where the respective
+ * red, green, blue and alpha component values are written.
* @param color_string a string specifying a color. It can be the name of
* a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence,
* possibly followed by "@" and a string representing the alpha
@@ -92,6 +94,8 @@ int av_parse_video_rate(AVRational *rate, const char *str);
* @param slen length of the initial part of color_string containing the
* color. It can be set to -1 if color_string is a null terminated string
* containing nothing else than the color.
+ * @param log_ctx a pointer to an arbitrary struct of which the first field
+ * is a pointer to an AVClass struct (used for av_log()). Can be NULL.
* @return >= 0 in case of success, a negative value in case of
* failure (for example if color_string cannot be parsed).
*/
@@ -106,7 +110,7 @@ int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
* av_parse_color().
*
* @param color_idx index of the requested color, starting from 0
- * @param rgbp if not NULL, will point to a 3-elements array with the color value in RGB
+ * @param rgb if not NULL, will point to a 3-elements array with the color value in RGB
* @return the color name string or NULL if color_idx is not in the array
*/
const char *av_get_known_color_name(int color_idx, const uint8_t **rgb);
@@ -162,19 +166,19 @@ int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info
* by the standard strptime().
*
* The supported input field descriptors are listed below.
- * - %H: the hour as a decimal number, using a 24-hour clock, in the
+ * - `%%H`: the hour as a decimal number, using a 24-hour clock, in the
* range '00' through '23'
- * - %J: hours as a decimal number, in the range '0' through INT_MAX
- * - %M: the minute as a decimal number, using a 24-hour clock, in the
+ * - `%%J`: hours as a decimal number, in the range '0' through INT_MAX
+ * - `%%M`: the minute as a decimal number, using a 24-hour clock, in the
* range '00' through '59'
- * - %S: the second as a decimal number, using a 24-hour clock, in the
+ * - `%%S`: the second as a decimal number, using a 24-hour clock, in the
* range '00' through '59'
- * - %Y: the year as a decimal number, using the Gregorian calendar
- * - %m: the month as a decimal number, in the range '1' through '12'
- * - %d: the day of the month as a decimal number, in the range '1'
+ * - `%%Y`: the year as a decimal number, using the Gregorian calendar
+ * - `%%m`: the month as a decimal number, in the range '1' through '12'
+ * - `%%d`: the day of the month as a decimal number, in the range '1'
* through '31'
- * - %T: alias for '%H:%M:%S'
- * - %%: a literal '%'
+ * - `%%T`: alias for `%%H:%%M:%%S`
+ * - `%%`: a literal `%`
*
* @return a pointer to the first character not processed in this function
* call. In case the input string contains more characters than
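A minimal sketch of the av_parse_color() usage documented above (the color string is arbitrary; the "@0.5" suffix sets the alpha component):

    #include <stdint.h>
    #include <stdio.h>

    #include <libavutil/parseutils.h>

    int main(void)
    {
        uint8_t rgba[4];

        if (av_parse_color(rgba, "red@0.5", -1, NULL) < 0)
            return 1;
        printf("r=%d g=%d b=%d a=%d\n", rgba[0], rgba[1], rgba[2], rgba[3]);
        return 0;
    }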
diff --git a/media/ffvpx/libavutil/pixdesc.c b/media/ffvpx/libavutil/pixdesc.c
index b97b0665b0..62a2ae08d9 100644
--- a/media/ffvpx/libavutil/pixdesc.c
+++ b/media/ffvpx/libavutil/pixdesc.c
@@ -22,14 +22,11 @@
#include <stdio.h>
#include <string.h>
-#include "avassert.h"
#include "avstring.h"
#include "common.h"
#include "pixfmt.h"
#include "pixdesc.h"
-#include "internal.h"
#include "intreadwrite.h"
-#include "version.h"
void av_read_image_line2(void *dst,
const uint8_t *data[4], const int linesize[4],
@@ -49,19 +46,35 @@ void av_read_image_line2(void *dst,
uint32_t *dst32 = dst;
if (flags & AV_PIX_FMT_FLAG_BITSTREAM) {
- int skip = x * step + comp.offset;
- const uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
- int shift = 8 - depth - (skip & 7);
+ if (depth == 10) {
+ // Assume all channels are packed into a 32bit value
+ const uint8_t *byte_p = data[plane] + y * linesize[plane];
+ const uint32_t *p = (uint32_t *)byte_p;
- while (w--) {
- int val = (*p >> shift) & mask;
- if (read_pal_component)
- val = data[1][4*val + c];
- shift -= step;
- p -= shift >> 3;
- shift &= 7;
- if (dst_element_size == 4) *dst32++ = val;
- else *dst16++ = val;
+ while (w--) {
+ int val = AV_RB32(p);
+ val = (val >> comp.offset) & mask;
+ if (read_pal_component)
+ val = data[1][4*val + c];
+ if (dst_element_size == 4) *dst32++ = val;
+ else *dst16++ = val;
+ p++;
+ }
+ } else {
+ int skip = x * step + comp.offset;
+ const uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
+ int shift = 8 - depth - (skip & 7);
+
+ while (w--) {
+ int val = (*p >> shift) & mask;
+ if (read_pal_component)
+ val = data[1][4*val + c];
+ shift -= step;
+ p -= shift >> 3;
+ shift &= 7;
+ if (dst_element_size == 4) *dst32++ = val;
+ else *dst16++ = val;
+ }
}
} else {
const uint8_t *p = data[plane] + y * linesize[plane] +
@@ -112,15 +125,29 @@ void av_write_image_line2(const void *src,
const uint16_t *src16 = src;
if (flags & AV_PIX_FMT_FLAG_BITSTREAM) {
- int skip = x * step + comp.offset;
- uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
- int shift = 8 - depth - (skip & 7);
+ if (depth == 10) {
+ // Assume all channels are packed into a 32bit value
+ const uint8_t *byte_p = data[plane] + y * linesize[plane];
+ uint32_t *p = (uint32_t *)byte_p;
+ int offset = comp.offset;
+ uint32_t mask = ((1ULL << depth) - 1) << offset;
- while (w--) {
- *p |= (src_element_size == 4 ? *src32++ : *src16++) << shift;
- shift -= step;
- p -= shift >> 3;
- shift &= 7;
+ while (w--) {
+ uint16_t val = src_element_size == 4 ? *src32++ : *src16++;
+ AV_WB32(p, (AV_RB32(p) & ~mask) | (val << offset));
+ p++;
+ }
+ } else {
+ int skip = x * step + comp.offset;
+ uint8_t *p = data[plane] + y * linesize[plane] + (skip >> 3);
+ int shift = 8 - depth - (skip & 7);
+
+ while (w--) {
+ *p |= (src_element_size == 4 ? *src32++ : *src16++) << shift;
+ shift -= step;
+ p -= shift >> 3;
+ shift &= 7;
+ }
}
} else {
int shift = comp.shift;
@@ -167,9 +194,6 @@ void av_write_image_line(const uint16_t *src,
av_write_image_line2(src, data, linesize, desc, x, y, c, w, 2);
}
-#if FF_API_PLUS1_MINUS1
-FF_DISABLE_DEPRECATION_WARNINGS
-#endif
static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
[AV_PIX_FMT_YUV420P] = {
.name = "yuv420p",
@@ -177,9 +201,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -189,9 +213,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 8, 1, 7, 1 }, /* Y */
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* U */
- { 0, 4, 3, 0, 8, 3, 7, 4 }, /* V */
+ { 0, 2, 0, 0, 8 }, /* Y */
+ { 0, 4, 1, 0, 8 }, /* U */
+ { 0, 4, 3, 0, 8 }, /* V */
},
},
[AV_PIX_FMT_YVYU422] = {
@@ -200,20 +224,43 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 8, 1, 7, 1 }, /* Y */
- { 0, 4, 3, 0, 8, 3, 7, 4 }, /* U */
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* V */
+ { 0, 2, 0, 0, 8 }, /* Y */
+ { 0, 4, 3, 0, 8 }, /* U */
+ { 0, 4, 1, 0, 8 }, /* V */
},
},
+ [AV_PIX_FMT_Y210LE] = {
+ .name = "y210le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 6, 10 }, /* Y */
+ { 0, 8, 2, 6, 10 }, /* U */
+ { 0, 8, 6, 6, 10 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_Y210BE] = {
+ .name = "y210be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 6, 10 }, /* Y */
+ { 0, 8, 2, 6, 10 }, /* U */
+ { 0, 8, 6, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ },
[AV_PIX_FMT_RGB24] = {
.name = "rgb24",
.nb_components = 3,
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 3, 0, 0, 8, 2, 7, 1 }, /* R */
- { 0, 3, 1, 0, 8, 2, 7, 2 }, /* G */
- { 0, 3, 2, 0, 8, 2, 7, 3 }, /* B */
+ { 0, 3, 0, 0, 8 }, /* R */
+ { 0, 3, 1, 0, 8 }, /* G */
+ { 0, 3, 2, 0, 8 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -223,21 +270,69 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 3, 2, 0, 8, 2, 7, 3 }, /* R */
- { 0, 3, 1, 0, 8, 2, 7, 2 }, /* G */
- { 0, 3, 0, 0, 8, 2, 7, 1 }, /* B */
+ { 0, 3, 2, 0, 8 }, /* R */
+ { 0, 3, 1, 0, 8 }, /* G */
+ { 0, 3, 0, 0, 8 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_X2RGB10LE] = {
+ .name = "x2rgb10le",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 2, 4, 10 }, /* R */
+ { 0, 4, 1, 2, 10 }, /* G */
+ { 0, 4, 0, 0, 10 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB,
+ },
+ [AV_PIX_FMT_X2RGB10BE] = {
+ .name = "x2rgb10be",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 0, 4, 10 }, /* R */
+ { 0, 4, 1, 2, 10 }, /* G */
+ { 0, 4, 2, 0, 10 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_X2BGR10LE] = {
+ .name = "x2bgr10le",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 0, 0, 10 }, /* R */
+ { 0, 4, 1, 2, 10 }, /* G */
+ { 0, 4, 2, 4, 10 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
+ [AV_PIX_FMT_X2BGR10BE] = {
+ .name = "x2bgr10be",
+ .nb_components= 3,
+ .log2_chroma_w= 0,
+ .log2_chroma_h= 0,
+ .comp = {
+ { 0, 4, 2, 0, 10 }, /* R */
+ { 0, 4, 1, 2, 10 }, /* G */
+ { 0, 4, 0, 4, 10 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BE,
+ },
[AV_PIX_FMT_YUV422P] = {
.name = "yuv422p",
.nb_components = 3,
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -247,9 +342,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -259,9 +354,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 2,
.log2_chroma_h = 2,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -271,9 +366,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 2,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -283,9 +378,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 2,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -295,9 +390,8 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
+ { 0, 1, 0, 0, 8 }, /* Y */
},
- .flags = FF_PSEUDOPAL,
.alias = "gray8,y8",
},
[AV_PIX_FMT_MONOWHITE] = {
@@ -306,7 +400,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 1, 0, 0, 1 }, /* Y */
+ { 0, 1, 0, 0, 1 }, /* Y */
},
.flags = AV_PIX_FMT_FLAG_BITSTREAM,
},
@@ -316,7 +410,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 7, 1, 0, 0, 1 }, /* Y */
+ { 0, 1, 0, 7, 1 }, /* Y */
},
.flags = AV_PIX_FMT_FLAG_BITSTREAM,
},
@@ -326,7 +420,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 },
+ { 0, 1, 0, 0, 8 },
},
.flags = AV_PIX_FMT_FLAG_PAL | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -336,9 +430,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -348,9 +442,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -360,25 +454,27 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
+#if FF_API_XVMC
[AV_PIX_FMT_XVMC] = {
.name = "xvmc",
.flags = AV_PIX_FMT_FLAG_HWACCEL,
},
+#endif
[AV_PIX_FMT_UYVY422] = {
.name = "uyvy422",
.nb_components = 3,
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 1, 0, 8, 1, 7, 2 }, /* Y */
- { 0, 4, 0, 0, 8, 3, 7, 1 }, /* U */
- { 0, 4, 2, 0, 8, 3, 7, 3 }, /* V */
+ { 0, 2, 1, 0, 8 }, /* Y */
+ { 0, 4, 0, 0, 8 }, /* U */
+ { 0, 4, 2, 0, 8 }, /* V */
},
},
[AV_PIX_FMT_UYYVYY411] = {
@@ -387,9 +483,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 2,
.log2_chroma_h = 0,
.comp = {
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* Y */
- { 0, 6, 0, 0, 8, 5, 7, 1 }, /* U */
- { 0, 6, 3, 0, 8, 5, 7, 4 }, /* V */
+ { 0, 4, 1, 0, 8 }, /* Y */
+ { 0, 6, 0, 0, 8 }, /* U */
+ { 0, 6, 3, 0, 8 }, /* V */
},
},
[AV_PIX_FMT_BGR8] = {
@@ -398,11 +494,11 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 3, 0, 2, 1 }, /* R */
- { 0, 1, 0, 3, 3, 0, 2, 1 }, /* G */
- { 0, 1, 0, 6, 2, 0, 1, 1 }, /* B */
+ { 0, 1, 0, 0, 3 }, /* R */
+ { 0, 1, 0, 3, 3 }, /* G */
+ { 0, 1, 0, 6, 2 }, /* B */
},
- .flags = AV_PIX_FMT_FLAG_RGB | FF_PSEUDOPAL,
+ .flags = AV_PIX_FMT_FLAG_RGB,
},
[AV_PIX_FMT_BGR4] = {
.name = "bgr4",
@@ -410,9 +506,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 4, 3, 0, 1, 3, 0, 4 }, /* R */
- { 0, 4, 1, 0, 2, 3, 1, 2 }, /* G */
- { 0, 4, 0, 0, 1, 3, 0, 1 }, /* B */
+ { 0, 4, 3, 0, 1 }, /* R */
+ { 0, 4, 1, 0, 2 }, /* G */
+ { 0, 4, 0, 0, 1 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_RGB,
},
@@ -422,11 +518,11 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 1, 0, 0, 1 }, /* R */
- { 0, 1, 0, 1, 2, 0, 1, 1 }, /* G */
- { 0, 1, 0, 3, 1, 0, 0, 1 }, /* B */
+ { 0, 1, 0, 0, 1 }, /* R */
+ { 0, 1, 0, 1, 2 }, /* G */
+ { 0, 1, 0, 3, 1 }, /* B */
},
- .flags = AV_PIX_FMT_FLAG_RGB | FF_PSEUDOPAL,
+ .flags = AV_PIX_FMT_FLAG_RGB,
},
[AV_PIX_FMT_RGB8] = {
.name = "rgb8",
@@ -434,11 +530,11 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 6, 2, 0, 1, 1 }, /* R */
- { 0, 1, 0, 3, 3, 0, 2, 1 }, /* G */
- { 0, 1, 0, 0, 3, 0, 2, 1 }, /* B */
+ { 0, 1, 0, 6, 2 }, /* R */
+ { 0, 1, 0, 3, 3 }, /* G */
+ { 0, 1, 0, 0, 3 }, /* B */
},
- .flags = AV_PIX_FMT_FLAG_RGB | FF_PSEUDOPAL,
+ .flags = AV_PIX_FMT_FLAG_RGB,
},
[AV_PIX_FMT_RGB4] = {
.name = "rgb4",
@@ -446,9 +542,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 4, 0, 0, 1, 3, 0, 1 }, /* R */
- { 0, 4, 1, 0, 2, 3, 1, 2 }, /* G */
- { 0, 4, 3, 0, 1, 3, 0, 4 }, /* B */
+ { 0, 4, 0, 0, 1 }, /* R */
+ { 0, 4, 1, 0, 2 }, /* G */
+ { 0, 4, 3, 0, 1 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_RGB,
},
@@ -458,11 +554,11 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 3, 1, 0, 0, 1 }, /* R */
- { 0, 1, 0, 1, 2, 0, 1, 1 }, /* G */
- { 0, 1, 0, 0, 1, 0, 0, 1 }, /* B */
+ { 0, 1, 0, 3, 1 }, /* R */
+ { 0, 1, 0, 1, 2 }, /* G */
+ { 0, 1, 0, 0, 1 }, /* B */
},
- .flags = AV_PIX_FMT_FLAG_RGB | FF_PSEUDOPAL,
+ .flags = AV_PIX_FMT_FLAG_RGB,
},
[AV_PIX_FMT_NV12] = {
.name = "nv12",
@@ -470,9 +566,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 2, 0, 0, 8, 1, 7, 1 }, /* U */
- { 1, 2, 1, 0, 8, 1, 7, 2 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 0, 0, 8 }, /* U */
+ { 1, 2, 1, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -482,9 +578,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 2, 1, 0, 8, 1, 7, 2 }, /* U */
- { 1, 2, 0, 0, 8, 1, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 1, 0, 8 }, /* U */
+ { 1, 2, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -494,10 +590,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* R */
- { 0, 4, 2, 0, 8, 3, 7, 3 }, /* G */
- { 0, 4, 3, 0, 8, 3, 7, 4 }, /* B */
- { 0, 4, 0, 0, 8, 3, 7, 1 }, /* A */
+ { 0, 4, 1, 0, 8 }, /* R */
+ { 0, 4, 2, 0, 8 }, /* G */
+ { 0, 4, 3, 0, 8 }, /* B */
+ { 0, 4, 0, 0, 8 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -507,10 +603,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 4, 0, 0, 8, 3, 7, 1 }, /* R */
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* G */
- { 0, 4, 2, 0, 8, 3, 7, 3 }, /* B */
- { 0, 4, 3, 0, 8, 3, 7, 4 }, /* A */
+ { 0, 4, 0, 0, 8 }, /* R */
+ { 0, 4, 1, 0, 8 }, /* G */
+ { 0, 4, 2, 0, 8 }, /* B */
+ { 0, 4, 3, 0, 8 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -520,10 +616,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 4, 3, 0, 8, 3, 7, 4 }, /* R */
- { 0, 4, 2, 0, 8, 3, 7, 3 }, /* G */
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* B */
- { 0, 4, 0, 0, 8, 3, 7, 1 }, /* A */
+ { 0, 4, 3, 0, 8 }, /* R */
+ { 0, 4, 2, 0, 8 }, /* G */
+ { 0, 4, 1, 0, 8 }, /* B */
+ { 0, 4, 0, 0, 8 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -533,10 +629,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 4, 2, 0, 8, 3, 7, 3 }, /* R */
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* G */
- { 0, 4, 0, 0, 8, 3, 7, 1 }, /* B */
- { 0, 4, 3, 0, 8, 3, 7, 4 }, /* A */
+ { 0, 4, 2, 0, 8 }, /* R */
+ { 0, 4, 1, 0, 8 }, /* G */
+ { 0, 4, 0, 0, 8 }, /* B */
+ { 0, 4, 3, 0, 8 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -546,9 +642,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w= 0,
.log2_chroma_h= 0,
.comp = {
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* R */
- { 0, 4, 2, 0, 8, 3, 7, 3 }, /* G */
- { 0, 4, 3, 0, 8, 3, 7, 4 }, /* B */
+ { 0, 4, 1, 0, 8 }, /* R */
+ { 0, 4, 2, 0, 8 }, /* G */
+ { 0, 4, 3, 0, 8 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -558,9 +654,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w= 0,
.log2_chroma_h= 0,
.comp = {
- { 0, 4, 0, 0, 8, 3, 7, 1 }, /* R */
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* G */
- { 0, 4, 2, 0, 8, 3, 7, 3 }, /* B */
+ { 0, 4, 0, 0, 8 }, /* R */
+ { 0, 4, 1, 0, 8 }, /* G */
+ { 0, 4, 2, 0, 8 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -570,9 +666,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w= 0,
.log2_chroma_h= 0,
.comp = {
- { 0, 4, 3, 0, 8, 3, 7, 4 }, /* R */
- { 0, 4, 2, 0, 8, 3, 7, 3 }, /* G */
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* B */
+ { 0, 4, 3, 0, 8 }, /* R */
+ { 0, 4, 2, 0, 8 }, /* G */
+ { 0, 4, 1, 0, 8 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -582,9 +678,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w= 0,
.log2_chroma_h= 0,
.comp = {
- { 0, 4, 2, 0, 8, 3, 7, 3 }, /* R */
- { 0, 4, 1, 0, 8, 3, 7, 2 }, /* G */
- { 0, 4, 0, 0, 8, 3, 7, 1 }, /* B */
+ { 0, 4, 2, 0, 8 }, /* R */
+ { 0, 4, 1, 0, 8 }, /* G */
+ { 0, 4, 0, 0, 8 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -594,7 +690,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
+ { 0, 2, 0, 0, 9 }, /* Y */
},
.flags = AV_PIX_FMT_FLAG_BE,
.alias = "y9be",
@@ -605,7 +701,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
+ { 0, 2, 0, 0, 9 }, /* Y */
},
.alias = "y9le",
},
@@ -615,7 +711,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
+ { 0, 2, 0, 0, 10 }, /* Y */
},
.flags = AV_PIX_FMT_FLAG_BE,
.alias = "y10be",
@@ -626,7 +722,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
+ { 0, 2, 0, 0, 10 }, /* Y */
},
.alias = "y10le",
},
@@ -636,7 +732,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
+ { 0, 2, 0, 0, 12 }, /* Y */
},
.flags = AV_PIX_FMT_FLAG_BE,
.alias = "y12be",
@@ -647,7 +743,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
+ { 0, 2, 0, 0, 12 }, /* Y */
},
.alias = "y12le",
},
@@ -657,7 +753,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* Y */
+ { 0, 2, 0, 0, 14 }, /* Y */
},
.flags = AV_PIX_FMT_FLAG_BE,
.alias = "y14be",
@@ -668,7 +764,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* Y */
+ { 0, 2, 0, 0, 14 }, /* Y */
},
.alias = "y14le",
},
@@ -678,7 +774,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
+ { 0, 2, 0, 0, 16 }, /* Y */
},
.flags = AV_PIX_FMT_FLAG_BE,
.alias = "y16be",
@@ -689,7 +785,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
+ { 0, 2, 0, 0, 16 }, /* Y */
},
.alias = "y16le",
},
@@ -699,9 +795,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 1,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -711,9 +807,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 1,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -723,9 +819,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -735,9 +831,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -747,9 +843,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -759,9 +855,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -771,10 +867,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
- { 3, 1, 0, 0, 8, 0, 7, 1 }, /* A */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ { 3, 1, 0, 0, 8 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -784,10 +880,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
- { 3, 1, 0, 0, 8, 0, 7, 1 }, /* A */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ { 3, 1, 0, 0, 8 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -797,10 +893,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* U */
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* V */
- { 3, 1, 0, 0, 8, 0, 7, 1 }, /* A */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 1, 0, 0, 8 }, /* U */
+ { 2, 1, 0, 0, 8 }, /* V */
+ { 3, 1, 0, 0, 8 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -810,10 +906,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
- { 3, 2, 0, 0, 9, 1, 8, 1 }, /* A */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -823,10 +919,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
- { 3, 2, 0, 0, 9, 1, 8, 1 }, /* A */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -836,10 +932,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
- { 3, 2, 0, 0, 9, 1, 8, 1 }, /* A */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -849,10 +945,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
- { 3, 2, 0, 0, 9, 1, 8, 1 }, /* A */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -862,10 +958,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
- { 3, 2, 0, 0, 9, 1, 8, 1 }, /* A */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -875,10 +971,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
- { 3, 2, 0, 0, 9, 1, 8, 1 }, /* A */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
+ { 3, 2, 0, 0, 9 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -888,10 +984,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
- { 3, 2, 0, 0, 10, 1, 9, 1 }, /* A */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -901,10 +997,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
- { 3, 2, 0, 0, 10, 1, 9, 1 }, /* A */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -914,10 +1010,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
- { 3, 2, 0, 0, 10, 1, 9, 1 }, /* A */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -927,10 +1023,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
- { 3, 2, 0, 0, 10, 1, 9, 1 }, /* A */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -940,10 +1036,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
- { 3, 2, 0, 0, 10, 1, 9, 1 }, /* A */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -953,10 +1049,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
- { 3, 2, 0, 0, 10, 1, 9, 1 }, /* A */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
+ { 3, 2, 0, 0, 10 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -966,10 +1062,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
- { 3, 2, 0, 0, 16, 1, 15, 1 }, /* A */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -979,10 +1075,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
- { 3, 2, 0, 0, 16, 1, 15, 1 }, /* A */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -992,10 +1088,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
- { 3, 2, 0, 0, 16, 1, 15, 1 }, /* A */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1005,10 +1101,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
- { 3, 2, 0, 0, 16, 1, 15, 1 }, /* A */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1018,10 +1114,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
- { 3, 2, 0, 0, 16, 1, 15, 1 }, /* A */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1031,10 +1127,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
- { 3, 2, 0, 0, 16, 1, 15, 1 }, /* A */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
+ { 3, 2, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1044,9 +1140,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 6, 0, 0, 16, 5, 15, 1 }, /* R */
- { 0, 6, 2, 0, 16, 5, 15, 3 }, /* G */
- { 0, 6, 4, 0, 16, 5, 15, 5 }, /* B */
+ { 0, 6, 0, 0, 16 }, /* R */
+ { 0, 6, 2, 0, 16 }, /* G */
+ { 0, 6, 4, 0, 16 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_BE,
},
@@ -1056,9 +1152,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 6, 0, 0, 16, 5, 15, 1 }, /* R */
- { 0, 6, 2, 0, 16, 5, 15, 3 }, /* G */
- { 0, 6, 4, 0, 16, 5, 15, 5 }, /* B */
+ { 0, 6, 0, 0, 16 }, /* R */
+ { 0, 6, 2, 0, 16 }, /* G */
+ { 0, 6, 4, 0, 16 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -1068,10 +1164,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 8, 0, 0, 16, 7, 15, 1 }, /* R */
- { 0, 8, 2, 0, 16, 7, 15, 3 }, /* G */
- { 0, 8, 4, 0, 16, 7, 15, 5 }, /* B */
- { 0, 8, 6, 0, 16, 7, 15, 7 }, /* A */
+ { 0, 8, 0, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 4, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1081,10 +1177,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 8, 0, 0, 16, 7, 15, 1 }, /* R */
- { 0, 8, 2, 0, 16, 7, 15, 3 }, /* G */
- { 0, 8, 4, 0, 16, 7, 15, 5 }, /* B */
- { 0, 8, 6, 0, 16, 7, 15, 7 }, /* A */
+ { 0, 8, 0, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 4, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1094,9 +1190,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, -1, 3, 5, 1, 4, 0 }, /* R */
- { 0, 2, 0, 5, 6, 1, 5, 1 }, /* G */
- { 0, 2, 0, 0, 5, 1, 4, 1 }, /* B */
+ { 0, 2, -1, 3, 5 }, /* R */
+ { 0, 2, 0, 5, 6 }, /* G */
+ { 0, 2, 0, 0, 5 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
},
@@ -1106,9 +1202,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 1, 3, 5, 1, 4, 2 }, /* R */
- { 0, 2, 0, 5, 6, 1, 5, 1 }, /* G */
- { 0, 2, 0, 0, 5, 1, 4, 1 }, /* B */
+ { 0, 2, 1, 3, 5 }, /* R */
+ { 0, 2, 0, 5, 6 }, /* G */
+ { 0, 2, 0, 0, 5 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -1118,9 +1214,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, -1, 2, 5, 1, 4, 0 }, /* R */
- { 0, 2, 0, 5, 5, 1, 4, 1 }, /* G */
- { 0, 2, 0, 0, 5, 1, 4, 1 }, /* B */
+ { 0, 2, -1, 2, 5 }, /* R */
+ { 0, 2, 0, 5, 5 }, /* G */
+ { 0, 2, 0, 0, 5 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
},
@@ -1130,9 +1226,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 1, 2, 5, 1, 4, 2 }, /* R */
- { 0, 2, 0, 5, 5, 1, 4, 1 }, /* G */
- { 0, 2, 0, 0, 5, 1, 4, 1 }, /* B */
+ { 0, 2, 1, 2, 5 }, /* R */
+ { 0, 2, 0, 5, 5 }, /* G */
+ { 0, 2, 0, 0, 5 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -1142,9 +1238,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, -1, 0, 4, 1, 3, 0 }, /* R */
- { 0, 2, 0, 4, 4, 1, 3, 1 }, /* G */
- { 0, 2, 0, 0, 4, 1, 3, 1 }, /* B */
+ { 0, 2, -1, 0, 4 }, /* R */
+ { 0, 2, 0, 4, 4 }, /* G */
+ { 0, 2, 0, 0, 4 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
},
@@ -1154,9 +1250,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 1, 0, 4, 1, 3, 2 }, /* R */
- { 0, 2, 0, 4, 4, 1, 3, 1 }, /* G */
- { 0, 2, 0, 0, 4, 1, 3, 1 }, /* B */
+ { 0, 2, 1, 0, 4 }, /* R */
+ { 0, 2, 0, 4, 4 }, /* G */
+ { 0, 2, 0, 0, 4 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -1166,9 +1262,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 6, 4, 0, 16, 5, 15, 5 }, /* R */
- { 0, 6, 2, 0, 16, 5, 15, 3 }, /* G */
- { 0, 6, 0, 0, 16, 5, 15, 1 }, /* B */
+ { 0, 6, 4, 0, 16 }, /* R */
+ { 0, 6, 2, 0, 16 }, /* G */
+ { 0, 6, 0, 0, 16 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
},
@@ -1178,9 +1274,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 6, 4, 0, 16, 5, 15, 5 }, /* R */
- { 0, 6, 2, 0, 16, 5, 15, 3 }, /* G */
- { 0, 6, 0, 0, 16, 5, 15, 1 }, /* B */
+ { 0, 6, 4, 0, 16 }, /* R */
+ { 0, 6, 2, 0, 16 }, /* G */
+ { 0, 6, 0, 0, 16 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -1190,10 +1286,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 8, 4, 0, 16, 7, 15, 5 }, /* R */
- { 0, 8, 2, 0, 16, 7, 15, 3 }, /* G */
- { 0, 8, 0, 0, 16, 7, 15, 1 }, /* B */
- { 0, 8, 6, 0, 16, 7, 15, 7 }, /* A */
+ { 0, 8, 4, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 0, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1203,10 +1299,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 8, 4, 0, 16, 7, 15, 5 }, /* R */
- { 0, 8, 2, 0, 16, 7, 15, 3 }, /* G */
- { 0, 8, 0, 0, 16, 7, 15, 1 }, /* B */
- { 0, 8, 6, 0, 16, 7, 15, 7 }, /* A */
+ { 0, 8, 4, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 0, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1216,9 +1312,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 5, 1, 4, 1 }, /* R */
- { 0, 2, 0, 5, 6, 1, 5, 1 }, /* G */
- { 0, 2, -1, 3, 5, 1, 4, 0 }, /* B */
+ { 0, 2, 0, 0, 5 }, /* R */
+ { 0, 2, 0, 5, 6 }, /* G */
+ { 0, 2, -1, 3, 5 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
},
@@ -1228,9 +1324,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 5, 1, 4, 1 }, /* R */
- { 0, 2, 0, 5, 6, 1, 5, 1 }, /* G */
- { 0, 2, 1, 3, 5, 1, 4, 2 }, /* B */
+ { 0, 2, 0, 0, 5 }, /* R */
+ { 0, 2, 0, 5, 6 }, /* G */
+ { 0, 2, 1, 3, 5 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -1240,9 +1336,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 5, 1, 4, 1 }, /* R */
- { 0, 2, 0, 5, 5, 1, 4, 1 }, /* G */
- { 0, 2, -1, 2, 5, 1, 4, 0 }, /* B */
+ { 0, 2, 0, 0, 5 }, /* R */
+ { 0, 2, 0, 5, 5 }, /* G */
+ { 0, 2, -1, 2, 5 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
},
@@ -1252,9 +1348,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 5, 1, 4, 1 }, /* R */
- { 0, 2, 0, 5, 5, 1, 4, 1 }, /* G */
- { 0, 2, 1, 2, 5, 1, 4, 2 }, /* B */
+ { 0, 2, 0, 0, 5 }, /* R */
+ { 0, 2, 0, 5, 5 }, /* G */
+ { 0, 2, 1, 2, 5 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
@@ -1264,9 +1360,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 4, 1, 3, 1 }, /* R */
- { 0, 2, 0, 4, 4, 1, 3, 1 }, /* G */
- { 0, 2, -1, 0, 4, 1, 3, 0 }, /* B */
+ { 0, 2, 0, 0, 4 }, /* R */
+ { 0, 2, 0, 4, 4 }, /* G */
+ { 0, 2, -1, 0, 4 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB,
},
@@ -1276,48 +1372,27 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 4, 1, 3, 1 }, /* R */
- { 0, 2, 0, 4, 4, 1, 3, 1 }, /* G */
- { 0, 2, 1, 0, 4, 1, 3, 2 }, /* B */
+ { 0, 2, 0, 0, 4 }, /* R */
+ { 0, 2, 0, 4, 4 }, /* G */
+ { 0, 2, 1, 0, 4 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_RGB,
},
-#if FF_API_VAAPI
- [AV_PIX_FMT_VAAPI_MOCO] = {
- .name = "vaapi_moco",
- .log2_chroma_w = 1,
- .log2_chroma_h = 1,
- .flags = AV_PIX_FMT_FLAG_HWACCEL,
- },
- [AV_PIX_FMT_VAAPI_IDCT] = {
- .name = "vaapi_idct",
- .log2_chroma_w = 1,
- .log2_chroma_h = 1,
- .flags = AV_PIX_FMT_FLAG_HWACCEL,
- },
- [AV_PIX_FMT_VAAPI_VLD] = {
- .name = "vaapi_vld",
- .log2_chroma_w = 1,
- .log2_chroma_h = 1,
- .flags = AV_PIX_FMT_FLAG_HWACCEL,
- },
-#else
[AV_PIX_FMT_VAAPI] = {
.name = "vaapi",
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.flags = AV_PIX_FMT_FLAG_HWACCEL,
},
-#endif
[AV_PIX_FMT_YUV420P9LE] = {
.name = "yuv420p9le",
.nb_components = 3,
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1327,9 +1402,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1339,9 +1414,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1351,9 +1426,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1363,9 +1438,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1375,9 +1450,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1387,9 +1462,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* Y */
- { 1, 2, 0, 0, 14, 1, 13, 1 }, /* U */
- { 2, 2, 0, 0, 14, 1, 13, 1 }, /* V */
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1399,9 +1474,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* Y */
- { 1, 2, 0, 0, 14, 1, 13, 1 }, /* U */
- { 2, 2, 0, 0, 14, 1, 13, 1 }, /* V */
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1411,9 +1486,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1423,9 +1498,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1435,9 +1510,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1447,9 +1522,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1459,9 +1534,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1471,9 +1546,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1483,9 +1558,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1495,9 +1570,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1507,9 +1582,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* Y */
- { 1, 2, 0, 0, 14, 1, 13, 1 }, /* U */
- { 2, 2, 0, 0, 14, 1, 13, 1 }, /* V */
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1519,9 +1594,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* Y */
- { 1, 2, 0, 0, 14, 1, 13, 1 }, /* U */
- { 2, 2, 0, 0, 14, 1, 13, 1 }, /* V */
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1531,9 +1606,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1543,9 +1618,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1555,9 +1630,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1567,9 +1642,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* U */
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* V */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 2, 0, 0, 16 }, /* U */
+ { 2, 2, 0, 0, 16 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1579,9 +1654,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1591,9 +1666,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* U */
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 2, 0, 0, 10 }, /* U */
+ { 2, 2, 0, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1603,9 +1678,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1615,9 +1690,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* Y */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* U */
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* V */
+ { 0, 2, 0, 0, 9 }, /* Y */
+ { 1, 2, 0, 0, 9 }, /* U */
+ { 2, 2, 0, 0, 9 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1627,9 +1702,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1639,9 +1714,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1651,9 +1726,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* Y */
- { 1, 2, 0, 0, 14, 1, 13, 1 }, /* U */
- { 2, 2, 0, 0, 14, 1, 13, 1 }, /* V */
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1663,9 +1738,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* Y */
- { 1, 2, 0, 0, 14, 1, 13, 1 }, /* U */
- { 2, 2, 0, 0, 14, 1, 13, 1 }, /* V */
+ { 0, 2, 0, 0, 14 }, /* Y */
+ { 1, 2, 0, 0, 14 }, /* U */
+ { 2, 2, 0, 0, 14 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR,
},
@@ -1685,8 +1760,8 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.name = "ya8",
.nb_components = 2,
.comp = {
- { 0, 2, 0, 0, 8, 1, 7, 1 }, /* Y */
- { 0, 2, 1, 0, 8, 1, 7, 2 }, /* A */
+ { 0, 2, 0, 0, 8 }, /* Y */
+ { 0, 2, 1, 0, 8 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_ALPHA,
.alias = "gray8a",
@@ -1695,8 +1770,8 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.name = "ya16le",
.nb_components = 2,
.comp = {
- { 0, 4, 0, 0, 16, 3, 15, 1 }, /* Y */
- { 0, 4, 2, 0, 16, 3, 15, 3 }, /* A */
+ { 0, 4, 0, 0, 16 }, /* Y */
+ { 0, 4, 2, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1704,8 +1779,8 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.name = "ya16be",
.nb_components = 2,
.comp = {
- { 0, 4, 0, 0, 16, 3, 15, 1 }, /* Y */
- { 0, 4, 2, 0, 16, 3, 15, 3 }, /* A */
+ { 0, 4, 0, 0, 16 }, /* Y */
+ { 0, 4, 2, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -1719,9 +1794,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* R */
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* G */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* B */
+ { 2, 1, 0, 0, 8 }, /* R */
+ { 0, 1, 0, 0, 8 }, /* G */
+ { 1, 1, 0, 0, 8 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1731,9 +1806,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* R */
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* G */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* B */
+ { 2, 2, 0, 0, 9 }, /* R */
+ { 0, 2, 0, 0, 9 }, /* G */
+ { 1, 2, 0, 0, 9 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1743,9 +1818,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 9, 1, 8, 1 }, /* R */
- { 0, 2, 0, 0, 9, 1, 8, 1 }, /* G */
- { 1, 2, 0, 0, 9, 1, 8, 1 }, /* B */
+ { 2, 2, 0, 0, 9 }, /* R */
+ { 0, 2, 0, 0, 9 }, /* G */
+ { 1, 2, 0, 0, 9 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1755,9 +1830,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* R */
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* G */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* B */
+ { 2, 2, 0, 0, 10 }, /* R */
+ { 0, 2, 0, 0, 10 }, /* G */
+ { 1, 2, 0, 0, 10 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1767,9 +1842,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* R */
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* G */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* B */
+ { 2, 2, 0, 0, 10 }, /* R */
+ { 0, 2, 0, 0, 10 }, /* G */
+ { 1, 2, 0, 0, 10 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1779,9 +1854,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* R */
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* G */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* B */
+ { 2, 2, 0, 0, 12 }, /* R */
+ { 0, 2, 0, 0, 12 }, /* G */
+ { 1, 2, 0, 0, 12 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1791,9 +1866,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* R */
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* G */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* B */
+ { 2, 2, 0, 0, 12 }, /* R */
+ { 0, 2, 0, 0, 12 }, /* G */
+ { 1, 2, 0, 0, 12 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1803,9 +1878,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 14, 1, 13, 1 }, /* R */
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* G */
- { 1, 2, 0, 0, 14, 1, 13, 1 }, /* B */
+ { 2, 2, 0, 0, 14 }, /* R */
+ { 0, 2, 0, 0, 14 }, /* G */
+ { 1, 2, 0, 0, 14 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1815,9 +1890,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 14, 1, 13, 1 }, /* R */
- { 0, 2, 0, 0, 14, 1, 13, 1 }, /* G */
- { 1, 2, 0, 0, 14, 1, 13, 1 }, /* B */
+ { 2, 2, 0, 0, 14 }, /* R */
+ { 0, 2, 0, 0, 14 }, /* G */
+ { 1, 2, 0, 0, 14 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1827,9 +1902,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* R */
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* G */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* B */
+ { 2, 2, 0, 0, 16 }, /* R */
+ { 0, 2, 0, 0, 16 }, /* G */
+ { 1, 2, 0, 0, 16 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1839,9 +1914,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* R */
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* G */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* B */
+ { 2, 2, 0, 0, 16 }, /* R */
+ { 0, 2, 0, 0, 16 }, /* G */
+ { 1, 2, 0, 0, 16 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB,
},
@@ -1851,10 +1926,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 1, 0, 0, 8, 0, 7, 1 }, /* R */
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* G */
- { 1, 1, 0, 0, 8, 0, 7, 1 }, /* B */
- { 3, 1, 0, 0, 8, 0, 7, 1 }, /* A */
+ { 2, 1, 0, 0, 8 }, /* R */
+ { 0, 1, 0, 0, 8 }, /* G */
+ { 1, 1, 0, 0, 8 }, /* B */
+ { 3, 1, 0, 0, 8 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB |
AV_PIX_FMT_FLAG_ALPHA,
@@ -1865,10 +1940,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* R */
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* G */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* B */
- { 3, 2, 0, 0, 16, 1, 15, 1 }, /* A */
+ { 2, 2, 0, 0, 16 }, /* R */
+ { 0, 2, 0, 0, 16 }, /* G */
+ { 1, 2, 0, 0, 16 }, /* B */
+ { 3, 2, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB |
AV_PIX_FMT_FLAG_ALPHA,
@@ -1879,10 +1954,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 16, 1, 15, 1 }, /* R */
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* G */
- { 1, 2, 0, 0, 16, 1, 15, 1 }, /* B */
- { 3, 2, 0, 0, 16, 1, 15, 1 }, /* A */
+ { 2, 2, 0, 0, 16 }, /* R */
+ { 0, 2, 0, 0, 16 }, /* G */
+ { 1, 2, 0, 0, 16 }, /* B */
+ { 3, 2, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
@@ -1899,9 +1974,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 6, 0, 4, 12, 5, 11, 1 }, /* X */
- { 0, 6, 2, 4, 12, 5, 11, 3 }, /* Y */
- { 0, 6, 4, 4, 12, 5, 11, 5 }, /* Z */
+ { 0, 6, 0, 4, 12 }, /* X */
+ { 0, 6, 2, 4, 12 }, /* Y */
+ { 0, 6, 4, 4, 12 }, /* Z */
},
/*.flags = -- not used*/
},
@@ -1911,9 +1986,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 6, 0, 4, 12, 5, 11, 1 }, /* X */
- { 0, 6, 2, 4, 12, 5, 11, 3 }, /* Y */
- { 0, 6, 4, 4, 12, 5, 11, 5 }, /* Z */
+ { 0, 6, 0, 4, 12 }, /* X */
+ { 0, 6, 2, 4, 12 }, /* Y */
+ { 0, 6, 4, 4, 12 }, /* Z */
},
.flags = AV_PIX_FMT_FLAG_BE,
},
@@ -1923,9 +1998,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w= 0, \
.log2_chroma_h= 0, \
.comp = { \
- {0,1,0,0,2,0,1,1},\
- {0,1,0,0,4,0,3,1},\
- {0,1,0,0,2,0,1,1},\
+ { 0, 1, 0, 0, 2 }, \
+ { 0, 1, 0, 0, 4 }, \
+ { 0, 1, 0, 0, 2 }, \
    }, \

#define BAYER16_DESC_COMMON \
@@ -1933,9 +2008,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w= 0, \
.log2_chroma_h= 0, \
.comp = { \
- {0,2,0,0,4,1,3,1},\
- {0,2,0,0,8,1,7,1},\
- {0,2,0,0,4,1,3,1},\
+ { 0, 2, 0, 0, 4 }, \
+ { 0, 2, 0, 0, 8 }, \
+ { 0, 2, 0, 0, 4 }, \
    }, \

[AV_PIX_FMT_BAYER_BGGR8] = {
@@ -2004,9 +2079,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 2, 0, 0, 8, 1, 7, 1 }, /* U */
- { 1, 2, 1, 0, 8, 1, 7, 2 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 0, 0, 8 }, /* U */
+ { 1, 2, 1, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -2016,9 +2091,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 4, 0, 0, 10, 3, 9, 1 }, /* U */
- { 1, 4, 2, 0, 10, 3, 9, 3 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 4, 0, 0, 10 }, /* U */
+ { 1, 4, 2, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -2028,9 +2103,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* Y */
- { 1, 4, 0, 0, 10, 3, 9, 1 }, /* U */
- { 1, 4, 2, 0, 10, 3, 9, 3 }, /* V */
+ { 0, 2, 0, 0, 10 }, /* Y */
+ { 1, 4, 0, 0, 10 }, /* U */
+ { 1, 4, 2, 0, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
},
@@ -2056,10 +2131,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 8, 2, 0, 16, 7, 15, 3 }, /* Y */
- { 0, 8, 4, 0, 16, 7, 15, 5 }, /* U */
- { 0, 8, 6, 0, 16, 7, 15, 7 }, /* V */
- { 0, 8, 0, 0, 16, 7, 15, 1 }, /* A */
+ { 0, 8, 2, 0, 16 }, /* Y */
+ { 0, 8, 4, 0, 16 }, /* U */
+ { 0, 8, 6, 0, 16 }, /* V */
+ { 0, 8, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_ALPHA,
},
@@ -2069,10 +2144,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 8, 2, 0, 16, 7, 15, 3 }, /* Y */
- { 0, 8, 4, 0, 16, 7, 15, 5 }, /* U */
- { 0, 8, 6, 0, 16, 7, 15, 7 }, /* V */
- { 0, 8, 0, 0, 16, 7, 15, 1 }, /* A */
+ { 0, 8, 2, 0, 16 }, /* Y */
+ { 0, 8, 4, 0, 16 }, /* U */
+ { 0, 8, 6, 0, 16 }, /* V */
+ { 0, 8, 0, 0, 16 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -2082,9 +2157,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 6, 10, 1, 9, 1 }, /* Y */
- { 1, 4, 0, 6, 10, 3, 9, 1 }, /* U */
- { 1, 4, 2, 6, 10, 3, 9, 3 }, /* V */
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -2094,9 +2169,33 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 6, 10, 1, 9, 1 }, /* Y */
- { 1, 4, 0, 6, 10, 3, 9, 1 }, /* U */
- { 1, 4, 2, 6, 10, 3, 9, 3 }, /* V */
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P012LE] = {
+ .name = "p012le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 4, 12 }, /* Y */
+ { 1, 4, 0, 4, 12 }, /* U */
+ { 1, 4, 2, 4, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P012BE] = {
+ .name = "p012be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 1,
+ .comp = {
+ { 0, 2, 0, 4, 12 }, /* Y */
+ { 1, 4, 0, 4, 12 }, /* U */
+ { 1, 4, 2, 4, 12 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
},
@@ -2106,9 +2205,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 4, 0, 0, 16, 3, 15, 1 }, /* U */
- { 1, 4, 2, 0, 16, 3, 15, 3 }, /* V */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -2118,9 +2217,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 1,
.comp = {
- { 0, 2, 0, 0, 16, 1, 15, 1 }, /* Y */
- { 1, 4, 0, 0, 16, 3, 15, 1 }, /* U */
- { 1, 4, 2, 0, 16, 3, 15, 3 }, /* V */
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
},
@@ -2130,10 +2229,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* R */
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* G */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* B */
- { 3, 2, 0, 0, 12, 1, 11, 1 }, /* A */
+ { 2, 2, 0, 0, 12 }, /* R */
+ { 0, 2, 0, 0, 12 }, /* G */
+ { 1, 2, 0, 0, 12 }, /* B */
+ { 3, 2, 0, 0, 12 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB |
AV_PIX_FMT_FLAG_ALPHA,
@@ -2144,10 +2243,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* R */
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* G */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* B */
- { 3, 2, 0, 0, 12, 1, 11, 1 }, /* A */
+ { 2, 2, 0, 0, 12 }, /* R */
+ { 0, 2, 0, 0, 12 }, /* G */
+ { 1, 2, 0, 0, 12 }, /* B */
+ { 3, 2, 0, 0, 12 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
@@ -2158,10 +2257,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* R */
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* G */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* B */
- { 3, 2, 0, 0, 10, 1, 9, 1 }, /* A */
+ { 2, 2, 0, 0, 10 }, /* R */
+ { 0, 2, 0, 0, 10 }, /* G */
+ { 1, 2, 0, 0, 10 }, /* B */
+ { 3, 2, 0, 0, 10 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB |
AV_PIX_FMT_FLAG_ALPHA,
@@ -2172,10 +2271,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 2, 0, 0, 10, 1, 9, 1 }, /* R */
- { 0, 2, 0, 0, 10, 1, 9, 1 }, /* G */
- { 1, 2, 0, 0, 10, 1, 9, 1 }, /* B */
- { 3, 2, 0, 0, 10, 1, 9, 1 }, /* A */
+ { 2, 2, 0, 0, 10 }, /* R */
+ { 0, 2, 0, 0, 10 }, /* G */
+ { 1, 2, 0, 0, 10 }, /* B */
+ { 3, 2, 0, 0, 10 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA,
@@ -2190,9 +2289,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 4, 0, 0, 32, 3, 31, 1 }, /* R */
- { 0, 4, 0, 0, 32, 3, 31, 1 }, /* G */
- { 1, 4, 0, 0, 32, 3, 31, 1 }, /* B */
+ { 2, 4, 0, 0, 32 }, /* R */
+ { 0, 4, 0, 0, 32 }, /* G */
+ { 1, 4, 0, 0, 32 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT,
@@ -2203,9 +2302,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 4, 0, 0, 32, 3, 31, 1 }, /* R */
- { 0, 4, 0, 0, 32, 3, 31, 1 }, /* G */
- { 1, 4, 0, 0, 32, 3, 31, 1 }, /* B */
+ { 2, 4, 0, 0, 32 }, /* R */
+ { 0, 4, 0, 0, 32 }, /* G */
+ { 1, 4, 0, 0, 32 }, /* B */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_FLOAT | AV_PIX_FMT_FLAG_RGB,
},
@@ -2215,10 +2314,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 4, 0, 0, 32, 3, 31, 1 }, /* R */
- { 0, 4, 0, 0, 32, 3, 31, 1 }, /* G */
- { 1, 4, 0, 0, 32, 3, 31, 1 }, /* B */
- { 3, 4, 0, 0, 32, 3, 31, 1 }, /* A */
+ { 2, 4, 0, 0, 32 }, /* R */
+ { 0, 4, 0, 0, 32 }, /* G */
+ { 1, 4, 0, 0, 32 }, /* B */
+ { 3, 4, 0, 0, 32 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
AV_PIX_FMT_FLAG_ALPHA | AV_PIX_FMT_FLAG_RGB |
@@ -2230,10 +2329,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 2, 4, 0, 0, 32, 3, 31, 1 }, /* R */
- { 0, 4, 0, 0, 32, 3, 31, 1 }, /* G */
- { 1, 4, 0, 0, 32, 3, 31, 1 }, /* B */
- { 3, 4, 0, 0, 32, 3, 31, 1 }, /* A */
+ { 2, 4, 0, 0, 32 }, /* R */
+ { 0, 4, 0, 0, 32 }, /* G */
+ { 1, 4, 0, 0, 32 }, /* B */
+ { 3, 4, 0, 0, 32 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA |
AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT,
@@ -2252,7 +2351,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 4, 0, 0, 32, 3, 31, 1 }, /* Y */
+ { 0, 4, 0, 0, 32 }, /* Y */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_FLOAT,
.alias = "yf32be",
@@ -2263,7 +2362,7 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 4, 0, 0, 32, 3, 31, 1 }, /* Y */
+ { 0, 4, 0, 0, 32 }, /* Y */
},
.flags = AV_PIX_FMT_FLAG_FLOAT,
.alias = "yf32le",
@@ -2274,10 +2373,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
- { 3, 2, 0, 0, 12, 1, 11, 1 }, /* A */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ { 3, 2, 0, 0, 12 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -2287,10 +2386,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 1,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
- { 3, 2, 0, 0, 12, 1, 11, 1 }, /* A */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ { 3, 2, 0, 0, 12 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -2300,10 +2399,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
- { 3, 2, 0, 0, 12, 1, 11, 1 }, /* A */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ { 3, 2, 0, 0, 12 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -2313,10 +2412,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 2, 0, 0, 12, 1, 11, 1 }, /* Y */
- { 1, 2, 0, 0, 12, 1, 11, 1 }, /* U */
- { 2, 2, 0, 0, 12, 1, 11, 1 }, /* V */
- { 3, 2, 0, 0, 12, 1, 11, 1 }, /* A */
+ { 0, 2, 0, 0, 12 }, /* Y */
+ { 1, 2, 0, 0, 12 }, /* U */
+ { 2, 2, 0, 0, 12 }, /* V */
+ { 3, 2, 0, 0, 12 }, /* A */
},
.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_ALPHA,
},
@@ -2326,9 +2425,9 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 2, 0, 0, 8, 1, 7, 1 }, /* U */
- { 1, 2, 1, 0, 8, 1, 7, 2 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 0, 0, 8 }, /* U */
+ { 1, 2, 1, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
@@ -2338,16 +2437,287 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
.log2_chroma_w = 0,
.log2_chroma_h = 0,
.comp = {
- { 0, 1, 0, 0, 8, 0, 7, 1 }, /* Y */
- { 1, 2, 1, 0, 8, 1, 7, 2 }, /* U */
- { 1, 2, 0, 0, 8, 1, 7, 1 }, /* V */
+ { 0, 1, 0, 0, 8 }, /* Y */
+ { 1, 2, 1, 0, 8 }, /* U */
+ { 1, 2, 0, 0, 8 }, /* V */
},
.flags = AV_PIX_FMT_FLAG_PLANAR,
},
+ [AV_PIX_FMT_VULKAN] = {
+ .name = "vulkan",
+ .flags = AV_PIX_FMT_FLAG_HWACCEL,
+ },
+ [AV_PIX_FMT_P210BE] = {
+ .name = "p210be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P210LE] = {
+ .name = "p210le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P410BE] = {
+ .name = "p410be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P410LE] = {
+ .name = "p410le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 6, 10 }, /* Y */
+ { 1, 4, 0, 6, 10 }, /* U */
+ { 1, 4, 2, 6, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P216BE] = {
+ .name = "p216be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P216LE] = {
+ .name = "p216le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_P416BE] = {
+ .name = "p416be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_P416LE] = {
+ .name = "p416le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 2, 0, 0, 16 }, /* Y */
+ { 1, 4, 0, 0, 16 }, /* U */
+ { 1, 4, 2, 0, 16 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_PLANAR,
+ },
+ [AV_PIX_FMT_VUYA] = {
+ .name = "vuya",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 2, 0, 8 }, /* Y */
+ { 0, 4, 1, 0, 8 }, /* U */
+ { 0, 4, 0, 0, 8 }, /* V */
+ { 0, 4, 3, 0, 8 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_VUYX] = {
+ .name = "vuyx",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 2, 0, 8 }, /* Y */
+ { 0, 4, 1, 0, 8 }, /* U */
+ { 0, 4, 0, 0, 8 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_RGBAF16BE] = {
+ .name = "rgbaf16be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 0, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 4, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_ALPHA | AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_RGBAF16LE] = {
+ .name = "rgbaf16le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 0, 0, 16 }, /* R */
+ { 0, 8, 2, 0, 16 }, /* G */
+ { 0, 8, 4, 0, 16 }, /* B */
+ { 0, 8, 6, 0, 16 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_ALPHA |
+ AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_Y212LE] = {
+ .name = "y212le",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 4, 12 }, /* Y */
+ { 0, 8, 2, 4, 12 }, /* U */
+ { 0, 8, 6, 4, 12 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_Y212BE] = {
+ .name = "y212be",
+ .nb_components = 3,
+ .log2_chroma_w = 1,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 0, 4, 12 }, /* Y */
+ { 0, 8, 2, 4, 12 }, /* U */
+ { 0, 8, 6, 4, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_XV30LE] = {
+ .name = "xv30le",
+        .nb_components = 3,
+        .log2_chroma_w = 0,
+        .log2_chroma_h = 0,
+ .comp = {
+ { 0, 4, 1, 2, 10 }, /* Y */
+ { 0, 4, 0, 0, 10 }, /* U */
+ { 0, 4, 2, 4, 10 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_XV30BE] = {
+ .name = "xv30be",
+        .nb_components = 3,
+        .log2_chroma_w = 0,
+        .log2_chroma_h = 0,
+ .comp = {
+ { 0, 32, 10, 0, 10 }, /* Y */
+ { 0, 32, 0, 0, 10 }, /* U */
+ { 0, 32, 20, 0, 10 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_BITSTREAM,
+ },
+ [AV_PIX_FMT_XV36LE] = {
+ .name = "xv36le",
+        .nb_components = 3,
+        .log2_chroma_w = 0,
+        .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 2, 4, 12 }, /* Y */
+ { 0, 8, 0, 4, 12 }, /* U */
+ { 0, 8, 4, 4, 12 }, /* V */
+ },
+ },
+ [AV_PIX_FMT_XV36BE] = {
+ .name = "xv36be",
+        .nb_components = 3,
+        .log2_chroma_w = 0,
+        .log2_chroma_h = 0,
+ .comp = {
+ { 0, 8, 2, 4, 12 }, /* Y */
+ { 0, 8, 0, 4, 12 }, /* U */
+ { 0, 8, 4, 4, 12 }, /* V */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE,
+ },
+ [AV_PIX_FMT_RGBF32BE] = {
+ .name = "rgbf32be",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 12, 0, 0, 32 }, /* R */
+ { 0, 12, 4, 0, 32 }, /* G */
+ { 0, 12, 8, 0, 32 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_RGBF32LE] = {
+ .name = "rgbf32le",
+ .nb_components = 3,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 12, 0, 0, 32 }, /* R */
+ { 0, 12, 4, 0, 32 }, /* G */
+ { 0, 12, 8, 0, 32 }, /* B */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT,
+ },
+ [AV_PIX_FMT_RGBAF32BE] = {
+ .name = "rgbaf32be",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 16, 0, 0, 32 }, /* R */
+ { 0, 16, 4, 0, 32 }, /* G */
+ { 0, 16, 8, 0, 32 }, /* B */
+ { 0, 16, 12, 0, 32 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_RGB |
+ AV_PIX_FMT_FLAG_FLOAT | AV_PIX_FMT_FLAG_ALPHA,
+ },
+ [AV_PIX_FMT_RGBAF32LE] = {
+ .name = "rgbaf32le",
+ .nb_components = 4,
+ .log2_chroma_w = 0,
+ .log2_chroma_h = 0,
+ .comp = {
+ { 0, 16, 0, 0, 32 }, /* R */
+ { 0, 16, 4, 0, 32 }, /* G */
+ { 0, 16, 8, 0, 32 }, /* B */
+ { 0, 16, 12, 0, 32 }, /* A */
+ },
+ .flags = AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_FLOAT |
+ AV_PIX_FMT_FLAG_ALPHA,
+ },
};
-#if FF_API_PLUS1_MINUS1
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
static const char * const color_range_names[] = {
[AVCOL_RANGE_UNSPECIFIED] = "unknown",
@@ -2369,7 +2739,7 @@ static const char * const color_primaries_names[AVCOL_PRI_NB] = {
[AVCOL_PRI_SMPTE428] = "smpte428",
[AVCOL_PRI_SMPTE431] = "smpte431",
[AVCOL_PRI_SMPTE432] = "smpte432",
- [AVCOL_PRI_JEDEC_P22] = "jedec-p22",
+ [AVCOL_PRI_EBU3213] = "ebu3213",
};
static const char * const color_transfer_names[] = {
@@ -2464,10 +2834,6 @@ enum AVPixelFormat av_get_pix_fmt(const char *name)
pix_fmt = get_pix_fmt_internal(name2);
}
-#if FF_API_VAAPI
- if (pix_fmt == AV_PIX_FMT_NONE && !strcmp(name, "vaapi"))
- pix_fmt = AV_PIX_FMT_VAAPI;
-#endif
return pix_fmt;
}
@@ -2574,47 +2940,6 @@ int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
return ret;
}
-void ff_check_pixfmt_descriptors(void){
- int i, j;
-
- for (i=0; i<FF_ARRAY_ELEMS(av_pix_fmt_descriptors); i++) {
- const AVPixFmtDescriptor *d = &av_pix_fmt_descriptors[i];
- uint8_t fill[4][8+6+3] = {{0}};
- uint8_t *data[4] = {fill[0], fill[1], fill[2], fill[3]};
- int linesize[4] = {0,0,0,0};
- uint16_t tmp[2];
-
- if (!d->name && !d->nb_components && !d->log2_chroma_w && !d->log2_chroma_h && !d->flags)
- continue;
-// av_log(NULL, AV_LOG_DEBUG, "Checking: %s\n", d->name);
- av_assert0(d->log2_chroma_w <= 3);
- av_assert0(d->log2_chroma_h <= 3);
- av_assert0(d->nb_components <= 4);
- av_assert0(d->name && d->name[0]);
- av_assert2(av_get_pix_fmt(d->name) == i);
-
- for (j=0; j<FF_ARRAY_ELEMS(d->comp); j++) {
- const AVComponentDescriptor *c = &d->comp[j];
- if(j>=d->nb_components) {
- av_assert0(!c->plane && !c->step && !c->offset && !c->shift && !c->depth);
- continue;
- }
- if (d->flags & AV_PIX_FMT_FLAG_BITSTREAM) {
- av_assert0(c->step >= c->depth);
- } else {
- av_assert0(8*c->step >= c->depth);
- }
- if (d->flags & AV_PIX_FMT_FLAG_BAYER)
- continue;
- av_read_image_line(tmp, (void*)data, linesize, d, 0, 0, j, 2, 0);
- av_assert0(tmp[0] == 0 && tmp[1] == 0);
- tmp[0] = tmp[1] = (1<<c->depth) - 1;
- av_write_image_line(tmp, data, linesize, d, 0, 0, j, 2);
- }
- }
-}
-
-
enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
@@ -2651,11 +2976,13 @@ static int get_color_type(const AVPixFmtDescriptor *desc) {
if(desc->nb_components == 1 || desc->nb_components == 2)
return FF_COLOR_GRAY;
- if(desc->name && !strncmp(desc->name, "yuvj", 4))
- return FF_COLOR_YUV_JPEG;
+ if (desc->name) {
+ if (av_strstart(desc->name, "yuvj", NULL))
+ return FF_COLOR_YUV_JPEG;
- if(desc->name && !strncmp(desc->name, "xyz", 3))
- return FF_COLOR_XYZ;
+ if (av_strstart(desc->name, "xyz", NULL))
+ return FF_COLOR_XYZ;
+ }
if(desc->flags & AV_PIX_FMT_FLAG_RGB)
return FF_COLOR_RGB;
@@ -2726,9 +3053,16 @@ static int get_pix_fmt_score(enum AVPixelFormat dst_pix_fmt,
for (i = 0; i < nb_components; i++) {
int depth_minus1 = (dst_pix_fmt == AV_PIX_FMT_PAL8) ? 7/nb_components : (dst_desc->comp[i].depth - 1);
- if (src_desc->comp[i].depth - 1 > depth_minus1 && (consider & FF_LOSS_DEPTH)) {
+ int depth_delta = src_desc->comp[i].depth - 1 - depth_minus1;
+ if (depth_delta > 0 && (consider & FF_LOSS_DEPTH)) {
loss |= FF_LOSS_DEPTH;
score -= 65536 >> depth_minus1;
+ } else if (depth_delta < 0 && (consider & FF_LOSS_EXCESS_DEPTH)) {
+ // Favour formats where bit depth exactly matches. If all other
+ // scoring is equal, we'd rather use the bit depth that most closely
+ // matches the source.
+ loss |= FF_LOSS_EXCESS_DEPTH;
+ score += depth_delta;
}
}
@@ -2748,6 +3082,28 @@ static int get_pix_fmt_score(enum AVPixelFormat dst_pix_fmt,
}
}
+ if (consider & FF_LOSS_EXCESS_RESOLUTION) {
+ // Favour formats where chroma subsampling exactly matches. If all other
+ // scoring is equal, we'd rather use the subsampling that most closely
+ // matches the source.
+ if (dst_desc->log2_chroma_w < src_desc->log2_chroma_w) {
+ loss |= FF_LOSS_EXCESS_RESOLUTION;
+ score -= 1 << (src_desc->log2_chroma_w - dst_desc->log2_chroma_w);
+ }
+
+ if (dst_desc->log2_chroma_h < src_desc->log2_chroma_h) {
+ loss |= FF_LOSS_EXCESS_RESOLUTION;
+ score -= 1 << (src_desc->log2_chroma_h - dst_desc->log2_chroma_h);
+ }
+
+ // don't favour 411 over 420, because 420 has much better support on the
+ // decoder side.
+ if (dst_desc->log2_chroma_w == 1 && src_desc->log2_chroma_w == 2 &&
+ dst_desc->log2_chroma_h == 1 && src_desc->log2_chroma_h == 2) {
+ score += 4;
+ }
+ }
+
if(consider & FF_LOSS_COLORSPACE)
switch(dst_color) {
case FF_COLOR_RGB:
@@ -2856,8 +3212,7 @@ int av_color_range_from_name(const char *name)
int i;
for (i = 0; i < FF_ARRAY_ELEMS(color_range_names); i++) {
- size_t len = strlen(color_range_names[i]);
- if (!strncmp(color_range_names[i], name, len))
+ if (av_strstart(name, color_range_names[i], NULL))
return i;
}
@@ -2875,13 +3230,10 @@ int av_color_primaries_from_name(const char *name)
int i;
for (i = 0; i < FF_ARRAY_ELEMS(color_primaries_names); i++) {
- size_t len;
-
if (!color_primaries_names[i])
continue;
- len = strlen(color_primaries_names[i]);
- if (!strncmp(color_primaries_names[i], name, len))
+ if (av_strstart(name, color_primaries_names[i], NULL))
return i;
}
@@ -2899,13 +3251,10 @@ int av_color_transfer_from_name(const char *name)
int i;
for (i = 0; i < FF_ARRAY_ELEMS(color_transfer_names); i++) {
- size_t len;
-
if (!color_transfer_names[i])
continue;
- len = strlen(color_transfer_names[i]);
- if (!strncmp(color_transfer_names[i], name, len))
+ if (av_strstart(name, color_transfer_names[i], NULL))
return i;
}
@@ -2923,13 +3272,10 @@ int av_color_space_from_name(const char *name)
int i;
for (i = 0; i < FF_ARRAY_ELEMS(color_space_names); i++) {
- size_t len;
-
if (!color_space_names[i])
continue;
- len = strlen(color_space_names[i]);
- if (!strncmp(color_space_names[i], name, len))
+ if (av_strstart(name, color_space_names[i], NULL))
return i;
}
@@ -2947,15 +3293,35 @@ int av_chroma_location_from_name(const char *name)
int i;
for (i = 0; i < FF_ARRAY_ELEMS(chroma_location_names); i++) {
- size_t len;
-
if (!chroma_location_names[i])
continue;
- len = strlen(chroma_location_names[i]);
- if (!strncmp(chroma_location_names[i], name, len))
+ if (av_strstart(name, chroma_location_names[i], NULL))
return i;
}
return AVERROR(EINVAL);
}
+
+int av_chroma_location_enum_to_pos(int *xpos, int *ypos, enum AVChromaLocation pos)
+{
+ if (pos <= AVCHROMA_LOC_UNSPECIFIED || pos >= AVCHROMA_LOC_NB)
+ return AVERROR(EINVAL);
+ pos--;
+
+ *xpos = (pos&1) * 128;
+ *ypos = ((pos>>1)^(pos<4)) * 128;
+
+ return 0;
+}
+
+enum AVChromaLocation av_chroma_location_pos_to_enum(int xpos, int ypos)
+{
+ int pos, xout, yout;
+
+ for (pos = AVCHROMA_LOC_UNSPECIFIED + 1; pos < AVCHROMA_LOC_NB; pos++) {
+ if (av_chroma_location_enum_to_pos(&xout, &yout, pos) == 0 && xout == xpos && yout == ypos)
+ return pos;
+ }
+ return AVCHROMA_LOC_UNSPECIFIED;
+}
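
For orientation, a minimal standalone sketch of how the two helpers added above are meant to be used; it relies only on the public libavutil headers, and the expected values follow from the bit arithmetic in av_chroma_location_enum_to_pos():

#include <stdio.h>
#include <libavutil/pixdesc.h>
#include <libavutil/pixfmt.h>

int main(void)
{
    int xpos, ypos;

    /* AVCHROMA_LOC_LEFT is horizontally co-sited with luma and vertically
     * centred between two luma rows, so it should map to (0, 128) on the
     * 0..256 grid described in the header documentation. */
    if (av_chroma_location_enum_to_pos(&xpos, &ypos, AVCHROMA_LOC_LEFT) < 0)
        return 1;
    printf("left -> (%d, %d)\n", xpos, ypos);

    /* Converting back is expected to recover the original enum value. */
    printf("round trip ok: %d\n",
           av_chroma_location_pos_to_enum(xpos, ypos) == AVCHROMA_LOC_LEFT);
    return 0;
}
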
diff --git a/media/ffvpx/libavutil/pixdesc.h b/media/ffvpx/libavutil/pixdesc.h
index c055810ae8..0df73e6efe 100644
--- a/media/ffvpx/libavutil/pixdesc.h
+++ b/media/ffvpx/libavutil/pixdesc.h
@@ -26,7 +26,6 @@
#include "attributes.h"
#include "pixfmt.h"
-#include "version.h"
typedef struct AVComponentDescriptor {
/**
@@ -56,17 +55,6 @@ typedef struct AVComponentDescriptor {
* Number of bits in the component.
*/
int depth;
-
-#if FF_API_PLUS1_MINUS1
- /** deprecated, use step instead */
- attribute_deprecated int step_minus1;
-
- /** deprecated, use depth instead */
- attribute_deprecated int depth_minus1;
-
- /** deprecated, use offset instead */
- attribute_deprecated int offset_plus1;
-#endif
} AVComponentDescriptor;
/**
@@ -148,24 +136,6 @@ typedef struct AVPixFmtDescriptor {
#define AV_PIX_FMT_FLAG_RGB (1 << 5)
/**
- * The pixel format is "pseudo-paletted". This means that it contains a
- * fixed palette in the 2nd plane but the palette is fixed/constant for each
- * PIX_FMT. This allows interpreting the data as if it was PAL8, which can
- * in some cases be simpler. Or the data can be interpreted purely based on
- * the pixel format without using the palette.
- * An example of a pseudo-paletted format is AV_PIX_FMT_GRAY8
- *
- * @deprecated This flag is deprecated, and will be removed. When it is removed,
- * the extra palette allocation in AVFrame.data[1] is removed as well. Only
- * actual paletted formats (as indicated by AV_PIX_FMT_FLAG_PAL) will have a
- * palette. Starting with FFmpeg versions which have this flag deprecated, the
- * extra "pseudo" palette is already ignored, and API users are not required to
- * allocate a palette for AV_PIX_FMT_FLAG_PSEUDOPAL formats (it was required
- * before the deprecation, though).
- */
-#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6)
-
-/**
* The pixel format has an alpha channel. This is set on all formats that
* support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always
* straight, never pre-multiplied.
@@ -295,6 +265,28 @@ const char *av_chroma_location_name(enum AVChromaLocation location);
int av_chroma_location_from_name(const char *name);
/**
+ * Converts AVChromaLocation to swscale x/y chroma position.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) as the origin and luma (1,1) mapping to (256,256).
+ *
+ * @param xpos horizontal chroma sample position (output)
+ * @param ypos vertical chroma sample position (output)
+ * @param pos  the AVChromaLocation to convert
+ * @return 0 on success, a negative error code if pos is unspecified or invalid
+ */
+int av_chroma_location_enum_to_pos(int *xpos, int *ypos, enum AVChromaLocation pos);
+
+/**
+ * Converts swscale x/y chroma position to AVChromaLocation.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) as the origin and luma (1,1) mapping to (256,256).
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ * @return the matching AVChromaLocation, or AVCHROMA_LOC_UNSPECIFIED if there
+ *         is no exact match
+ */
+enum AVChromaLocation av_chroma_location_pos_to_enum(int xpos, int ypos);
+
+/**
* Return the pixel format corresponding to name.
*
* If there is no pixel format with name name, then looks for a
@@ -387,12 +379,15 @@ void av_write_image_line(const uint16_t *src, uint8_t *data[4],
*/
enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt);
-#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
-#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
-#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
-#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
-#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
-#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
+#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
+#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
+#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
+#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
+#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+#define FF_LOSS_EXCESS_RESOLUTION 0x0040 /**< loss due to unneeded extra resolution */
+#define FF_LOSS_EXCESS_DEPTH 0x0080 /**< loss due to unneeded extra color depth */
+
/**
* Compute what kind of losses will occur when converting from one specific
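
The new FF_LOSS_EXCESS_* bits only influence the internal scoring, but the effect is observable through the public chooser av_find_best_pix_fmt_of_2(). A hedged sketch, assuming the default loss mask includes the new bits: for an 8-bit 4:2:0 source with only 10-bit and 12-bit candidates, the excess-depth penalty should make the closer 10-bit format win.

#include <stdio.h>
#include <libavutil/pixdesc.h>
#include <libavutil/pixfmt.h>

int main(void)
{
    /* Neither candidate loses depth relative to the 8-bit source; the new
     * FF_LOSS_EXCESS_DEPTH scoring is what is expected to break the tie in
     * favour of yuv420p10. */
    enum AVPixelFormat best =
        av_find_best_pix_fmt_of_2(AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12,
                                  AV_PIX_FMT_YUV420P, 0, NULL);
    printf("preferred: %s\n", av_get_pix_fmt_name(best));
    return 0;
}
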
diff --git a/media/ffvpx/libavutil/pixelutils.c b/media/ffvpx/libavutil/pixelutils.c
index ebee3d6f90..820889a143 100644
--- a/media/ffvpx/libavutil/pixelutils.c
+++ b/media/ffvpx/libavutil/pixelutils.c
@@ -16,12 +16,17 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <stddef.h>
+
#include "config.h"
-#include "common.h"
#include "pixelutils.h"
-#include "internal.h"
#if CONFIG_PIXELUTILS
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "macros.h"
#include "x86/pixelutils.h"
@@ -60,7 +65,8 @@ static const av_pixelutils_sad_fn sad_c[] = {
block_sad_16x16_c,
block_sad_32x32_c,
};
-
+#else
+#include "log.h"
#endif /* CONFIG_PIXELUTILS */
av_pixelutils_sad_fn av_pixelutils_get_sad_fn(int w_bits, int h_bits, int aligned, void *log_ctx)
diff --git a/media/ffvpx/libavutil/pixelutils.h b/media/ffvpx/libavutil/pixelutils.h
index a8dbc157e1..7a997cde1c 100644
--- a/media/ffvpx/libavutil/pixelutils.h
+++ b/media/ffvpx/libavutil/pixelutils.h
@@ -21,7 +21,6 @@
#include <stddef.h>
#include <stdint.h>
-#include "common.h"
/**
* Sum of abs(src1[x] - src2[x])
diff --git a/media/ffvpx/libavutil/pixfmt.h b/media/ffvpx/libavutil/pixfmt.h
index 8b54c9415b..37c2c79e01 100644
--- a/media/ffvpx/libavutil/pixfmt.h
+++ b/media/ffvpx/libavutil/pixfmt.h
@@ -112,21 +112,11 @@ enum AVPixelFormat {
AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
-#if FF_API_VAAPI
- /** @name Deprecated pixel formats */
- /**@{*/
- AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
- AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
- AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID
- /**@}*/
- AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD,
-#else
/**
* Hardware acceleration through VA-API, data[3] contains a
* VASurfaceID.
*/
AV_PIX_FMT_VAAPI,
-#endif
AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
@@ -216,8 +206,36 @@ enum AVPixelFormat {
AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
/**
- * HW acceleration through QSV, data[3] contains a pointer to the
- * mfxFrameSurface1 structure.
+ * HW acceleration through QSV, data[3] contains a pointer to the
+ * mfxFrameSurface1 structure.
+ *
+ * Before FFmpeg 5.0:
+ * mfxFrameSurface1.Data.MemId contains a pointer when importing
+ * the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxFrameSurface1.Data.MemId contains a pointer to VASurfaceID
+ *
+ * DXVA2:
+ * mfxFrameSurface1.Data.MemId contains a pointer to IDirect3DSurface9
+ *
+ * FFmpeg 5.0 and above:
+ * mfxFrameSurface1.Data.MemId contains a pointer to the mfxHDLPair
+ * structure when importing the following frames as QSV frames:
+ *
+ * VAAPI:
+ * mfxHDLPair.first contains a VASurfaceID pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * DXVA2:
+ * mfxHDLPair.first contains IDirect3DSurface9 pointer.
+ * mfxHDLPair.second is always MFX_INFINITE.
+ *
+ * D3D11:
+ * mfxHDLPair.first contains a ID3D11Texture2D pointer.
+ * mfxHDLPair.second contains the texture array index of the frame if the
+ * ID3D11Texture2D is an array texture, or always MFX_INFINITE if it is a
+ * normal texture.
*/
AV_PIX_FMT_QSV,
/**
@@ -257,20 +275,22 @@ enum AVPixelFormat {
AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
- AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */
- AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */
- AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */
- AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */
- AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */
- AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */
- AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */
- AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */
- AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */
- AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */
- AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */
- AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */
-
+ AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
+ AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
+ AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
+ AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
+ AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian
+ AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian
+ AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian
+
+#if FF_API_XVMC
AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing
+#endif
AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
@@ -348,6 +368,58 @@ enum AVPixelFormat {
AV_PIX_FMT_NV24, ///< planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
AV_PIX_FMT_NV42, ///< as above, but U and V bytes are swapped
+ /**
+ * Vulkan hardware images.
+ *
+ * data[0] points to an AVVkFrame
+ */
+ AV_PIX_FMT_VULKAN,
+
+ AV_PIX_FMT_Y210BE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the high bits, big-endian
+ AV_PIX_FMT_Y210LE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the high bits, little-endian
+
+ AV_PIX_FMT_X2RGB10LE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_X2RGB10BE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_X2BGR10LE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_X2BGR10BE, ///< packed BGR 10:10:10, 30bpp, (msb)2X 10B 10G 10R(lsb), big-endian, X=unused/undefined
+
+ AV_PIX_FMT_P210BE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high bits, big-endian
+ AV_PIX_FMT_P210LE, ///< interleaved chroma YUV 4:2:2, 20bpp, data in the high bits, little-endian
+
+ AV_PIX_FMT_P410BE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high bits, big-endian
+ AV_PIX_FMT_P410LE, ///< interleaved chroma YUV 4:4:4, 30bpp, data in the high bits, little-endian
+
+ AV_PIX_FMT_P216BE, ///< interleaved chroma YUV 4:2:2, 32bpp, big-endian
+ AV_PIX_FMT_P216LE, ///< interleaved chroma YUV 4:2:2, 32bpp, little-endian
+
+ AV_PIX_FMT_P416BE, ///< interleaved chroma YUV 4:4:4, 48bpp, big-endian
+ AV_PIX_FMT_P416LE, ///< interleaved chroma YUV 4:4:4, 48bpp, little-endian
+
+ AV_PIX_FMT_VUYA, ///< packed VUYA 4:4:4, 32bpp, VUYAVUYA...
+
+ AV_PIX_FMT_RGBAF16BE, ///< IEEE-754 half precision packed RGBA 16:16:16:16, 64bpp, RGBARGBA..., big-endian
+ AV_PIX_FMT_RGBAF16LE, ///< IEEE-754 half precision packed RGBA 16:16:16:16, 64bpp, RGBARGBA..., little-endian
+
+ AV_PIX_FMT_VUYX, ///< packed VUYX 4:4:4, 32bpp, Variant of VUYA where alpha channel is left undefined
+
+ AV_PIX_FMT_P012LE, ///< like NV12, with 12bpp per component, data in the high bits, zeros in the low bits, little-endian
+ AV_PIX_FMT_P012BE, ///< like NV12, with 12bpp per component, data in the high bits, zeros in the low bits, big-endian
+
+ AV_PIX_FMT_Y212BE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data in the high bits, zeros in the low bits, big-endian
+ AV_PIX_FMT_Y212LE, ///< packed YUV 4:2:2 like YUYV422, 24bpp, data in the high bits, zeros in the low bits, little-endian
+
+ AV_PIX_FMT_XV30BE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb), big-endian, variant of Y410 where alpha channel is left undefined
+ AV_PIX_FMT_XV30LE, ///< packed XVYU 4:4:4, 32bpp, (msb)2X 10V 10Y 10U(lsb), little-endian, variant of Y410 where alpha channel is left undefined
+
+ AV_PIX_FMT_XV36BE, ///< packed XVYU 4:4:4, 48bpp, data in the high bits, zeros in the low bits, big-endian, variant of Y412 where alpha channel is left undefined
+ AV_PIX_FMT_XV36LE, ///< packed XVYU 4:4:4, 48bpp, data in the high bits, zeros in the low bits, little-endian, variant of Y412 where alpha channel is left undefined
+
+ AV_PIX_FMT_RGBF32BE, ///< IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian
+ AV_PIX_FMT_RGBF32LE, ///< IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian
+
+ AV_PIX_FMT_RGBAF32BE, ///< IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian
+ AV_PIX_FMT_RGBAF32LE, ///< IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian
+
AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
};
@@ -434,35 +506,54 @@ enum AVPixelFormat {
#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE)
#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE)
#define AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE)
+#define AV_PIX_FMT_P012 AV_PIX_FMT_NE(P012BE, P012LE)
#define AV_PIX_FMT_P016 AV_PIX_FMT_NE(P016BE, P016LE)
+#define AV_PIX_FMT_Y210 AV_PIX_FMT_NE(Y210BE, Y210LE)
+#define AV_PIX_FMT_Y212 AV_PIX_FMT_NE(Y212BE, Y212LE)
+#define AV_PIX_FMT_XV30 AV_PIX_FMT_NE(XV30BE, XV30LE)
+#define AV_PIX_FMT_XV36 AV_PIX_FMT_NE(XV36BE, XV36LE)
+#define AV_PIX_FMT_X2RGB10 AV_PIX_FMT_NE(X2RGB10BE, X2RGB10LE)
+#define AV_PIX_FMT_X2BGR10 AV_PIX_FMT_NE(X2BGR10BE, X2BGR10LE)
+
+#define AV_PIX_FMT_P210 AV_PIX_FMT_NE(P210BE, P210LE)
+#define AV_PIX_FMT_P410 AV_PIX_FMT_NE(P410BE, P410LE)
+#define AV_PIX_FMT_P216 AV_PIX_FMT_NE(P216BE, P216LE)
+#define AV_PIX_FMT_P416 AV_PIX_FMT_NE(P416BE, P416LE)
+
+#define AV_PIX_FMT_RGBAF16 AV_PIX_FMT_NE(RGBAF16BE, RGBAF16LE)
+
+#define AV_PIX_FMT_RGBF32 AV_PIX_FMT_NE(RGBF32BE, RGBF32LE)
+#define AV_PIX_FMT_RGBAF32 AV_PIX_FMT_NE(RGBAF32BE, RGBAF32LE)
+
/**
* Chromaticity coordinates of the source primaries.
- * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.1.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.1 and ITU-T H.273.
*/
enum AVColorPrimaries {
AVCOL_PRI_RESERVED0 = 0,
- AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
+ AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
AVCOL_PRI_UNSPECIFIED = 2,
AVCOL_PRI_RESERVED = 3,
AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
- AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
+ AVCOL_PRI_SMPTE240M = 7, ///< identical to above, also called "SMPTE C" even though it uses D65
AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ)
AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428,
AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3
AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3
- AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors
+ AVCOL_PRI_EBU3213 = 22, ///< EBU Tech. 3213-E (nothing there) / one of JEDEC P22 group phosphors
+ AVCOL_PRI_JEDEC_P22 = AVCOL_PRI_EBU3213,
AVCOL_PRI_NB ///< Not part of ABI
};
/**
* Color Transfer Characteristic.
- * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.2.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.2.
*/
enum AVColorTransferCharacteristic {
AVCOL_TRC_RESERVED0 = 0,
@@ -491,18 +582,18 @@ enum AVColorTransferCharacteristic {
/**
* YUV colorspace type.
- * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.3.
+ * These values match the ones defined by ISO/IEC 23091-2_2019 subclause 8.3.
*/
enum AVColorSpace {
- AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
- AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+ AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
+ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
AVCOL_SPC_UNSPECIFIED = 2,
- AVCOL_SPC_RESERVED = 3,
+ AVCOL_SPC_RESERVED = 3, ///< reserved for future use by ITU-T and ISO/IEC just like 15-255 are
AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
- AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
- AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above
- AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
+ AVCOL_SPC_SMPTE240M = 7, ///< derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
+ AVCOL_SPC_YCGCO = 8, ///< used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO,
AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
@@ -514,12 +605,60 @@ enum AVColorSpace {
};
/**
- * MPEG vs JPEG YUV range.
+ * Visual content value range.
+ *
+ * These values are based on definitions that can be found in multiple
+ * specifications, such as ITU-R BT.709 (3.4 - Quantization of RGB, luminance
+ * and colour-difference signals), ITU-R BT.2020 (Table 5 - Digital
+ * Representation) as well as ITU-R BT.2100 (Table 9 - Digital 10- and 12-bit
+ * integer representation). At the time of writing, the BT.2100 one is
+ * recommended, as it also defines the full range representation.
+ *
+ * Common definitions:
+ * - For RGB and luma planes such as Y in YCbCr and I in ICtCp,
+ *   'E' is the original value in the range of 0.0 to 1.0.
+ * - For chroma planes such as Cb, Cr and Ct, Cp, 'E' is the original
+ *   value in the range of -0.5 to 0.5.
+ * - 'n' is the output bit depth.
+ * - For additional definitions such as rounding and clipping to valid n
+ * bit unsigned integer range, please refer to BT.2100 (Table 9).
*/
enum AVColorRange {
AVCOL_RANGE_UNSPECIFIED = 0,
- AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
- AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges
+
+ /**
+ * Narrow or limited range content.
+ *
+ * - For luma planes:
+ *
+ * (219 * E + 16) * 2^(n-8)
+ *
+     *   E.g. the range of 16-235 for 8 bits
+ *
+ * - For chroma planes:
+ *
+ * (224 * E + 128) * 2^(n-8)
+ *
+     *   E.g. the range of 16-240 for 8 bits
+ */
+ AVCOL_RANGE_MPEG = 1,
+
+ /**
+ * Full range content.
+ *
+ * - For RGB and luma planes:
+ *
+ * (2^n - 1) * E
+ *
+     *   E.g. the range of 0-255 for 8 bits
+ *
+ * - For chroma planes:
+ *
+ * (2^n - 1) * E + 2^(n - 1)
+ *
+     *   E.g. the range of 1-255 for 8 bits
+ */
+ AVCOL_RANGE_JPEG = 2,
AVCOL_RANGE_NB ///< Not part of ABI
};
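
The range formulas documented above reduce to a couple of lines of arithmetic. A small sketch (plain C, no libavutil calls) that should reproduce the 16-235 limited and 0-255 full ranges quoted for 8-bit luma:

#include <math.h>
#include <stdio.h>

/* Quantize a normalized luma value E (0.0..1.0) according to the
 * AVCOL_RANGE_MPEG (limited) or AVCOL_RANGE_JPEG (full) formulas above. */
static int quantize_luma(double E, int n, int full_range)
{
    double v = full_range ? (pow(2, n) - 1) * E
                          : (219.0 * E + 16.0) * pow(2, n - 8);
    return (int)lrint(v);
}

int main(void)
{
    printf("8-bit limited: %d..%d\n", quantize_luma(0.0, 8, 0), quantize_luma(1.0, 8, 0));
    printf("8-bit full:    %d..%d\n", quantize_luma(0.0, 8, 1), quantize_luma(1.0, 8, 1));
    return 0;
}
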
diff --git a/media/ffvpx/libavutil/qsort.h b/media/ffvpx/libavutil/qsort.h
index 39b7a08852..6014f88be3 100644
--- a/media/ffvpx/libavutil/qsort.h
+++ b/media/ffvpx/libavutil/qsort.h
@@ -21,7 +21,7 @@
#ifndef AVUTIL_QSORT_H
#define AVUTIL_QSORT_H
-#include "common.h"
+#include "macros.h"
/**
diff --git a/media/ffvpx/libavutil/rational.c b/media/ffvpx/libavutil/rational.c
index 35ee08877f..eb148ddb12 100644
--- a/media/ffvpx/libavutil/rational.c
+++ b/media/ffvpx/libavutil/rational.c
@@ -182,3 +182,12 @@ uint32_t av_q2intfloat(AVRational q) {
return sign<<31 | (150-shift)<<23 | (n - (1<<23));
}
+
+AVRational av_gcd_q(AVRational a, AVRational b, int max_den, AVRational def)
+{
+ int64_t gcd, lcm;
+
+ gcd = av_gcd(a.den, b.den);
+ lcm = (a.den / gcd) * b.den;
+ return lcm < max_den ? av_make_q(av_gcd(a.num, b.num), lcm) : def;
+}
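
In other words, av_gcd_q() returns the largest rational that both inputs are integer multiples of, which is handy for deriving a common timebase. A minimal usage sketch; with 1/30 and 1/24 the result should be 1/120:

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational a = av_make_q(1, 30);
    AVRational b = av_make_q(1, 24);
    /* Fall back to 1/1000000 if the combined denominator would grow too large. */
    AVRational g = av_gcd_q(a, b, 1000000, av_make_q(1, 1000000));

    printf("gcd(1/30, 1/24) = %d/%d\n", g.num, g.den);   /* expected: 1/120 */
    return 0;
}
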
diff --git a/media/ffvpx/libavutil/rational.h b/media/ffvpx/libavutil/rational.h
index 5c6b67b4e9..8cbfc8e066 100644
--- a/media/ffvpx/libavutil/rational.h
+++ b/media/ffvpx/libavutil/rational.h
@@ -179,7 +179,8 @@ AVRational av_d2q(double d, int max) av_const;
* Find which of the two rationals is closer to another rational.
*
* @param q Rational to be compared against
- * @param q1,q2 Rationals to be tested
+ * @param q1 Rational to be tested
+ * @param q2 Rational to be tested
* @return One of the following values:
* - 1 if `q1` is nearer to `q` than `q2`
* - -1 if `q2` is nearer to `q` than `q1`
@@ -208,6 +209,12 @@ int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
uint32_t av_q2intfloat(AVRational q);
/**
+ * Return the best rational so that a and b are multiples of it.
+ * If the resulting denominator is larger than max_den, return def.
+ */
+AVRational av_gcd_q(AVRational a, AVRational b, int max_den, AVRational def);
+
+/**
* @}
*/
diff --git a/media/ffvpx/libavutil/samplefmt.c b/media/ffvpx/libavutil/samplefmt.c
index fc077f6444..6d3ec34dab 100644
--- a/media/ffvpx/libavutil/samplefmt.c
+++ b/media/ffvpx/libavutil/samplefmt.c
@@ -16,11 +16,13 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "common.h"
+#include "error.h"
+#include "macros.h"
+#include "mem.h"
#include "samplefmt.h"
+#include <limits.h>
#include <stdio.h>
-#include <stdlib.h>
#include <string.h>
typedef struct SampleFmtInfo {
@@ -160,13 +162,20 @@ int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
if (buf_size < 0)
return buf_size;
+ if (linesize)
+ *linesize = line_size;
+
+ memset(audio_data, 0, planar
+ ? sizeof(*audio_data) * nb_channels
+ : sizeof(*audio_data));
+
+ if (!buf)
+ return buf_size;
+
audio_data[0] = (uint8_t *)buf;
for (ch = 1; planar && ch < nb_channels; ch++)
audio_data[ch] = audio_data[ch-1] + line_size;
- if (linesize)
- *linesize = line_size;
-
return buf_size;
}
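
A side effect of the reordering above is that av_samples_fill_arrays() now zeroes the pointer array and still reports the required size when buf is NULL, so it can be used as a size query before allocating. A sketch assuming that NULL-buf behaviour:

#include <stdio.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    uint8_t *data[8] = { NULL };
    int linesize = 0;

    /* No buffer yet: the call reports how many bytes a 2-channel, 1024-sample
     * planar float buffer needs and leaves data[] zeroed. */
    int size = av_samples_fill_arrays(data, &linesize, NULL,
                                      2, 1024, AV_SAMPLE_FMT_FLTP, 0);
    if (size < 0)
        return 1;
    printf("need %d bytes, linesize %d, data[0]=%p\n",
           size, linesize, (void *)data[0]);
    return 0;
}
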
diff --git a/media/ffvpx/libavutil/samplefmt.h b/media/ffvpx/libavutil/samplefmt.h
index 8cd43ae856..6bad0e254a 100644
--- a/media/ffvpx/libavutil/samplefmt.h
+++ b/media/ffvpx/libavutil/samplefmt.h
@@ -21,9 +21,6 @@
#include <stdint.h>
-#include "avutil.h"
-#include "attributes.h"
-
/**
* @addtogroup lavu_audio
* @{
@@ -195,9 +192,8 @@ int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
* @param nb_samples the number of samples in a single channel
* @param sample_fmt the sample format
* @param align buffer size alignment (0 = default, 1 = no alignment)
- * @return >=0 on success or a negative error code on failure
- * @todo return minimum size in bytes required for the buffer in case
- * of success at the next bump
+ * @return minimum size in bytes required for the buffer on success,
+ * or a negative error code on failure
*/
int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
const uint8_t *buf,
@@ -217,6 +213,7 @@ int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
* @param[out] linesize aligned size for audio buffer(s), may be NULL
* @param nb_channels number of audio channels
* @param nb_samples number of samples per channel
+ * @param sample_fmt the sample format
* @param align buffer size alignment (0 = default, 1 = no alignment)
* @return >=0 on success or a negative error code on failure
* @todo return the size of the allocated buffer in case of success at the next bump
diff --git a/media/ffvpx/libavutil/slicethread.c b/media/ffvpx/libavutil/slicethread.c
index dfbe551ef2..115b099736 100644
--- a/media/ffvpx/libavutil/slicethread.c
+++ b/media/ffvpx/libavutil/slicethread.c
@@ -17,11 +17,15 @@
*/
#include <stdatomic.h>
+#include "cpu.h"
+#include "internal.h"
#include "slicethread.h"
#include "mem.h"
#include "thread.h"
#include "avassert.h"
+#define MAX_AUTO_THREADS 16
+
#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
typedef struct WorkerContext {
@@ -103,7 +107,7 @@ int avpriv_slicethread_create(AVSliceThread **pctx, void *priv,
if (!nb_threads) {
int nb_cpus = av_cpu_count();
if (nb_cpus > 1)
- nb_threads = nb_cpus + 1;
+ nb_threads = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
else
nb_threads = 1;
}
@@ -239,7 +243,7 @@ int avpriv_slicethread_create(AVSliceThread **pctx, void *priv,
int nb_threads)
{
*pctx = NULL;
- return AVERROR(EINVAL);
+ return AVERROR(ENOSYS);
}
void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main)
diff --git a/media/ffvpx/libavutil/thread.h b/media/ffvpx/libavutil/thread.h
index cc5272d379..2f5e7e1cb5 100644
--- a/media/ffvpx/libavutil/thread.h
+++ b/media/ffvpx/libavutil/thread.h
@@ -24,6 +24,12 @@
#include "config.h"
+#if HAVE_PRCTL
+#include <sys/prctl.h>
+#endif
+
+#include "error.h"
+
#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
#if HAVE_PTHREADS
@@ -31,18 +37,24 @@
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
+#include <stdlib.h>
+
#include "log.h"
+#include "macros.h"
+
+#define ASSERT_PTHREAD_ABORT(func, ret) do { \
+ char errbuf[AV_ERROR_MAX_STRING_SIZE] = ""; \
+ av_log(NULL, AV_LOG_FATAL, AV_STRINGIFY(func) \
+ " failed with error: %s\n", \
+ av_make_error_string(errbuf, AV_ERROR_MAX_STRING_SIZE, \
+ AVERROR(ret))); \
+ abort(); \
+} while (0)
#define ASSERT_PTHREAD_NORET(func, ...) do { \
int ret = func(__VA_ARGS__); \
- if (ret) { \
- char errbuf[AV_ERROR_MAX_STRING_SIZE] = ""; \
- av_log(NULL, AV_LOG_FATAL, AV_STRINGIFY(func) \
- " failed with error: %s\n", \
- av_make_error_string(errbuf, AV_ERROR_MAX_STRING_SIZE, \
- AVERROR(ret))); \
- abort(); \
- } \
+ if (ret) \
+ ASSERT_PTHREAD_ABORT(func, ret); \
} while (0)
#define ASSERT_PTHREAD(func, ...) do { \
@@ -109,6 +121,15 @@ static inline int strict_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t
ASSERT_PTHREAD(pthread_cond_wait, cond, mutex);
}
+static inline int strict_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime)
+{
+ int ret = pthread_cond_timedwait(cond, mutex, abstime);
+ if (ret && ret != ETIMEDOUT)
+ ASSERT_PTHREAD_ABORT(pthread_cond_timedwait, ret);
+ return ret;
+}
+
static inline int strict_pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
ASSERT_PTHREAD(pthread_once, once_control, init_routine);
@@ -124,6 +145,7 @@ static inline int strict_pthread_once(pthread_once_t *once_control, void (*init_
#define pthread_cond_signal strict_pthread_cond_signal
#define pthread_cond_broadcast strict_pthread_cond_broadcast
#define pthread_cond_wait strict_pthread_cond_wait
+#define pthread_cond_timedwait strict_pthread_cond_timedwait
#define pthread_once strict_pthread_once
#endif
@@ -170,4 +192,13 @@ static inline int ff_thread_once(char *control, void (*routine)(void))
#endif
+static inline int ff_thread_setname(const char *name)
+{
+#if HAVE_PRCTL
+ return AVERROR(prctl(PR_SET_NAME, name));
+#endif
+
+ return AVERROR(ENOSYS);
+}
+
#endif /* AVUTIL_THREAD_H */
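
ff_thread_setname() is an internal helper, so it is only usable from inside the tree and only does something where prctl() exists. A hypothetical in-tree sketch (the worker and its name are made up for illustration, and a pthreads build is assumed):

#include "libavutil/thread.h"

static void *demo_worker(void *arg)
{
    /* Best effort: on platforms without prctl() this simply reports ENOSYS. */
    ff_thread_setname("av_demo_worker");
    return arg;
}

void spawn_demo_worker(void)
{
    pthread_t t;
    if (!pthread_create(&t, NULL, demo_worker, NULL))
        pthread_join(t, NULL);
}
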
diff --git a/media/ffvpx/libavutil/threadmessage.c b/media/ffvpx/libavutil/threadmessage.c
index 764b7fb813..f0e23f28fc 100644
--- a/media/ffvpx/libavutil/threadmessage.c
+++ b/media/ffvpx/libavutil/threadmessage.c
@@ -18,13 +18,15 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <limits.h>
#include "fifo.h"
+#include "mem.h"
#include "threadmessage.h"
#include "thread.h"
struct AVThreadMessageQueue {
#if HAVE_THREADS
- AVFifoBuffer *fifo;
+ AVFifo *fifo;
pthread_mutex_t lock;
pthread_cond_t cond_recv;
pthread_cond_t cond_send;
@@ -64,7 +66,7 @@ int av_thread_message_queue_alloc(AVThreadMessageQueue **mq,
av_free(rmq);
return AVERROR(ret);
}
- if (!(rmq->fifo = av_fifo_alloc(elsize * nelem))) {
+ if (!(rmq->fifo = av_fifo_alloc2(nelem, elsize, 0))) {
pthread_cond_destroy(&rmq->cond_send);
pthread_cond_destroy(&rmq->cond_recv);
pthread_mutex_destroy(&rmq->lock);
@@ -93,7 +95,7 @@ void av_thread_message_queue_free(AVThreadMessageQueue **mq)
#if HAVE_THREADS
if (*mq) {
av_thread_message_flush(*mq);
- av_fifo_freep(&(*mq)->fifo);
+ av_fifo_freep2(&(*mq)->fifo);
pthread_cond_destroy(&(*mq)->cond_send);
pthread_cond_destroy(&(*mq)->cond_recv);
pthread_mutex_destroy(&(*mq)->lock);
@@ -107,9 +109,9 @@ int av_thread_message_queue_nb_elems(AVThreadMessageQueue *mq)
#if HAVE_THREADS
int ret;
pthread_mutex_lock(&mq->lock);
- ret = av_fifo_size(mq->fifo);
+ ret = av_fifo_can_read(mq->fifo);
pthread_mutex_unlock(&mq->lock);
- return ret / mq->elsize;
+ return ret;
#else
return AVERROR(ENOSYS);
#endif
@@ -121,14 +123,14 @@ static int av_thread_message_queue_send_locked(AVThreadMessageQueue *mq,
void *msg,
unsigned flags)
{
- while (!mq->err_send && av_fifo_space(mq->fifo) < mq->elsize) {
+ while (!mq->err_send && !av_fifo_can_write(mq->fifo)) {
if ((flags & AV_THREAD_MESSAGE_NONBLOCK))
return AVERROR(EAGAIN);
pthread_cond_wait(&mq->cond_send, &mq->lock);
}
if (mq->err_send)
return mq->err_send;
- av_fifo_generic_write(mq->fifo, msg, mq->elsize, NULL);
+ av_fifo_write(mq->fifo, msg, 1);
/* one message is sent, signal one receiver */
pthread_cond_signal(&mq->cond_recv);
return 0;
@@ -138,14 +140,14 @@ static int av_thread_message_queue_recv_locked(AVThreadMessageQueue *mq,
void *msg,
unsigned flags)
{
- while (!mq->err_recv && av_fifo_size(mq->fifo) < mq->elsize) {
+ while (!mq->err_recv && !av_fifo_can_read(mq->fifo)) {
if ((flags & AV_THREAD_MESSAGE_NONBLOCK))
return AVERROR(EAGAIN);
pthread_cond_wait(&mq->cond_recv, &mq->lock);
}
- if (av_fifo_size(mq->fifo) < mq->elsize)
+ if (!av_fifo_can_read(mq->fifo))
return mq->err_recv;
- av_fifo_generic_read(mq->fifo, msg, mq->elsize, NULL);
+ av_fifo_read(mq->fifo, msg, 1);
/* one message space appeared, signal one sender */
pthread_cond_signal(&mq->cond_send);
return 0;
@@ -208,25 +210,25 @@ void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq,
}
#if HAVE_THREADS
-static void free_func_wrap(void *arg, void *msg, int size)
+static int free_func_wrap(void *arg, void *buf, size_t *nb_elems)
{
AVThreadMessageQueue *mq = arg;
- mq->free_func(msg);
+ uint8_t *msg = buf;
+ for (size_t i = 0; i < *nb_elems; i++)
+ mq->free_func(msg + i * mq->elsize);
+ return 0;
}
#endif
void av_thread_message_flush(AVThreadMessageQueue *mq)
{
#if HAVE_THREADS
- int used, off;
- void *free_func = mq->free_func;
+ size_t used;
pthread_mutex_lock(&mq->lock);
- used = av_fifo_size(mq->fifo);
- if (free_func)
- for (off = 0; off < used; off += mq->elsize)
- av_fifo_generic_peek_at(mq->fifo, mq, off, mq->elsize, free_func_wrap);
- av_fifo_drain(mq->fifo, used);
+ used = av_fifo_can_read(mq->fifo);
+ if (mq->free_func)
+ av_fifo_read_to_cb(mq->fifo, free_func_wrap, mq, &used);
/* only the senders need to be notified since the queue is empty and there
* is nothing to read */
pthread_cond_broadcast(&mq->cond_send);
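
The AVFifoBuffer to AVFifo migration above is largely mechanical: the new API counts elements rather than bytes, so the elsize bookkeeping disappears. A standalone sketch of the same pattern outside the message queue (the Msg struct is just an example payload):

#include <stdio.h>
#include <libavutil/fifo.h>

typedef struct { int id; } Msg;

int main(void)
{
    /* 16 elements of sizeof(Msg) bytes each, no auto-grow flags. */
    AVFifo *f = av_fifo_alloc2(16, sizeof(Msg), 0);
    Msg in = { 42 }, out;

    if (!f)
        return 1;
    if (av_fifo_can_write(f))              /* free space, counted in elements */
        av_fifo_write(f, &in, 1);          /* write one element, not elsize bytes */

    printf("queued: %zu\n", av_fifo_can_read(f));
    if (av_fifo_read(f, &out, 1) >= 0)
        printf("got id %d\n", out.id);

    av_fifo_freep2(&f);
    return 0;
}
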
diff --git a/media/ffvpx/libavutil/time.c b/media/ffvpx/libavutil/time.c
index 80d1faf264..83743da0b2 100644
--- a/media/ffvpx/libavutil/time.c
+++ b/media/ffvpx/libavutil/time.c
@@ -57,7 +57,7 @@ int64_t av_gettime_relative(void)
{
#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
#ifdef __APPLE__
- if (clock_gettime)
+ if (&clock_gettime)
#endif
{
struct timespec ts;
@@ -72,7 +72,7 @@ int av_gettime_relative_is_monotonic(void)
{
#if HAVE_CLOCK_GETTIME && defined(CLOCK_MONOTONIC)
#ifdef __APPLE__
- if (!clock_gettime)
+ if (!&clock_gettime)
return 0;
#endif
return 1;
diff --git a/media/ffvpx/libavutil/timecode.c b/media/ffvpx/libavutil/timecode.c
index f029f25839..b93f05b4b8 100644
--- a/media/ffvpx/libavutil/timecode.c
+++ b/media/ffvpx/libavutil/timecode.c
@@ -27,22 +27,20 @@
*/
#include <stdio.h>
+#include "common.h"
#include "timecode.h"
#include "log.h"
#include "error.h"
int av_timecode_adjust_ntsc_framenum2(int framenum, int fps)
{
- /* only works for NTSC 29.97 and 59.94 */
+ /* only works for multiples of NTSC 29.97 */
int drop_frames = 0;
int d, m, frames_per_10mins;
- if (fps == 30) {
- drop_frames = 2;
- frames_per_10mins = 17982;
- } else if (fps == 60) {
- drop_frames = 4;
- frames_per_10mins = 35964;
+ if (fps && fps % 30 == 0) {
+ drop_frames = fps / 30 * 2;
+ frames_per_10mins = fps / 30 * 17982;
} else
return framenum;
@@ -65,27 +63,48 @@ uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum)
ss = framenum / fps % 60;
mm = framenum / (fps*60) % 60;
hh = framenum / (fps*3600) % 24;
- return 0 << 31 | // color frame flag (0: unsync mode, 1: sync mode)
- drop << 30 | // drop frame flag (0: non drop, 1: drop)
- (ff / 10) << 28 | // tens of frames
- (ff % 10) << 24 | // units of frames
- 0 << 23 | // PC (NTSC) or BGF0 (PAL)
- (ss / 10) << 20 | // tens of seconds
- (ss % 10) << 16 | // units of seconds
- 0 << 15 | // BGF0 (NTSC) or BGF2 (PAL)
- (mm / 10) << 12 | // tens of minutes
- (mm % 10) << 8 | // units of minutes
- 0 << 7 | // BGF2 (NTSC) or PC (PAL)
- 0 << 6 | // BGF1
- (hh / 10) << 4 | // tens of hours
- (hh % 10); // units of hours
+ return av_timecode_get_smpte(tc->rate, drop, hh, mm, ss, ff);
+}
+
+uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
+{
+ uint32_t tc = 0;
+
+ /* For SMPTE 12-M timecodes, frame count is a special case if > 30 FPS.
+ See SMPTE ST 12-1:2014 Sec 12.1 for more info. */
+ if (av_cmp_q(rate, (AVRational) {30, 1}) == 1) {
+ if (ff % 2 == 1) {
+ if (av_cmp_q(rate, (AVRational) {50, 1}) == 0)
+ tc |= (1 << 7);
+ else
+ tc |= (1 << 23);
+ }
+ ff /= 2;
+ }
+
+ hh = hh % 24;
+ mm = av_clip(mm, 0, 59);
+ ss = av_clip(ss, 0, 59);
+ ff = ff % 40;
+
+ tc |= drop << 30;
+ tc |= (ff / 10) << 28;
+ tc |= (ff % 10) << 24;
+ tc |= (ss / 10) << 20;
+ tc |= (ss % 10) << 16;
+ tc |= (mm / 10) << 12;
+ tc |= (mm % 10) << 8;
+ tc |= (hh / 10) << 4;
+ tc |= (hh % 10);
+
+ return tc;
}
char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum)
{
int fps = tc->fps;
int drop = tc->flags & AV_TIMECODE_FLAG_DROPFRAME;
- int hh, mm, ss, ff, neg = 0;
+ int hh, mm, ss, ff, ff_len, neg = 0;
framenum += tc->start;
if (drop)
@@ -100,9 +119,10 @@ char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum)
hh = framenum / (fps*3600LL);
if (tc->flags & AV_TIMECODE_FLAG_24HOURSMAX)
hh = hh % 24;
- snprintf(buf, AV_TIMECODE_STR_SIZE, "%s%02d:%02d:%02d%c%02d",
+ ff_len = fps > 10000 ? 5 : fps > 1000 ? 4 : fps > 100 ? 3 : fps > 10 ? 2 : 1;
+ snprintf(buf, AV_TIMECODE_STR_SIZE, "%s%02d:%02d:%02d%c%0*d",
neg ? "-" : "",
- hh, mm, ss, drop ? ';' : ':', ff);
+ hh, mm, ss, drop ? ';' : ':', ff_len, ff);
return buf;
}
@@ -115,16 +135,33 @@ static unsigned bcd2uint(uint8_t bcd)
return low + 10*high;
}
-char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df)
+char *av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
{
unsigned hh = bcd2uint(tcsmpte & 0x3f); // 6-bit hours
unsigned mm = bcd2uint(tcsmpte>>8 & 0x7f); // 7-bit minutes
unsigned ss = bcd2uint(tcsmpte>>16 & 0x7f); // 7-bit seconds
unsigned ff = bcd2uint(tcsmpte>>24 & 0x3f); // 6-bit frames
unsigned drop = tcsmpte & 1<<30 && !prevent_df; // 1-bit drop if not arbitrary bit
+
+ if (av_cmp_q(rate, (AVRational) {30, 1}) == 1) {
+ ff <<= 1;
+ if (!skip_field) {
+ if (av_cmp_q(rate, (AVRational) {50, 1}) == 0)
+ ff += !!(tcsmpte & 1 << 7);
+ else
+ ff += !!(tcsmpte & 1 << 23);
+ }
+ }
+
snprintf(buf, AV_TIMECODE_STR_SIZE, "%02u:%02u:%02u%c%02u",
hh, mm, ss, drop ? ';' : ':', ff);
return buf;
+
+}
+
+char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df)
+{
+ return av_timecode_make_smpte_tc_string2(buf, (AVRational){30, 1}, tcsmpte, prevent_df, 1);
}
char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
@@ -158,8 +195,8 @@ static int check_timecode(void *log_ctx, AVTimecode *tc)
av_log(log_ctx, AV_LOG_ERROR, "Valid timecode frame rate must be specified. Minimum value is 1\n");
return AVERROR(EINVAL);
}
- if ((tc->flags & AV_TIMECODE_FLAG_DROPFRAME) && tc->fps != 30 && tc->fps != 60) {
- av_log(log_ctx, AV_LOG_ERROR, "Drop frame is only allowed with 30000/1001 or 60000/1001 FPS\n");
+ if ((tc->flags & AV_TIMECODE_FLAG_DROPFRAME) && tc->fps % 30 != 0) {
+ av_log(log_ctx, AV_LOG_ERROR, "Drop frame is only allowed with multiples of 30000/1001 FPS\n");
return AVERROR(EINVAL);
}
if (check_fps(tc->fps) < 0) {
@@ -191,19 +228,12 @@ int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start
return check_timecode(log_ctx, tc);
}
-int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx)
+int av_timecode_init_from_components(AVTimecode *tc, AVRational rate, int flags, int hh, int mm, int ss, int ff, void *log_ctx)
{
- char c;
- int hh, mm, ss, ff, ret;
-
- if (sscanf(str, "%d:%d:%d%c%d", &hh, &mm, &ss, &c, &ff) != 5) {
- av_log(log_ctx, AV_LOG_ERROR, "Unable to parse timecode, "
- "syntax: hh:mm:ss[:;.]ff\n");
- return AVERROR_INVALIDDATA;
- }
+ int ret;
memset(tc, 0, sizeof(*tc));
- tc->flags = c != ':' ? AV_TIMECODE_FLAG_DROPFRAME : 0; // drop if ';', '.', ...
+ tc->flags = flags;
tc->rate = rate;
tc->fps = fps_from_frame_rate(rate);
@@ -214,7 +244,22 @@ int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *st
tc->start = (hh*3600 + mm*60 + ss) * tc->fps + ff;
if (tc->flags & AV_TIMECODE_FLAG_DROPFRAME) { /* adjust frame number */
int tmins = 60*hh + mm;
- tc->start -= (tc->fps == 30 ? 2 : 4) * (tmins - tmins/10);
+ tc->start -= (tc->fps / 30 * 2) * (tmins - tmins/10);
}
return 0;
}
+
+int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx)
+{
+ char c;
+ int hh, mm, ss, ff, flags;
+
+ if (sscanf(str, "%d:%d:%d%c%d", &hh, &mm, &ss, &c, &ff) != 5) {
+ av_log(log_ctx, AV_LOG_ERROR, "Unable to parse timecode, "
+ "syntax: hh:mm:ss[:;.]ff\n");
+ return AVERROR_INVALIDDATA;
+ }
+ flags = c != ':' ? AV_TIMECODE_FLAG_DROPFRAME : 0; // drop if ';', '.', ...
+
+ return av_timecode_init_from_components(tc, rate, flags, hh, mm, ss, ff, log_ctx);
+}
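
Pulling the timecode changes together, a small sketch using the new component-based init with 59.94 fps drop-frame material; with the generalized drop handling, stepping across the minute boundary should skip frames ;00 through ;03:

#include <stdio.h>
#include <libavutil/timecode.h>

int main(void)
{
    AVTimecode tc;
    char buf[AV_TIMECODE_STR_SIZE];
    AVRational rate = { 60000, 1001 };

    if (av_timecode_init_from_components(&tc, rate, AV_TIMECODE_FLAG_DROPFRAME,
                                         0, 0, 59, 56, NULL) < 0)
        return 1;

    /* Expected: "00:00:59;56" followed by "00:01:00;04" (frames ;00-;03 are
     * dropped at the start of every minute that is not a multiple of ten). */
    printf("%s\n", av_timecode_make_string(&tc, buf, 0));
    printf("%s\n", av_timecode_make_string(&tc, buf, 4));
    return 0;
}
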
diff --git a/media/ffvpx/libavutil/timecode.h b/media/ffvpx/libavutil/timecode.h
index 37c1361bc2..060574a172 100644
--- a/media/ffvpx/libavutil/timecode.h
+++ b/media/ffvpx/libavutil/timecode.h
@@ -49,9 +49,9 @@ typedef struct {
* Adjust frame number for NTSC drop frame time code.
*
* @param framenum frame number to adjust
- * @param fps frame per second, 30 or 60
+ * @param fps frame per second, multiples of 30
* @return adjusted frame number
- * @warning adjustment is only valid in NTSC 29.97 and 59.94
+ * @warning adjustment is only valid for multiples of NTSC 29.97
*/
int av_timecode_adjust_ntsc_framenum2(int framenum, int fps);
@@ -62,15 +62,40 @@ int av_timecode_adjust_ntsc_framenum2(int framenum, int fps);
* @param framenum frame number
* @return the SMPTE binary representation
*
+ * See SMPTE ST 314M-2005 Sec 4.4.2.2.1 "Time code pack (TC)";
+ * the format is laid out as follows:
+ * bits 0-5: hours, in BCD (6 bits)
+ * bit 6: BGF1
+ * bit 7: BGF2 (NTSC) or FIELD (PAL)
+ * bits 8-14: minutes, in BCD (7 bits)
+ * bit 15: BGF0 (NTSC) or BGF2 (PAL)
+ * bits 16-22: seconds, in BCD (7 bits)
+ * bit 23: FIELD (NTSC) or BGF0 (PAL)
+ * bits 24-29: frames, in BCD (6 bits)
+ * bit 30: drop frame flag (0: non-drop, 1: drop)
+ * bit 31: color frame flag (0: unsync mode, 1: sync mode)
+ * @note BCD numbers (6 or 7 bits): 4 or 5 lower bits for units, 2 higher bits for tens.
* @note Frame number adjustment is automatically done in case of drop timecode,
* you do NOT have to call av_timecode_adjust_ntsc_framenum2().
* @note The frame number is relative to tc->start.
- * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity
- * correction (PC) bits are set to zero.
+ * @note Color frame (CF) and binary group flags (BGF) bits are set to zero.
*/
uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum);
/**
+ * Convert SEI info to SMPTE 12M binary representation.
+ *
+ * @param rate frame rate in rational form
+ * @param drop drop flag
+ * @param hh hour
+ * @param mm minute
+ * @param ss second
+ * @param ff frame number
+ * @return the SMPTE binary representation
+ */
+uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff);
+
+/**
* Load timecode string in buf.
*
* @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
@@ -87,6 +112,23 @@ char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum);
/**
* Get the timecode string from the SMPTE timecode format.
*
+ * In contrast to av_timecode_make_smpte_tc_string, this function supports 50/60
+ * fps timecodes by using the field bit.
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param rate frame rate of the timecode
+ * @param tcsmpte the 32-bit SMPTE timecode
+ * @param prevent_df prevent the use of a drop flag when it is known the DF bit
+ * is arbitrary
+ * @param skip_field prevent the use of a field flag when it is known the field
+ * bit is arbitrary (e.g. because it is used as PC flag)
+ * @return the buf parameter
+ */
+char *av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field);
+
+/**
+ * Get the timecode string from the SMPTE timecode format.
+ *
* @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
* @param tcsmpte the 32-bit SMPTE timecode
* @param prevent_df prevent the use of a drop flag when it is known the DF bit
@@ -119,6 +161,23 @@ char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit);
int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx);
/**
+ * Init a timecode struct from the passed timecode components.
+ *
+ * @param log_ctx a pointer to an arbitrary struct of which the first field
+ * is a pointer to an AVClass struct (used for av_log)
+ * @param tc pointer to an allocated AVTimecode
+ * @param rate frame rate in rational form
+ * @param flags miscellaneous flags such as drop frame, +24 hours, ...
+ * (see AVTimecodeFlag)
+ * @param hh hours
+ * @param mm minutes
+ * @param ss seconds
+ * @param ff frames
+ * @return 0 on success, AVERROR otherwise
+ */
+int av_timecode_init_from_components(AVTimecode *tc, AVRational rate, int flags, int hh, int mm, int ss, int ff, void *log_ctx);
+
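
A minimal usage sketch (not part of the patch; the start time and rate are arbitrary examples):

#include "libavutil/rational.h"
#include "libavutil/timecode.h"

static int init_example(AVTimecode *tc)
{
    /* start the timecode at 10:00:00:00 at 25 fps, no special flags */
    return av_timecode_init_from_components(tc, (AVRational){25, 1}, 0,
                                            10, 0, 0, 0, NULL);
}
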
+/**
* Parse timecode representation (hh:mm:ss[:;.]ff).
*
* @param log_ctx a pointer to an arbitrary struct of which the first field is a
diff --git a/media/ffvpx/libavutil/timer.h b/media/ffvpx/libavutil/timer.h
index 0bb353cfce..d3db5a27ef 100644
--- a/media/ffvpx/libavutil/timer.h
+++ b/media/ffvpx/libavutil/timer.h
@@ -42,10 +42,13 @@
#include <stdint.h>
#include <inttypes.h>
-#if HAVE_MACH_ABSOLUTE_TIME
+#if CONFIG_MACOS_KPERF
+#include "macos_kperf.h"
+#elif HAVE_MACH_ABSOLUTE_TIME
#include <mach/mach_time.h>
#endif
+#include "common.h"
#include "log.h"
#if ARCH_AARCH64
@@ -54,6 +57,8 @@
# include "arm/timer.h"
#elif ARCH_PPC
# include "ppc/timer.h"
+#elif ARCH_RISCV
+# include "riscv/timer.h"
#elif ARCH_X86
# include "x86/timer.h"
#endif
@@ -87,7 +92,7 @@
if (((tcount + tskip_count) & (tcount + tskip_count - 1)) == 0) { \
int i; \
av_log(NULL, AV_LOG_ERROR, \
- "%7"PRIu64" " FF_TIMER_UNITS " in %s,%8d runs,%7d skips", \
+ "%7" PRIu64 " " FF_TIMER_UNITS " in %s,%8d runs,%7d skips",\
tsum * 10 / tcount, id, tcount, tskip_count); \
for (i = 0; i < 32; i++) \
av_log(NULL, AV_LOG_VERBOSE, " %2d", av_log2(2*thistogram[i]));\
@@ -125,6 +130,16 @@
read(linux_perf_fd, &tperf, sizeof(tperf)); \
TIMER_REPORT(id, tperf)
+#elif CONFIG_MACOS_KPERF
+
+#define START_TIMER \
+ uint64_t tperf; \
+ ff_kperf_init(); \
+ tperf = ff_kperf_cycles();
+
+#define STOP_TIMER(id) \
+ TIMER_REPORT(id, ff_kperf_cycles() - tperf);
+
#elif defined(AV_READ_TIME)
#define START_TIMER \
uint64_t tend; \
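
Whichever backend is selected above (kperf, mach_absolute_time, Linux perf, or AV_READ_TIME), the macros are used the same way. A minimal sketch, with a hypothetical workload function:

#include "libavutil/timer.h"

static void workload(void) { /* hypothetical code to be measured */ }

static void bench_sketch(void)
{
    START_TIMER
    workload();
    STOP_TIMER("workload")
}
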
diff --git a/media/ffvpx/libavutil/utils.c b/media/ffvpx/libavutil/utils.c
index 230081ea47..94d247bbee 100644
--- a/media/ffvpx/libavutil/utils.c
+++ b/media/ffvpx/libavutil/utils.c
@@ -19,60 +19,12 @@
#include "config.h"
#include "avutil.h"
#include "avassert.h"
-#include "samplefmt.h"
-#include "internal.h"
/**
* @file
* various utility functions
*/
-#include "libavutil/ffversion.h"
-const char av_util_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
-
-const char *av_version_info(void)
-{
- return FFMPEG_VERSION;
-}
-
-unsigned avutil_version(void)
-{
- static int checks_done;
- if (checks_done)
- return LIBAVUTIL_VERSION_INT;
-
- av_assert0(AV_SAMPLE_FMT_DBLP == 9);
- av_assert0(AVMEDIA_TYPE_ATTACHMENT == 4);
- av_assert0(AV_PICTURE_TYPE_BI == 7);
- av_assert0(LIBAVUTIL_VERSION_MICRO >= 100);
- av_assert0(HAVE_MMX2 == HAVE_MMXEXT);
-
- av_assert0(((size_t)-1) > 0); // C guarantees this but if false on a platform we care about revert at least b284e1ffe343d6697fb950d1ee517bafda8a9844
-
- if (av_sat_dadd32(1, 2) != 5) {
- av_log(NULL, AV_LOG_FATAL, "Libavutil has been built with a broken binutils, please upgrade binutils and rebuild\n");
- abort();
- }
-
- if (llrint(1LL<<60) != 1LL<<60) {
- av_log(NULL, AV_LOG_ERROR, "Libavutil has been linked to a broken llrint()\n");
- }
-
- checks_done = 1;
- return LIBAVUTIL_VERSION_INT;
-}
-
-const char *avutil_configuration(void)
-{
- return FFMPEG_CONFIGURATION;
-}
-
-const char *avutil_license(void)
-{
-#define LICENSE_PREFIX "libavutil license: "
- return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
-}
-
const char *av_get_media_type_string(enum AVMediaType media_type)
{
switch (media_type) {
diff --git a/media/ffvpx/libavutil/version.h b/media/ffvpx/libavutil/version.h
index 24ca8ab7db..e8ec604349 100644
--- a/media/ffvpx/libavutil/version.h
+++ b/media/ffvpx/libavutil/version.h
@@ -78,8 +78,8 @@
* @{
*/
-#define LIBAVUTIL_VERSION_MAJOR 56
-#define LIBAVUTIL_VERSION_MINOR 31
+#define LIBAVUTIL_VERSION_MAJOR 58
+#define LIBAVUTIL_VERSION_MINOR 2
#define LIBAVUTIL_VERSION_MICRO 100
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
@@ -105,31 +105,14 @@
* @{
*/
-#ifndef FF_API_VAAPI
-#define FF_API_VAAPI (LIBAVUTIL_VERSION_MAJOR < 57)
-#endif
-#ifndef FF_API_FRAME_QP
-#define FF_API_FRAME_QP (LIBAVUTIL_VERSION_MAJOR < 57)
-#endif
-#ifndef FF_API_PLUS1_MINUS1
-#define FF_API_PLUS1_MINUS1 (LIBAVUTIL_VERSION_MAJOR < 57)
-#endif
-#ifndef FF_API_ERROR_FRAME
-#define FF_API_ERROR_FRAME (LIBAVUTIL_VERSION_MAJOR < 57)
-#endif
-#ifndef FF_API_PKT_PTS
-#define FF_API_PKT_PTS (LIBAVUTIL_VERSION_MAJOR < 57)
-#endif
-#ifndef FF_API_CRYPTO_SIZE_T
-#define FF_API_CRYPTO_SIZE_T (LIBAVUTIL_VERSION_MAJOR < 57)
-#endif
-#ifndef FF_API_FRAME_GET_SET
-#define FF_API_FRAME_GET_SET (LIBAVUTIL_VERSION_MAJOR < 57)
-#endif
-#ifndef FF_API_PSEUDOPAL
-#define FF_API_PSEUDOPAL (LIBAVUTIL_VERSION_MAJOR < 57)
-#endif
-
+#define FF_API_FIFO_PEEK2 (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_FIFO_OLD_API (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_OLD_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_AV_FOPEN_UTF8 (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_PKT_DURATION (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_REORDERED_OPAQUE (LIBAVUTIL_VERSION_MAJOR < 59)
+#define FF_API_FRAME_PICTURE_NUMBER (LIBAVUTIL_VERSION_MAJOR < 59)
/**
* @}
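
These FF_API_* macros follow FFmpeg's usual deprecation pattern: code that still touches a deprecated API is wrapped so it only compiles until the next major bump. A hedged sketch (frame is a placeholder AVFrame*, and tying FF_API_PKT_DURATION to the pkt_duration field is an assumption here):

#if FF_API_PKT_DURATION
    /* fallback for the deprecated field; compiled only while
     * LIBAVUTIL_VERSION_MAJOR < 59, removed automatically at the bump */
    if (!frame->duration)
        frame->duration = frame->pkt_duration;
#endif
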
diff --git a/media/ffvpx/libavutil/video_enc_params.c b/media/ffvpx/libavutil/video_enc_params.c
new file mode 100644
index 0000000000..33592dc128
--- /dev/null
+++ b/media/ffvpx/libavutil/video_enc_params.c
@@ -0,0 +1,80 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "buffer.h"
+#include "frame.h"
+#include "mem.h"
+#include "video_enc_params.h"
+
+AVVideoEncParams *av_video_enc_params_alloc(enum AVVideoEncParamsType type,
+ unsigned int nb_blocks, size_t *out_size)
+{
+ struct TestStruct {
+ AVVideoEncParams p;
+ AVVideoBlockParams b;
+ };
+ const size_t blocks_offset = offsetof(struct TestStruct, b);
+ size_t size = blocks_offset;
+ AVVideoEncParams *par;
+
+ if (nb_blocks > (SIZE_MAX - size) / sizeof(AVVideoBlockParams))
+ return NULL;
+ size += sizeof(AVVideoBlockParams) * nb_blocks;
+
+ par = av_mallocz(size);
+ if (!par)
+ return NULL;
+
+ par->type = type;
+ par->nb_blocks = nb_blocks;
+ par->block_size = sizeof(AVVideoBlockParams);
+ par->blocks_offset = blocks_offset;
+
+ if (out_size)
+ *out_size = size;
+
+ return par;
+}
+
+AVVideoEncParams*
+av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type,
+ unsigned int nb_blocks)
+{
+ AVBufferRef *buf;
+ AVVideoEncParams *par;
+ size_t size;
+
+ par = av_video_enc_params_alloc(type, nb_blocks, &size);
+ if (!par)
+ return NULL;
+ buf = av_buffer_create((uint8_t *)par, size, NULL, NULL, 0);
+ if (!buf) {
+ av_freep(&par);
+ return NULL;
+ }
+
+ if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_VIDEO_ENC_PARAMS, buf)) {
+ av_buffer_unref(&buf);
+ return NULL;
+ }
+
+ return par;
+}
diff --git a/media/ffvpx/libavutil/video_enc_params.h b/media/ffvpx/libavutil/video_enc_params.h
new file mode 100644
index 0000000000..fc0c3bc1a5
--- /dev/null
+++ b/media/ffvpx/libavutil/video_enc_params.h
@@ -0,0 +1,171 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_VIDEO_ENC_PARAMS_H
+#define AVUTIL_VIDEO_ENC_PARAMS_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/frame.h"
+
+enum AVVideoEncParamsType {
+ AV_VIDEO_ENC_PARAMS_NONE = -1,
+ /**
+ * VP9 stores:
+ * - per-frame base (luma AC) quantizer index, exported as AVVideoEncParams.qp
+ * - deltas for luma DC, chroma AC and chroma DC, exported in the
+ * corresponding entries in AVVideoEncParams.delta_qp
+ * - per-segment delta, exported for each block as AVVideoBlockParams.delta_qp
+ *
+ * To compute the resulting quantizer index for a block:
+ * - for luma AC, add the base qp and the per-block delta_qp, saturating to
+ * unsigned 8-bit.
+ * - for luma DC and chroma AC/DC, add the corresponding
+ * AVVideoEncParams.delta_qp to the luma AC index, again saturating to
+ * unsigned 8-bit.
+ */
+ AV_VIDEO_ENC_PARAMS_VP9,
+
+ /**
+ * H.264 stores:
+ * - in PPS (per-picture):
+ * * initial QP_Y (luma) value, exported as AVVideoEncParams.qp
+ * * delta(s) for chroma QP values (same for both, or each separately),
+ * exported in the corresponding entries in AVVideoEncParams.delta_qp
+ * - per-slice QP delta, not exported directly, added to the per-MB value
+ * - per-MB delta; not exported directly; the final per-MB quantizer
+ * parameter - QP_Y - minus the value in AVVideoEncParams.qp is exported
+ * as AVVideoBlockParams.delta_qp.
+ */
+ AV_VIDEO_ENC_PARAMS_H264,
+
+ /*
+ * MPEG-2-compatible quantizer.
+ *
+ * Summing the frame-level qp with the per-block delta_qp gives the
+ * resulting quantizer for the block.
+ */
+ AV_VIDEO_ENC_PARAMS_MPEG2,
+};
+
+/**
+ * Video encoding parameters for a given frame. This struct is allocated along
+ * with an optional array of per-block AVVideoBlockParams descriptors.
+ * Must be allocated with av_video_enc_params_alloc().
+ */
+typedef struct AVVideoEncParams {
+ /**
+ * Number of blocks in the array.
+ *
+ * May be 0, in which case no per-block information is present. In this case
+ * the values of blocks_offset / block_size are unspecified and should not
+ * be accessed.
+ */
+ unsigned int nb_blocks;
+ /**
+ * Offset in bytes from the beginning of this structure at which the array
+ * of blocks starts.
+ */
+ size_t blocks_offset;
+ /*
+ * Size of each block in bytes. May not match sizeof(AVVideoBlockParams).
+ */
+ size_t block_size;
+
+ /**
+ * Type of the parameters (the codec they are used with).
+ */
+ enum AVVideoEncParamsType type;
+
+ /**
+ * Base quantisation parameter for the frame. The final quantiser for a
+ * given block in a given plane is obtained from this value, possibly
+ * combined with {@code delta_qp} and the per-block delta in a manner
+ * documented for each type.
+ */
+ int32_t qp;
+
+ /**
+ * Quantisation parameter offset from the base (per-frame) qp for a given
+ * plane (first index) and AC/DC coefficients (second index).
+ */
+ int32_t delta_qp[4][2];
+} AVVideoEncParams;
+
+/**
+ * Data structure for storing block-level encoding information.
+ * It is allocated as a part of AVVideoEncParams and should be retrieved with
+ * av_video_enc_params_block().
+ *
+ * sizeof(AVVideoBlockParams) is not a part of the ABI and new fields may be
+ * added to it.
+ */
+typedef struct AVVideoBlockParams {
+ /**
+ * Distance in luma pixels from the top-left corner of the visible frame
+ * to the top-left corner of the block.
+ * Can be negative if top/right padding is present on the coded frame.
+ */
+ int src_x, src_y;
+ /**
+ * Width and height of the block in luma pixels.
+ */
+ int w, h;
+
+ /**
+ * Difference between this block's final quantization parameter and the
+ * corresponding per-frame value.
+ */
+ int32_t delta_qp;
+} AVVideoBlockParams;
+
+/*
+ * Get the block at the specified {@code idx}. Must be between 0 and nb_blocks - 1.
+ */
+static av_always_inline AVVideoBlockParams*
+av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
+{
+ av_assert0(idx < par->nb_blocks);
+ return (AVVideoBlockParams *)((uint8_t *)par + par->blocks_offset +
+ idx * par->block_size);
+}
+
+/**
+ * Allocates memory for AVVideoEncParams of the given type, plus an array of
+ * {@code nb_blocks} AVVideoBlockParams and initializes the variables. Can be
+ * freed with a normal av_free() call.
+ *
+ * @param out_size if non-NULL, the size in bytes of the resulting data array is
+ * written here.
+ */
+AVVideoEncParams *av_video_enc_params_alloc(enum AVVideoEncParamsType type,
+ unsigned int nb_blocks, size_t *out_size);
+
+/**
+ * Allocates memory for AVVideoEncParams plus an array of
+ * {@code nb_blocks} AVVideoBlockParams in the given AVFrame {@code frame}
+ * as AVFrameSideData of type AV_FRAME_DATA_VIDEO_ENC_PARAMS
+ * and initializes the variables.
+ */
+AVVideoEncParams*
+av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type,
+ unsigned int nb_blocks);
+
+#endif /* AVUTIL_VIDEO_ENC_PARAMS_H */
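
As a rough usage sketch of the API above (not part of the patch; frame is assumed to be a valid AVFrame, and the 2x2 grid of 64x64 blocks is made up for illustration):

#include "libavutil/error.h"
#include "libavutil/frame.h"
#include "libavutil/video_enc_params.h"

static int attach_qp_sketch(AVFrame *frame)
{
    AVVideoEncParams *par =
        av_video_enc_params_create_side_data(frame, AV_VIDEO_ENC_PARAMS_VP9, 4);
    if (!par)
        return AVERROR(ENOMEM);

    par->qp = 32;                       /* per-frame base quantizer index */
    for (unsigned int i = 0; i < par->nb_blocks; i++) {
        AVVideoBlockParams *b = av_video_enc_params_block(par, i);
        b->src_x    = 64 * (int)(i % 2);
        b->src_y    = 64 * (int)(i / 2);
        b->w = b->h = 64;
        b->delta_qp = 0;                /* per-block (e.g. per-segment) delta */
    }
    return 0;
}
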
diff --git a/media/ffvpx/libavutil/x86/bswap.h b/media/ffvpx/libavutil/x86/bswap.h
index ffa59e4c82..b2f18b6c93 100644
--- a/media/ffvpx/libavutil/x86/bswap.h
+++ b/media/ffvpx/libavutil/x86/bswap.h
@@ -26,6 +26,7 @@
#include <stdint.h>
#if defined(_MSC_VER)
+#include <stdlib.h>
#include <intrin.h>
#endif
#include "config.h"
diff --git a/media/ffvpx/libavutil/x86/cpu.c b/media/ffvpx/libavutil/x86/cpu.c
index bcd41a50a2..d6cd4fab9c 100644
--- a/media/ffvpx/libavutil/x86/cpu.c
+++ b/media/ffvpx/libavutil/x86/cpu.c
@@ -150,9 +150,13 @@ int ff_get_cpu_flags_x86(void)
rval |= AV_CPU_FLAG_AVX2;
#if HAVE_AVX512 /* F, CD, BW, DQ, VL */
if ((xcr0_lo & 0xe0) == 0xe0) { /* OPMASK/ZMM state */
- if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000)
+ if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000) {
rval |= AV_CPU_FLAG_AVX512;
-
+#if HAVE_AVX512ICL
+ if ((ebx & 0xd0200000) == 0xd0200000 && (ecx & 0x5f42) == 0x5f42)
+ rval |= AV_CPU_FLAG_AVX512ICL;
+#endif /* HAVE_AVX512ICL */
+ }
}
#endif /* HAVE_AVX512 */
#endif /* HAVE_AVX2 */
@@ -196,6 +200,10 @@ int ff_get_cpu_flags_x86(void)
used unless explicitly disabled by checking AV_CPU_FLAG_AVXSLOW. */
if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
rval |= AV_CPU_FLAG_AVXSLOW;
+
+ /* Zen 3 and earlier have slow gather */
+ if ((family <= 0x19) && (rval & AV_CPU_FLAG_AVX2))
+ rval |= AV_CPU_FLAG_SLOW_GATHER;
}
/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
@@ -235,6 +243,10 @@ int ff_get_cpu_flags_x86(void)
if ((rval & AV_CPU_FLAG_SSSE3) && !(rval & AV_CPU_FLAG_SSE4) &&
family == 6 && model < 23)
rval |= AV_CPU_FLAG_SSSE3SLOW;
+
+ /* Haswell has slow gather */
+ if ((rval & AV_CPU_FLAG_AVX2) && family == 6 && model < 70)
+ rval |= AV_CPU_FLAG_SLOW_GATHER;
}
#endif /* cpuid */
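
The new AV_CPU_FLAG_SLOW_GATHER flag lets SIMD init code avoid gather-based AVX2 kernels on the affected CPU families. A minimal sketch of such a check (the surrounding decision logic is hypothetical):

#include "libavutil/cpu.h"

static int want_gather_path(void)
{
    int flags = av_get_cpu_flags();
    /* prefer a gather-based AVX2 kernel only where gather is not slow */
    return (flags & AV_CPU_FLAG_AVX2) && !(flags & AV_CPU_FLAG_SLOW_GATHER);
}
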
diff --git a/media/ffvpx/libavutil/x86/cpu.h b/media/ffvpx/libavutil/x86/cpu.h
index 937c697fa0..40a1eef0ab 100644
--- a/media/ffvpx/libavutil/x86/cpu.h
+++ b/media/ffvpx/libavutil/x86/cpu.h
@@ -80,6 +80,7 @@
#define EXTERNAL_AVX2_SLOW(flags) CPUEXT_SUFFIX_SLOW2(flags, _EXTERNAL, AVX2, AVX)
#define EXTERNAL_AESNI(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AESNI)
#define EXTERNAL_AVX512(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AVX512)
+#define EXTERNAL_AVX512ICL(flags) CPUEXT_SUFFIX(flags, _EXTERNAL, AVX512ICL)
#define INLINE_AMD3DNOW(flags) CPUEXT_SUFFIX(flags, _INLINE, AMD3DNOW)
#define INLINE_AMD3DNOWEXT(flags) CPUEXT_SUFFIX(flags, _INLINE, AMD3DNOWEXT)
diff --git a/media/ffvpx/libavutil/x86/cpuid.asm b/media/ffvpx/libavutil/x86/cpuid.asm
index c3f7866ec7..766f77fcdf 100644
--- a/media/ffvpx/libavutil/x86/cpuid.asm
+++ b/media/ffvpx/libavutil/x86/cpuid.asm
@@ -21,7 +21,7 @@
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
-%include "x86util.asm"
+%include "libavutil/x86/x86util.asm"
SECTION .text
diff --git a/media/ffvpx/libavutil/x86/emms.asm b/media/ffvpx/libavutil/x86/emms.asm
index 8611762d73..df84f2221b 100644
--- a/media/ffvpx/libavutil/x86/emms.asm
+++ b/media/ffvpx/libavutil/x86/emms.asm
@@ -18,7 +18,7 @@
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
-%include "x86util.asm"
+%include "libavutil/x86/x86util.asm"
SECTION .text
diff --git a/media/ffvpx/libavutil/x86/emms.h b/media/ffvpx/libavutil/x86/emms.h
index c21e34b451..8ceec110cf 100644
--- a/media/ffvpx/libavutil/x86/emms.h
+++ b/media/ffvpx/libavutil/x86/emms.h
@@ -21,11 +21,14 @@
#include "config.h"
#include "libavutil/attributes.h"
-#include "libavutil/cpu.h"
void avpriv_emms_asm(void);
#if HAVE_MMX_INLINE
+#ifndef __MMX__
+#include "libavutil/cpu.h"
+#endif
+
# define emms_c emms_c
/**
* Empty mmx state.
diff --git a/media/ffvpx/libavutil/x86/fixed_dsp.asm b/media/ffvpx/libavutil/x86/fixed_dsp.asm
index 979dd5c334..2f411850f4 100644
--- a/media/ffvpx/libavutil/x86/fixed_dsp.asm
+++ b/media/ffvpx/libavutil/x86/fixed_dsp.asm
@@ -20,7 +20,7 @@
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
-%include "x86util.asm"
+%include "libavutil/x86/x86util.asm"
SECTION .text
diff --git a/media/ffvpx/libavutil/x86/fixed_dsp_init.c b/media/ffvpx/libavutil/x86/fixed_dsp_init.c
index 303a2eb922..d3f4b2e325 100644
--- a/media/ffvpx/libavutil/x86/fixed_dsp_init.c
+++ b/media/ffvpx/libavutil/x86/fixed_dsp_init.c
@@ -23,7 +23,7 @@
#include "libavutil/fixed_dsp.h"
#include "cpu.h"
-void ff_butterflies_fixed_sse2(int *src0, int *src1, int len);
+void ff_butterflies_fixed_sse2(int *av_restrict src0, int *av_restrict src1, int len);
av_cold void ff_fixed_dsp_init_x86(AVFixedDSPContext *fdsp)
{
diff --git a/media/ffvpx/libavutil/x86/float_dsp.asm b/media/ffvpx/libavutil/x86/float_dsp.asm
index 517fd63638..e84ba52566 100644
--- a/media/ffvpx/libavutil/x86/float_dsp.asm
+++ b/media/ffvpx/libavutil/x86/float_dsp.asm
@@ -20,7 +20,7 @@
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
-%include "x86util.asm"
+%include "libavutil/x86/x86util.asm"
SECTION_RODATA 32
pd_reverse: dd 7, 6, 5, 4, 3, 2, 1, 0
@@ -48,7 +48,7 @@ ALIGN 16
sub lenq, 64
jge .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse
@@ -141,7 +141,7 @@ cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
%endif ; mmsize
sub lenq, 64
jge .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse
@@ -178,7 +178,7 @@ cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
mova [dstq+lenq], m1
sub lenq, mmsize
jge .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse
@@ -233,7 +233,7 @@ cglobal vector_dmac_scalar, 4,4,5, dst, src, mul, len
movaps [dstq+lenq+3*mmsize], m4
sub lenq, mmsize*4
jge .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -280,7 +280,7 @@ cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
movaps [dstq+lenq+mmsize], m2
sub lenq, 2*mmsize
jge .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse2
@@ -294,7 +294,7 @@ VECTOR_DMUL_SCALAR
; vector_fmul_window(float *dst, const float *src0,
; const float *src1, const float *win, int len);
;-----------------------------------------------------------------------------
-%macro VECTOR_FMUL_WINDOW 0
+INIT_XMM sse
cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
shl lend, 2
lea len1q, [lenq - mmsize]
@@ -305,7 +305,6 @@ cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
.loop:
mova m0, [winq + lenq]
mova m4, [src0q + lenq]
-%if cpuflag(sse)
mova m1, [winq + len1q]
mova m5, [src1q + len1q]
shufps m1, m1, 0x1b
@@ -319,34 +318,12 @@ cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
addps m2, m3
subps m1, m0
shufps m2, m2, 0x1b
-%else
- pswapd m1, [winq + len1q]
- pswapd m5, [src1q + len1q]
- mova m2, m0
- mova m3, m1
- pfmul m2, m4
- pfmul m3, m5
- pfmul m1, m4
- pfmul m0, m5
- pfadd m2, m3
- pfsub m1, m0
- pswapd m2, m2
-%endif
mova [dstq + lenq], m1
mova [dstq + len1q], m2
sub len1q, mmsize
add lenq, mmsize
jl .loop
-%if mmsize == 8
- femms
-%endif
- REP_RET
-%endmacro
-
-INIT_MMX 3dnowext
-VECTOR_FMUL_WINDOW
-INIT_XMM sse
-VECTOR_FMUL_WINDOW
+ RET
;-----------------------------------------------------------------------------
; vector_fmul_add(float *dst, const float *src0, const float *src1,
@@ -375,7 +352,7 @@ ALIGN 16
sub lenq, 2*mmsize
jge .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse
@@ -424,7 +401,7 @@ ALIGN 16
add src1q, 2*mmsize
sub lenq, 2*mmsize
jge .loop
- REP_RET
+ RET
%endmacro
INIT_XMM sse
@@ -463,6 +440,133 @@ cglobal scalarproduct_float, 3,3,2, v1, v2, offset
%endif
RET
+INIT_YMM fma3
+cglobal scalarproduct_float, 3,5,8, v1, v2, size, len, offset
+ xor offsetq, offsetq
+ xorps m0, m0, m0
+ shl sized, 2
+ mov lenq, sizeq
+ cmp lenq, 32
+ jl .l16
+ cmp lenq, 64
+ jl .l32
+ xorps m1, m1, m1
+ cmp lenq, 128
+ jl .l64
+ and lenq, ~127
+ xorps m2, m2, m2
+ xorps m3, m3, m3
+.loop128:
+ movups m4, [v1q+offsetq]
+ movups m5, [v1q+offsetq + 32]
+ movups m6, [v1q+offsetq + 64]
+ movups m7, [v1q+offsetq + 96]
+ fmaddps m0, m4, [v2q+offsetq ], m0
+ fmaddps m1, m5, [v2q+offsetq + 32], m1
+ fmaddps m2, m6, [v2q+offsetq + 64], m2
+ fmaddps m3, m7, [v2q+offsetq + 96], m3
+ add offsetq, 128
+ cmp offsetq, lenq
+ jl .loop128
+ addps m0, m0, m2
+ addps m1, m1, m3
+ mov lenq, sizeq
+ and lenq, 127
+ cmp lenq, 64
+ jge .l64
+ addps m0, m0, m1
+ cmp lenq, 32
+ jge .l32
+ vextractf128 xmm2, m0, 1
+ addps xmm0, xmm2
+ cmp lenq, 16
+ jge .l16
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%if ARCH_X86_64 == 0
+ movss r0m, xm0
+ fld dword r0m
+%endif
+ RET
+.l64:
+ and lenq, ~63
+ add lenq, offsetq
+.loop64:
+ movups m4, [v1q+offsetq]
+ movups m5, [v1q+offsetq + 32]
+ fmaddps m0, m4, [v2q+offsetq], m0
+ fmaddps m1, m5, [v2q+offsetq + 32], m1
+ add offsetq, 64
+ cmp offsetq, lenq
+ jl .loop64
+ addps m0, m0, m1
+ mov lenq, sizeq
+ and lenq, 63
+ cmp lenq, 32
+ jge .l32
+ vextractf128 xmm2, m0, 1
+ addps xmm0, xmm2
+ cmp lenq, 16
+ jge .l16
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%if ARCH_X86_64 == 0
+ movss r0m, xm0
+ fld dword r0m
+%endif
+ RET
+.l32:
+ and lenq, ~31
+ add lenq, offsetq
+.loop32:
+ movups m4, [v1q+offsetq]
+ fmaddps m0, m4, [v2q+offsetq], m0
+ add offsetq, 32
+ cmp offsetq, lenq
+ jl .loop32
+ vextractf128 xmm2, m0, 1
+ addps xmm0, xmm2
+ mov lenq, sizeq
+ and lenq, 31
+ cmp lenq, 16
+ jge .l16
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%if ARCH_X86_64 == 0
+ movss r0m, xm0
+ fld dword r0m
+%endif
+ RET
+.l16:
+ and lenq, ~15
+ add lenq, offsetq
+.loop16:
+ movaps xmm1, [v1q+offsetq]
+ mulps xmm1, [v2q+offsetq]
+ addps xmm0, xmm1
+ add offsetq, 16
+ cmp offsetq, lenq
+ jl .loop16
+ movhlps xmm1, xmm0
+ addps xmm0, xmm1
+ movss xmm1, xmm0
+ shufps xmm0, xmm0, 1
+ addss xmm0, xmm1
+%if ARCH_X86_64 == 0
+ movss r0m, xm0
+ fld dword r0m
+%endif
+ RET
+
;-----------------------------------------------------------------------------
; void ff_butterflies_float(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
@@ -481,4 +585,4 @@ cglobal butterflies_float, 3,3,3, src0, src1, len
mova [src0q + lenq], m0
add lenq, mmsize
jl .loop
- REP_RET
+ RET
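
For reference, a plain-C sketch (not part of the patch) of what the new fma3 scalarproduct_float kernel above computes: a dot product over len floats, which the SIMD code accumulates in 128-, 64-, 32- and 16-byte chunks after converting the element count to bytes.

static float scalarproduct_float_ref(const float *v1, const float *v2, int len)
{
    float p = 0.0f;
    for (int i = 0; i < len; i++)
        p += v1[i] * v2[i];
    return p;
}
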
diff --git a/media/ffvpx/libavutil/x86/float_dsp_init.c b/media/ffvpx/libavutil/x86/float_dsp_init.c
index 8826e4e2c9..ad6b506259 100644
--- a/media/ffvpx/libavutil/x86/float_dsp_init.c
+++ b/media/ffvpx/libavutil/x86/float_dsp_init.c
@@ -56,8 +56,6 @@ void ff_vector_dmul_scalar_sse2(double *dst, const double *src,
void ff_vector_dmul_scalar_avx(double *dst, const double *src,
double mul, int len);
-void ff_vector_fmul_window_3dnowext(float *dst, const float *src0,
- const float *src1, const float *win, int len);
void ff_vector_fmul_window_sse(float *dst, const float *src0,
const float *src1, const float *win, int len);
@@ -76,6 +74,7 @@ void ff_vector_fmul_reverse_avx2(float *dst, const float *src0,
const float *src1, int len);
float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
+float ff_scalarproduct_float_fma3(const float *v1, const float *v2, int order);
void ff_butterflies_float_sse(float *av_restrict src0, float *av_restrict src1, int len);
@@ -83,9 +82,6 @@ av_cold void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp)
{
int cpu_flags = av_get_cpu_flags();
- if (EXTERNAL_AMD3DNOWEXT(cpu_flags)) {
- fdsp->vector_fmul_window = ff_vector_fmul_window_3dnowext;
- }
if (EXTERNAL_SSE(cpu_flags)) {
fdsp->vector_fmul = ff_vector_fmul_sse;
fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_sse;
@@ -117,5 +113,6 @@ av_cold void ff_float_dsp_init_x86(AVFloatDSPContext *fdsp)
fdsp->vector_fmac_scalar = ff_vector_fmac_scalar_fma3;
fdsp->vector_fmul_add = ff_vector_fmul_add_fma3;
fdsp->vector_dmac_scalar = ff_vector_dmac_scalar_fma3;
+ fdsp->scalarproduct_float = ff_scalarproduct_float_fma3;
}
}
diff --git a/media/ffvpx/libavutil/x86/imgutils_init.c b/media/ffvpx/libavutil/x86/imgutils_init.c
index 4ea398205e..91a16cf594 100644
--- a/media/ffvpx/libavutil/x86/imgutils_init.c
+++ b/media/ffvpx/libavutil/x86/imgutils_init.c
@@ -21,9 +21,8 @@
#include "libavutil/cpu.h"
#include "libavutil/error.h"
-#include "libavutil/imgutils.h"
#include "libavutil/imgutils_internal.h"
-#include "libavutil/internal.h"
+#include "libavutil/macros.h"
#include "cpu.h"
diff --git a/media/ffvpx/libavutil/x86/intmath.h b/media/ffvpx/libavutil/x86/intmath.h
index 40743fd13e..8a6b5ae261 100644
--- a/media/ffvpx/libavutil/x86/intmath.h
+++ b/media/ffvpx/libavutil/x86/intmath.h
@@ -110,8 +110,8 @@ static av_always_inline av_const double av_clipd_sse2(double a, double amin, dou
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort();
#endif
- __asm__ ("minsd %2, %0 \n\t"
- "maxsd %1, %0 \n\t"
+ __asm__ ("maxsd %1, %0 \n\t"
+ "minsd %2, %0 \n\t"
: "+&x"(a) : "xm"(amin), "xm"(amax));
return a;
}
@@ -126,14 +126,44 @@ static av_always_inline av_const float av_clipf_sse(float a, float amin, float a
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
if (amin > amax) abort();
#endif
- __asm__ ("minss %2, %0 \n\t"
- "maxss %1, %0 \n\t"
+ __asm__ ("maxss %1, %0 \n\t"
+ "minss %2, %0 \n\t"
: "+&x"(a) : "xm"(amin), "xm"(amax));
return a;
}
#endif /* __SSE__ */
+#if defined(__AVX__) && !defined(__INTEL_COMPILER)
+
+#undef av_clipd
+#define av_clipd av_clipd_avx
+static av_always_inline av_const double av_clipd_avx(double a, double amin, double amax)
+{
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ __asm__ ("vmaxsd %1, %0, %0 \n\t"
+ "vminsd %2, %0, %0 \n\t"
+ : "+&x"(a) : "xm"(amin), "xm"(amax));
+ return a;
+}
+
+#undef av_clipf
+#define av_clipf av_clipf_avx
+static av_always_inline av_const float av_clipf_avx(float a, float amin, float amax)
+{
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ __asm__ ("vmaxss %1, %0, %0 \n\t"
+ "vminss %2, %0, %0 \n\t"
+ : "+&x"(a) : "xm"(amin), "xm"(amax));
+ return a;
+}
+
+#endif /* __AVX__ */
+
#endif /* __GNUC__ */
#endif /* AVUTIL_X86_INTMATH_H */
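
A plain-C sketch (not part of the patch) of the clamp order these asm versions now use: max against amin first, then min against amax. Note that the SSE/AVX min/max instructions return the second source operand when a NaN is involved, a detail the plain C comparisons below do not reproduce.

static inline float clipf_ref(float a, float amin, float amax)
{
    a = a > amin ? a : amin;   /* maxss/vmaxss %1, %0 */
    a = a < amax ? a : amax;   /* minss/vminss %2, %0 */
    return a;
}
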
diff --git a/media/ffvpx/libavutil/x86/intreadwrite.h b/media/ffvpx/libavutil/x86/intreadwrite.h
index 4061d19231..40f375b013 100644
--- a/media/ffvpx/libavutil/x86/intreadwrite.h
+++ b/media/ffvpx/libavutil/x86/intreadwrite.h
@@ -29,6 +29,8 @@
#if !HAVE_FAST_64BIT && defined(__MMX__)
+#define FF_COPY_SWAP_ZERO_USES_MMX
+
#define AV_COPY64 AV_COPY64
static av_always_inline void AV_COPY64(void *d, const void *s)
{
diff --git a/media/ffvpx/libavutil/x86/lls.asm b/media/ffvpx/libavutil/x86/lls.asm
index 317fba6fca..e8141e6c4f 100644
--- a/media/ffvpx/libavutil/x86/lls.asm
+++ b/media/ffvpx/libavutil/x86/lls.asm
@@ -20,7 +20,7 @@
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
-%include "x86util.asm"
+%include "libavutil/x86/x86util.asm"
SECTION .text
@@ -123,7 +123,7 @@ cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
test id, id
jle .loop2x1
.ret:
- REP_RET
+ RET
%macro UPDATE_LLS 0
cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
@@ -240,7 +240,7 @@ cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
cmp id, countd
jle .loop2x1
.ret:
- REP_RET
+ RET
%endmacro ; UPDATE_LLS
%if HAVE_AVX_EXTERNAL
diff --git a/media/ffvpx/libavutil/x86/lls_init.c b/media/ffvpx/libavutil/x86/lls_init.c
index 1c5dca42dc..c786376915 100644
--- a/media/ffvpx/libavutil/x86/lls_init.c
+++ b/media/ffvpx/libavutil/x86/lls_init.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/attributes.h"
#include "libavutil/lls.h"
#include "libavutil/x86/cpu.h"
diff --git a/media/ffvpx/libavutil/x86/pixelutils.asm b/media/ffvpx/libavutil/x86/pixelutils.asm
index 36c57c5f7f..fbe9b45971 100644
--- a/media/ffvpx/libavutil/x86/pixelutils.asm
+++ b/media/ffvpx/libavutil/x86/pixelutils.asm
@@ -21,49 +21,11 @@
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
-%include "x86util.asm"
+%include "libavutil/x86/x86util.asm"
SECTION .text
;-------------------------------------------------------------------------------
-; int ff_pixelutils_sad_8x8_mmx(const uint8_t *src1, ptrdiff_t stride1,
-; const uint8_t *src2, ptrdiff_t stride2);
-;-------------------------------------------------------------------------------
-INIT_MMX mmx
-cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
- pxor m7, m7
- pxor m6, m6
-%rep 4
- mova m0, [src1q]
- mova m2, [src1q + stride1q]
- mova m1, [src2q]
- mova m3, [src2q + stride2q]
- psubusb m4, m0, m1
- psubusb m5, m2, m3
- psubusb m1, m0
- psubusb m3, m2
- por m1, m4
- por m3, m5
- punpcklbw m0, m1, m7
- punpcklbw m2, m3, m7
- punpckhbw m1, m7
- punpckhbw m3, m7
- paddw m0, m1
- paddw m2, m3
- paddw m0, m2
- paddw m6, m0
- lea src1q, [src1q + 2*stride1q]
- lea src2q, [src2q + 2*stride2q]
-%endrep
- psrlq m0, m6, 32
- paddw m6, m0
- psrlq m0, m6, 16
- paddw m6, m0
- movd eax, m6
- movzx eax, ax
- RET
-
-;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_8x8_mmxext(const uint8_t *src1, ptrdiff_t stride1,
; const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
@@ -84,26 +46,6 @@ cglobal pixelutils_sad_8x8, 4,4,0, src1, stride1, src2, stride2
RET
;-------------------------------------------------------------------------------
-; int ff_pixelutils_sad_16x16_mmxext(const uint8_t *src1, ptrdiff_t stride1,
-; const uint8_t *src2, ptrdiff_t stride2);
-;-------------------------------------------------------------------------------
-INIT_MMX mmxext
-cglobal pixelutils_sad_16x16, 4,4,0, src1, stride1, src2, stride2
- pxor m2, m2
-%rep 16
- mova m0, [src1q]
- mova m1, [src1q + 8]
- psadbw m0, [src2q]
- psadbw m1, [src2q + 8]
- paddw m2, m0
- paddw m2, m1
- add src1q, stride1q
- add src2q, stride2q
-%endrep
- movd eax, m2
- RET
-
-;-------------------------------------------------------------------------------
; int ff_pixelutils_sad_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
; const uint8_t *src2, ptrdiff_t stride2);
;-------------------------------------------------------------------------------
diff --git a/media/ffvpx/libavutil/x86/pixelutils_init.c b/media/ffvpx/libavutil/x86/pixelutils_init.c
index 184a3a4a9f..c3c0662414 100644
--- a/media/ffvpx/libavutil/x86/pixelutils_init.c
+++ b/media/ffvpx/libavutil/x86/pixelutils_init.c
@@ -21,13 +21,9 @@
#include "pixelutils.h"
#include "cpu.h"
-int ff_pixelutils_sad_8x8_mmx(const uint8_t *src1, ptrdiff_t stride1,
- const uint8_t *src2, ptrdiff_t stride2);
int ff_pixelutils_sad_8x8_mmxext(const uint8_t *src1, ptrdiff_t stride1,
const uint8_t *src2, ptrdiff_t stride2);
-int ff_pixelutils_sad_16x16_mmxext(const uint8_t *src1, ptrdiff_t stride1,
- const uint8_t *src2, ptrdiff_t stride2);
int ff_pixelutils_sad_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
const uint8_t *src2, ptrdiff_t stride2);
int ff_pixelutils_sad_a_16x16_sse2(const uint8_t *src1, ptrdiff_t stride1,
@@ -53,10 +49,6 @@ void ff_pixelutils_sad_init_x86(av_pixelutils_sad_fn *sad, int aligned)
{
int cpu_flags = av_get_cpu_flags();
- if (EXTERNAL_MMX(cpu_flags)) {
- sad[2] = ff_pixelutils_sad_8x8_mmx;
- }
-
// The best way to use SSE2 would be to do 2 SADs in parallel,
// but we'd have to modify the pixelutils API to return SIMD functions.
@@ -65,7 +57,6 @@ void ff_pixelutils_sad_init_x86(av_pixelutils_sad_fn *sad, int aligned)
// so just use the MMX 8x8 version even when SSE2 is available.
if (EXTERNAL_MMXEXT(cpu_flags)) {
sad[2] = ff_pixelutils_sad_8x8_mmxext;
- sad[3] = ff_pixelutils_sad_16x16_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags)) {
diff --git a/media/ffvpx/libavutil/x86/x86inc.asm b/media/ffvpx/libavutil/x86/x86inc.asm
index 5044ee86f0..251ee797de 100644
--- a/media/ffvpx/libavutil/x86/x86inc.asm
+++ b/media/ffvpx/libavutil/x86/x86inc.asm
@@ -411,16 +411,6 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
%endif
%endmacro
-%macro DEFINE_ARGS_INTERNAL 3+
- %ifnum %2
- DEFINE_ARGS %3
- %elif %1 == 4
- DEFINE_ARGS %2
- %elif %1 > 4
- DEFINE_ARGS %2, %3
- %endif
-%endmacro
-
%if WIN64 ; Windows x64 ;=================================================
DECLARE_REG 0, rcx
@@ -439,7 +429,7 @@ DECLARE_REG 12, R15, 104
DECLARE_REG 13, R12, 112
DECLARE_REG 14, R13, 120
-%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+%macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
%assign num_args %1
%assign regs_used %2
ASSERT regs_used >= num_args
@@ -451,7 +441,15 @@ DECLARE_REG 14, R13, 120
WIN64_SPILL_XMM %3
%endif
LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- DEFINE_ARGS_INTERNAL %0, %4, %5
+ %if %0 > 4
+ %ifnum %4
+ DEFINE_ARGS %5
+ %else
+ DEFINE_ARGS %4, %5
+ %endif
+ %elifnnum %4
+ DEFINE_ARGS %4
+ %endif
%endmacro
%macro WIN64_PUSH_XMM 0
@@ -547,7 +545,7 @@ DECLARE_REG 12, R15, 56
DECLARE_REG 13, R12, 64
DECLARE_REG 14, R13, 72
-%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+%macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
%assign num_args %1
%assign regs_used %2
%assign xmm_regs_used %3
@@ -557,7 +555,15 @@ DECLARE_REG 14, R13, 72
PUSH_IF_USED 9, 10, 11, 12, 13, 14
ALLOC_STACK %4
LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
- DEFINE_ARGS_INTERNAL %0, %4, %5
+ %if %0 > 4
+ %ifnum %4
+ DEFINE_ARGS %5
+ %else
+ DEFINE_ARGS %4, %5
+ %endif
+ %elifnnum %4
+ DEFINE_ARGS %4
+ %endif
%endmacro
%define has_epilogue regs_used > 9 || stack_size > 0 || vzeroupper_required
@@ -598,7 +604,7 @@ DECLARE_REG 6, ebp, 28
DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
-%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+%macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
%assign num_args %1
%assign regs_used %2
ASSERT regs_used >= num_args
@@ -613,7 +619,15 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
PUSH_IF_USED 3, 4, 5, 6
ALLOC_STACK %4
LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
- DEFINE_ARGS_INTERNAL %0, %4, %5
+ %if %0 > 4
+ %ifnum %4
+ DEFINE_ARGS %5
+ %else
+ DEFINE_ARGS %4, %5
+ %endif
+ %elifnnum %4
+ DEFINE_ARGS %4
+ %endif
%endmacro
%define has_epilogue regs_used > 3 || stack_size > 0 || vzeroupper_required
@@ -803,32 +817,33 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae,
; cpuflags
-%assign cpuflags_mmx (1<<0)
-%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
-%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
-%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
-%assign cpuflags_sse (1<<4) | cpuflags_mmx2
-%assign cpuflags_sse2 (1<<5) | cpuflags_sse
-%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
-%assign cpuflags_lzcnt (1<<7) | cpuflags_sse2
-%assign cpuflags_sse3 (1<<8) | cpuflags_sse2
-%assign cpuflags_ssse3 (1<<9) | cpuflags_sse3
-%assign cpuflags_sse4 (1<<10)| cpuflags_ssse3
-%assign cpuflags_sse42 (1<<11)| cpuflags_sse4
-%assign cpuflags_aesni (1<<12)| cpuflags_sse42
-%assign cpuflags_avx (1<<13)| cpuflags_sse42
-%assign cpuflags_xop (1<<14)| cpuflags_avx
-%assign cpuflags_fma4 (1<<15)| cpuflags_avx
-%assign cpuflags_fma3 (1<<16)| cpuflags_avx
-%assign cpuflags_bmi1 (1<<17)| cpuflags_avx|cpuflags_lzcnt
-%assign cpuflags_bmi2 (1<<18)| cpuflags_bmi1
-%assign cpuflags_avx2 (1<<19)| cpuflags_fma3|cpuflags_bmi2
-%assign cpuflags_avx512 (1<<20)| cpuflags_avx2 ; F, CD, BW, DQ, VL
-
-%assign cpuflags_cache32 (1<<21)
-%assign cpuflags_cache64 (1<<22)
-%assign cpuflags_aligned (1<<23) ; not a cpu feature, but a function variant
-%assign cpuflags_atom (1<<24)
+%assign cpuflags_mmx (1<<0)
+%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
+%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
+%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
+%assign cpuflags_sse (1<<4) | cpuflags_mmx2
+%assign cpuflags_sse2 (1<<5) | cpuflags_sse
+%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
+%assign cpuflags_lzcnt (1<<7) | cpuflags_sse2
+%assign cpuflags_sse3 (1<<8) | cpuflags_sse2
+%assign cpuflags_ssse3 (1<<9) | cpuflags_sse3
+%assign cpuflags_sse4 (1<<10)| cpuflags_ssse3
+%assign cpuflags_sse42 (1<<11)| cpuflags_sse4
+%assign cpuflags_aesni (1<<12)| cpuflags_sse42
+%assign cpuflags_avx (1<<13)| cpuflags_sse42
+%assign cpuflags_xop (1<<14)| cpuflags_avx
+%assign cpuflags_fma4 (1<<15)| cpuflags_avx
+%assign cpuflags_fma3 (1<<16)| cpuflags_avx
+%assign cpuflags_bmi1 (1<<17)| cpuflags_avx|cpuflags_lzcnt
+%assign cpuflags_bmi2 (1<<18)| cpuflags_bmi1
+%assign cpuflags_avx2 (1<<19)| cpuflags_fma3|cpuflags_bmi2
+%assign cpuflags_avx512 (1<<20)| cpuflags_avx2 ; F, CD, BW, DQ, VL
+%assign cpuflags_avx512icl (1<<25)| cpuflags_avx512
+
+%assign cpuflags_cache32 (1<<21)
+%assign cpuflags_cache64 (1<<22)
+%assign cpuflags_aligned (1<<23) ; not a cpu feature, but a function variant
+%assign cpuflags_atom (1<<24)
; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
%define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)