30 #include <DeckLinkAPI.h>
51 #define MAX_WIDTH_VANC 1920
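// The VANCLineNumber table below appears to map each BMDDisplayMode to its VANC line
// ranges, seemingly in the order {mode, vanc_start, field0_vanc_end, field1_vanc_start,
// vanc_end}; -1 presumably marks entries that do not apply (e.g. the second field of
// progressive modes).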
69 {bmdModeNTSC, 11, 19, 274, 282},
70 {bmdModeNTSC2398, 11, 19, 274, 282},
71 {bmdModePAL, 7, 22, 320, 335},
72 {bmdModeNTSCp, 11, -1, -1, 39},
73 {bmdModePALp, 7, -1, -1, 45},
77 {bmdModeHD1080p2398, 8, -1, -1, 42},
78 {bmdModeHD1080p24, 8, -1, -1, 42},
79 {bmdModeHD1080p25, 8, -1, -1, 42},
80 {bmdModeHD1080p2997, 8, -1, -1, 42},
81 {bmdModeHD1080p30, 8, -1, -1, 42},
82 {bmdModeHD1080i50, 8, 20, 570, 585},
83 {bmdModeHD1080i5994, 8, 20, 570, 585},
84 {bmdModeHD1080i6000, 8, 20, 570, 585},
85 {bmdModeHD1080p50, 8, -1, -1, 42},
86 {bmdModeHD1080p5994, 8, -1, -1, 42},
87 {bmdModeHD1080p6000, 8, -1, -1, 42},
91 {bmdModeHD720p50, 8, -1, -1, 26},
92 {bmdModeHD720p5994, 8, -1, -1, 26},
93 {bmdModeHD720p60, 8, -1, -1, 26},
96 {bmdModeUnknown, 0, -1, -1, -1}
103 if (mode == vanc_line_numbers[i].mode)
112 for (i = 0; i < len; i++)
118 uint16_t vanc_sum = 0;
119 for (i = 3; i < len - 1; i++) {
123 if ((!!p ^ !!(v & 0x100)) || (np != 1 && np != 2)) {
130 vanc_sum |= ((~vanc_sum & 0x100) << 1);
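// The line above appears to implement the SMPTE 291-style ancillary checksum: the 9-bit
// sum of the data words is kept and bit 9 is set to the complement of bit 8, so it can be
// compared directly against the 10-bit checksum word carried in the stream.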
131 if (checksum != vanc_sum) {
142 for (i = 0; i < width / 3; i++) {
143 *dst++ = (src[1] >> 2) + ((src[2] & 15) << 6);
144 *dst++ = src[4] + ((src[5] & 3) << 8);
145 *dst++ = (src[6] >> 4) + ((src[7] & 63) << 4);
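// v210 packs six 4:2:2 pixels (twelve 10-bit components) into a 16-byte group, three
// components per little-endian 32-bit word. Each iteration above extracts the three luma
// samples carried by two such words (8 bytes); the per-iteration advance of src is in a
// line omitted from this excerpt.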
152 uint8_t ret = (line < 313) << 5;
153 if (line >= 7 && line <= 22)
155 if (line >= 320 && line <= 335)
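// This helper appears to build the teletext data unit's field/line byte: bit 5 is the
// field parity (set for the first field, i.e. lines below 313) and the low five bits carry
// the line offset within that field, as used by EN 301 775 style VBI data units.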
171 vbi_bit_slicer slicer;
173 vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);
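// Presumably the slicer is set up for standard WST teletext on a 720-sample, 13.5 MHz luma
// line: a 6.9375 Mbit/s bit rate, the 0x00aaaae4 clock run-in plus framing code (18 CRI
// bits, 6 FRC bits), a 42-byte payload and NRZ-MSB modulation, with the sample layout
// given by fmt.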
175 if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
192 *py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
193 *py++ = (src[4] >> 2) + ((src[5] & 3 ) << 6);
194 *py++ = (src[6] >> 6) + ((src[7] & 63) << 2);
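// Same v210 walk as extract_luma_from_v210 above, except that only the top 8 bits of each
// 10-bit luma sample are kept, apparently because the libzvbi bit slicer is fed 8-bit
// samples on this path.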
197 return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
205 if (py[0] != 0x255 || py[1] != 0x255 || py[2] != 0x227)
213 for (i = 0; i < 42; i++)
222 if (line >= 6 && line <= 22)
224 if (line >= 318 && line <= 335)
225 shift = line - 318 + 17;
226 return shift >= 0 && ((1ULL << shift) & mask);
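// teletext_lines is treated as a bitmask over the capturable VBI lines: lines 6-22 of the
// first field map to bits 0-16 and lines 318-335 of the second field to bits 17-34, so a
// line is requested when its bit is set in the mask.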
232 if (py[0] == 0x151 && py[1] == 0x115 && py[3] == 0x102) {
233 uint16_t *descriptors = py + 4;
236 for (i = 0; i < 5 && py < pend - 45; i++, py += 45) {
237 int line = (descriptors[i] & 31) + (!(descriptors[i] & 128)) * 313;
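// Each OP-47 descriptor byte appears to encode the target line in its low five bits and
// the field in bit 7; when bit 7 is clear, 313 is added to place the line in the second
// field.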
248 uint16_t did = py[0];
249 uint16_t sdid = py[1];
250 uint16_t dc = py[2] & 255;
252 pend = FFMIN(pend, py + dc);
253 if (did == 0x143 && sdid == 0x102) {
255 } else if (allow_multipacket && did == 0x143 && sdid == 0x203) {
257 while (py < pend - 3) {
259 py += 4 + (py[2] & 255);
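// The 0x143/0x102/0x203 comparisons above match the 10-bit ancillary words with their
// parity bits still set; stripped to 8 bits they correspond to DID 0x43 with SDID 0x02
// (OP-47 subtitle distribution packet) and SDID 0x03 (OP-47 multipacket), the same values
// get_metadata() checks later after masking with 0xFF.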
268 size_t i, len = (buf[5] & 0xff) + 6 + 1;
272 uint16_t *cdp = &buf[6];
273 if (cdp[0] != 0x96 || cdp[1] != 0x69) {
286 for (i = 0; i < len - 1; i++)
288 cdp_sum = cdp_sum ? 256 - cdp_sum : 0;
289 if (cdp[len - 1] != cdp_sum) {
295 if (!(rate & 0x0f)) {
305 if (!(cdp[4] & 0x43)) {
310 hdr = (cdp[5] << 8) | cdp[6];
311 if (cdp[7] != 0x72) {
317 if (!(cc_count & 0xe0)) {
323 if ((len - 13) < cc_count * 3) {
328 if (cdp[len - 4] != 0x74) {
333 ftr = (cdp[len - 3] << 8) | cdp[len - 2];
345 for (size_t i = 0; i < cc_count; i++) {
346 cc[3*i + 0] = cdp[9 + 3*i+0];
347 cc[3*i + 1] = cdp[9 + 3*i+1];
348 cc[3*i + 2] = cdp[9 + 3*i+2];
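// The indexing above follows the SMPTE 334-2 caption distribution packet layout implied by
// the checks earlier in this function: cdp[0..1] = 0x96 0x69 identifier, cdp[2] = length,
// cdp[3] = frame rate code (high nibble), cdp[4] = flags, cdp[5..6] = sequence counter,
// cdp[7] = 0x72 ccdata section id, cdp[8] = marker bits plus cc_count, then cc_count
// three-byte cc triplets from cdp[9], a 0x74 footer echoing the sequence counter, and a
// one-byte packet checksum at the end.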
359 uint16_t *max_buf = buf + width;
361 while (buf < max_buf - 6) {
363 uint16_t did = buf[3] & 0xFF;
364 uint16_t sdid = buf[4] & 0xFF;
366 if (buf[0] != 0 || buf[1] != 0x3ff || buf[2] != 0x3ff) {
370 len = (buf[5] & 0xff) + 6 + 1;
371 if (len > max_buf - buf) {
377 if (did == 0x43 && (sdid == 0x02 || sdid == 0x03) && cctx->teletext_lines &&
378 width == 1920 && tgt_size >= 1920) {
384 } else if (did == 0x61 && sdid == 0x01) {
385 unsigned int data_len;
392 data = vanc_to_cc(avctx, buf, width, data_len);
444 unsigned long long size;
532 virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
584 IDeckLinkAudioInputPacket *audioFrame,
590 BMDTimeValue bmd_pts;
591 BMDTimeValue bmd_duration;
596 res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
600 res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
604 res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
618 pts = bmd_pts / time_base.num;
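// The DeckLink time queries above return values in units of the time scale passed in
// (time_base.den here), so dividing by time_base.num converts bmd_pts into ticks of the
// stream's time_base.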
629 IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
632 void *audioFrameBytes;
633 BMDTimeValue frameTime;
634 BMDTimeValue frameDuration;
635 int64_t wallclock = 0;
648 "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
650 videoFrame->GetRowBytes() * videoFrame->GetHeight(),
651 (double)qsize / 1024 / 1024);
654 videoFrame->GetBytes(&frameBytes);
655 videoFrame->GetStreamTime(&frameTime, &frameDuration,
658 if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
659 if (ctx->draw_bars && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
661 0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
662 0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
663 int width = videoFrame->GetWidth();
664 int height = videoFrame->GetHeight();
665 unsigned *p = (unsigned *)frameBytes;
667 for (int y = 0; y < height; y++) {
668 for (int x = 0; x < width; x += 2)
669 *p++ = bars[(x * 8) / width];
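// Each 32-bit entry in bars[] packs two horizontally adjacent pixels in UYVY order, which
// is why x advances by 2 per store; (x * 8) / width selects one of the eight colour bars
// across the frame width.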
694 pkt.size = videoFrame->GetRowBytes() * videoFrame->GetHeight();
699 IDeckLinkVideoFrameAncillary *vanc;
704 if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
706 int64_t line_mask = 1;
707 BMDPixelFormat vanc_format = vanc->GetPixelFormat();
712 (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
714 for (i = 6; i < 336; i++, line_mask <<= 1) {
716 if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
717 if (vanc_format == bmdFormat8BitYUV)
718 txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
720 txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
727 if (vanc_format == bmdFormat10BitYUV && videoFrame->GetWidth() <= MAX_WIDTH_VANC) {
729 for (i = vanc_line_numbers[idx].vanc_start; i <= vanc_line_numbers[idx].vanc_end; i++) {
731 if (vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
735 txt_buf, sizeof(txt_buf0) - (txt_buf - txt_buf0), &pkt);
737 if (i == vanc_line_numbers[idx].field0_vanc_end)
742 if (txt_buf - txt_buf0 > 1) {
743 int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
744 while (stuffing_units--) {
745 memset(txt_buf, 0xff, 46);
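// Each stuffing unit is a 46-byte data unit filled with 0xFF; padding to a multiple of
// four such units keeps the 45-byte PES header plus payload at a multiple of 184 bytes,
// as DVB teletext carriage expects. The data_unit_length fix-up and the txt_buf advance
// are in lines omitted from this excerpt.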
753 txt_pkt.data = txt_buf0;
754 txt_pkt.size = txt_buf - txt_buf0;
770 BMDTimeValue audio_pts;
775 audioFrame->GetBytes(&audioFrameBytes);
794 BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
795 BMDDetectedVideoInputFormatFlags)
806 ctx->dli->SetCallback(ctx->input_callback);
807 return ctx->dli->StartStreams();
817 if (ctx->capture_started) {
818 ctx->dli->StopStreams();
819 ctx->dli->DisableVideoInput();
820 ctx->dli->DisableAudioInput();
871 if (ctx->list_devices) {
877 av_log(avctx, AV_LOG_WARNING, "The bm_v210 option is deprecated and will be removed. Please use the -raw_format yuv422p10.\n");
882 tmp=strchr (fname, '@');
884 av_log(avctx, AV_LOG_WARNING, "The @mode syntax is deprecated and will be removed. Please use the -format_code option.\n");
885 mode_num = atoi (tmp+1);
894 if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
902 if (ctx->list_formats) {
910 av_log(avctx, AV_LOG_ERROR, "Could not set mode number %d or format code %s for %s\n",
918 if (ctx->teletext_lines && ctx->bmd_mode == bmdModePAL) {
919 av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing SD PAL teletext, please recompile FFmpeg.\n");
934 st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
946 st->codecpar->width = ctx->bmd_width;
947 st->codecpar->height = ctx->bmd_height;
949 st->time_base.den = ctx->bmd_tb_den;
950 st->time_base.num = ctx->bmd_tb_num;
954 case bmdFormat8BitYUV:
956 st->codecpar->codec_tag = MKTAG('U', 'Y', 'V', 'Y');
958 st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
960 case bmdFormat10BitYUV:
962 st->codecpar->codec_tag = MKTAG('V', '2', '1', '0');
963 st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
964 st->codecpar->bits_per_coded_sample = 10;
966 case bmdFormat8BitARGB:
970 st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
972 case bmdFormat8BitBGRA:
976 st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num);
978 case bmdFormat10BitRGB:
980 st->codecpar->codec_tag = MKTAG('R', '2', '1', '0');
982 st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 30, st->time_base.den, st->time_base.num);
983 st->codecpar->bits_per_coded_sample = 10;
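// The nominal bit_rate values above are bits per frame times frame rate: 16 bits per pixel
// for UYVY, 64 bits per 3 pixels for v210 (six pixels per 16-byte group), 32 bits per pixel
// for the 8-bit ARGB/BGRA formats, and 30 payload bits per pixel for r210.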
991 switch (ctx->bmd_field_dominance) {
992 case bmdUpperFieldFirst:
995 case bmdLowerFieldFirst:
998 case bmdProgressiveFrame:
999 case bmdProgressiveSegmentedFrame:
1008 if (ctx->teletext_lines) {
1016 st->time_base.den = ctx->bmd_tb_den;
1017 st->time_base.num = ctx->bmd_tb_num;
1020 ctx->teletext_st = st;
1024 result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);
1026 if (result != S_OK) {
1032 result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
1034 bmdVideoInputFlagDefault);
1036 if (result != S_OK) {