提交 f23762b9 authored 作者: 祁增奎

Android替换

上级 5e4362bc
......@@ -22,14 +22,14 @@ external libraries enabled.
##### 2.1.1 Android Tools
- Android SDK Build Tools
- Android NDK r22b or later with LLDB and CMake (See [#292](https://github.com/arthenica/ffmpeg-kit/issues/292) if you want to use NDK r23b)
- Android NDK r22b or later with LLDB and CMake (See [#292](https://github.com/arthenica/ffmpeg-kit/issues/292) if you want to use NDK r23b or later)
##### 2.1.2 Packages
Use your package manager (apt, yum, dnf, brew, etc.) to install the following packages.
```
autoconf automake libtool pkg-config curl cmake gcc gperf texinfo yasm nasm bison autogen git wget autopoint meson ninja
autoconf automake libtool pkg-config curl git doxygen nasm cmake gcc gperf texinfo yasm bison autogen wget autopoint meson ninja ragel groff gtk-doc-tools libtasn1
```
##### 2.1.3 Environment Variables
......@@ -77,7 +77,7 @@ All libraries created by `android.sh` can be found under the `prebuilt` director
}
dependencies {
implementation 'com.arthenica:ffmpeg-kit-full:5.1'
implementation 'com.arthenica:ffmpeg-kit-full:6.0-2'
}
```
......
......@@ -38,7 +38,7 @@ PROJECT_NAME = "FFmpegKit Android API"
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = 5.1
PROJECT_NUMBER = 6.0
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
......
-keep class com.arthenica.ffmpegkit.FFmpegKitConfig {
native <methods>;
void log(long, int, byte[]);
void statistics(long, int, float, float, long , int, double, double);
void statistics(long, int, float, float, long , double, double, double);
int safOpen(int);
int safClose(int);
}
......
......@@ -45,7 +45,7 @@ struct CallbackData {
float statisticsFps; // statistics fps
float statisticsQuality; // statistics quality
int64_t statisticsSize; // statistics size
int statisticsTime; // statistics time
double statisticsTime; // statistics time
double statisticsBitrate; // statistics bitrate
double statisticsSpeed; // statistics speed
......@@ -312,7 +312,7 @@ void logCallbackDataAdd(int level, AVBPrint *data) {
/**
* Adds statistics data to the end of callback data list.
*/
void statisticsCallbackDataAdd(int frameNumber, float fps, float quality, int64_t size, int time, double bitrate, double speed) {
void statisticsCallbackDataAdd(int frameNumber, float fps, float quality, int64_t size, double time, double bitrate, double speed) {
// CREATE DATA STRUCT FIRST
struct CallbackData *newData = (struct CallbackData*)av_malloc(sizeof(struct CallbackData));
......@@ -491,7 +491,7 @@ void ffmpegkit_log_callback_function(void *ptr, int level, const char* format, v
* @param bitrate output bit rate in kbits/s
* @param speed processing speed = processed duration / operation duration
*/
void ffmpegkit_statistics_callback_function(int frameNumber, float fps, float quality, int64_t size, int time, double bitrate, double speed) {
void ffmpegkit_statistics_callback_function(int frameNumber, float fps, float quality, int64_t size, double time, double bitrate, double speed) {
statisticsCallbackDataAdd(frameNumber, fps, quality, size, time, bitrate, speed);
}
......@@ -581,6 +581,30 @@ int saf_close(int fd) {
return (*env)->CallStaticIntMethod(env, configClass, safCloseMethod, fd);
}
/**
 * Enables native log/statistics redirection exactly once.
 *
 * Used by JNI methods to enable redirection. Starts the callback thread and
 * installs the FFmpeg log and report callbacks. Guarded by the mutex so that
 * concurrent callers race safely on redirectionEnabled.
 */
static void enableNativeRedirection() {
    mutexLock();

    // Already enabled by an earlier caller - nothing to do.
    if (redirectionEnabled != 0) {
        mutexUnlock();
        return;
    }
    redirectionEnabled = 1;
    mutexUnlock();

    int rc = pthread_create(&callbackThread, 0, callbackThreadFunction, 0);
    if (rc != 0) {
        LOGE("Failed to create callback thread (rc=%d).\n", rc);

        // Fix: roll the flag back so a later attempt can retry; otherwise
        // redirection would be permanently reported as enabled while neither
        // the callback thread nor the callbacks below were ever set up.
        mutexLock();
        redirectionEnabled = 0;
        mutexUnlock();
        return;
    }

    av_log_set_callback(ffmpegkit_log_callback_function);
    set_report_callback(ffmpegkit_statistics_callback_function);
}
/**
* Called when 'ffmpegkit' native library is loaded.
*
......@@ -620,7 +644,7 @@ jint JNI_OnLoad(JavaVM *vm, void *reserved) {
return JNI_FALSE;
}
statisticsMethod = (*env)->GetStaticMethodID(env, localConfigClass, "statistics", "(JIFFJIDD)V");
statisticsMethod = (*env)->GetStaticMethodID(env, localConfigClass, "statistics", "(JIFFJDDD)V");
if (statisticsMethod == NULL) {
LOGE("OnLoad thread failed to GetStaticMethodID for %s.\n", "statistics");
return JNI_FALSE;
......@@ -665,6 +689,8 @@ jint JNI_OnLoad(JavaVM *vm, void *reserved) {
av_set_saf_open(saf_open);
av_set_saf_close(saf_close);
enableNativeRedirection();
return JNI_VERSION_1_6;
}
......@@ -696,24 +722,7 @@ JNIEXPORT jint JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_getNativeLog
* @param object reference to the class on which this method is invoked
*/
JNIEXPORT void JNICALL Java_com_arthenica_ffmpegkit_FFmpegKitConfig_enableNativeRedirection(JNIEnv *env, jclass object) {
mutexLock();
if (redirectionEnabled != 0) {
mutexUnlock();
return;
}
redirectionEnabled = 1;
mutexUnlock();
int rc = pthread_create(&callbackThread, 0, callbackThreadFunction, 0);
if (rc != 0) {
LOGE("Failed to create callback thread (rc=%d).\n", rc);
return;
}
av_log_set_callback(ffmpegkit_log_callback_function);
set_report_callback(ffmpegkit_statistics_callback_function);
enableNativeRedirection();
}
/**
......
......@@ -27,7 +27,7 @@
#include "libavutil/ffversion.h"
/** Library version string */
#define FFMPEG_KIT_VERSION "5.1"
#define FFMPEG_KIT_VERSION "6.0"
/** Defines tag used for Android logging. */
#define LIB_NAME "ffmpeg-kit"
......
/*
* Various utilities for command line tools
* Copyright (c) 2000-2003 Fabrice Bellard
* copyright (c) 2018 Taner Sener ( tanersener gmail com )
* Copyright (c) 2018-2022 Taner Sener
* Copyright (c) 2023 ARTHENICA LTD
*
* This file is part of FFmpeg.
*
......@@ -25,6 +26,12 @@
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop mobile-ffmpeg and later ffmpeg-kit libraries.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
*
* mobile-ffmpeg / ffmpeg-kit changes by Taner Sener
*
* 09.2022
......@@ -129,11 +136,18 @@ void register_exit(void (*cb)(int ret))
program_exit = cb;
}
/**
 * Logs the message for the provided AVERROR code at FATAL level and
 * terminates the program via exit_program(), converting the AVERROR back to
 * the corresponding positive POSIX error code with AVUNERROR().
 *
 * @param ret an AVERROR(EFOO)-style value of a POSIX error code
 */
void report_and_exit(int ret)
{
av_log(NULL, AV_LOG_FATAL, "%s\n", av_err2str(ret));
exit_program(AVUNERROR(ret));
}
void exit_program(int ret)
{
if (program_exit)
program_exit(ret);
// FFmpegKit
// exit disabled and replaced with longjmp, exit value stored in longjmp_value
// exit(ret);
longjmp_value = ret;
......@@ -696,7 +710,7 @@ static void init_parse_context(OptionParseContext *octx,
octx->nb_groups = nb_groups;
octx->groups = av_calloc(octx->nb_groups, sizeof(*octx->groups));
if (!octx->groups)
exit_program(1);
report_and_exit(AVERROR(ENOMEM));
for (i = 0; i < octx->nb_groups; i++)
octx->groups[i].group_def = &groups[i];
......@@ -843,12 +857,7 @@ do { \
void print_error(const char *filename, int err)
{
char errbuf[128];
const char *errbuf_ptr = errbuf;
if (av_strerror(err, errbuf, sizeof(errbuf)) < 0)
errbuf_ptr = strerror(AVUNERROR(err));
av_log(NULL, AV_LOG_ERROR, "%s: %s\n", filename, errbuf_ptr);
av_log(NULL, AV_LOG_ERROR, "%s: %s\n", filename, av_err2str(err));
}
int read_yesno(void)
......@@ -971,7 +980,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
break;
}
while ((t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX))) {
while ((t = av_dict_iterate(opts, t))) {
const AVClass *priv_class;
char *p = strchr(t->key, ':');
......@@ -1009,11 +1018,8 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
if (!s->nb_streams)
return NULL;
opts = av_calloc(s->nb_streams, sizeof(*opts));
if (!opts) {
av_log(NULL, AV_LOG_ERROR,
"Could not alloc memory for stream options.\n");
exit_program(1);
}
if (!opts)
report_and_exit(AVERROR(ENOMEM));
for (i = 0; i < s->nb_streams; i++)
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id,
s, s->streams[i], NULL);
......@@ -1028,10 +1034,8 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
}
if (*size < new_size) {
uint8_t *tmp = av_realloc_array(array, new_size, elem_size);
if (!tmp) {
av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
exit_program(1);
}
if (!tmp)
report_and_exit(AVERROR(ENOMEM));
memset(tmp + *size*elem_size, 0, (new_size-*size) * elem_size);
*size = new_size;
return tmp;
......@@ -1044,10 +1048,8 @@ void *allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
void *new_elem;
if (!(new_elem = av_mallocz(elem_size)) ||
av_dynarray_add_nofree(ptr, nb_elems, new_elem) < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
exit_program(1);
}
av_dynarray_add_nofree(ptr, nb_elems, new_elem) < 0)
report_and_exit(AVERROR(ENOMEM));
return new_elem;
}
......
/*
* Various utilities for command line tools
* copyright (c) 2003 Fabrice Bellard
* copyright (c) 2018 Taner Sener ( tanersener gmail com )
* copyright (c) 2018-2022 Taner Sener
* copyright (c) 2023 ARTHENICA LTD
*
* This file is part of FFmpeg.
*
......@@ -25,6 +26,12 @@
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop mobile-ffmpeg and later ffmpeg-kit libraries.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
*
* mobile-ffmpeg / ffmpeg-kit changes by Taner Sener
*
* 09.2022
......@@ -95,6 +102,17 @@ extern __thread int find_stream_info;
*/
void register_exit(void (*cb)(int ret));
/**
* Reports an error corresponding to the provided
* AVERROR code and calls exit_program() with the
* corresponding POSIX error code.
* @note ret must be an AVERROR-value of a POSIX error code
* (i.e. AVERROR(EFOO) and not AVERROR_FOO).
* library functions can return both, so call this only
* with AVERROR(EFOO) of your own.
*/
void report_and_exit(int ret) av_noreturn;
/**
* Wraps exit with a program-specific cleanup routine.
*/
......@@ -232,11 +250,6 @@ void show_help_children(const AVClass *clazz, int flags);
void show_help_default_ffmpeg(const char *opt, const char *arg);
void show_help_default_ffprobe(const char *opt, const char *arg);
/**
* Generic -h handler common to all fftools.
*/
int show_help(void *optctx, const char *opt, const char *arg);
/**
* Parse the command line arguments.
*
......
This source diff could not be displayed because it is too large. You can view the blob instead.
/*
* This file is part of FFmpeg.
* Copyright (c) 2018 Taner Sener ( tanersener gmail com )
* Copyright (c) 2018-2022 Taner Sener
* Copyright (c) 2023 ARTHENICA LTD
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
......@@ -22,6 +23,16 @@
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop mobile-ffmpeg and later ffmpeg-kit libraries.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
* - WARN_MULTIPLE_OPT_USAGE, MATCH_PER_STREAM_OPT, MATCH_PER_TYPE_OPT, SPECIFIER_OPT_FMT declarations migrated to
* ffmpeg_mux.h
* - "class" member field renamed as clazz
* - time field in set_report_callback updated as double
*
* mobile-ffmpeg / ffmpeg-kit changes by Taner Sener
*
* 09.2022
......@@ -59,11 +70,13 @@
#include "config.h"
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <signal.h>
#include "fftools_cmdutils.h"
#include "fftools_sync_queue.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
......@@ -85,6 +98,12 @@
#include "libswresample/swresample.h"
// deprecated features
#define FFMPEG_OPT_PSNR 1
#define FFMPEG_OPT_MAP_CHANNEL 1
#define FFMPEG_OPT_MAP_SYNC 1
#define FFMPEG_ROTATION_METADATA 1
enum VideoSyncMethod {
VSYNC_AUTO = -1,
VSYNC_PASSTHROUGH,
......@@ -113,15 +132,15 @@ typedef struct StreamMap {
int disabled; /* 1 is this mapping is disabled by a negative map */
int file_index;
int stream_index;
int sync_file_index;
int sync_stream_index;
char *linklabel; /* name of an output link, for mapping lavfi outputs */
} StreamMap;
#if FFMPEG_OPT_MAP_CHANNEL
typedef struct {
int file_idx, stream_idx, channel_idx; // input
int ofile_idx, ostream_idx; // output
} AudioChannelMap;
#endif
typedef struct OptionsContext {
OptionGroup *g;
......@@ -157,6 +176,7 @@ typedef struct OptionsContext {
int accurate_seek;
int thread_queue_size;
int input_sync_ref;
int find_stream_info;
SpecifierOpt *ts_scale;
int nb_ts_scale;
......@@ -174,11 +194,10 @@ typedef struct OptionsContext {
/* output options */
StreamMap *stream_maps;
int nb_stream_maps;
#if FFMPEG_OPT_MAP_CHANNEL
AudioChannelMap *audio_channel_maps; /* one info entry per -map_channel */
int nb_audio_channel_maps; /* number of (valid) -map_channel settings */
int metadata_global_manual;
int metadata_streams_manual;
int metadata_chapters_manual;
#endif
const char **attachments;
int nb_attachments;
......@@ -186,9 +205,10 @@ typedef struct OptionsContext {
int64_t recording_time;
int64_t stop_time;
uint64_t limit_filesize;
int64_t limit_filesize;
float mux_preload;
float mux_max_delay;
float shortest_buf_duration;
int shortest;
int bitexact;
......@@ -221,6 +241,12 @@ typedef struct OptionsContext {
int nb_force_fps;
SpecifierOpt *frame_aspect_ratios;
int nb_frame_aspect_ratios;
SpecifierOpt *display_rotations;
int nb_display_rotations;
SpecifierOpt *display_hflips;
int nb_display_hflips;
SpecifierOpt *display_vflips;
int nb_display_vflips;
SpecifierOpt *rc_overrides;
int nb_rc_overrides;
SpecifierOpt *intra_matrices;
......@@ -247,6 +273,8 @@ typedef struct OptionsContext {
int nb_reinit_filters;
SpecifierOpt *fix_sub_duration;
int nb_fix_sub_duration;
SpecifierOpt *fix_sub_duration_heartbeat;
int nb_fix_sub_duration_heartbeat;
SpecifierOpt *canvas_sizes;
int nb_canvas_sizes;
SpecifierOpt *pass;
......@@ -275,6 +303,18 @@ typedef struct OptionsContext {
int nb_autoscale;
SpecifierOpt *bits_per_raw_sample;
int nb_bits_per_raw_sample;
SpecifierOpt *enc_stats_pre;
int nb_enc_stats_pre;
SpecifierOpt *enc_stats_post;
int nb_enc_stats_post;
SpecifierOpt *mux_stats;
int nb_mux_stats;
SpecifierOpt *enc_stats_pre_fmt;
int nb_enc_stats_pre_fmt;
SpecifierOpt *enc_stats_post_fmt;
int nb_enc_stats_post_fmt;
SpecifierOpt *mux_stats_fmt;
int nb_mux_stats_fmt;
} OptionsContext;
typedef struct InputFilter {
......@@ -350,12 +390,22 @@ typedef struct InputStream {
#define DECODING_FOR_OST 1
#define DECODING_FOR_FILTER 2
int processing_needed; /* non zero if the packets must be processed */
// should attach FrameData as opaque_ref after decoding
int want_frame_data;
/**
* Codec parameters - to be used by the decoding/streamcopy code.
* st->codecpar should not be accessed, because it may be modified
* concurrently by the demuxing thread.
*/
AVCodecParameters *par;
AVCodecContext *dec_ctx;
const AVCodec *dec;
AVFrame *decoded_frame;
AVPacket *pkt;
AVRational framerate_guessed;
int64_t prev_pkt_pts;
int64_t start; /* time when read started */
/* predicted dts of the next packet read for this stream or (when there are
......@@ -368,6 +418,12 @@ typedef struct InputStream {
int64_t pts; ///< current pts of the decoded frame (in AV_TIME_BASE units)
int wrap_correction_done;
// the value of AVCodecParserContext.repeat_pict from the AVStream parser
// for the last packet returned from ifile_get_packet()
// -1 if unknown
// FIXME: this is a hack, the avstream parser should not be used
int last_pkt_repeat_pict;
int64_t filter_in_rescale_delta_last;
int64_t min_pts; /* pts with the smallest value in a current stream */
......@@ -417,12 +473,8 @@ typedef struct InputStream {
char *hwaccel_device;
enum AVPixelFormat hwaccel_output_format;
/* hwaccel context */
void *hwaccel_ctx;
void (*hwaccel_uninit)(AVCodecContext *s);
int (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame);
enum AVPixelFormat hwaccel_pix_fmt;
enum AVPixelFormat hwaccel_retrieved_pix_fmt;
/* stats */
// combined size of all the packets read
......@@ -439,38 +491,46 @@ typedef struct InputStream {
int got_output;
} InputStream;
typedef struct LastFrameDuration {
int stream_idx;
int64_t duration;
} LastFrameDuration;
typedef struct InputFile {
int index;
AVFormatContext *ctx;
int eof_reached; /* true if eof reached */
int eagain; /* true if last read attempt returned EAGAIN */
int ist_index; /* index of first stream in input_streams */
int loop; /* set number of times input stream should be looped */
int64_t duration; /* actual duration of the longest stream in a file
at the moment when looping happens */
AVRational time_base; /* time base of the duration */
int64_t input_ts_offset;
int input_sync_ref;
/**
* Effective format start time based on enabled streams.
*/
int64_t start_time_effective;
int64_t ts_offset;
/**
* Extra timestamp offset added by discontinuity handling.
*/
int64_t ts_offset_discont;
int64_t last_ts;
int64_t start_time; /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */
int64_t recording_time;
int nb_streams; /* number of stream that ffmpeg is aware of; may be different
from ctx.nb_streams if new streams appear during av_read_frame() */
int nb_streams_warn; /* number of streams that the user was warned of */
/* streams that ffmpeg is aware of;
* there may be extra streams in ctx that are not mapped to an InputStream
* if new streams appear dynamically during demuxing */
InputStream **streams;
int nb_streams;
int rate_emu;
float readrate;
int accurate_seek;
AVPacket *pkt;
#if HAVE_THREADS
AVThreadMessageQueue *in_thread_queue;
pthread_t thread; /* thread reading from this file */
int non_blocking; /* reading packets from the thread should not block */
int joined; /* the thread has been joined */
int thread_queue_size; /* maximum number of queued packets */
#endif
/* when looping the input file, this queue is used by decoders to report
* the last frame duration back to the demuxer thread */
AVThreadMessageQueue *audio_duration_queue;
int audio_duration_queue_size;
} InputFile;
enum forced_keyframes_const {
......@@ -485,6 +545,41 @@ enum forced_keyframes_const {
#define ABORT_ON_FLAG_EMPTY_OUTPUT (1 << 0)
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM (1 << 1)
/*
 * Component kinds that can appear in an encoding-statistics line.
 * Judging by the names, LITERAL is verbatim text while the remaining values
 * select per-packet/per-frame fields (indices, timestamps, sizes, bitrates);
 * NOTE(review): exact semantics are defined by the writer in ffmpeg_mux -
 * confirm against enc_stats_write().
 */
enum EncStatsType {
ENC_STATS_LITERAL = 0,
ENC_STATS_FILE_IDX,
ENC_STATS_STREAM_IDX,
ENC_STATS_FRAME_NUM,
ENC_STATS_FRAME_NUM_IN,
ENC_STATS_TIMEBASE,
ENC_STATS_TIMEBASE_IN,
ENC_STATS_PTS,
ENC_STATS_PTS_TIME,
ENC_STATS_PTS_IN,
ENC_STATS_PTS_TIME_IN,
ENC_STATS_DTS,
ENC_STATS_DTS_TIME,
ENC_STATS_SAMPLE_NUM,
ENC_STATS_NB_SAMPLES,
ENC_STATS_PKT_SIZE,
ENC_STATS_BITRATE,
ENC_STATS_AVG_BITRATE,
};
/* One element of a parsed encoding-statistics format specification. */
typedef struct EncStatsComponent {
// which field (or literal text) this component emits
enum EncStatsType type;

// literal bytes for ENC_STATS_LITERAL components; length in str_len
uint8_t *str;
size_t str_len;
} EncStatsComponent;

/* A full statistics specification plus the destination it is written to. */
typedef struct EncStats {
EncStatsComponent *components;
int nb_components;

// output destination for the rendered statistics lines
AVIOContext *io;
} EncStats;
extern const char *const forced_keyframes_const_names[];
typedef enum {
......@@ -492,68 +587,92 @@ typedef enum {
MUXER_FINISHED = 2,
} OSTFinished ;
/* Modes for forcing keyframes from the source stream. */
enum {
KF_FORCE_SOURCE = 1,
KF_FORCE_SOURCE_NO_DROP = 2,
};

/* State for the -force_key_frames handling of one output stream. */
typedef struct KeyframeForceCtx {
// 0, or one of the KF_FORCE_* values above
int type;

int64_t ref_pts;

// timestamps of the forced keyframes, in AV_TIME_BASE_Q
int64_t *pts;
int nb_pts;
// next entry in pts[] to be considered
int index;

// compiled expression and its constant bindings when an expr is used
AVExpr *pexpr;
double expr_const_values[FKF_NB];

int dropped_keyframe;
} KeyframeForceCtx;
typedef struct OutputStream {
const AVClass *clazz;
int file_index; /* file index */
int index; /* stream index in the output file */
int source_index; /* InputStream index */
/* input stream that is the source for this output stream;
* may be NULL for streams with no well-defined source, e.g.
* attachments or outputs from complex filtergraphs */
InputStream *ist;
AVStream *st; /* stream in the output file */
int encoding_needed; /* true if encoding needed for this stream */
int64_t frame_number;
/* input pts and corresponding output pts
for A/V sync */
struct InputStream *sync_ist; /* input stream to sync against */
int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
/* pts of the first frame encoded for this stream, used for limiting
* recording time */
int64_t first_pts;
/* dts of the last packet sent to the muxer */
/* number of frames emitted by the video-encoding sync code */
int64_t vsync_frame_number;
/* predicted pts of the next frame to be encoded
* audio/video encoding only */
int64_t next_pts;
/* dts of the last packet sent to the muxing queue, in AV_TIME_BASE_Q */
int64_t last_mux_dts;
/* pts of the last frame received from the filters, in AV_TIME_BASE_Q */
int64_t last_filter_pts;
// timestamp from which the streamcopied streams should start,
// in AV_TIME_BASE_Q;
// everything before it should be discarded
int64_t ts_copy_start;
// the timebase of the packets sent to the muxer
AVRational mux_timebase;
AVRational enc_timebase;
AVBSFContext *bsf_ctx;
AVCodecContext *enc_ctx;
AVCodecParameters *ref_par; /* associated input codec parameters with encoders options applied */
const AVCodec *enc;
int64_t max_frames;
AVFrame *filtered_frame;
AVFrame *last_frame;
AVFrame *sq_frame;
AVPacket *pkt;
int64_t last_dropped;
int64_t last_nb0_frames[3];
void *hwaccel_ctx;
/* video only */
AVRational frame_rate;
AVRational max_frame_rate;
enum VideoSyncMethod vsync_method;
int is_cfr;
const char *fps_mode;
int force_fps;
int top_field_first;
#if FFMPEG_ROTATION_METADATA
int rotate_overridden;
#endif
int autoscale;
int bitexact;
int bits_per_raw_sample;
#if FFMPEG_ROTATION_METADATA
double rotate_override_value;
#endif
AVRational frame_aspect_ratio;
/* forced key frames */
int64_t forced_kf_ref_pts;
int64_t *forced_kf_pts;
int forced_kf_count;
int forced_kf_index;
char *forced_keyframes;
AVExpr *forced_keyframes_pexpr;
double forced_keyframes_expr_const_values[FKF_NB];
int dropped_keyframe;
KeyframeForceCtx kf;
/* audio only */
#if FFMPEG_OPT_MAP_CHANNEL
int *audio_channels_map; /* list of the channels id to pick from the source stream */
int audio_channels_mapped; /* number of channels in audio_channels_map */
#endif
char *logfile_prefix;
FILE *logfile;
......@@ -569,7 +688,6 @@ typedef struct OutputStream {
char *apad;
OSTFinished finished; /* no more packets should be written for this stream */
int unavailable; /* true if the steram is unavailable (possibly temporarily) */
int stream_copy;
// init_output_stream() has been called for this stream
// The encoder and the bitstream filters have been initialized and the stream
......@@ -582,15 +700,16 @@ typedef struct OutputStream {
int streamcopy_started;
int copy_initial_nonkeyframes;
int copy_prior_start;
char *disposition;
int keep_pix_fmt;
/* stats */
// combined size of all the packets written
uint64_t data_size;
// combined size of all the packets sent to the muxer
uint64_t data_size_mux;
// combined size of all the packets received from the encoder
uint64_t data_size_enc;
// number of packets send to the muxer
uint64_t packets_written;
atomic_uint_least64_t packets_written;
// number of frames/samples sent to the encoder
uint64_t frames_encoded;
uint64_t samples_encoded;
......@@ -600,51 +719,48 @@ typedef struct OutputStream {
/* packet quality factor */
int quality;
int max_muxing_queue_size;
/* the packets are buffered here until the muxer is ready to be initialized */
AVFifo *muxing_queue;
/*
* The size of the AVPackets' buffers in queue.
* Updated when a packet is either pushed or pulled from the queue.
*/
size_t muxing_queue_data_size;
/* Threshold after which max_muxing_queue_size will be in effect */
size_t muxing_queue_data_threshold;
/* packet picture type */
int pict_type;
/* frame encode sum of squared error values */
int64_t error[4];
int sq_idx_encode;
int sq_idx_mux;
EncStats enc_stats_pre;
EncStats enc_stats_post;
/*
* bool on whether this stream should be utilized for splitting
* subtitles utilizing fix_sub_duration at random access points.
*/
unsigned int fix_sub_duration_heartbeat;
} OutputStream;
typedef struct OutputFile {
const AVClass *clazz;
int index;
const AVOutputFormat *format;
const char *url;
OutputStream **streams;
int nb_streams;
SyncQueue *sq_encode;
AVFormatContext *ctx;
AVDictionary *opts;
int ost_index; /* index of the first stream in output_streams */
int64_t recording_time; ///< desired length of the resulting file in microseconds == AV_TIME_BASE units
int64_t start_time; ///< start time in microseconds == AV_TIME_BASE units
uint64_t limit_filesize; /* filesize limit expressed in bytes */
int shortest;
int header_written;
int bitexact;
} OutputFile;
extern __thread InputStream **input_streams;
extern __thread int nb_input_streams;
extern __thread InputFile **input_files;
extern __thread int nb_input_files;
extern __thread OutputStream **output_streams;
extern __thread int nb_output_streams;
extern __thread OutputFile **output_files;
extern __thread int nb_output_files;
......@@ -658,13 +774,10 @@ extern __thread float audio_drift_threshold;
extern __thread float dts_delta_threshold;
extern __thread float dts_error_threshold;
extern __thread int audio_volume;
extern __thread int audio_sync_method;
extern __thread enum VideoSyncMethod video_sync_method;
extern __thread float frame_drop_threshold;
extern __thread int do_benchmark;
extern __thread int do_benchmark_all;
extern __thread int do_deinterlace;
extern __thread int do_hex_dump;
extern __thread int do_pkt_dump;
extern __thread int copy_ts;
......@@ -677,7 +790,6 @@ extern __thread int print_stats;
extern __thread int64_t stats_period;
extern __thread int qp_hist;
extern __thread int stdin_interaction;
extern __thread int frame_bits_per_raw_sample;
extern __thread AVIOContext *progress_avio;
extern __thread float max_error_rate;
......@@ -688,15 +800,20 @@ extern __thread int auto_conversion_filters;
extern __thread const AVIOInterruptCB int_cb;
#if CONFIG_QSV
extern __thread char *qsv_device;
#endif
extern __thread HWDevice *filter_hw_device;
extern __thread int want_sdp;
extern __thread unsigned nb_output_dumped;
extern __thread int main_ffmpeg_return_code;
extern __thread int ignore_unknown_streams;
extern __thread int copy_unknown_streams;
extern __thread int recast_media;
#if FFMPEG_OPT_PSNR
extern __thread int do_psnr;
#endif
void term_init(void);
void term_exit(void);
......@@ -705,7 +822,12 @@ void show_usage(void);
void remove_avoptions(AVDictionary **a, AVDictionary *b);
void assert_avoptions(AVDictionary *m);
int guess_input_channel_layout(InputStream *ist);
void assert_file_overwrite(const char *filename);
char *file_read(const char *filename);
AVDictionary *strip_specifiers(const AVDictionary *dict);
const AVCodec *find_codec_or_die(void *logctx, const char *name,
enum AVMediaType type, int encoder);
int parse_and_set_vsync(const char *arg, int *vsync_var, int file_idx, int st_idx, int is_global);
int configure_filtergraph(FilterGraph *fg);
void check_filter_outputs(void);
......@@ -719,8 +841,9 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame);
int ffmpeg_parse_options(int argc, char **argv);
int videotoolbox_init(AVCodecContext *s);
int qsv_init(AVCodecContext *s);
void enc_stats_write(OutputStream *ost, EncStats *es,
const AVFrame *frame, const AVPacket *pkt,
uint64_t frame_num);
HWDevice *hw_device_get_by_name(const char *name);
int hw_device_init_from_string(const char *arg, HWDevice **dev);
......@@ -732,15 +855,58 @@ int hw_device_setup_for_filter(FilterGraph *fg);
int hwaccel_decode_init(AVCodecContext *avctx);
/* open the muxer when all the streams are initialized */
int of_check_init(OutputFile *of);
/*
* Initialize muxing state for the given stream, should be called
* after the codec/streamcopy setup has been done.
*
* Open the muxer once all the streams have been initialized.
*/
int of_stream_init(OutputFile *of, OutputStream *ost);
int of_write_trailer(OutputFile *of);
int of_open(const OptionsContext *o, const char *filename);
void of_close(OutputFile **pof);
void of_write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost,
int unqueue);
void of_enc_stats_close(void);
/*
* Send a single packet to the output, applying any bitstream filters
* associated with the output stream. This may result in any number
* of packets actually being written, depending on what bitstream
* filters are applied. The supplied packet is consumed and will be
* blank (as if newly-allocated) when this function returns.
*
* If eof is set, instead indicate EOF to all bitstream filters and
* therefore flush any delayed packets to the output. A blank packet
* must be supplied in this case.
*/
void of_output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof);
int64_t of_filesize(OutputFile *of);
int ifile_open(const OptionsContext *o, const char *filename);
void ifile_close(InputFile **f);
/**
* Get next input packet from the demuxer.
*
* @param pkt the packet is written here when this function returns 0
* @return
* - 0 when a packet has been read successfully
* - 1 when stream end was reached, but the stream is looped;
* caller should flush decoders and read from this demuxer again
* - a negative error code on failure
*/
int ifile_get_packet(InputFile *f, AVPacket **pkt);
/* iterate over all input streams in all input files;
* pass NULL to start iteration */
InputStream *ist_iter(InputStream *prev);
extern const char * const opt_name_codec_names[];
extern const char * const opt_name_codec_tags[];
extern const char * const opt_name_frame_rates[];
extern const char * const opt_name_top_field_first[];
void set_report_callback(void (*callback)(int, float, float, int64_t, int, double, double));
void set_report_callback(void (*callback)(int, float, float, int64_t, double, double, double));
void cancel_operation(long id);
#endif /* FFTOOLS_FFMPEG_H */
/*
* Copyright (c) 2023 ARTHENICA LTD
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file is the modified version of ffmpeg_demux.c file living in ffmpeg source code under the fftools folder. We
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
* - fftools_ prefix added to the file name
* - fftools_ffmpeg_mux.h include added
*/
#include <float.h>
#include <stdint.h>
#include "fftools_ffmpeg.h"
#include "fftools_ffmpeg_mux.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/display.h"
#include "libavutil/error.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "libavutil/timestamp.h"
#include "libavutil/thread.h"
#include "libavutil/threadmessage.h"
#include "libavcodec/packet.h"
#include "libavformat/avformat.h"
/*
 * NULL-terminated lists of command-line option names, one table per
 * per-stream input option recognized by the demuxer setup code.
 * NOTE(review): presumably consumed by the generic specifier-matching
 * helpers (MATCH_PER_STREAM_OPT and friends) - confirm against the callers.
 */
static const char *const opt_name_discard[] = {"discard", NULL};
static const char *const opt_name_reinit_filters[] = {"reinit_filter", NULL};
static const char *const opt_name_fix_sub_duration[] = {"fix_sub_duration", NULL};
static const char *const opt_name_canvas_sizes[] = {"canvas_size", NULL};
static const char *const opt_name_guess_layout_max[] = {"guess_layout_max", NULL};
static const char *const opt_name_ts_scale[] = {"itsscale", NULL};
static const char *const opt_name_hwaccels[] = {"hwaccel", NULL};
static const char *const opt_name_hwaccel_devices[] = {"hwaccel_device", NULL};
static const char *const opt_name_hwaccel_output_formats[] = {"hwaccel_output_format", NULL};
static const char *const opt_name_autorotate[] = {"autorotate", NULL};
static const char *const opt_name_display_rotations[] = {"display_rotation", NULL};
static const char *const opt_name_display_hflips[] = {"display_hflip", NULL};
static const char *const opt_name_display_vflips[] = {"display_vflip", NULL};
/* Demuxer private state wrapped around the public InputFile. InputFile is
 * the first member, so Demuxer* and InputFile* are interchangeable via a
 * cast (see demuxer_from_ifile()). */
typedef struct Demuxer {
    InputFile f;
    /* number of times input stream should be looped */
    int loop;
    /* actual duration of the longest stream in a file at the moment when
     * looping happens */
    int64_t duration;
    /* time base of the duration */
    AVRational time_base;
    /* number of streams that the user was warned of */
    int nb_streams_warn;
    /* queue carrying demuxed packets from the reader thread to the consumer */
    AVThreadMessageQueue *in_thread_queue;
    /* capacity of in_thread_queue, in messages (see thread_start()) */
    int thread_queue_size;
    /* demuxer reader thread running input_thread() */
    pthread_t thread;
    /* nonzero: consumer receives with AV_THREAD_MESSAGE_NONBLOCK */
    int non_blocking;
} Demuxer;
/* Message passed from the demuxer thread to the consumer through
 * Demuxer.in_thread_queue. */
typedef struct DemuxMsg {
    /* demuxed packet; ownership transfers to the receiver. NULL for a
     * pure control message (looping notification). */
    AVPacket *pkt;
    /* nonzero: the input is about to seek back to its start (-stream_loop) */
    int looping;
    // repeat_pict from the demuxer-internal parser
    int repeat_pict;
} DemuxMsg;
/* Upcast an InputFile to its containing Demuxer. Valid because InputFile is
 * the first member of struct Demuxer, so both share the same address. */
static Demuxer *demuxer_from_ifile(InputFile *f)
{
    Demuxer *d = (Demuxer *)f;
    return d;
}
/* Warn once about a packet belonging to a stream that appeared after the
 * initial stream discovery; such late streams are ignored by the demuxer
 * thread. nb_streams_warn records the highest index already reported. */
static void report_new_stream(Demuxer *d, const AVPacket *pkt)
{
    AVStream *st = d->f.ctx->streams[pkt->stream_index];
    /* already warned about this stream index (or a higher one) */
    if (pkt->stream_index < d->nb_streams_warn)
        return;
    av_log(NULL, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           av_get_media_type_string(st->codecpar->codec_type),
           d->f.index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    d->nb_streams_warn = pkt->stream_index + 1;
}
/* Fold one stream's duration into the demuxer's file-duration estimate
 * (used as the timestamp offset when looping with -stream_loop).
 * last_duration is the duration of the stream's final frame; the pts span
 * max_pts - min_pts covers everything up to (but excluding) that frame. */
static void ifile_duration_update(Demuxer *d, InputStream *ist,
                                  int64_t last_duration)
{
    /* the total duration of the stream, max_pts - min_pts is
     * the duration of the stream without the last frame */
    /* the uint64_t cast makes the subtraction well-defined even when the
     * span is huge; the second test guards against int64 overflow */
    if (ist->max_pts > ist->min_pts &&
        ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - last_duration)
        last_duration += ist->max_pts - ist->min_pts;
    /* keep the longest duration across all streams, comparing across
     * different time bases with av_compare_ts() */
    if (!d->duration ||
        av_compare_ts(d->duration, d->time_base,
                      last_duration, ist->st->time_base) < 0) {
        d->duration = last_duration;
        d->time_base = ist->st->time_base;
    }
}
/* Seek the input back to its start for -stream_loop and update the
 * demuxer's duration estimate so timestamps of the next iteration can be
 * offset past the previous one. Returns 0/positive on success, a negative
 * AVERROR code on failure. */
static int seek_to_start(Demuxer *d)
{
    InputFile *ifile = &d->f;
    AVFormatContext *is = ifile->ctx;
    InputStream *ist;
    int ret;
    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;
    if (ifile->audio_duration_queue_size) {
        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        /* collect the exact last-frame durations reported by the audio
         * decoders through the audio_duration_queue */
        int got_durations = 0;
        while (got_durations < ifile->audio_duration_queue_size) {
            LastFrameDuration dur;
            ret = av_thread_message_queue_recv(ifile->audio_duration_queue, &dur, 0);
            if (ret < 0)
                return ret;
            got_durations++;
            ist = ifile->streams[dur.stream_idx];
            ifile_duration_update(d, ist, dur.duration);
        }
    } else {
        /* no decoded audio: estimate each stream's last-frame duration
         * from its (forced or average) frame rate, defaulting to 1 tick */
        for (int i = 0; i < ifile->nb_streams; i++) {
            int64_t duration = 0;
            ist = ifile->streams[i];
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
            ifile_duration_update(d, ist, duration);
        }
    }
    /* a negative loop count means "loop forever", so only decrement
     * positive counts */
    if (d->loop > 0)
        d->loop--;
    return ret;
}
/* Adjust a freshly demuxed packet's timestamps in place:
 *  - pts wraparound correction for streams with < 64 pts_wrap_bits,
 *  - the per-file ts_offset (-itsoffset / copy_ts adjustment),
 *  - the per-stream ts_scale (-itsscale),
 *  - the accumulated loop duration offset for -stream_loop.
 * Also reports the parser's repeat_pict through *repeat_pict (-1 when no
 * parser is attached or the stream is not video). */
static void ts_fixup(Demuxer *d, AVPacket *pkt, int *repeat_pict)
{
    InputFile *ifile = &d->f;
    InputStream *ist = ifile->streams[pkt->stream_index];
    const int64_t start_time = ifile->start_time_effective;
    int64_t duration;
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d:%d type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s\n",
               ifile->index, pkt->stream_index,
               av_get_media_type_string(ist->st->codecpar->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base));
    }
    /* one-shot wraparound correction: if a timestamp lies more than half a
     * wrap period past the start time, assume it wrapped and unwrap it.
     * wrap_correction_done is reset when a correction was applied so the
     * next packet is checked again. */
    if (!ist->wrap_correction_done && start_time != AV_NOPTS_VALUE &&
        ist->st->pts_wrap_bits < 64) {
        int64_t stime, stime2;
        stime = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;
        if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }
    /* apply the per-file input timestamp offset */
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    /* apply the per-stream -itsscale factor */
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts *= ist->ts_scale;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts *= ist->ts_scale;
    /* shift timestamps past the previous loop iterations (d->duration is 0
     * until the first loop happens) and track the pts range for the next
     * duration estimate */
    duration = av_rescale_q(d->duration, d->time_base, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE) {
        pkt->pts += duration;
        ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
    }
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += duration;
    *repeat_pict = -1;
    if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
        av_stream_get_parser(ist->st))
        *repeat_pict = av_stream_get_parser(ist->st)->repeat_pict;
}
/* Label the current (demuxer) thread "dmx<index>:<format>" for debuggers
 * and process inspectors; the name is truncated to the OS limit. */
static void thread_set_name(InputFile *f)
{
    char buf[16];
    snprintf(buf, sizeof(buf), "dmx%d:%s", f->index, f->ctx->iformat->name);
    ff_thread_setname(buf);
}
/* Body of the per-input demuxer thread. Reads packets from the format
 * context, fixes up their timestamps, and forwards them (with ownership)
 * to the consumer through d->in_thread_queue. Handles -stream_loop by
 * sending a "looping" control message and seeking back to the start.
 * On exit the queue's receive side is marked with the terminating error
 * (AVERROR_EOF on normal end of input). */
static void *input_thread(void *arg)
{
    Demuxer *d = arg;
    InputFile *f = &d->f;
    AVPacket *pkt;
    /* start non-blocking when the consumer multiplexes several inputs;
     * cleared on the first EAGAIN so the send then blocks (with a warning) */
    unsigned flags = d->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;
    pkt = av_packet_alloc();
    if (!pkt) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }
    thread_set_name(f);
    while (1) {
        DemuxMsg msg = { NULL };
        ret = av_read_frame(f->ctx, pkt);
        if (ret == AVERROR(EAGAIN)) {
            /* no data yet (e.g. live source): back off for 10 ms and retry */
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            if (d->loop) {
                /* signal looping to the consumer thread */
                msg.looping = 1;
                ret = av_thread_message_queue_send(d->in_thread_queue, &msg, 0);
                if (ret >= 0)
                    ret = seek_to_start(d);
                if (ret >= 0)
                    continue;
                /* fallthrough to the error path */
            }
            if (ret == AVERROR_EOF)
                av_log(NULL, AV_LOG_VERBOSE, "EOF in input file %d\n", f->index);
            else
                av_log(NULL, AV_LOG_ERROR, "Error demuxing input file %d: %s\n",
                       f->index, av_err2str(ret));
            break;
        }
        if (do_pkt_dump) {
            av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
                             f->ctx->streams[pkt->stream_index]);
        }
        /* the following test is needed in case new streams appear
           dynamically in stream : we ignore them */
        if (pkt->stream_index >= f->nb_streams) {
            report_new_stream(d, pkt);
            av_packet_unref(pkt);
            continue;
        }
        if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
            av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
                   "%s: corrupt input packet in stream %d\n",
                   f->ctx->url, pkt->stream_index);
            if (exit_on_error) {
                av_packet_unref(pkt);
                ret = AVERROR_INVALIDDATA;
                break;
            }
        }
        ts_fixup(d, pkt, &msg.repeat_pict);
        /* hand the packet off in a freshly allocated AVPacket; the queue
         * message owns it from here on */
        msg.pkt = av_packet_alloc();
        if (!msg.pkt) {
            av_packet_unref(pkt);
            ret = AVERROR(ENOMEM);
            break;
        }
        av_packet_move_ref(msg.pkt, pkt);
        ret = av_thread_message_queue_send(d->in_thread_queue, &msg, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full in non-blocking mode: fall back to a blocking send
             * and tell the user the queue may be undersized */
            flags = 0;
            ret = av_thread_message_queue_send(d->in_thread_queue, &msg, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   d->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_free(&msg.pkt);
            break;
        }
    }
finish:
    /* the loop only terminates with an error/EOF code in ret */
    av_assert0(ret < 0);
    av_thread_message_queue_set_err_recv(d->in_thread_queue, ret);
    av_packet_free(&pkt);
    av_log(NULL, AV_LOG_VERBOSE, "Terminating demuxer thread %d\n", f->index);
    return NULL;
}
/* Stop and join the demuxer thread, draining and freeing any packets still
 * queued. No-op when the thread was never started. */
static void thread_stop(Demuxer *d)
{
    InputFile *f = &d->f;
    DemuxMsg msg;
    if (!d->in_thread_queue)
        return;
    /* make any pending/future send in the demuxer thread fail, then drain
     * the queue so the thread is not blocked on a full queue */
    av_thread_message_queue_set_err_send(d->in_thread_queue, AVERROR_EOF);
    while (av_thread_message_queue_recv(d->in_thread_queue, &msg, 0) >= 0)
        av_packet_free(&msg.pkt);
    pthread_join(d->thread, NULL);
    av_thread_message_queue_free(&d->in_thread_queue);
    /* NULL-safe: only allocated when looping with audio decoding */
    av_thread_message_queue_free(&f->audio_duration_queue);
}
/* Allocate the packet message queue (and, when looping with decoded audio,
 * the last-frame-duration queue) and launch the demuxer thread.
 * Returns 0 on success, a negative AVERROR code on failure. */
static int thread_start(Demuxer *d)
{
    int ret;
    InputFile *f = &d->f;
    /* default queue depth: deeper when reading several inputs at once */
    if (d->thread_queue_size <= 0)
        d->thread_queue_size = (nb_input_files > 1 ? 8 : 1);
    /* with multiple inputs, use non-blocking sends for non-seekable
     * sources (except lavfi) so one slow input cannot stall the others */
    if (nb_input_files > 1 &&
        (f->ctx->pb ? !f->ctx->pb->seekable :
         strcmp(f->ctx->iformat->name, "lavfi")))
        d->non_blocking = 1;
    ret = av_thread_message_queue_alloc(&d->in_thread_queue,
                                        d->thread_queue_size, sizeof(DemuxMsg));
    if (ret < 0)
        return ret;
    if (d->loop) {
        /* looping needs exact last-frame durations from every decoded
         * audio stream; size the duration queue accordingly */
        int nb_audio_dec = 0;
        for (int i = 0; i < f->nb_streams; i++) {
            InputStream *ist = f->streams[i];
            nb_audio_dec += !!(ist->decoding_needed &&
                               ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO);
        }
        if (nb_audio_dec) {
            ret = av_thread_message_queue_alloc(&f->audio_duration_queue,
                                                nb_audio_dec, sizeof(LastFrameDuration));
            if (ret < 0)
                goto fail;
            f->audio_duration_queue_size = nb_audio_dec;
        }
    }
    if ((ret = pthread_create(&d->thread, NULL, input_thread, d))) {
        av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
        ret = AVERROR(ret);
        goto fail;
    }
    return 0;
fail:
    av_thread_message_queue_free(&d->in_thread_queue);
    return ret;
}
/* Fetch the next packet demuxed from this input file.
 * Lazily starts the demuxer thread on first use, then enforces the
 * -readrate / -re pacing before receiving from the thread queue.
 *
 * Returns:
 *   0  - *pkt holds a new packet (caller takes ownership),
 *   1  - the input is looping back to its start (no packet),
 *   AVERROR(EAGAIN) - pacing/non-blocking queue says try again later,
 *   other negative AVERROR - demuxing terminated (AVERROR_EOF on normal end).
 *
 * Fix over the previous revision: the pacing loop's per-stream variable no
 * longer shadows the function-scope `ist` (-Wshadow). */
int ifile_get_packet(InputFile *f, AVPacket **pkt)
{
    Demuxer *d = demuxer_from_ifile(f);
    InputStream *ist;
    DemuxMsg msg;
    int ret;
    if (!d->in_thread_queue) {
        ret = thread_start(d);
        if (ret < 0)
            return ret;
    }
    if (f->readrate || f->rate_emu) {
        int i;
        /* wall-clock baseline: with copy_ts the file's own start time is
         * part of the timestamps, so it must be part of the budget too */
        int64_t file_start = copy_ts * (
            (f->start_time_effective != AV_NOPTS_VALUE ? f->start_time_effective * !start_at_zero : 0) +
            (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
        );
        float scale = f->rate_emu ? 1.0 : f->readrate;
        /* hold off while any stream is ahead of its real-time budget */
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *pace_ist = f->streams[i];
            int64_t stream_ts_offset, pts, now;
            if (!pace_ist->nb_packets || (pace_ist->decoding_needed && !pace_ist->got_output)) continue;
            stream_ts_offset = FFMAX(pace_ist->first_dts != AV_NOPTS_VALUE ? pace_ist->first_dts : 0, file_start);
            pts = av_rescale(pace_ist->dts, 1000000, AV_TIME_BASE);
            now = (av_gettime_relative() - pace_ist->start) * scale + stream_ts_offset;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }
    ret = av_thread_message_queue_recv(d->in_thread_queue, &msg,
                                       d->non_blocking ?
                                       AV_THREAD_MESSAGE_NONBLOCK : 0);
    if (ret < 0)
        return ret;
    if (msg.looping)
        return 1;
    /* record the parser's repeat_pict alongside the packet's stream */
    ist = f->streams[msg.pkt->stream_index];
    ist->last_pkt_repeat_pict = msg.repeat_pict;
    *pkt = msg.pkt;
    return 0;
}
/* Free one InputStream and everything it owns, then NULL the caller's
 * pointer. Safe to call with *pist == NULL. */
static void ist_free(InputStream **pist)
{
    InputStream *ist = *pist;
    if (!ist)
        return;
    av_frame_free(&ist->decoded_frame);
    av_packet_free(&ist->pkt);
    av_dict_free(&ist->decoder_opts);
    avsubtitle_free(&ist->prev_sub.subtitle);
    av_frame_free(&ist->sub2video.frame);
    av_freep(&ist->filters);
    av_freep(&ist->hwaccel_device);
    av_freep(&ist->dts_buffer);
    avcodec_free_context(&ist->dec_ctx);
    avcodec_parameters_free(&ist->par);
    av_freep(pist);
}
/* Close one input file: stop its demuxer thread, free all of its streams,
 * close the format context, and NULL the caller's pointer.
 * Safe to call with *pf == NULL.
 *
 * Fix over the previous revision: the NULL check now precedes the
 * InputFile -> Demuxer conversion instead of following it. */
void ifile_close(InputFile **pf)
{
    InputFile *f = *pf;
    Demuxer *d;
    if (!f)
        return;
    d = demuxer_from_ifile(f);
    thread_stop(d);
    for (int i = 0; i < f->nb_streams; i++)
        ist_free(&f->streams[i]);
    av_freep(&f->streams);
    avformat_close_input(&f->ctx);
    av_freep(pf);
}
/* Pick the decoder for a stream. A user-forced codec (-c:v/-c:a/...) wins;
 * otherwise, for video with a generic hwaccel requested, prefer a decoder
 * advertising a hardware config for that device type; otherwise fall back
 * to the default decoder for the codec id. May return NULL if no decoder
 * exists for the stream's codec id. */
static const AVCodec *choose_decoder(const OptionsContext *o, AVFormatContext *s, AVStream *st,
                                     enum HWAccelID hwaccel_id, enum AVHWDeviceType hwaccel_device_type)
{
    char *codec_name = NULL;
    MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
    if (codec_name) {
        const AVCodec *codec = find_codec_or_die(NULL, codec_name, st->codecpar->codec_type, 0);
        st->codecpar->codec_id = codec->id;
        /* -recast_media allows forcing a decoder of a different media type */
        if (recast_media && st->codecpar->codec_type != codec->type)
            st->codecpar->codec_type = codec->type;
        return codec;
    } else {
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
            hwaccel_id == HWACCEL_GENERIC &&
            hwaccel_device_type != AV_HWDEVICE_TYPE_NONE) {
            const AVCodec *c;
            void *i = NULL;
            /* scan all registered decoders for one that supports the
             * requested hardware device type */
            while ((c = av_codec_iterate(&i))) {
                const AVCodecHWConfig *config;
                if (c->id != st->codecpar->codec_id ||
                    !av_codec_is_decoder(c))
                    continue;
                for (int j = 0; (config = avcodec_get_hw_config(c, j)); j++) {
                    if (config->device_type == hwaccel_device_type) {
                        av_log(NULL, AV_LOG_VERBOSE, "Selecting decoder '%s' because of requested hwaccel method %s\n",
                               c->name, av_hwdevice_get_type_name(hwaccel_device_type));
                        return c;
                    }
                }
            }
        }
        return avcodec_find_decoder(st->codecpar->codec_id);
    }
}
/* If the decoder has an unordered channel layout, replace it with the
 * default layout for its channel count (up to -guess_layout_max channels)
 * and warn the user. Returns 1 when the layout is usable afterwards,
 * 0 when it remains unspecified. */
static int guess_input_channel_layout(InputStream *ist)
{
    AVCodecContext *dec = ist->dec_ctx;
    if (dec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
        char layout_name[256];
        if (dec->ch_layout.nb_channels > ist->guess_layout_max)
            return 0;
        av_channel_layout_default(&dec->ch_layout, dec->ch_layout.nb_channels);
        /* no default layout exists for this channel count */
        if (dec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
            return 0;
        av_channel_layout_describe(&dec->ch_layout, layout_name, sizeof(layout_name));
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
/* Attach a display-matrix side data entry to the stream when the user
 * requested -display_rotation / -display_hflip / -display_vflip. Does
 * nothing when none of the three options were given. Exits the program
 * on allocation failure. */
static void add_display_matrix_to_stream(const OptionsContext *o,
                                         AVFormatContext *ctx, AVStream *st)
{
    /* sentinels that mean "option not given" */
    double rotation = DBL_MAX;
    int hflip = -1, vflip = -1;
    int hflip_set = 0, vflip_set = 0, rotation_set = 0;
    int32_t *buf;
    MATCH_PER_STREAM_OPT(display_rotations, dbl, rotation, ctx, st);
    MATCH_PER_STREAM_OPT(display_hflips, i, hflip, ctx, st);
    MATCH_PER_STREAM_OPT(display_vflips, i, vflip, ctx, st);
    rotation_set = rotation != DBL_MAX;
    hflip_set = hflip != -1;
    vflip_set = vflip != -1;
    if (!rotation_set && !hflip_set && !vflip_set)
        return;
    buf = (int32_t *)av_stream_new_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9);
    if (!buf) {
        av_log(NULL, AV_LOG_FATAL, "Failed to generate a display matrix!\n");
        exit_program(1);
    }
    /* the matrix stores the counter-clockwise rotation needed for display,
     * hence the negated angle */
    av_display_rotation_set(buf,
                            rotation_set ? -(rotation) : -0.0f);
    av_display_matrix_flip(buf,
                           hflip_set ? hflip : 0,
                           vflip_set ? vflip : 0);
}
/* Add all the streams from the given input file to the demuxer */
/* For every AVStream in the freshly opened file, allocate an InputStream,
 * apply the per-stream command-line options (hwaccel, codec tag, discard,
 * framerate, canvas size, ...), pick a decoder and set up its context.
 * Exits the program on any fatal option/allocation error. */
static void add_input_streams(const OptionsContext *o, Demuxer *d)
{
    InputFile *f = &d->f;
    AVFormatContext *ic = f->ctx;
    int i, ret;
    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        AVCodecParameters *par = st->codecpar;
        InputStream *ist;
        char *framerate = NULL, *hwaccel_device = NULL;
        const char *hwaccel = NULL;
        char *hwaccel_output_format = NULL;
        char *codec_tag = NULL;
        char *next;
        char *discard_str = NULL;
        /* the "skip_frame" AVOption is reused to parse -discard values */
        const AVClass *cc = avcodec_get_class();
        const AVOption *discard_opt = av_opt_find(&cc, "skip_frame", NULL,
                                                  0, AV_OPT_SEARCH_FAKE_OBJ);
        ist = ALLOC_ARRAY_ELEM(f->streams, f->nb_streams);
        ist->st = st;
        ist->file_index = f->index;
        /* streams start discarded; output-stream mapping un-discards them */
        ist->discard = 1;
        st->discard = AVDISCARD_ALL;
        ist->nb_samples = 0;
        ist->first_dts = AV_NOPTS_VALUE;
        ist->min_pts = INT64_MAX;
        ist->max_pts = INT64_MIN;
        ist->ts_scale = 1.0;
        MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
        ist->autorotate = 1;
        MATCH_PER_STREAM_OPT(autorotate, i, ist->autorotate, ic, st);
        MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, ic, st);
        if (codec_tag) {
            /* accept either a number or a fourcc string */
            uint32_t tag = strtol(codec_tag, &next, 0);
            if (*next)
                tag = AV_RL32(codec_tag);
            st->codecpar->codec_tag = tag;
        }
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            add_display_matrix_to_stream(o, ic, st);
            MATCH_PER_STREAM_OPT(hwaccels, str, hwaccel, ic, st);
            MATCH_PER_STREAM_OPT(hwaccel_output_formats, str,
                                 hwaccel_output_format, ic, st);
            /* legacy hwaccel names imply an output format for
             * backward compatibility */
            if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "cuvid")) {
                av_log(NULL, AV_LOG_WARNING,
                       "WARNING: defaulting hwaccel_output_format to cuda for compatibility "
                       "with old commandlines. This behaviour is DEPRECATED and will be removed "
                       "in the future. Please explicitly set \"-hwaccel_output_format cuda\".\n");
                ist->hwaccel_output_format = AV_PIX_FMT_CUDA;
            } else if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "qsv")) {
                av_log(NULL, AV_LOG_WARNING,
                       "WARNING: defaulting hwaccel_output_format to qsv for compatibility "
                       "with old commandlines. This behaviour is DEPRECATED and will be removed "
                       "in the future. Please explicitly set \"-hwaccel_output_format qsv\".\n");
                ist->hwaccel_output_format = AV_PIX_FMT_QSV;
            } else if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "mediacodec")) {
                // There is no real AVHWFrameContext implementation. Set
                // hwaccel_output_format to avoid av_hwframe_transfer_data error.
                ist->hwaccel_output_format = AV_PIX_FMT_MEDIACODEC;
            } else if (hwaccel_output_format) {
                ist->hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format);
                if (ist->hwaccel_output_format == AV_PIX_FMT_NONE) {
                    av_log(NULL, AV_LOG_FATAL, "Unrecognised hwaccel output "
                           "format: %s", hwaccel_output_format);
                }
            } else {
                ist->hwaccel_output_format = AV_PIX_FMT_NONE;
            }
            if (hwaccel) {
                // The NVDEC hwaccels use a CUDA device, so remap the name here.
                if (!strcmp(hwaccel, "nvdec") || !strcmp(hwaccel, "cuvid"))
                    hwaccel = "cuda";
                if (!strcmp(hwaccel, "none"))
                    ist->hwaccel_id = HWACCEL_NONE;
                else if (!strcmp(hwaccel, "auto"))
                    ist->hwaccel_id = HWACCEL_AUTO;
                else {
                    enum AVHWDeviceType type = av_hwdevice_find_type_by_name(hwaccel);
                    if (type != AV_HWDEVICE_TYPE_NONE) {
                        ist->hwaccel_id = HWACCEL_GENERIC;
                        ist->hwaccel_device_type = type;
                    }
                    /* unknown hwaccel name: list the supported ones and abort */
                    if (!ist->hwaccel_id) {
                        av_log(NULL, AV_LOG_FATAL, "Unrecognized hwaccel: %s.\n",
                               hwaccel);
                        av_log(NULL, AV_LOG_FATAL, "Supported hwaccels: ");
                        type = AV_HWDEVICE_TYPE_NONE;
                        while ((type = av_hwdevice_iterate_types(type)) !=
                               AV_HWDEVICE_TYPE_NONE)
                            av_log(NULL, AV_LOG_FATAL, "%s ",
                                   av_hwdevice_get_type_name(type));
                        av_log(NULL, AV_LOG_FATAL, "\n");
                        exit_program(1);
                    }
                }
            }
            MATCH_PER_STREAM_OPT(hwaccel_devices, str, hwaccel_device, ic, st);
            if (hwaccel_device) {
                ist->hwaccel_device = av_strdup(hwaccel_device);
                if (!ist->hwaccel_device)
                    report_and_exit(AVERROR(ENOMEM));
            }
            ist->hwaccel_pix_fmt = AV_PIX_FMT_NONE;
        }
        ist->dec = choose_decoder(o, ic, st, ist->hwaccel_id, ist->hwaccel_device_type);
        ist->decoder_opts = filter_codec_opts(o->g->codec_opts, ist->st->codecpar->codec_id, ic, st, ist->dec);
        ist->reinit_filters = -1;
        MATCH_PER_STREAM_OPT(reinit_filters, i, ist->reinit_filters, ic, st);
        MATCH_PER_STREAM_OPT(discard, str, discard_str, ic, st);
        ist->user_set_discard = AVDISCARD_NONE;
        /* -vn/-an/-sn/-dn discard whole media types */
        if ((o->video_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) ||
            (o->audio_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ||
            (o->subtitle_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) ||
            (o->data_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_DATA))
            ist->user_set_discard = AVDISCARD_ALL;
        if (discard_str && av_opt_eval_int(&cc, discard_opt, discard_str, &ist->user_set_discard) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error parsing discard %s.\n",
                   discard_str);
            exit_program(1);
        }
        ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
        ist->prev_pkt_pts = AV_NOPTS_VALUE;
        ist->dec_ctx = avcodec_alloc_context3(ist->dec);
        if (!ist->dec_ctx)
            report_and_exit(AVERROR(ENOMEM));
        ret = avcodec_parameters_to_context(ist->dec_ctx, par);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error initializing the decoder context.\n");
            exit_program(1);
        }
        ist->decoded_frame = av_frame_alloc();
        if (!ist->decoded_frame)
            report_and_exit(AVERROR(ENOMEM));
        ist->pkt = av_packet_alloc();
        if (!ist->pkt)
            report_and_exit(AVERROR(ENOMEM));
        if (o->bitexact)
            ist->dec_ctx->flags |= AV_CODEC_FLAG_BITEXACT;
        /* media-type specific option handling */
        switch (par->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            // avformat_find_stream_info() doesn't set this for us anymore.
            ist->dec_ctx->framerate = st->avg_frame_rate;
            MATCH_PER_STREAM_OPT(frame_rates, str, framerate, ic, st);
            if (framerate && av_parse_video_rate(&ist->framerate,
                                                 framerate) < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error parsing framerate %s.\n",
                       framerate);
                exit_program(1);
            }
            ist->top_field_first = -1;
            MATCH_PER_STREAM_OPT(top_field_first, i, ist->top_field_first, ic, st);
            ist->framerate_guessed = av_guess_frame_rate(ic, st, NULL);
            break;
        case AVMEDIA_TYPE_AUDIO:
            ist->guess_layout_max = INT_MAX;
            MATCH_PER_STREAM_OPT(guess_layout_max, i, ist->guess_layout_max, ic, st);
            guess_input_channel_layout(ist);
            break;
        case AVMEDIA_TYPE_DATA:
        case AVMEDIA_TYPE_SUBTITLE: {
            char *canvas_size = NULL;
            MATCH_PER_STREAM_OPT(fix_sub_duration, i, ist->fix_sub_duration, ic, st);
            MATCH_PER_STREAM_OPT(canvas_sizes, str, canvas_size, ic, st);
            if (canvas_size &&
                av_parse_video_size(&ist->dec_ctx->width, &ist->dec_ctx->height, canvas_size) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Invalid canvas size: %s.\n", canvas_size);
                exit_program(1);
            }
            break;
        }
        case AVMEDIA_TYPE_ATTACHMENT:
        case AVMEDIA_TYPE_UNKNOWN:
            break;
        default:
            abort();
        }
        /* snapshot the (possibly option-adjusted) parameters for later use */
        ist->par = avcodec_parameters_alloc();
        if (!ist->par)
            report_and_exit(AVERROR(ENOMEM));
        ret = avcodec_parameters_from_context(ist->par, ist->dec_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error initializing the decoder context.\n");
            exit_program(1);
        }
    }
}
/* Write an attachment stream's extradata to a file (-dump_attachment).
 * When the requested filename is empty, fall back to the stream's
 * "filename" metadata tag; abort if neither yields a name or the file
 * cannot be opened.
 *
 * Fix over the previous revision: the fatal log message's concatenated
 * string literals were missing a space ("tagin stream" -> "tag in stream"). */
static void dump_attachment(AVStream *st, const char *filename)
{
    int ret;
    AVIOContext *out = NULL;
    const AVDictionaryEntry *e;
    if (!st->codecpar->extradata_size) {
        av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
               nb_input_files - 1, st->index);
        return;
    }
    /* empty name given: try the stream's own "filename" metadata */
    if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
        filename = e->value;
    if (!*filename) {
        av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag "
               "in stream #%d:%d.\n", nb_input_files - 1, st->index);
        exit_program(1);
    }
    /* honour -y / -n before clobbering an existing file */
    assert_file_overwrite(filename);
    if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
               filename);
        exit_program(1);
    }
    avio_write(out, st->codecpar->extradata, st->codecpar->extradata_size);
    avio_flush(out);
    avio_close(out);
}
/* Open one input file: resolve -f/-ss/-sseof/-t/-to, plumb format-level
 * options into the demuxer, open and probe the input, perform the initial
 * seek, allocate the InputFile/Demuxer and its streams, warn about unused
 * codec options, and dump requested attachments.
 * Returns 0 on success; exits the program on any fatal error. */
int ifile_open(const OptionsContext *o, const char *filename)
{
    Demuxer *d;
    InputFile *f;
    AVFormatContext *ic;
    const AVInputFormat *file_iformat = NULL;
    int err, i, ret;
    int64_t timestamp;
    AVDictionary *unused_opts = NULL;
    const AVDictionaryEntry *e = NULL;
    char *video_codec_name = NULL;
    char *audio_codec_name = NULL;
    char *subtitle_codec_name = NULL;
    char *data_codec_name = NULL;
    int scan_all_pmts_set = 0;
    int64_t start_time = o->start_time;
    int64_t start_time_eof = o->start_time_eof;
    int64_t stop_time = o->stop_time;
    int64_t recording_time = o->recording_time;
    /* -t wins over -to when both are given; otherwise -to is converted
     * into a recording duration relative to -ss */
    if (stop_time != INT64_MAX && recording_time != INT64_MAX) {
        stop_time = INT64_MAX;
        av_log(NULL, AV_LOG_WARNING, "-t and -to cannot be used together; using -t.\n");
    }
    if (stop_time != INT64_MAX && recording_time == INT64_MAX) {
        int64_t start = start_time == AV_NOPTS_VALUE ? 0 : start_time;
        if (stop_time <= start) {
            av_log(NULL, AV_LOG_ERROR, "-to value smaller than -ss; aborting.\n");
            exit_program(1);
        } else {
            recording_time = stop_time - start;
        }
    }
    if (o->format) {
        if (!(file_iformat = av_find_input_format(o->format))) {
            av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
            exit_program(1);
        }
    }
    if (!strcmp(filename, "-"))
        filename = "fd:";
    /* disable interactive stdin handling when the input itself is stdin */
    stdin_interaction &= strncmp(filename, "pipe:", 5) &&
                         strcmp(filename, "fd:") &&
                         strcmp(filename, "/dev/stdin");
    /* get default parameters from command line */
    ic = avformat_alloc_context();
    if (!ic)
        report_and_exit(AVERROR(ENOMEM));
    if (o->nb_audio_sample_rate) {
        av_dict_set_int(&o->g->format_opts, "sample_rate", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i, 0);
    }
    /* forward -ac / -ch_layout to demuxers that expose a "ch_layout"
     * private option (e.g. raw audio grabbers) */
    if (o->nb_audio_channels) {
        const AVClass *priv_class;
        if (file_iformat && (priv_class = file_iformat->priv_class) &&
            av_opt_find(&priv_class, "ch_layout", NULL, 0,
                        AV_OPT_SEARCH_FAKE_OBJ)) {
            char buf[32];
            snprintf(buf, sizeof(buf), "%dC", o->audio_channels[o->nb_audio_channels - 1].u.i);
            av_dict_set(&o->g->format_opts, "ch_layout", buf, 0);
        }
    }
    if (o->nb_audio_ch_layouts) {
        const AVClass *priv_class;
        if (file_iformat && (priv_class = file_iformat->priv_class) &&
            av_opt_find(&priv_class, "ch_layout", NULL, 0,
                        AV_OPT_SEARCH_FAKE_OBJ)) {
            av_dict_set(&o->g->format_opts, "ch_layout", o->audio_ch_layouts[o->nb_audio_ch_layouts - 1].u.str, 0);
        }
    }
    if (o->nb_frame_rates) {
        const AVClass *priv_class;
        /* set the format-level framerate option;
         * this is important for video grabbers, e.g. x11 */
        if (file_iformat && (priv_class = file_iformat->priv_class) &&
            av_opt_find(&priv_class, "framerate", NULL, 0,
                        AV_OPT_SEARCH_FAKE_OBJ)) {
            av_dict_set(&o->g->format_opts, "framerate",
                        o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
        }
    }
    if (o->nb_frame_sizes) {
        av_dict_set(&o->g->format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
    }
    if (o->nb_frame_pix_fmts)
        av_dict_set(&o->g->format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
    /* user-forced decoders, applied per media type */
    MATCH_PER_TYPE_OPT(codec_names, str, video_codec_name, ic, "v");
    MATCH_PER_TYPE_OPT(codec_names, str, audio_codec_name, ic, "a");
    MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, ic, "s");
    MATCH_PER_TYPE_OPT(codec_names, str, data_codec_name, ic, "d");
    if (video_codec_name)
        ic->video_codec = find_codec_or_die(NULL, video_codec_name, AVMEDIA_TYPE_VIDEO, 0);
    if (audio_codec_name)
        ic->audio_codec = find_codec_or_die(NULL, audio_codec_name, AVMEDIA_TYPE_AUDIO, 0);
    if (subtitle_codec_name)
        ic->subtitle_codec = find_codec_or_die(NULL, subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0);
    if (data_codec_name)
        ic->data_codec = find_codec_or_die(NULL, data_codec_name, AVMEDIA_TYPE_DATA, 0);
    ic->video_codec_id = video_codec_name ? ic->video_codec->id : AV_CODEC_ID_NONE;
    ic->audio_codec_id = audio_codec_name ? ic->audio_codec->id : AV_CODEC_ID_NONE;
    ic->subtitle_codec_id = subtitle_codec_name ? ic->subtitle_codec->id : AV_CODEC_ID_NONE;
    ic->data_codec_id = data_codec_name ? ic->data_codec->id : AV_CODEC_ID_NONE;
    ic->flags |= AVFMT_FLAG_NONBLOCK;
    if (o->bitexact)
        ic->flags |= AVFMT_FLAG_BITEXACT;
    ic->interrupt_callback = int_cb;
    /* default to scanning all PMTs (MPEG-TS), remembering whether we set
     * it so the synthetic option is removed again below */
    if (!av_dict_get(o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
        av_dict_set(&o->g->format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
        scan_all_pmts_set = 1;
    }
    /* open the input file with generic avformat function */
    err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
    if (err < 0) {
        print_error(filename, err);
        if (err == AVERROR_PROTOCOL_NOT_FOUND)
            av_log(NULL, AV_LOG_ERROR, "Did you mean file:%s?\n", filename);
        exit_program(1);
    }
    if (scan_all_pmts_set)
        av_dict_set(&o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
    remove_avoptions(&o->g->format_opts, o->g->codec_opts);
    assert_avoptions(o->g->format_opts);
    /* apply forced codec ids */
    for (i = 0; i < ic->nb_streams; i++)
        choose_decoder(o, ic, ic->streams[i], HWACCEL_NONE, AV_HWDEVICE_TYPE_NONE);
    if (o->find_stream_info) {
        AVDictionary **opts = setup_find_stream_info_opts(ic, o->g->codec_opts);
        int orig_nb_streams = ic->nb_streams;
        /* If not enough info to get the stream parameters, we decode the
           first frames to get it. (used in mpeg case for example) */
        ret = avformat_find_stream_info(ic, opts);
        for (i = 0; i < orig_nb_streams; i++)
            av_dict_free(&opts[i]);
        av_freep(&opts);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
            if (ic->nb_streams == 0) {
                avformat_close_input(&ic);
                exit_program(1);
            }
        }
    }
    /* -ss and -sseof are mutually exclusive; -sseof seeks relative to EOF */
    if (start_time != AV_NOPTS_VALUE && start_time_eof != AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_WARNING, "Cannot use -ss and -sseof both, using -ss for %s\n", filename);
        start_time_eof = AV_NOPTS_VALUE;
    }
    if (start_time_eof != AV_NOPTS_VALUE) {
        if (start_time_eof >= 0) {
            av_log(NULL, AV_LOG_ERROR, "-sseof value must be negative; aborting\n");
            exit_program(1);
        }
        if (ic->duration > 0) {
            start_time = start_time_eof + ic->duration;
            if (start_time < 0) {
                av_log(NULL, AV_LOG_WARNING, "-sseof value seeks to before start of file %s; ignored\n", filename);
                start_time = AV_NOPTS_VALUE;
            }
        } else
            av_log(NULL, AV_LOG_WARNING, "Cannot use -sseof, duration of %s not known\n", filename);
    }
    timestamp = (start_time == AV_NOPTS_VALUE) ? 0 : start_time;
    /* add the stream start time */
    if (!o->seek_timestamp && ic->start_time != AV_NOPTS_VALUE)
        timestamp += ic->start_time;
    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t seek_timestamp = timestamp;
        if (!(ic->iformat->flags & AVFMT_SEEK_TO_PTS)) {
            int dts_heuristic = 0;
            for (i=0; i<ic->nb_streams; i++) {
                const AVCodecParameters *par = ic->streams[i]->codecpar;
                if (par->video_delay) {
                    dts_heuristic = 1;
                    break;
                }
            }
            /* demuxer seeks on dts: back off a bit so frames delayed by
             * B-frame reordering are not missed */
            if (dts_heuristic) {
                seek_timestamp -= 3*AV_TIME_BASE / 23;
            }
        }
        ret = avformat_seek_file(ic, -1, INT64_MIN, seek_timestamp, seek_timestamp, 0);
        if (ret < 0) {
            av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
                   filename, (double)timestamp / AV_TIME_BASE);
        }
    }
    /* allocate and populate the InputFile/Demuxer */
    d = allocate_array_elem(&input_files, sizeof(*d), &nb_input_files);
    f = &d->f;
    f->ctx = ic;
    f->index = nb_input_files - 1;
    f->start_time = start_time;
    f->recording_time = recording_time;
    f->input_sync_ref = o->input_sync_ref;
    f->input_ts_offset = o->input_ts_offset;
    f->ts_offset = o->input_ts_offset - (copy_ts ? (start_at_zero && ic->start_time != AV_NOPTS_VALUE ? ic->start_time : 0) : timestamp);
    f->rate_emu = o->rate_emu;
    f->accurate_seek = o->accurate_seek;
    d->loop = o->loop;
    d->duration = 0;
    d->time_base = (AVRational){ 1, 1 };
    f->readrate = o->readrate ? o->readrate : 0.0;
    if (f->readrate < 0.0f) {
        av_log(NULL, AV_LOG_ERROR, "Option -readrate for Input #%d is %0.3f; it must be non-negative.\n", f->index, f->readrate);
        exit_program(1);
    }
    if (f->readrate && f->rate_emu) {
        av_log(NULL, AV_LOG_WARNING, "Both -readrate and -re set for Input #%d. Using -readrate %0.3f.\n", f->index, f->readrate);
        f->rate_emu = 0;
    }
    d->thread_queue_size = o->thread_queue_size;
    /* update the current parameters so that they match the one of the input stream */
    add_input_streams(o, d);
    /* dump the file content */
    av_dump_format(ic, f->index, filename, 0);
    /* check if all codec options have been used */
    unused_opts = strip_specifiers(o->g->codec_opts);
    for (i = 0; i < f->nb_streams; i++) {
        e = NULL;
        while ((e = av_dict_iterate(f->streams[i]->decoder_opts, e)))
            av_dict_set(&unused_opts, e->key, NULL, 0);
    }
    e = NULL;
    while ((e = av_dict_iterate(unused_opts, e))) {
        const AVClass *class = avcodec_get_class();
        const AVOption *option = av_opt_find(&class, e->key, NULL, 0,
                                             AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
        const AVClass *fclass = avformat_get_class();
        const AVOption *foption = av_opt_find(&fclass, e->key, NULL, 0,
                                              AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
        /* options also known to the format layer were consumed there */
        if (!option || foption)
            continue;
        if (!(option->flags & AV_OPT_FLAG_DECODING_PARAM)) {
            av_log(NULL, AV_LOG_ERROR, "Codec AVOption %s (%s) specified for "
                   "input file #%d (%s) is not a decoding option.\n", e->key,
                   option->help ? option->help : "", f->index,
                   filename);
            exit_program(1);
        }
        av_log(NULL, AV_LOG_WARNING, "Codec AVOption %s (%s) specified for "
               "input file #%d (%s) has not been used for any stream. The most "
               "likely reason is either wrong type (e.g. a video option with "
               "no video streams) or that it is a private option of some decoder "
               "which was not actually used for any stream.\n", e->key,
               option->help ? option->help : "", f->index, filename);
    }
    av_dict_free(&unused_opts);
    /* dump any requested attachment streams to files */
    for (i = 0; i < o->nb_dump_attachment; i++) {
        int j;
        for (j = 0; j < ic->nb_streams; j++) {
            AVStream *st = ic->streams[j];
            if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
                dump_attachment(st, o->dump_attachment[i].u.str);
        }
    }
    return 0;
}
/*
* ffmpeg filter configuration
* copyright (c) 2018 Taner Sener ( tanersener gmail com )
* Copyright (c) 2018 Taner Sener
* Copyright (c) 2023 ARTHENICA LTD
*
* This file is part of FFmpeg.
*
......@@ -24,6 +25,12 @@
* We manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop mobile-ffmpeg and later ffmpeg-kit libraries.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
*
* mobile-ffmpeg / ffmpeg-kit changes by Taner Sener
*
* 08.2018
......@@ -69,8 +76,9 @@ static const enum AVPixelFormat *get_compliance_normal_pix_fmts(const AVCodec *c
}
}
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx,
const AVCodec *codec, enum AVPixelFormat target)
enum AVPixelFormat
choose_pixel_fmt(const AVCodec *codec, enum AVPixelFormat target,
int strict_std_compliance)
{
if (codec && codec->pix_fmts) {
const enum AVPixelFormat *p = codec->pix_fmts;
......@@ -79,7 +87,7 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx,
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
enum AVPixelFormat best= AV_PIX_FMT_NONE;
if (enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
if (strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_normal_pix_fmts(codec, p);
}
for (; *p != AV_PIX_FMT_NONE; p++) {
......@@ -106,6 +114,7 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx,
static const char *choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint)
{
OutputStream *ost = ofilter->ost;
AVCodecContext *enc = ost->enc_ctx;
const AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
if (strict_dict)
// used by choose_pixel_fmt() and below
......@@ -119,13 +128,14 @@ static const char *choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint)
return av_get_pix_fmt_name(ost->enc_ctx->pix_fmt);
}
if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
return av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt));
} else if (ost->enc && ost->enc->pix_fmts) {
return av_get_pix_fmt_name(choose_pixel_fmt(enc->codec, enc->pix_fmt,
ost->enc_ctx->strict_std_compliance));
} else if (enc->codec->pix_fmts) {
const enum AVPixelFormat *p;
p = ost->enc->pix_fmts;
p = enc->codec->pix_fmts;
if (ost->enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_normal_pix_fmts(ost->enc, p);
p = get_compliance_normal_pix_fmts(enc->codec, p);
}
for (; *p != AV_PIX_FMT_NONE; p++) {
......@@ -133,7 +143,7 @@ static const char *choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint)
av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
}
if (!av_bprint_is_complete(bprint))
exit_program(1);
report_and_exit(AVERROR(ENOMEM));
return bprint->str;
} else
return NULL;
......@@ -197,7 +207,7 @@ int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
InputFilter *ifilter;
if (!fg)
exit_program(1);
report_and_exit(AVERROR(ENOMEM));
fg->index = nb_filtergraphs;
ofilter = ALLOC_ARRAY_ELEM(fg->outputs, fg->nb_outputs);
......@@ -214,7 +224,7 @@ int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
ifilter->frame_queue = av_fifo_alloc2(8, sizeof(AVFrame*), AV_FIFO_FLAG_AUTO_GROW);
if (!ifilter->frame_queue)
exit_program(1);
report_and_exit(AVERROR(ENOMEM));
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters[ist->nb_filters - 1] = ifilter;
......@@ -238,7 +248,7 @@ static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
res = av_asprintf("%s:%s", ctx->filter->name,
avfilter_pad_get_name(pads, inout->pad_idx));
if (!res)
exit_program(1);
report_and_exit(AVERROR(ENOMEM));
return res;
}
......@@ -285,7 +295,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
"matches no streams.\n", p, fg->graph_desc);
exit_program(1);
}
ist = input_streams[input_files[file_idx]->ist_index + st->index];
ist = input_files[file_idx]->streams[st->index];
if (ist->user_set_discard == AVDISCARD_ALL) {
av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
"matches a disabled input stream.\n", p, fg->graph_desc);
......@@ -293,14 +303,13 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
}
} else {
/* find the first unused stream of corresponding type */
for (i = 0; i < nb_input_streams; i++) {
ist = input_streams[i];
for (ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
if (ist->user_set_discard == AVDISCARD_ALL)
continue;
if (ist->dec_ctx->codec_type == type && ist->discard)
break;
}
if (i == nb_input_streams) {
if (!ist) {
av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
"unlabeled input pad %d on filter %s\n", in->pad_idx,
in->filter_ctx->name);
......@@ -323,12 +332,162 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
ifilter->frame_queue = av_fifo_alloc2(8, sizeof(AVFrame*), AV_FIFO_FLAG_AUTO_GROW);
if (!ifilter->frame_queue)
exit_program(1);
report_and_exit(AVERROR(ENOMEM));
GROW_ARRAY(ist->filters, ist->nb_filters);
ist->filters[ist->nb_filters - 1] = ifilter;
}
/**
 * Read the entire contents of a file into a freshly allocated buffer.
 *
 * @param path  path of the file to read (opened via avio, so protocols work)
 * @param data  on success *data points to an av_malloc()ed buffer owned by
 *              the caller (free with av_freep()); NULL on failure
 * @param len   on success the number of bytes read; 0 on failure
 * @return 0 on success, a negative AVERROR code on failure
 */
static int read_binary(const char *path, uint8_t **data, int *len)
{
    AVIOContext *io = NULL;
    int64_t fsize;
    int ret;

    *data = NULL;
    *len  = 0;

    ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
               path, av_err2str(ret));
        return ret;
    }

    // avio_size() may fail on non-seekable inputs; also reject files that do
    // not fit the int length the callers (av_opt_set_bin) expect.
    fsize = avio_size(io);
    if (fsize < 0 || fsize > INT_MAX) {
        av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
        ret = AVERROR(EIO);
        goto fail;
    }

    *data = av_malloc(fsize);
    if (!*data) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = avio_read(io, *data, fsize);
    if (ret != fsize) {
        av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
        ret = ret < 0 ? ret : AVERROR(EIO);
        goto fail;
    }

    *len = fsize;
    ret  = 0;
fail:
    // BUGFIX: the previous version returned success without closing io,
    // leaking the AVIOContext; always close it here.
    avio_close(io);
    if (ret < 0) {
        av_freep(data);
        *len = 0;
    }
    return ret;
}
/**
 * Apply a single option key/value pair to a filter instance.
 *
 * First tries a direct av_opt_set(). If the option is not found and the key
 * starts with '/', the key (without the slash) is looked up again and, if it
 * names a real option, 'val' is treated as a path to a file whose contents
 * become the option value (binary options are loaded as raw bytes, all other
 * types as text).
 *
 * @return 0 on success, a negative AVERROR code on failure (an error message
 *         has already been logged).
 */
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
{
    const AVOption *o = NULL;
    int ret;
    // Fast path: plain option assignment.
    ret = av_opt_set(f, key, val, AV_OPT_SEARCH_CHILDREN);
    if (ret >= 0)
        return 0;
    // Only the "option not found" failure with a leading '/' triggers the
    // load-from-file fallback; any other error is reported as-is.
    if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
        o = av_opt_find(f, key + 1, NULL, 0, AV_OPT_SEARCH_CHILDREN);
    if (!o)
        goto err_apply;
    // key is a valid option name prefixed with '/'
    // interpret value as a path from which to load the actual option value
    key++;
    if (o->type == AV_OPT_TYPE_BINARY) {
        uint8_t *data;
        int len;
        ret = read_binary(val, &data, &len);
        if (ret < 0)
            goto err_load;
        ret = av_opt_set_bin(f, key, data, len, AV_OPT_SEARCH_CHILDREN);
        av_freep(&data);
    } else {
        // file_read() returns an allocated NUL-terminated string or NULL.
        char *data = file_read(val);
        if (!data) {
            ret = AVERROR(EIO);
            goto err_load;
        }
        ret = av_opt_set(f, key, data, AV_OPT_SEARCH_CHILDREN);
        av_freep(&data);
    }
    if (ret < 0)
        goto err_apply;
    return 0;
// NOTE: 'key' has already been advanced past the '/' on the fallback path,
// so both messages print the bare option name.
err_apply:
    av_log(NULL, AV_LOG_ERROR,
           "Error applying option '%s' to filter '%s': %s\n",
           key, f->filter->name, av_err2str(ret));
    return ret;
err_load:
    av_log(NULL, AV_LOG_ERROR,
           "Error loading value for option '%s' from file '%s'\n",
           key, val);
    return ret;
}
/**
 * Apply the parsed per-filter options of every filter in every chain of the
 * given graph segment, then drop the option dictionaries.
 *
 * @return 0 on success, the first negative error from filter_opt_apply()
 *         otherwise.
 */
static int graph_opts_apply(AVFilterGraphSegment *seg)
{
    for (size_t chain_idx = 0; chain_idx < seg->nb_chains; chain_idx++) {
        AVFilterChain *chain = seg->chains[chain_idx];

        for (size_t filt_idx = 0; filt_idx < chain->nb_filters; filt_idx++) {
            AVFilterParams *params = chain->filters[filt_idx];
            const AVDictionaryEntry *opt = NULL;

            // Filters must have been instantiated by
            // avfilter_graph_segment_create_filters() before this runs.
            av_assert0(params->filter);

            while ((opt = av_dict_iterate(params->opts, opt))) {
                int err = filter_opt_apply(params->filter, opt->key, opt->value);
                if (err < 0)
                    return err;
            }

            av_dict_free(&params->opts);
        }
    }

    return 0;
}
/**
 * Parse a filtergraph description into 'graph' using the segment API:
 * parse the description, instantiate the filters, apply their options,
 * then link everything and return the dangling inputs/outputs.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int graph_parse(AVFilterGraph *graph, const char *desc,
                       AVFilterInOut **inputs, AVFilterInOut **outputs)
{
    AVFilterGraphSegment *seg = NULL;
    int ret;

    ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
    if (ret < 0)
        return ret;

    ret = avfilter_graph_segment_create_filters(seg, 0);
    if (ret >= 0)
        ret = graph_opts_apply(seg);
    if (ret >= 0)
        ret = avfilter_graph_segment_apply(seg, 0, inputs, outputs);

    // The segment is a parsing artifact only; free it on all paths.
    avfilter_graph_segment_free(&seg);
    return ret;
}
int init_complex_filtergraph(FilterGraph *fg)
{
AVFilterInOut *inputs, *outputs, *cur;
......@@ -342,7 +501,7 @@ int init_complex_filtergraph(FilterGraph *fg)
return AVERROR(ENOMEM);
graph->nb_threads = 1;
ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
ret = graph_parse(graph, fg->graph_desc, &inputs, &outputs);
if (ret < 0)
goto fail;
......@@ -467,8 +626,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
snprintf(args, sizeof(args), "%d:%d",
ofilter->width, ofilter->height);
while ((e = av_dict_get(ost->sws_dict, "", e,
AV_DICT_IGNORE_SUFFIX))) {
while ((e = av_dict_iterate(ost->sws_dict, e))) {
av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
}
......@@ -575,6 +733,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
pad_idx = 0; \
} while (0)
av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED);
#if FFMPEG_OPT_MAP_CHANNEL
if (ost->audio_channels_mapped) {
AVChannelLayout mapped_layout = { 0 };
int i;
......@@ -587,6 +746,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
AUTO_INSERT_FILTER("-map_channel", "pan", args.str);
av_bprint_clear(&args);
}
#endif
if (codec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
av_channel_layout_default(&codec->ch_layout, codec->ch_layout.nb_channels);
......@@ -620,11 +780,11 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
if (ost->apad && of->shortest) {
int i;
for (i=0; i<of->ctx->nb_streams; i++)
if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
for (i = 0; i < of->nb_streams; i++)
if (of->streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
break;
if (i<of->ctx->nb_streams) {
if (i < of->nb_streams) {
AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
}
}
......@@ -751,7 +911,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
}
if (!fr.num)
fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
fr = ist->framerate_guessed;
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
ret = sub2video_prepare(ist, ifilter);
......@@ -904,40 +1064,6 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
last_filter = filt_ctx; \
} while (0)
if (audio_sync_method > 0) {
char args[256] = {0};
av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
if (audio_drift_threshold != 0.1)
av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
if (!fg->reconfiguration)
av_strlcatf(args, sizeof(args), ":first_pts=0");
AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
}
// if (ost->audio_channels_mapped) {
// int i;
// AVBPrint pan_buf;
// av_bprint_init(&pan_buf, 256, 8192);
// av_bprintf(&pan_buf, "0x%"PRIx64,
// av_get_default_channel_layout(ost->audio_channels_mapped));
// for (i = 0; i < ost->audio_channels_mapped; i++)
// if (ost->audio_channels_map[i] != -1)
// av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
// AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
// av_bprint_finalize(&pan_buf, NULL);
// }
if (audio_volume != 256) {
char args[256];
av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
"audio filter instead.\n");
snprintf(args, sizeof(args), "%f", audio_volume / 256.);
AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
}
snprintf(name, sizeof(name), "trim for input stream %d:%d",
ist->file_index, ist->st->index);
if (copy_ts) {
......@@ -1020,44 +1146,39 @@ int configure_filtergraph(FilterGraph *fg)
if (simple) {
OutputStream *ost = fg->outputs[0]->ost;
char args[512];
const AVDictionaryEntry *e = NULL;
if (filter_nbthreads) {
ret = av_opt_set(fg->graph, "threads", filter_nbthreads, 0);
if (ret < 0)
goto fail;
} else {
const AVDictionaryEntry *e = NULL;
e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
if (e)
av_opt_set(fg->graph, "threads", e->value, 0);
}
args[0] = 0;
e = NULL;
while ((e = av_dict_get(ost->sws_dict, "", e,
AV_DICT_IGNORE_SUFFIX))) {
av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
}
if (strlen(args)) {
args[strlen(args)-1] = 0;
fg->graph->scale_sws_opts = av_strdup(args);
if (av_dict_count(ost->sws_dict)) {
ret = av_dict_get_string(ost->sws_dict,
&fg->graph->scale_sws_opts,
'=', ':');
if (ret < 0)
goto fail;
}
args[0] = 0;
e = NULL;
while ((e = av_dict_get(ost->swr_opts, "", e,
AV_DICT_IGNORE_SUFFIX))) {
av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
if (av_dict_count(ost->swr_opts)) {
char *args;
ret = av_dict_get_string(ost->swr_opts, &args, '=', ':');
if (ret < 0)
goto fail;
av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
av_free(args);
}
if (strlen(args))
args[strlen(args)-1] = 0;
av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
} else {
fg->graph->nb_threads = filter_complex_nbthreads;
}
if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
if ((ret = graph_parse(fg->graph, graph_desc, &inputs, &outputs)) < 0)
goto fail;
ret = hw_device_setup_for_filter(fg);
......@@ -1131,16 +1252,8 @@ int configure_filtergraph(FilterGraph *fg)
for (i = 0; i < fg->nb_outputs; i++) {
OutputStream *ost = fg->outputs[i]->ost;
if (!ost->enc) {
/* identical to the same check in ffmpeg.c, needed because
complex filter graphs are initialized earlier */
av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
ret = AVERROR(EINVAL);
goto fail;
}
if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
!(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO &&
!(ost->enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
av_buffersink_set_frame_size(ost->filter->filter,
ost->enc_ctx->frame_size);
}
......
/*
* copyright (c) 2018 Taner Sener ( tanersener gmail com )
* Copyright (c) 2018-2019 Taner Sener
* Copyright (c) 2023 ARTHENICA LTD
*
* This file is part of FFmpeg.
*
......@@ -23,6 +24,12 @@
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop mobile-ffmpeg and later ffmpeg-kit libraries.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
*
* mobile-ffmpeg / ffmpeg-kit changes by Taner Sener
*
* 12.2019
......@@ -357,7 +364,7 @@ int hw_device_setup_for_decode(InputStream *ist)
if (ist->hwaccel_id == HWACCEL_AUTO) {
ist->hwaccel_device_type = dev->type;
} else if (ist->hwaccel_device_type != dev->type) {
av_log(ist->dec_ctx, AV_LOG_ERROR, "Invalid hwaccel device "
av_log(NULL, AV_LOG_ERROR, "Invalid hwaccel device "
"specified for decoder: device %s of type %s is not "
"usable with hwaccel %s.\n", dev->name,
av_hwdevice_get_type_name(dev->type),
......@@ -408,7 +415,7 @@ int hw_device_setup_for_decode(InputStream *ist)
type = config->device_type;
dev = hw_device_get_by_type(type);
if (dev) {
av_log(ist->dec_ctx, AV_LOG_INFO, "Using auto "
av_log(NULL, AV_LOG_INFO, "Using auto "
"hwaccel type %s with existing device %s.\n",
av_hwdevice_get_type_name(type), dev->name);
}
......@@ -426,12 +433,12 @@ int hw_device_setup_for_decode(InputStream *ist)
continue;
}
if (ist->hwaccel_device) {
av_log(ist->dec_ctx, AV_LOG_INFO, "Using auto "
av_log(NULL, AV_LOG_INFO, "Using auto "
"hwaccel type %s with new device created "
"from %s.\n", av_hwdevice_get_type_name(type),
ist->hwaccel_device);
} else {
av_log(ist->dec_ctx, AV_LOG_INFO, "Using auto "
av_log(NULL, AV_LOG_INFO, "Using auto "
"hwaccel type %s with new default device.\n",
av_hwdevice_get_type_name(type));
}
......@@ -439,7 +446,7 @@ int hw_device_setup_for_decode(InputStream *ist)
if (dev) {
ist->hwaccel_device_type = type;
} else {
av_log(ist->dec_ctx, AV_LOG_INFO, "Auto hwaccel "
av_log(NULL, AV_LOG_INFO, "Auto hwaccel "
"disabled: no device found.\n");
ist->hwaccel_id = HWACCEL_NONE;
return 0;
......@@ -447,7 +454,7 @@ int hw_device_setup_for_decode(InputStream *ist)
}
if (!dev) {
av_log(ist->dec_ctx, AV_LOG_ERROR, "No device available "
av_log(NULL, AV_LOG_ERROR, "No device available "
"for decoder: device type %s needed for codec %s.\n",
av_hwdevice_get_type_name(type), ist->dec->name);
return err;
......@@ -479,7 +486,7 @@ int hw_device_setup_for_encode(OutputStream *ost)
}
for (i = 0;; i++) {
config = avcodec_get_hw_config(ost->enc, i);
config = avcodec_get_hw_config(ost->enc_ctx->codec, i);
if (!config)
break;
......@@ -490,7 +497,7 @@ int hw_device_setup_for_encode(OutputStream *ost)
av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using input "
"frames context (format %s) with %s encoder.\n",
av_get_pix_fmt_name(ost->enc_ctx->pix_fmt),
ost->enc->name);
ost->enc_ctx->codec->name);
ost->enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref);
if (!ost->enc_ctx->hw_frames_ctx)
return AVERROR(ENOMEM);
......@@ -505,7 +512,7 @@ int hw_device_setup_for_encode(OutputStream *ost)
if (dev) {
av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using device %s "
"(type %s) with %s encoder.\n", dev->name,
av_hwdevice_get_type_name(dev->type), ost->enc->name);
av_hwdevice_get_type_name(dev->type), ost->enc_ctx->codec->name);
ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
if (!ost->enc_ctx->hw_device_ctx)
return AVERROR(ENOMEM);
......
/*
* This file is part of FFmpeg.
* copyright (c) 2022 Taner Sener ( tanersener gmail com )
* Copyright (c) 2022 Taner Sener
* Copyright (c) 2023 ARTHENICA LTD
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
......@@ -22,108 +23,101 @@
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop the ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
* - fftools header names updated
* - want_sdp marked as thread-local
* - ms_from_ost migrated from ffmpeg_mux.c and marked as non-static
*
* ffmpeg-kit changes by Taner Sener
*
* 09.2022
* --------------------------------------------------------
* - fftools_ prefix added to fftools headers
* - using main_ffmpeg_return_code instead of main_return_code
* - printf replaced with av_log statements
*/
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>
#include "fftools_ffmpeg.h"
#include "fftools_ffmpeg_mux.h"
#include "fftools_objpool.h"
#include "fftools_sync_queue.h"
#include "fftools_thread_queue.h"
#include "libavutil/fifo.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/timestamp.h"
#include "libavutil/thread.h"
#include "libavcodec/packet.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
__thread int want_sdp = 1;
MuxStream *ms_from_ost(OutputStream *ost)
{
int i;
for (i = 0; i < nb_output_streams; i++) {
OutputStream *ost2 = output_streams[i];
ost2->finished |= ost == ost2 ? this_stream : others;
return (MuxStream*)ost;
}
// Recover the Muxer that embeds the given OutputFile (mirror of ms_from_ost).
static Muxer *mux_from_of(OutputFile *of)
{
    Muxer *mux = (Muxer *)of;
    return mux;
}
/**
 * Best-effort size of the data written to an AVIO context.
 *
 * @param pb  may be NULL (e.g. muxers that do their own I/O), in which
 *            case -1 is returned
 * @return the byte size, the current write position as a fallback, or -1
 */
static int64_t filesize(AVIOContext *pb)
{
    if (!pb)
        return -1;

    int64_t size = avio_size(pb);
    // FIXME improve avio_size() so it works with non seekable output too
    if (size <= 0)
        size = avio_tell(pb);

    return size;
}
void of_write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost,
int unqueue)
static int write_packet(Muxer *mux, OutputStream *ost, AVPacket *pkt)
{
AVFormatContext *s = of->ctx;
MuxStream *ms = ms_from_ost(ost);
AVFormatContext *s = mux->fc;
AVStream *st = ost->st;
int64_t fs;
uint64_t frame_num;
int ret;
/*
* Audio encoders may split the packets -- #frames in != #packets out.
* But there is no reordering, so we can limit the number of output packets
* by simply dropping them here.
* Counting encoded video frames needs to be done separately because of
* reordering, see do_video_out().
* Do not count the packet when unqueued because it has been counted when queued.
*/
if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
if (ost->frame_number >= ost->max_frames) {
av_packet_unref(pkt);
return;
}
ost->frame_number++;
}
if (!of->header_written) {
AVPacket *tmp_pkt;
/* the muxer is not initialized yet, buffer the packet */
if (!av_fifo_can_write(ost->muxing_queue)) {
size_t cur_size = av_fifo_can_read(ost->muxing_queue);
unsigned int are_we_over_size =
(ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
size_t limit = are_we_over_size ? ost->max_muxing_queue_size : SIZE_MAX;
size_t new_size = FFMIN(2 * cur_size, limit);
if (new_size <= cur_size) {
av_log(NULL, AV_LOG_ERROR,
"Too many packets buffered for output stream %d:%d.\n",
ost->file_index, ost->st->index);
exit_program(1);
}
ret = av_fifo_grow2(ost->muxing_queue, new_size - cur_size);
if (ret < 0)
exit_program(1);
}
ret = av_packet_make_refcounted(pkt);
if (ret < 0)
exit_program(1);
tmp_pkt = av_packet_alloc();
if (!tmp_pkt)
exit_program(1);
av_packet_move_ref(tmp_pkt, pkt);
ost->muxing_queue_data_size += tmp_pkt->size;
av_fifo_write(ost->muxing_queue, &tmp_pkt, 1);
return;
fs = filesize(s->pb);
atomic_store(&mux->last_filesize, fs);
if (fs >= mux->limit_filesize) {
ret = AVERROR_EOF;
goto fail;
}
if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->vsync_method == VSYNC_DROP) ||
(st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->vsync_method == VSYNC_DROP)
pkt->pts = pkt->dts = AV_NOPTS_VALUE;
if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
if (ost->frame_rate.num && ost->is_cfr) {
if (pkt->duration > 0)
av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
av_log(ost, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
ost->mux_timebase);
pkt->time_base);
}
}
av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
av_packet_rescale_ts(pkt, pkt->time_base, ost->st->time_base);
pkt->time_base = ost->st->time_base;
if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
if (pkt->dts != AV_NOPTS_VALUE &&
......@@ -133,25 +127,26 @@ void of_write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost,
pkt->dts, pkt->pts,
ost->file_index, ost->st->index);
pkt->pts =
pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
- FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
- FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
pkt->dts = pkt->pts + pkt->dts + ms->last_mux_dts + 1
- FFMIN3(pkt->pts, pkt->dts, ms->last_mux_dts + 1)
- FFMAX3(pkt->pts, pkt->dts, ms->last_mux_dts + 1);
}
if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
pkt->dts != AV_NOPTS_VALUE &&
ost->last_mux_dts != AV_NOPTS_VALUE) {
int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
ms->last_mux_dts != AV_NOPTS_VALUE) {
int64_t max = ms->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
if (pkt->dts < max) {
int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
if (exit_on_error)
loglevel = AV_LOG_ERROR;
av_log(s, loglevel, "Non-monotonous DTS in output stream "
"%d:%d; previous: %"PRId64", current: %"PRId64"; ",
ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
ost->file_index, ost->st->index, ms->last_mux_dts, pkt->dts);
if (exit_on_error) {
av_log(NULL, AV_LOG_FATAL, "aborting.\n");
exit_program(1);
ret = AVERROR(EINVAL);
goto fail;
}
av_log(s, loglevel, "changing to %"PRId64". This may result "
"in incorrect timestamps in the output file.\n",
max);
......@@ -161,17 +156,17 @@ void of_write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost,
}
}
}
ost->last_mux_dts = pkt->dts;
ms->last_mux_dts = pkt->dts;
ost->data_size += pkt->size;
ost->packets_written++;
ost->data_size_mux += pkt->size;
frame_num = atomic_fetch_add(&ost->packets_written, 1);
pkt->stream_index = ost->index;
if (debug_ts) {
av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
av_log(ost, AV_LOG_INFO, "muxer <- type:%s "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s size:%d\n",
av_get_media_type_string(ost->enc_ctx->codec_type),
av_get_media_type_string(st->codecpar->codec_type),
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ost->st->time_base),
......@@ -179,12 +174,307 @@ void of_write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost,
);
}
if (ms->stats.io)
enc_stats_write(ost, &ms->stats, NULL, pkt, frame_num);
ret = av_interleaved_write_frame(s, pkt);
if (ret < 0) {
print_error("av_interleaved_write_frame()", ret);
main_ffmpeg_return_code = 1;
close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
goto fail;
}
return 0;
fail:
av_packet_unref(pkt);
return ret;
}
/**
 * Route a packet through the muxer's sync queue, if this stream uses one,
 * otherwise write it directly.
 *
 * When the stream participates in the sync queue, the packet is submitted
 * and then every packet the queue is ready to release (possibly for other
 * streams) is drained and written.
 *
 * @param pkt         packet to process; NULL signals EOF for the stream
 * @param stream_eof  set to 1 when sq_send() reports EOF for this stream
 * @return 0 on success, a negative AVERROR code on failure
 */
static int sync_queue_process(Muxer *mux, OutputStream *ost, AVPacket *pkt, int *stream_eof)
{
    OutputFile *of = &mux->of;
    if (ost->sq_idx_mux >= 0) {
        int ret = sq_send(mux->sq_mux, ost->sq_idx_mux, SQPKT(pkt));
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                *stream_eof = 1;
            return ret;
        }
        // Drain everything the sync queue releases; -1 asks for any stream,
        // and the return value identifies which stream the packet belongs to.
        while (1) {
            ret = sq_receive(mux->sq_mux, -1, SQPKT(mux->sq_pkt));
            if (ret < 0)
                // EOF/EAGAIN just mean "nothing more to drain right now".
                return (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) ? 0 : ret;
            ret = write_packet(mux, of->streams[ret],
                               mux->sq_pkt);
            if (ret < 0)
                return ret;
        }
    } else if (pkt)
        return write_packet(mux, ost, pkt);
    return 0;
}
// Give the muxer thread a debugger-visible name: "mux<file index>:<format>".
static void thread_set_name(OutputFile *of)
{
    char buf[16];

    snprintf(buf, sizeof(buf), "mux%d:%s", of->index, of->format->name);
    ff_thread_setname(buf);
}
/**
 * Entry point of the per-output-file muxer thread.
 *
 * Receives packets from the thread queue and feeds them to
 * sync_queue_process() until all streams are finished or a muxing error
 * occurs. The (negative) error code is returned through the thread's exit
 * value, cast to intptr_t.
 */
static void *muxer_thread(void *arg)
{
    Muxer *mux = arg;
    OutputFile *of = &mux->of;
    AVPacket *pkt = NULL;
    int ret = 0;
    pkt = av_packet_alloc();
    if (!pkt) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }
    thread_set_name(of);
    while (1) {
        OutputStream *ost;
        int stream_idx, stream_eof = 0;
        ret = tq_receive(mux->tq, &stream_idx, pkt);
        // A negative stream index means every stream signalled completion.
        if (stream_idx < 0) {
            av_log(mux, AV_LOG_VERBOSE, "All streams finished\n");
            ret = 0;
            break;
        }
        ost = of->streams[stream_idx];
        // A negative tq_receive() result with a valid stream index signals
        // EOF on that stream: forward NULL so downstream queues flush.
        ret = sync_queue_process(mux, ost, ret < 0 ? NULL : pkt, &stream_eof);
        av_packet_unref(pkt);
        if (ret == AVERROR_EOF && stream_eof)
            // Expected per-stream EOF: stop receiving for this stream only.
            tq_receive_finish(mux->tq, stream_idx);
        else if (ret < 0) {
            av_log(mux, AV_LOG_ERROR, "Error muxing a packet\n");
            break;
        }
    }
finish:
    av_packet_free(&pkt);
    // Unblock any senders still waiting on the queue before exiting.
    for (unsigned int i = 0; i < mux->fc->nb_streams; i++)
        tq_receive_finish(mux->tq, i);
    av_log(mux, AV_LOG_VERBOSE, "Terminating muxer thread\n");
    return (void*)(intptr_t)ret;
}
/**
 * Hand a packet to the muxer thread via the thread queue.
 *
 * A NULL packet — or a stream already marked MUXER_FINISHED — closes the
 * stream's side of the queue instead.
 *
 * @return 0 on success or orderly EOF, a negative AVERROR code otherwise.
 */
static int thread_submit_packet(Muxer *mux, OutputStream *ost, AVPacket *pkt)
{
    int ret = 0;

    if (pkt && !(ost->finished & MUXER_FINISHED)) {
        ret = tq_send(mux->tq, ost->index, pkt);
        if (ret >= 0)
            return 0;
    }

    // EOF requested, stream already closed, or the send failed:
    // drop the packet and signal end-of-stream to the muxer thread.
    if (pkt)
        av_packet_unref(pkt);

    ost->finished |= MUXER_FINISHED;
    tq_send_finish(mux->tq, ost->index);

    return ret == AVERROR_EOF ? 0 : ret;
}
/**
 * Buffer a packet in the per-stream muxing queue, used while the muxer is
 * not yet initialized (header not written, thread not started).
 *
 * The FIFO is doubled on demand; once the buffered byte total exceeds
 * muxing_queue_data_threshold, growth is capped at max_muxing_queue_size
 * entries.
 *
 * @param pkt  packet to queue; NULL queues an EOF marker
 * @return 0 on success, AVERROR(ENOSPC) when the queue limit is hit, or
 *         another negative AVERROR code
 */
static int queue_packet(Muxer *mux, OutputStream *ost, AVPacket *pkt)
{
    MuxStream *ms = ms_from_ost(ost);
    AVPacket *tmp_pkt = NULL;
    int ret;
    if (!av_fifo_can_write(ms->muxing_queue)) {
        size_t cur_size = av_fifo_can_read(ms->muxing_queue);
        size_t pkt_size = pkt ? pkt->size : 0;
        unsigned int are_we_over_size =
            (ms->muxing_queue_data_size + pkt_size) > ms->muxing_queue_data_threshold;
        // Below the data threshold the queue may grow without bound;
        // above it, the entry-count limit applies.
        size_t limit = are_we_over_size ? ms->max_muxing_queue_size : SIZE_MAX;
        size_t new_size = FFMIN(2 * cur_size, limit);
        if (new_size <= cur_size) {
            av_log(ost, AV_LOG_ERROR,
                   "Too many packets buffered for output stream %d:%d.\n",
                   ost->file_index, ost->st->index);
            return AVERROR(ENOSPC);
        }
        ret = av_fifo_grow2(ms->muxing_queue, new_size - cur_size);
        if (ret < 0)
            return ret;
    }
    if (pkt) {
        // Store an owned, refcounted copy; the caller's pkt is emptied.
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            return ret;
        tmp_pkt = av_packet_alloc();
        if (!tmp_pkt)
            return AVERROR(ENOMEM);
        av_packet_move_ref(tmp_pkt, pkt);
        ms->muxing_queue_data_size += tmp_pkt->size;
    }
    // A NULL entry in the FIFO represents EOF for this stream.
    av_fifo_write(ms->muxing_queue, &tmp_pkt, 1);
    return 0;
}
/**
 * Dispatch a packet towards the muxer: once the muxer thread exists
 * (mux->tq set) hand it over directly, otherwise buffer it until the
 * muxer is initialized.
 *
 * @param pkt  packet to submit; NULL signals EOF
 * @return 0 on success, a negative AVERROR code on failure
 */
static int submit_packet(Muxer *mux, AVPacket *pkt, OutputStream *ost)
{
    if (mux->tq)
        return thread_submit_packet(mux, ost, pkt);

    /* the muxer is not initialized yet, buffer the packet */
    int ret = queue_packet(mux, ost, pkt);
    if (ret < 0) {
        if (pkt)
            av_packet_unref(pkt);
        return ret;
    }

    return 0;
}
/**
 * Send an encoded/copied packet to the output file, running it through the
 * stream's bitstream filters first, if any.
 *
 * @param pkt  packet to output; its contents are consumed
 * @param eof  nonzero to flush the stream (pkt contents ignored as data)
 *
 * On error a message is logged and, when exit_on_error is set, the program
 * terminates.
 */
void of_output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
{
    Muxer *mux = mux_from_of(of);
    MuxStream *ms = ms_from_ost(ost);
    const char *err_msg;
    int ret = 0;
    // Track the latest DTS (in AV_TIME_BASE units) seen for this stream.
    if (!eof && pkt->dts != AV_NOPTS_VALUE)
        ost->last_mux_dts = av_rescale_q(pkt->dts, pkt->time_base, AV_TIME_BASE_Q);
    /* apply the output bitstream filters */
    if (ms->bsf_ctx) {
        int bsf_eof = 0;
        // NULL input flushes the bitstream filter chain.
        ret = av_bsf_send_packet(ms->bsf_ctx, eof ? NULL : pkt);
        if (ret < 0) {
            err_msg = "submitting a packet for bitstream filtering";
            goto fail;
        }
        // Drain every packet the filter produces; on BSF EOF submit one
        // final NULL to propagate the flush to the muxer.
        while (!bsf_eof) {
            ret = av_bsf_receive_packet(ms->bsf_ctx, pkt);
            if (ret == AVERROR(EAGAIN))
                return;
            else if (ret == AVERROR_EOF)
                bsf_eof = 1;
            else if (ret < 0) {
                err_msg = "applying bitstream filters to a packet";
                goto fail;
            }
            ret = submit_packet(mux, bsf_eof ? NULL : pkt, ost);
            if (ret < 0)
                goto mux_fail;
        }
    } else {
        ret = submit_packet(mux, eof ? NULL : pkt, ost);
        if (ret < 0)
            goto mux_fail;
    }
    return;
mux_fail:
    err_msg = "submitting a packet to the muxer";
fail:
    av_log(ost, AV_LOG_ERROR, "Error %s\n", err_msg);
    if (exit_on_error)
        exit_program(1);
}
/**
 * Stop the muxer thread: signal EOF on every stream's queue, join the
 * thread, and free the thread queue.
 *
 * @return the muxer thread's exit status, or 0 if no thread was running.
 */
static int thread_stop(Muxer *mux)
{
    void *thread_ret;

    if (!mux || !mux->tq)
        return 0;

    for (unsigned int i = 0; i < mux->fc->nb_streams; i++)
        tq_send_finish(mux->tq, i);

    pthread_join(mux->thread, &thread_ret);
    tq_free(&mux->tq);

    return (int)(intptr_t)thread_ret;
}
static void pkt_move(void *dst, void *src)
{
av_packet_move_ref(dst, src);
}
/**
 * Start the muxer thread for this output file and flush any packets that
 * were buffered in the per-stream muxing queues before initialization.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int thread_start(Muxer *mux)
{
    AVFormatContext *fc = mux->fc;
    ObjPool *op;
    int ret;
    // Packet pool backing the thread queue entries.
    op = objpool_alloc_packets();
    if (!op)
        return AVERROR(ENOMEM);
    mux->tq = tq_alloc(fc->nb_streams, mux->thread_queue_size, op, pkt_move);
    if (!mux->tq) {
        objpool_free(&op);
        return AVERROR(ENOMEM);
    }
    ret = pthread_create(&mux->thread, NULL, muxer_thread, (void*)mux);
    if (ret) {
        tq_free(&mux->tq);
        // pthread_create() returns a positive errno value, not -1/errno.
        return AVERROR(ret);
    }
    /* flush the muxing queues */
    for (int i = 0; i < fc->nb_streams; i++) {
        OutputStream *ost = mux->of.streams[i];
        MuxStream *ms = ms_from_ost(ost);
        AVPacket *pkt;
        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_can_read(ms->muxing_queue))
            ost->mux_timebase = ost->st->time_base;
        // Drain buffered packets (a NULL entry marks EOF) into the thread
        // queue now that the thread is running.
        while (av_fifo_read(ms->muxing_queue, &pkt, 1) >= 0) {
            ret = thread_submit_packet(mux, ost, pkt);
            if (pkt) {
                ms->muxing_queue_data_size -= pkt->size;
                av_packet_free(&pkt);
            }
            if (ret < 0)
                return ret;
        }
    }
    return 0;
}
static int print_sdp(void)
......@@ -196,16 +486,16 @@ static int print_sdp(void)
AVFormatContext **avc;
for (i = 0; i < nb_output_files; i++) {
if (!output_files[i]->header_written)
if (!mux_from_of(output_files[i])->header_written)
return 0;
}
avc = av_malloc_array(nb_output_files, sizeof(*avc));
if (!avc)
exit_program(1);
return AVERROR(ENOMEM);
for (i = 0, j = 0; i < nb_output_files; i++) {
if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
avc[j] = output_files[i]->ctx;
if (!strcmp(output_files[i]->format->name, "rtp")) {
avc[j] = mux_from_of(output_files[i])->fc;
j++;
}
}
......@@ -221,7 +511,7 @@ static int print_sdp(void)
goto fail;
if (!sdp_filename) {
printf("SDP:\n%s\n", sdp);
av_log(NULL, AV_LOG_ERROR, "SDP:\n%s\n", sdp);
fflush(stdout);
} else {
ret = avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL);
......@@ -235,34 +525,36 @@ static int print_sdp(void)
av_freep(&sdp_filename);
}
// SDP successfully written, allow muxer threads to start
ret = 1;
fail:
av_freep(&avc);
return ret;
}
/*
 * NOTE(review): corrupted diff residue — the pre-change of_check_init()
 * and post-change mux_check_init() bodies are interleaved line-by-line
 * (both signatures appear back to back below), and a `......@@` hunk
 * header remains. Restore from upstream before editing.
 *
 * Intended purpose (post-change version): once every stream of this
 * output file is initialized, write the container header, print the
 * format dump, emit the SDP if requested, and start the muxer thread(s).
 */
/* open the muxer when all the streams are initialized */
int of_check_init(OutputFile *of)
int mux_check_init(Muxer *mux)
{
OutputFile *of = &mux->of;
AVFormatContext *fc = mux->fc;
int ret, i;
for (i = 0; i < of->ctx->nb_streams; i++) {
OutputStream *ost = output_streams[of->ost_index + i];
for (i = 0; i < fc->nb_streams; i++) {
OutputStream *ost = of->streams[i];
if (!ost->initialized)
return 0;
}
ret = avformat_write_header(of->ctx, &of->opts);
ret = avformat_write_header(fc, &mux->opts);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR,
"Could not write header for output file #%d "
"(incorrect codec parameters ?): %s\n",
of->index, av_err2str(ret));
av_log(mux, AV_LOG_ERROR, "Could not write header (incorrect codec "
"parameters ?): %s\n", av_err2str(ret));
return ret;
}
//assert_avoptions(of->opts);
of->header_written = 1;
mux->header_written = 1;
av_dump_format(of->ctx, of->index, of->ctx->url, 1);
av_dump_format(fc, of->index, fc->url, 1);
nb_output_dumped++;
if (sdp_filename || want_sdp) {
......@@ -270,62 +562,220 @@ int of_check_init(OutputFile *of)
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error writing the SDP.\n");
return ret;
} else if (ret == 1) {
/* SDP is written only after all the muxers are ready, so now we
* start ALL the threads */
for (i = 0; i < nb_output_files; i++) {
ret = thread_start(mux_from_of(output_files[i]));
if (ret < 0)
return ret;
}
}
} else {
ret = thread_start(mux_from_of(of));
if (ret < 0)
return ret;
}
/* flush the muxing queues */
for (i = 0; i < of->ctx->nb_streams; i++) {
OutputStream *ost = output_streams[of->ost_index + i];
AVPacket *pkt;
return 0;
}
/* try to improve muxing time_base (only possible if nothing has been written yet) */
if (!av_fifo_can_read(ost->muxing_queue))
ost->mux_timebase = ost->st->time_base;
/*
 * Set up the stream's bitstream-filter chain (if any): feed it the
 * stream's codec parameters and timebase, initialize it, and copy the
 * filtered parameters/timebase back onto the stream. No-op when the
 * stream has no BSFs. Returns 0 or a negative AVERROR.
 *
 * NOTE(review): lines 6-10 of this body (the av_fifo_read loop using
 * `pkt`/`of`, which are not declared here) are stale diff residue from
 * the removed pre-change queue-flush code — they do not belong to
 * bsf_init() and will not compile. Remove them when restoring from
 * upstream.
 */
static int bsf_init(MuxStream *ms)
{
OutputStream *ost = &ms->ost;
AVBSFContext *ctx = ms->bsf_ctx;
int ret;
while (av_fifo_read(ost->muxing_queue, &pkt, 1) >= 0) {
ost->muxing_queue_data_size -= pkt->size;
of_write_packet(of, pkt, ost, 1);
av_packet_free(&pkt);
}
if (!ctx)
return 0;
ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
if (ret < 0)
return ret;
ctx->time_base_in = ost->st->time_base;
ret = av_bsf_init(ctx);
if (ret < 0) {
av_log(ms, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
ctx->filter->name);
return ret;
}
/* the BSF may have rewritten extradata/codec parameters — propagate */
ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
if (ret < 0)
return ret;
ost->st->time_base = ctx->time_base_out;
return 0;
}
/*
 * Finish per-stream setup once the stream's parameters are known, then
 * give the muxer a chance to write its header (it will do so only when
 * every stream of the file is initialized).
 */
int of_stream_init(OutputFile *of, OutputStream *ost)
{
    Muxer *mux = mux_from_of(of);
    MuxStream *ms = ms_from_ost(ost);
    int err;

    /* propagate the muxing timebase into the sync queue, if this
     * stream participates in one */
    if (ost->sq_idx_mux >= 0)
        sq_set_tb(mux->sq_mux, ost->sq_idx_mux, ost->mux_timebase);

    /* bitstream filters are initialized here because for streamcopy
     * the codec id is not known any earlier */
    err = bsf_init(ms);
    if (err < 0)
        return err;

    ost->initialized = 1;

    return mux_check_init(mux);
}
/*
 * NOTE(review): corrupted diff residue — pre-change lines (the
 * `of->header_written` / `of->ctx` version) are interleaved with the
 * post-change lines (the `mux->tq` / `fc` version) without +/- markers.
 * Restore from upstream before editing.
 *
 * Intended purpose (post-change version): stop the muxer thread,
 * write the container trailer, record the final file size and close
 * the output AVIO context (unless the format is AVFMT_NOFILE).
 */
int of_write_trailer(OutputFile *of)
{
Muxer *mux = mux_from_of(of);
AVFormatContext *fc = mux->fc;
int ret;
if (!of->header_written) {
av_log(NULL, AV_LOG_ERROR,
"Nothing was written into output file %d (%s), because "
"at least one of its streams received no packets.\n",
of->index, of->ctx->url);
if (!mux->tq) {
av_log(mux, AV_LOG_ERROR,
"Nothing was written into output file, because "
"at least one of its streams received no packets.\n");
return AVERROR(EINVAL);
}
ret = av_write_trailer(of->ctx);
ret = thread_stop(mux);
if (ret < 0)
main_ffmpeg_return_code = ret;
ret = av_write_trailer(fc);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", of->ctx->url, av_err2str(ret));
av_log(mux, AV_LOG_ERROR, "Error writing trailer: %s\n", av_err2str(ret));
return ret;
}
mux->last_filesize = filesize(fc->pb);
if (!(of->format->flags & AVFMT_NOFILE)) {
ret = avio_closep(&fc->pb);
if (ret < 0) {
av_log(mux, AV_LOG_ERROR, "Error closing file: %s\n", av_err2str(ret));
return ret;
}
}
return 0;
}
/*
 * Free an output stream and everything it owns (queued packets, BSF
 * chain, frames, encoder context, stats components), then NULL the
 * caller's pointer. Safe to call with *post == NULL.
 */
static void ost_free(OutputStream **post)
{
OutputStream *ost = *post;
MuxStream *ms;
if (!ost)
return;
ms = ms_from_ost(ost);
/* two-pass log file: flush it to disk before dropping the handle */
if (ost->logfile) {
if (fclose(ost->logfile))
av_log(ms, AV_LOG_ERROR,
"Error closing logfile, loss of information possible: %s\n",
av_err2str(AVERROR(errno)));
ost->logfile = NULL;
}
/* drop any packets still waiting for the muxer to initialize */
if (ms->muxing_queue) {
AVPacket *pkt;
while (av_fifo_read(ms->muxing_queue, &pkt, 1) >= 0)
av_packet_free(&pkt);
av_fifo_freep2(&ms->muxing_queue);
}
av_bsf_free(&ms->bsf_ctx);
av_frame_free(&ost->filtered_frame);
av_frame_free(&ost->sq_frame);
av_frame_free(&ost->last_frame);
av_packet_free(&ost->pkt);
av_dict_free(&ost->encoder_opts);
/* forced-keyframe state: pts list and parsed expression */
av_freep(&ost->kf.pts);
av_expr_free(ost->kf.pexpr);
av_freep(&ost->avfilter);
av_freep(&ost->logfile_prefix);
av_freep(&ost->apad);
#if FFMPEG_OPT_MAP_CHANNEL
av_freep(&ost->audio_channels_map);
ost->audio_channels_mapped = 0;
#endif
av_dict_free(&ost->sws_dict);
av_dict_free(&ost->swr_opts);
/* stats_in was strdup'ed from the pass-1 log; the context itself
 * is released by avcodec_free_context() */
if (ost->enc_ctx)
av_freep(&ost->enc_ctx->stats_in);
avcodec_free_context(&ost->enc_ctx);
/* per-stream encoding/muxing stats format components */
for (int i = 0; i < ost->enc_stats_pre.nb_components; i++)
av_freep(&ost->enc_stats_pre.components[i].str);
av_freep(&ost->enc_stats_pre.components);
for (int i = 0; i < ost->enc_stats_post.nb_components; i++)
av_freep(&ost->enc_stats_post.components[i].str);
av_freep(&ost->enc_stats_post.components);
for (int i = 0; i < ms->stats.nb_components; i++)
av_freep(&ms->stats.components[i].str);
av_freep(&ms->stats.components);
av_freep(post);
}
/*
 * Close an AVFormatContext: close its AVIO handle (unless the format
 * is AVFMT_NOFILE) and free the context, NULLing the caller's pointer.
 * Safe to call with *pfc == NULL.
 */
static void fc_close(AVFormatContext **pfc)
{
    AVFormatContext *ctx = *pfc;

    if (ctx) {
        if (!(ctx->oformat->flags & AVFMT_NOFILE))
            avio_closep(&ctx->pb);
        avformat_free_context(ctx);
        *pfc = NULL;
    }
}
/*
 * Tear down an output file: stop the muxer thread, free the sync
 * queues, all streams, the options, the sync-queue packet and the
 * format context, then NULL the caller's pointer.
 *
 * NOTE(review): the declaration `AVFormatContext *s;` and the five
 * lines from `s = of->ctx;` through `av_dict_free(&of->opts);` are
 * stale pre-change diff residue — `of->ctx`/`of->opts` no longer exist
 * in the 6.0 layout (fc_close(&mux->fc) below does that job). Remove
 * them when restoring from upstream.
 */
void of_close(OutputFile **pof)
{
OutputFile *of = *pof;
AVFormatContext *s;
Muxer *mux;
if (!of)
return;
mux = mux_from_of(of);
thread_stop(mux);
s = of->ctx;
if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
avio_closep(&s->pb);
avformat_free_context(s);
av_dict_free(&of->opts);
sq_free(&of->sq_encode);
sq_free(&mux->sq_mux);
for (int i = 0; i < of->nb_streams; i++)
ost_free(&of->streams[i]);
av_freep(&of->streams);
av_dict_free(&mux->opts);
av_packet_free(&mux->sq_pkt);
fc_close(&mux->fc);
av_freep(pof);
}
/*
 * Thread-safe snapshot of the number of bytes written to this output
 * file so far (the counter is updated from the muxer thread).
 */
int64_t of_filesize(OutputFile *of)
{
    return atomic_load(&mux_from_of(of)->last_filesize);
}
/*
* Muxer internal APIs - should not be included outside of ffmpeg_mux*
* Copyright (c) 2023 ARTHENICA LTD
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file is the modified version of ffmpeg_mux.h file living in ffmpeg source code under the fftools folder. We
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
* - fftools header names updated
* - want_sdp made thread-local
* - EncStatsFile declaration migrated from ffmpeg_mux_init.c
* - WARN_MULTIPLE_OPT_USAGE, MATCH_PER_STREAM_OPT, MATCH_PER_TYPE_OPT, SPECIFIER_OPT_FMT declarations migrated from
* ffmpeg.h
* - ms_from_ost migrated to ffmpeg_mux.c
*/
#ifndef FFTOOLS_FFMPEG_MUX_H
#define FFTOOLS_FFMPEG_MUX_H
#include <stdatomic.h>
#include <stdint.h>
#include "fftools_thread_queue.h"
#include "libavformat/avformat.h"
#include "libavcodec/packet.h"
#include "libavutil/dict.h"
#include "libavutil/fifo.h"
#include "libavutil/thread.h"
/* printf format specifier for each SpecifierOpt union member type;
 * the suffix (str/i/i64/...) is token-pasted by WARN_MULTIPLE_OPT_USAGE
 * to print the value of the winning option. */
#define SPECIFIER_OPT_FMT_str "%s"
#define SPECIFIER_OPT_FMT_i "%i"
#define SPECIFIER_OPT_FMT_i64 "%"PRId64
#define SPECIFIER_OPT_FMT_ui64 "%"PRIu64
#define SPECIFIER_OPT_FMT_f "%f"
#define SPECIFIER_OPT_FMT_dbl "%lf"
/* Warn that several -<name> options matched the same stream and only the
 * last one (`so`) takes effect. `opt_name_<name>` must be a NULL-terminated
 * array of option spellings; `type` selects the SPECIFIER_OPT_FMT_* used
 * to print the winning value. Expanded only from MATCH_PER_STREAM_OPT. */
#define WARN_MULTIPLE_OPT_USAGE(name, type, so, st)\
{\
char namestr[128] = "";\
const char *spec = so->specifier && so->specifier[0] ? so->specifier : "";\
for (int _i = 0; opt_name_##name[_i]; _i++)\
av_strlcatf(namestr, sizeof(namestr), "-%s%s", opt_name_##name[_i], opt_name_##name[_i+1] ? (opt_name_##name[_i+2] ? ", " : " or ") : "");\
av_log(NULL, AV_LOG_WARNING, "Multiple %s options specified for stream %d, only the last option '-%s%s%s "SPECIFIER_OPT_FMT_##type"' will be used.\n",\
namestr, st->index, opt_name_##name[0], spec[0] ? ":" : "", spec, so->u.type);\
}
/* Find the value of per-stream option <name> that applies to stream `st`
 * of `fmtctx` and store it in `outvar` (left untouched when nothing
 * matches). Expects an OptionsContext named `o` in the expanding scope.
 * Exits the program on an invalid stream specifier; warns when several
 * occurrences match and keeps the last one. */
#define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
{\
int _ret, _matches = 0;\
SpecifierOpt *so;\
for (int _i = 0; _i < o->nb_ ## name; _i++) {\
char *spec = o->name[_i].specifier;\
if ((_ret = check_stream_specifier(fmtctx, st, spec)) > 0) {\
outvar = o->name[_i].u.type;\
so = &o->name[_i];\
_matches++;\
} else if (_ret < 0)\
exit_program(1);\
}\
if (_matches > 1)\
WARN_MULTIPLE_OPT_USAGE(name, type, so, st);\
}
/* Find the value of option <name> whose specifier string equals
 * `mediatype` (e.g. "v", "a") and store it in `outvar`; last match wins.
 * Expects an OptionsContext named `o` in the expanding scope. `fmtctx`
 * is unused but kept for symmetry with MATCH_PER_STREAM_OPT. */
#define MATCH_PER_TYPE_OPT(name, type, outvar, fmtctx, mediatype)\
{\
int i;\
for (i = 0; i < o->nb_ ## name; i++) {\
char *spec = o->name[i].specifier;\
if (!strcmp(spec, mediatype))\
outvar = o->name[i].u.type;\
}\
}
/* Muxer-private view of an output stream; wraps the public OutputStream
 * so code holding an OutputStream* can recover it via ms_from_ost(). */
typedef struct MuxStream {
/* must be first so (MuxStream*) and (OutputStream*) alias */
OutputStream ost;
// name used for logging
char log_name[32];
/* the packets are buffered here until the muxer is ready to be initialized */
AVFifo *muxing_queue;
/* bitstream-filter chain applied before muxing, or NULL */
AVBSFContext *bsf_ctx;
/* -mux_stats formatting state for this stream */
EncStats stats;
/* stop muxing after this many packets (-frames), INT64_MAX = no limit */
int64_t max_frames;
/*
* The size of the AVPackets' buffers in queue.
* Updated when a packet is either pushed or pulled from the queue.
*/
size_t muxing_queue_data_size;
/* maximum number of queued packets (-max_muxing_queue_size) */
int max_muxing_queue_size;
/* Threshold after which max_muxing_queue_size will be in effect */
size_t muxing_queue_data_threshold;
/* dts of the last packet sent to the muxer, in the stream timebase
* used for making up missing dts values */
int64_t last_mux_dts;
} MuxStream;
/* Muxer-private view of an output file; wraps the public OutputFile so
 * code holding an OutputFile* can recover it via mux_from_of(). Owns the
 * muxer thread and its packet queue. */
typedef struct Muxer {
/* must be first so (Muxer*) and (OutputFile*) alias */
OutputFile of;
// name used for logging
char log_name[32];
/* the output format context; packets are written here */
AVFormatContext *fc;
/* worker thread doing the actual av_interleaved_write_frame() calls */
pthread_t thread;
/* queue feeding packets from the main thread to `thread` */
ThreadQueue *tq;
/* muxer (format-level) options, consumed by avformat_write_header() */
AVDictionary *opts;
int thread_queue_size;
/* filesize limit expressed in bytes */
int64_t limit_filesize;
/* bytes written so far; atomic because it is read from other threads */
atomic_int_least64_t last_filesize;
int header_written;
/* sync queue interleaving streamcopied packets, or NULL */
SyncQueue *sq_mux;
/* scratch packet for pulling from sq_mux */
AVPacket *sq_pkt;
} Muxer;
/* One open -enc_stats_* destination; shared between all streams that
 * log to the same path (see enc_stats_get_file()). */
typedef struct EncStatsFile {
char *path;
AVIOContext *io;
} EncStatsFile;
/* whether we want to print an SDP, set in of_open() */
extern __thread int want_sdp;
int mux_check_init(Muxer *mux);
#endif /* FFTOOLS_FFMPEG_MUX_H */
/*
* Muxer/output file setup.
* Copyright (c) 2023 ARTHENICA LTD
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file is the modified version of ffmpeg_mux_init.c file living in ffmpeg source code under the fftools folder. We
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
* - fftools header names updated
* - EncStatsFile declaration migrated to ffmpeg_mux.h
* - extern ms_from_ost added
* - "class" member field renamed as clazz
*/
#include <string.h>
#include "fftools_cmdutils.h"
#include "fftools_ffmpeg.h"
#include "fftools_ffmpeg_mux.h"
#include "fftools_fopen_utf8.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
#include "libavfilter/avfilter.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/avutil.h"
#include "libavutil/bprint.h"
#include "libavutil/dict.h"
#include "libavutil/getenv_utf8.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
/* NULL-terminated spellings of each per-stream/per-type command-line
 * option; consumed by the MATCH_PER_STREAM_OPT / MATCH_PER_TYPE_OPT and
 * WARN_MULTIPLE_OPT_USAGE macros, which token-paste `opt_name_<name>`. */
static const char *const opt_name_apad[] = {"apad", NULL};
static const char *const opt_name_autoscale[] = {"autoscale", NULL};
static const char *const opt_name_bits_per_raw_sample[] = {"bits_per_raw_sample", NULL};
static const char *const opt_name_bitstream_filters[] = {"bsf", "absf", "vbsf", NULL};
static const char *const opt_name_copy_initial_nonkeyframes[] = {"copyinkf", NULL};
static const char *const opt_name_copy_prior_start[] = {"copypriorss", NULL};
static const char *const opt_name_disposition[] = {"disposition", NULL};
static const char *const opt_name_enc_time_bases[] = {"enc_time_base", NULL};
static const char *const opt_name_enc_stats_pre[] = {"enc_stats_pre", NULL};
static const char *const opt_name_enc_stats_post[] = {"enc_stats_post", NULL};
static const char *const opt_name_mux_stats[] = {"mux_stats", NULL};
static const char *const opt_name_enc_stats_pre_fmt[] = {"enc_stats_pre_fmt", NULL};
static const char *const opt_name_enc_stats_post_fmt[] = {"enc_stats_post_fmt", NULL};
static const char *const opt_name_mux_stats_fmt[] = {"mux_stats_fmt", NULL};
static const char *const opt_name_filters[] = {"filter", "af", "vf", NULL};
static const char *const opt_name_filter_scripts[] = {"filter_script", NULL};
static const char *const opt_name_fix_sub_duration_heartbeat[] = {"fix_sub_duration_heartbeat", NULL};
static const char *const opt_name_fps_mode[] = {"fps_mode", NULL};
static const char *const opt_name_force_fps[] = {"force_fps", NULL};
static const char *const opt_name_forced_key_frames[] = {"forced_key_frames", NULL};
static const char *const opt_name_frame_aspect_ratios[] = {"aspect", NULL};
static const char *const opt_name_intra_matrices[] = {"intra_matrix", NULL};
static const char *const opt_name_inter_matrices[] = {"inter_matrix", NULL};
static const char *const opt_name_chroma_intra_matrices[] = {"chroma_intra_matrix", NULL};
static const char *const opt_name_max_frame_rates[] = {"fpsmax", NULL};
static const char *const opt_name_max_frames[] = {"frames", "aframes", "vframes", "dframes", NULL};
static const char *const opt_name_max_muxing_queue_size[] = {"max_muxing_queue_size", NULL};
static const char *const opt_name_muxing_queue_data_threshold[] = {"muxing_queue_data_threshold", NULL};
static const char *const opt_name_pass[] = {"pass", NULL};
static const char *const opt_name_passlogfiles[] = {"passlogfile", NULL};
static const char *const opt_name_presets[] = {"pre", "apre", "vpre", "spre", NULL};
static const char *const opt_name_qscale[] = {"q", "qscale", NULL};
static const char *const opt_name_rc_overrides[] = {"rc_override", NULL};
static const char *const opt_name_time_bases[] = {"time_base", NULL};
static const char *const opt_name_audio_channels[] = {"ac", NULL};
static const char *const opt_name_audio_ch_layouts[] = {"channel_layout", "ch_layout", NULL};
static const char *const opt_name_audio_sample_rate[] = {"ar", NULL};
static const char *const opt_name_frame_sizes[] = {"s", NULL};
static const char *const opt_name_frame_pix_fmts[] = {"pix_fmt", NULL};
static const char *const opt_name_sample_fmts[] = {"sample_fmt", NULL};
extern MuxStream *ms_from_ost(OutputStream *ost);
/*
 * Return 1 if the dictionary entry `opt_name` in `opts` evaluates (per
 * `ctx`'s AVOption table) to a flags value with the bit `flag` set,
 * e.g. AV_CODEC_FLAG_BITEXACT inside "flags"; 0 otherwise.
 */
static int check_opt_bitexact(void *ctx, const AVDictionary *opts,
                              const char *opt_name, int flag)
{
    const AVDictionaryEntry *entry;
    const AVOption *opt;
    int flags_val = 0;

    entry = av_dict_get(opts, opt_name, NULL, 0);
    if (!entry)
        return 0;

    opt = av_opt_find(ctx, opt_name, NULL, 0, 0);
    if (!opt)
        return 0;

    av_opt_eval_flags(ctx, opt, entry->value, &flags_val);
    return (flags_val & flag) ? 1 : 0;
}
/*
 * Pick the encoder for `ost`, honouring an explicit -c option. On
 * return *enc is NULL for streamcopy ("copy") and for stream types that
 * are never encoded; otherwise it points at the selected encoder and
 * the stream's codec_id is set accordingly. Returns 0 or
 * AVERROR_ENCODER_NOT_FOUND when the format's default encoder is
 * unavailable.
 */
static int choose_encoder(const OptionsContext *o, AVFormatContext *s,
                          OutputStream *ost, const AVCodec **enc)
{
    enum AVMediaType type = ost->st->codecpar->codec_type;
    char *codec_name = NULL;

    *enc = NULL;

    /* only video, audio and subtitle streams can be encoded */
    if (type != AVMEDIA_TYPE_VIDEO &&
        type != AVMEDIA_TYPE_AUDIO &&
        type != AVMEDIA_TYPE_SUBTITLE)
        return 0;

    MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
    if (!codec_name) {
        /* no -c given: fall back to the container's default codec */
        ost->st->codecpar->codec_id = av_guess_codec(s->oformat, NULL, s->url,
                                                     NULL, ost->st->codecpar->codec_type);
        *enc = avcodec_find_encoder(ost->st->codecpar->codec_id);
        if (!*enc) {
            av_log(ost, AV_LOG_FATAL, "Automatic encoder selection failed "
                   "Default encoder for format %s (codec %s) is "
                   "probably disabled. Please choose an encoder manually.\n",
                   s->oformat->name, avcodec_get_name(ost->st->codecpar->codec_id));
            return AVERROR_ENCODER_NOT_FOUND;
        }
    } else if (strcmp(codec_name, "copy")) {
        *enc = find_codec_or_die(ost, codec_name, ost->st->codecpar->codec_type, 1);
        ost->st->codecpar->codec_id = (*enc)->id;
    }

    return 0;
}
/*
 * Read one line from `s` into the caller's AVBPrint, stopping at '\n'
 * (not stored) or end of input, and return the accumulated buffer.
 * Aborts the program if the bprint ran out of memory.
 */
static char *get_line(AVIOContext *s, AVBPrint *bprint)
{
    char ch;

    while ((ch = avio_r8(s)) != 0 && ch != '\n')
        av_bprint_chars(bprint, ch, 1);

    if (!av_bprint_is_complete(bprint))
        report_and_exit(AVERROR(ENOMEM));

    return bprint->str;
}
/*
 * Locate and open an avpreset file, searching $AVCONV_DATADIR,
 * $HOME/.avconv and the compiled-in data directory in that order,
 * preferring the codec-specific "<codec>-<preset>.avpreset" over the
 * generic "<preset>.avpreset". Returns >= 0 with *s opened for reading
 * on success, a negative AVERROR otherwise.
 */
static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
{
    char filename[1000];
    char *env_avconv_datadir = getenv_utf8("AVCONV_DATADIR");
    char *env_home = getenv_utf8("HOME");
    const char *base[3] = { env_avconv_datadir,
                            env_home,
                            AVCONV_DATADIR,
                          };
    int ret = -1;

    for (int i = 0; i < FF_ARRAY_ELEMS(base) && ret < 0; i++) {
        /* only the $HOME entry gets the ".avconv" subdirectory */
        const char *subdir = (i != 1) ? "" : "/.avconv";

        if (!base[i])
            continue;

        if (codec_name) {
            snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
                     subdir, codec_name, preset_name);
            ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
        }
        if (ret < 0) {
            snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
                     subdir, preset_name);
            ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
        }
    }

    freeenv_utf8(env_home);
    freeenv_utf8(env_avconv_datadir);
    return ret;
}
__thread EncStatsFile *enc_stats_files;
__thread int nb_enc_stats_files;
/*
 * Return (via *io) a write AVIOContext for the stats destination `path`,
 * opening it on first use and reusing the same context when several
 * streams log to one file. Aborts on OOM inside GROW_ARRAY; returns a
 * negative AVERROR if the file cannot be opened.
 */
static int enc_stats_get_file(AVIOContext **io, const char *path)
{
    EncStatsFile *file;
    int err;

    /* reuse an already-open destination */
    for (int i = 0; i < nb_enc_stats_files; i++) {
        if (!strcmp(path, enc_stats_files[i].path)) {
            *io = enc_stats_files[i].io;
            return 0;
        }
    }

    GROW_ARRAY(enc_stats_files, nb_enc_stats_files);
    file = &enc_stats_files[nb_enc_stats_files - 1];

    err = avio_open2(&file->io, path, AVIO_FLAG_WRITE, &int_cb, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error opening stats file '%s': %s\n",
               path, av_err2str(err));
        return err;
    }

    file->path = av_strdup(path);
    if (!file->path)
        return AVERROR(ENOMEM);

    *io = file->io;
    return 0;
}
void of_enc_stats_close(void)
{
for (int i = 0; i < nb_enc_stats_files; i++) {
av_freep(&enc_stats_files[i].path);
avio_closep(&enc_stats_files[i].io);
}
av_freep(&enc_stats_files);
nb_enc_stats_files = 0;
}
/*
 * Copy characters from *pstr into a freshly allocated string, stopping
 * at an unescaped `delim` or at the end of input; '\' escapes the
 * following character. On success *pdst/*dst_len hold the copy (or
 * *pdst stays NULL when nothing was copied, with *pstr untouched) and
 * *pstr points at the terminating delimiter. Returns 0 or
 * AVERROR(ENOMEM).
 */
static int unescape(char **pdst, size_t *dst_len,
                    const char **pstr, char delim)
{
    const char *src = *pstr;
    size_t cap, out = 0;
    char *buf;

    *pdst = NULL;

    cap = strlen(src);
    if (cap == 0)
        return 0;

    /* the unescaped result can never be longer than the input */
    buf = av_malloc(cap + 1);
    if (!buf)
        return AVERROR(ENOMEM);

    while (*src) {
        if (src[0] == '\\' && src[1])
            src++;            /* drop the backslash, keep what follows */
        else if (*src == delim)
            break;
        buf[out++] = *src;
        src++;
    }

    if (out == 0) {
        av_freep(&buf);
        return 0;
    }

    buf[out] = 0;
    *pdst    = buf;
    *dst_len = out;
    *pstr    = src;

    return 0;
}
/*
 * Parse an -enc_stats_*_fmt format specification into `es->components`
 * and open the destination `path`. The spec is literal text with
 * "{directive}" placeholders; backslash escapes braces. `pre` selects
 * pre-encoding validation (some directives are only valid pre- or
 * post-encoding). Returns 0 or a negative AVERROR; aborts on OOM
 * inside GROW_ARRAY.
 */
static int enc_stats_init(OutputStream *ost, EncStats *es, int pre,
const char *path, const char *fmt_spec)
{
/* known directives; pre_only/post_only restrict where they may appear,
 * need_input_data marks ones requiring input-stream frame metadata */
static const struct {
enum EncStatsType type;
const char *str;
int pre_only:1;
int post_only:1;
int need_input_data:1;
} fmt_specs[] = {
{ ENC_STATS_FILE_IDX, "fidx" },
{ ENC_STATS_STREAM_IDX, "sidx" },
{ ENC_STATS_FRAME_NUM, "n" },
{ ENC_STATS_FRAME_NUM_IN, "ni", 0, 0, 1 },
{ ENC_STATS_TIMEBASE, "tb" },
{ ENC_STATS_TIMEBASE_IN, "tbi", 0, 0, 1 },
{ ENC_STATS_PTS, "pts" },
{ ENC_STATS_PTS_TIME, "t" },
{ ENC_STATS_PTS_IN, "ptsi", 0, 0, 1 },
{ ENC_STATS_PTS_TIME_IN, "ti", 0, 0, 1 },
{ ENC_STATS_DTS, "dts", 0, 1 },
{ ENC_STATS_DTS_TIME, "dt", 0, 1 },
{ ENC_STATS_SAMPLE_NUM, "sn", 1 },
{ ENC_STATS_NB_SAMPLES, "samp", 1 },
{ ENC_STATS_PKT_SIZE, "size", 0, 1 },
{ ENC_STATS_BITRATE, "br", 0, 1 },
{ ENC_STATS_AVG_BITRATE, "abr", 0, 1 },
};
const char *next = fmt_spec;
int ret;
while (*next) {
EncStatsComponent *c;
char *val;
size_t val_len;
// get the sequence up until next opening brace
ret = unescape(&val, &val_len, &next, '{');
if (ret < 0)
return ret;
if (val) {
GROW_ARRAY(es->components, es->nb_components);
c = &es->components[es->nb_components - 1];
c->type = ENC_STATS_LITERAL;
c->str = val;
c->str_len = val_len;
}
if (!*next)
break;
next++;
// get the part inside braces
ret = unescape(&val, &val_len, &next, '}');
if (ret < 0)
return ret;
if (!val) {
av_log(NULL, AV_LOG_ERROR,
"Empty formatting directive in: %s\n", fmt_spec);
return AVERROR(EINVAL);
}
if (!*next) {
av_log(NULL, AV_LOG_ERROR,
"Missing closing brace in: %s\n", fmt_spec);
ret = AVERROR(EINVAL);
goto fail;
}
next++;
GROW_ARRAY(es->components, es->nb_components);
c = &es->components[es->nb_components - 1];
/* match the directive against the table above */
for (size_t i = 0; i < FF_ARRAY_ELEMS(fmt_specs); i++) {
if (!strcmp(val, fmt_specs[i].str)) {
if ((pre && fmt_specs[i].post_only) || (!pre && fmt_specs[i].pre_only)) {
av_log(NULL, AV_LOG_ERROR,
"Format directive '%s' may only be used %s-encoding\n",
val, pre ? "post" : "pre");
ret = AVERROR(EINVAL);
goto fail;
}
c->type = fmt_specs[i].type;
if (fmt_specs[i].need_input_data) {
if (ost->ist)
ost->ist->want_frame_data = 1;
else {
av_log(ost, AV_LOG_WARNING,
"Format directive '%s' is unavailable, because "
"this output stream has no associated input stream\n",
val);
}
}
break;
}
}
if (!c->type) {
av_log(NULL, AV_LOG_ERROR, "Invalid format directive: %s\n", val);
ret = AVERROR(EINVAL);
goto fail;
}
/* NOTE: `fail` sits inside the loop on purpose — each iteration
 * falls through it to free `val`, and only ret < 0 aborts */
fail:
av_freep(&val);
if (ret < 0)
return ret;
}
ret = enc_stats_get_file(&es->io, path);
if (ret < 0)
return ret;
return 0;
}
/* AVClass item_name callback: per-stream log name ("vost#0:1/..."). */
static const char *output_stream_item_name(void *obj)
{
    const MuxStream *stream = obj;

    return stream->log_name;
}
/* AVClass describing OutputStream/MuxStream objects, so av_log(ost, ...)
 * prefixes messages with the per-stream log name. */
static const AVClass output_stream_class = {
.class_name = "OutputStream",
.version = LIBAVUTIL_VERSION_INT,
.item_name = output_stream_item_name,
.category = AV_CLASS_CATEGORY_MUXER,
};
/*
 * Append a new MuxStream to the output file's stream array and fill in
 * its identity: file/stream indices, AVClass and log name. Aborts on
 * OOM inside allocate_array_elem().
 */
static MuxStream *mux_stream_alloc(Muxer *mux, enum AVMediaType type)
{
    const char *media = av_get_media_type_string(type);
    MuxStream *ms;

    ms = allocate_array_elem(&mux->of.streams, sizeof(*ms),
                             &mux->of.nb_streams);

    ms->ost.file_index = mux->of.index;
    ms->ost.index      = mux->of.nb_streams - 1;
    ms->ost.clazz      = &output_stream_class;

    /* e.g. "vost#0:1" — media-type letter, file index, stream index */
    snprintf(ms->log_name, sizeof(ms->log_name), "%cost#%d:%d",
             media ? *media : '?', mux->of.index, ms->ost.index);

    return ms;
}
/*
 * Create a new output stream of media type `type` in `mux`, apply all
 * per-stream options from `o`, and — when the stream is encoded —
 * allocate and pre-configure the encoder context. `ist` is the input
 * stream feeding this output, or NULL (filter-/attachment-fed streams).
 * Aborts the program on allocation failure or invalid options.
 */
static OutputStream *new_output_stream(Muxer *mux, const OptionsContext *o,
enum AVMediaType type, InputStream *ist)
{
AVFormatContext *oc = mux->fc;
MuxStream *ms;
OutputStream *ost;
const AVCodec *enc;
AVStream *st = avformat_new_stream(oc, NULL);
int ret = 0;
const char *bsfs = NULL, *time_base = NULL;
char *next, *codec_tag = NULL;
double qscale = -1;
int i;
if (!st)
report_and_exit(AVERROR(ENOMEM));
/* apply a -streamid mapping, if one was given for this stream index */
if (oc->nb_streams - 1 < o->nb_streamid_map)
st->id = o->streamid_map[oc->nb_streams - 1];
ms = mux_stream_alloc(mux, type);
ost = &ms->ost;
ms->muxing_queue = av_fifo_alloc2(8, sizeof(AVPacket*), 0);
if (!ms->muxing_queue)
report_and_exit(AVERROR(ENOMEM));
ms->last_mux_dts = AV_NOPTS_VALUE;
ost->st = st;
ost->ist = ist;
ost->kf.ref_pts = AV_NOPTS_VALUE;
st->codecpar->codec_type = type;
/* encoder selection: NULL enc means streamcopy */
ret = choose_encoder(o, oc, ost, &enc);
if (ret < 0) {
av_log(ost, AV_LOG_FATAL, "Error selecting an encoder\n");
exit_program(1);
}
if (enc) {
ost->enc_ctx = avcodec_alloc_context3(enc);
if (!ost->enc_ctx)
report_and_exit(AVERROR(ENOMEM));
av_strlcat(ms->log_name, "/", sizeof(ms->log_name));
av_strlcat(ms->log_name, enc->name, sizeof(ms->log_name));
} else {
av_strlcat(ms->log_name, "/copy", sizeof(ms->log_name));
}
ost->filtered_frame = av_frame_alloc();
if (!ost->filtered_frame)
report_and_exit(AVERROR(ENOMEM));
ost->pkt = av_packet_alloc();
if (!ost->pkt)
report_and_exit(AVERROR(ENOMEM));
if (ost->enc_ctx) {
/* encoded stream: gather encoder options, presets and stats config */
AVCodecContext *enc = ost->enc_ctx;
AVIOContext *s = NULL;
char *buf = NULL, *arg = NULL, *preset = NULL;
const char *enc_stats_pre = NULL, *enc_stats_post = NULL, *mux_stats = NULL;
ost->encoder_opts = filter_codec_opts(o->g->codec_opts, enc->codec_id,
oc, st, enc->codec);
MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
ost->autoscale = 1;
MATCH_PER_STREAM_OPT(autoscale, i, ost->autoscale, oc, st);
/* read "key=value" lines from the preset file into encoder_opts;
 * explicit options win (AV_DICT_DONT_OVERWRITE) */
if (preset && (!(ret = get_preset_file_2(preset, enc->codec->name, &s)))) {
AVBPrint bprint;
av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
do {
av_bprint_clear(&bprint);
buf = get_line(s, &bprint);
if (!buf[0] || buf[0] == '#')
continue;
if (!(arg = strchr(buf, '='))) {
av_log(ost, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
exit_program(1);
}
*arg++ = 0;
av_dict_set(&ost->encoder_opts, buf, arg, AV_DICT_DONT_OVERWRITE);
} while (!s->eof_reached);
av_bprint_finalize(&bprint, NULL);
avio_closep(&s);
}
if (ret) {
av_log(ost, AV_LOG_FATAL,
"Preset %s specified, but could not be opened.\n", preset);
exit_program(1);
}
/* -enc_stats_pre / -enc_stats_post / -mux_stats logging setup */
MATCH_PER_STREAM_OPT(enc_stats_pre, str, enc_stats_pre, oc, st);
if (enc_stats_pre &&
(type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)) {
const char *format = "{fidx} {sidx} {n} {t}";
MATCH_PER_STREAM_OPT(enc_stats_pre_fmt, str, format, oc, st);
ret = enc_stats_init(ost, &ost->enc_stats_pre, 1, enc_stats_pre, format);
if (ret < 0)
exit_program(1);
}
MATCH_PER_STREAM_OPT(enc_stats_post, str, enc_stats_post, oc, st);
if (enc_stats_post &&
(type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)) {
const char *format = "{fidx} {sidx} {n} {t}";
MATCH_PER_STREAM_OPT(enc_stats_post_fmt, str, format, oc, st);
ret = enc_stats_init(ost, &ost->enc_stats_post, 0, enc_stats_post, format);
if (ret < 0)
exit_program(1);
}
MATCH_PER_STREAM_OPT(mux_stats, str, mux_stats, oc, st);
if (mux_stats &&
(type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)) {
const char *format = "{fidx} {sidx} {n} {t}";
MATCH_PER_STREAM_OPT(mux_stats_fmt, str, format, oc, st);
ret = enc_stats_init(ost, &ms->stats, 0, mux_stats, format);
if (ret < 0)
exit_program(1);
}
} else {
ost->encoder_opts = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st, NULL);
}
if (o->bitexact) {
ost->bitexact = 1;
} else if (ost->enc_ctx) {
ost->bitexact = check_opt_bitexact(ost->enc_ctx, ost->encoder_opts, "flags",
AV_CODEC_FLAG_BITEXACT);
}
/* -time_base: muxing timebase; -enc_time_base: encoding timebase */
MATCH_PER_STREAM_OPT(time_bases, str, time_base, oc, st);
if (time_base) {
AVRational q;
if (av_parse_ratio(&q, time_base, INT_MAX, 0, NULL) < 0 ||
q.num <= 0 || q.den <= 0) {
av_log(ost, AV_LOG_FATAL, "Invalid time base: %s\n", time_base);
exit_program(1);
}
st->time_base = q;
}
MATCH_PER_STREAM_OPT(enc_time_bases, str, time_base, oc, st);
if (time_base) {
AVRational q;
if (av_parse_ratio(&q, time_base, INT_MAX, 0, NULL) < 0 ||
q.den <= 0) {
av_log(ost, AV_LOG_FATAL, "Invalid time base: %s\n", time_base);
exit_program(1);
}
ost->enc_timebase = q;
}
ms->max_frames = INT64_MAX;
MATCH_PER_STREAM_OPT(max_frames, i64, ms->max_frames, oc, st);
for (i = 0; i<o->nb_max_frames; i++) {
char *p = o->max_frames[i].specifier;
if (!*p && type != AVMEDIA_TYPE_VIDEO) {
av_log(ost, AV_LOG_WARNING, "Applying unspecific -frames to non video streams, maybe you meant -vframes ?\n");
break;
}
}
ost->copy_prior_start = -1;
MATCH_PER_STREAM_OPT(copy_prior_start, i, ost->copy_prior_start, oc ,st);
/* bitstream filters (applied by bsf_init() once parameters are known) */
MATCH_PER_STREAM_OPT(bitstream_filters, str, bsfs, oc, st);
if (bsfs && *bsfs) {
ret = av_bsf_list_parse_str(bsfs, &ms->bsf_ctx);
if (ret < 0) {
av_log(ost, AV_LOG_ERROR, "Error parsing bitstream filter sequence '%s': %s\n", bsfs, av_err2str(ret));
exit_program(1);
}
}
/* -tag: numeric value, or up to 4 literal characters */
MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
if (codec_tag) {
uint32_t tag = strtol(codec_tag, &next, 0);
if (*next)
tag = AV_RL32(codec_tag);
ost->st->codecpar->codec_tag = tag;
if (ost->enc_ctx)
ost->enc_ctx->codec_tag = tag;
}
MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
if (ost->enc_ctx && qscale >= 0) {
ost->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE;
ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
}
ms->max_muxing_queue_size = 128;
MATCH_PER_STREAM_OPT(max_muxing_queue_size, i, ms->max_muxing_queue_size, oc, st);
ms->muxing_queue_data_threshold = 50*1024*1024;
MATCH_PER_STREAM_OPT(muxing_queue_data_threshold, i, ms->muxing_queue_data_threshold, oc, st);
MATCH_PER_STREAM_OPT(bits_per_raw_sample, i, ost->bits_per_raw_sample,
oc, st);
MATCH_PER_STREAM_OPT(fix_sub_duration_heartbeat, i, ost->fix_sub_duration_heartbeat,
oc, st);
if (oc->oformat->flags & AVFMT_GLOBALHEADER && ost->enc_ctx)
ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
av_dict_copy(&ost->sws_dict, o->g->sws_dict, 0);
av_dict_copy(&ost->swr_opts, o->g->swr_opts, 0);
if (ost->enc_ctx && av_get_exact_bits_per_sample(ost->enc_ctx->codec_id) == 24)
av_dict_set(&ost->swr_opts, "output_sample_bits", "24", 0);
/* a consumer exists now: un-discard the feeding input stream */
if (ost->ist) {
ost->ist->discard = 0;
ost->ist->st->discard = ost->ist->user_set_discard;
}
ost->last_mux_dts = AV_NOPTS_VALUE;
ost->last_filter_pts = AV_NOPTS_VALUE;
MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i,
ost->copy_initial_nonkeyframes, oc, st);
return ost;
}
/*
 * Return the filtergraph description for `ost` as a newly allocated
 * string: the contents of -filter_script, the -filter value, or the
 * pass-through graph ("null"/"anull") when neither was given. Aborts
 * if both options were set. `o` and `oc` are unused but kept for
 * signature symmetry with the other option helpers.
 */
static char *get_ost_filters(const OptionsContext *o, AVFormatContext *oc,
                             OutputStream *ost)
{
    int is_video = ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO;

    /* the two options are mutually exclusive */
    if (ost->filters_script && ost->filters) {
        av_log(ost, AV_LOG_ERROR, "Both -filter and -filter_script set\n");
        exit_program(1);
    }

    if (ost->filters_script)
        return file_read(ost->filters_script);
    if (ost->filters)
        return av_strdup(ost->filters);

    return av_strdup(is_video ? "null" : "anull");
}
/*
 * Abort with an error if filters were requested for a stream that is
 * being streamcopied — filtering requires re-encoding. `o`, `oc` and
 * `type` are unused but kept for signature symmetry.
 */
static void check_streamcopy_filters(const OptionsContext *o, AVFormatContext *oc,
                                     OutputStream *ost, enum AVMediaType type)
{
    if (!ost->filters_script && !ost->filters)
        return;

    av_log(ost, AV_LOG_ERROR,
           "%s '%s' was defined, but codec copy was selected.\n"
           "Filtering and streamcopy cannot be used together.\n",
           ost->filters ? "Filtergraph" : "Filtergraph script",
           ost->filters ? ost->filters : ost->filters_script);
    exit_program(1);
}
/*
 * Parse exactly 64 comma-separated integer coefficients from `str` into
 * `dest` (an 8x8 quantizer matrix); aborts the program if fewer are
 * present.
 */
static void parse_matrix_coeffs(void *logctx, uint16_t *dest, const char *str)
{
    const char *cursor = str;

    for (int i = 0; ; i++) {
        dest[i] = atoi(cursor);
        if (i == 63)
            break;

        cursor = strchr(cursor, ',');
        if (!cursor) {
            av_log(logctx, AV_LOG_FATAL,
                   "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
            exit_program(1);
        }
        cursor++;
    }
}
/* Create and configure a video output stream for muxer 'mux', optionally fed
 * by input stream 'ist' (NULL when the stream is fed from a complex
 * filtergraph). Parses all per-stream video options (-r, -fpsmax, -aspect,
 * -s, -pix_fmt, quantization matrices, -rc_override, two-pass logfiles,
 * -fps_mode/-vsync, ...). For encoded streams the encoder context and the
 * output filtergraph are set up; for streamcopy only a filter-option sanity
 * check is done. Exits the program on any invalid option value. */
static OutputStream *new_video_stream(Muxer *mux, const OptionsContext *o, InputStream *ist)
{
    AVFormatContext *oc = mux->fc;
    AVStream *st;
    OutputStream *ost;
    char *frame_rate = NULL, *max_frame_rate = NULL, *frame_aspect_ratio = NULL;

    ost = new_output_stream(mux, o, AVMEDIA_TYPE_VIDEO, ist);
    st = ost->st;

    /* -r: force a constant output frame rate */
    MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
    if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
        av_log(ost, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
        exit_program(1);
    }

    /* -fpsmax: upper bound on the output frame rate */
    MATCH_PER_STREAM_OPT(max_frame_rates, str, max_frame_rate, oc, st);
    if (max_frame_rate && av_parse_video_rate(&ost->max_frame_rate, max_frame_rate) < 0) {
        av_log(ost, AV_LOG_FATAL, "Invalid maximum framerate value: %s\n", max_frame_rate);
        exit_program(1);
    }

    /* -r and -fpsmax are mutually exclusive for a given stream */
    if (frame_rate && max_frame_rate) {
        av_log(ost, AV_LOG_ERROR, "Only one of -fpsmax and -r can be set for a stream.\n");
        exit_program(1);
    }

    /* -aspect: display aspect ratio, accepted as "num:den" or a float */
    MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
    if (frame_aspect_ratio) {
        AVRational q;
        if (av_parse_ratio(&q, frame_aspect_ratio, 255, 0, NULL) < 0 ||
            q.num <= 0 || q.den <= 0) {
            av_log(ost, AV_LOG_FATAL, "Invalid aspect ratio: %s\n", frame_aspect_ratio);
            exit_program(1);
        }
        ost->frame_aspect_ratio = q;
    }

    MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
    MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st);

    /* Everything below only applies when this stream is being encoded. */
    if (ost->enc_ctx) {
        AVCodecContext *video_enc = ost->enc_ctx;
        const char *p = NULL, *fps_mode = NULL;
        char *frame_size = NULL;
        char *frame_pix_fmt = NULL;
        char *intra_matrix = NULL, *inter_matrix = NULL;
        char *chroma_intra_matrix = NULL;
        int do_pass = 0;
        int i;

        /* -s: output frame size ("WxH" or an abbreviation) */
        MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
        if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
            av_log(ost, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
            exit_program(1);
        }

        /* -pix_fmt: a leading '+' means the requested format must not be
         * overridden by automatic conversion (keep_pix_fmt). */
        MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
        if (frame_pix_fmt && *frame_pix_fmt == '+') {
            ost->keep_pix_fmt = 1;
            if (!*++frame_pix_fmt)
                frame_pix_fmt = NULL;
        }
        if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == AV_PIX_FMT_NONE) {
            av_log(ost, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
            exit_program(1);
        }
        st->sample_aspect_ratio = video_enc->sample_aspect_ratio;

        /* Custom quantization matrices: each option is a comma-separated
         * list of exactly 64 coefficients (see parse_matrix_coeffs). */
        MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
        if (intra_matrix) {
            if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64)))
                report_and_exit(AVERROR(ENOMEM));
            parse_matrix_coeffs(ost, video_enc->intra_matrix, intra_matrix);
        }
        MATCH_PER_STREAM_OPT(chroma_intra_matrices, str, chroma_intra_matrix, oc, st);
        if (chroma_intra_matrix) {
            uint16_t *p = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64);
            if (!p)
                report_and_exit(AVERROR(ENOMEM));
            video_enc->chroma_intra_matrix = p;
            parse_matrix_coeffs(ost, p, chroma_intra_matrix);
        }
        MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
        if (inter_matrix) {
            if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64)))
                report_and_exit(AVERROR(ENOMEM));
            parse_matrix_coeffs(ost, video_enc->inter_matrix, inter_matrix);
        }

        /* -rc_override: "start,end,q" triples separated by '/'. */
        MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
        for (i = 0; p; i++) {
            int start, end, q;
            int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
            if (e != 3) {
                av_log(ost, AV_LOG_FATAL, "error parsing rc_override\n");
                exit_program(1);
            }
            video_enc->rc_override =
                av_realloc_array(video_enc->rc_override,
                                 i + 1, sizeof(RcOverride));
            if (!video_enc->rc_override) {
                av_log(ost, AV_LOG_FATAL, "Could not (re)allocate memory for rc_override.\n");
                exit_program(1);
            }
            video_enc->rc_override[i].start_frame = start;
            video_enc->rc_override[i].end_frame = end;
            /* q > 0 forces a fixed quantizer; q <= 0 scales quality by -q%. */
            if (q > 0) {
                video_enc->rc_override[i].qscale = q;
                video_enc->rc_override[i].quality_factor = 1.0;
            }
            else {
                video_enc->rc_override[i].qscale = 0;
                video_enc->rc_override[i].quality_factor = -q/100.0;
            }
            p = strchr(p, '/');
            if (p) p++;
        }
        video_enc->rc_override_count = i;

#if FFMPEG_OPT_PSNR
        if (do_psnr) {
            av_log(ost, AV_LOG_WARNING, "The -psnr option is deprecated, use -flags +psnr\n");
            video_enc->flags|= AV_CODEC_FLAG_PSNR;
        }
#endif

        /* two pass mode */
        MATCH_PER_STREAM_OPT(pass, i, do_pass, oc, st);
        if (do_pass) {
            if (do_pass & 1) {
                video_enc->flags |= AV_CODEC_FLAG_PASS1;
                av_dict_set(&ost->encoder_opts, "flags", "+pass1", AV_DICT_APPEND);
            }
            if (do_pass & 2) {
                video_enc->flags |= AV_CODEC_FLAG_PASS2;
                av_dict_set(&ost->encoder_opts, "flags", "+pass2", AV_DICT_APPEND);
            }
        }

        /* -passlogfile: duplicate so it can outlive the OptionsContext. */
        MATCH_PER_STREAM_OPT(passlogfiles, str, ost->logfile_prefix, oc, st);
        if (ost->logfile_prefix &&
            !(ost->logfile_prefix = av_strdup(ost->logfile_prefix)))
            report_and_exit(AVERROR(ENOMEM));

        if (do_pass) {
            int ost_idx = -1;
            char logfilename[1024];
            FILE *f;

            /* compute this stream's global index */
            for (int i = 0; i <= ost->file_index; i++)
                ost_idx += output_files[i]->nb_streams;

            snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
                     ost->logfile_prefix ? ost->logfile_prefix :
                     DEFAULT_PASS_LOGFILENAME_PREFIX,
                     ost_idx);

            /* libx264 manages its own stats file through its private
             * "stats" option; other encoders use stats_in / ost->logfile. */
            if (!strcmp(ost->enc_ctx->codec->name, "libx264")) {
                av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
            } else {
                if (video_enc->flags & AV_CODEC_FLAG_PASS2) {
                    char *logbuffer = file_read(logfilename);
                    if (!logbuffer) {
                        av_log(ost, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
                               logfilename);
                        exit_program(1);
                    }
                    video_enc->stats_in = logbuffer;
                }
                if (video_enc->flags & AV_CODEC_FLAG_PASS1) {
                    f = fopen_utf8(logfilename, "wb");
                    if (!f) {
                        av_log(ost, AV_LOG_FATAL,
                               "Cannot write log file '%s' for pass-1 encoding: %s\n",
                               logfilename, strerror(errno));
                        exit_program(1);
                    }
                    ost->logfile = f;
                }
            }
        }

        MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);

        /* -1 = not set; the encoder decides field order by itself. */
        ost->top_field_first = -1;
        MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);

        ost->vsync_method = video_sync_method;
        MATCH_PER_STREAM_OPT(fps_mode, str, fps_mode, oc, st);
        if (fps_mode)
            parse_and_set_vsync(fps_mode, &ost->vsync_method, ost->file_index, ost->index, 0);

        /* A fixed/maximum frame rate only makes sense with a CFR-style sync. */
        if ((ost->frame_rate.num || ost->max_frame_rate.num) &&
            !(ost->vsync_method == VSYNC_AUTO ||
              ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR)) {
            av_log(ost, AV_LOG_FATAL, "One of -r/-fpsmax was specified "
                   "together a non-CFR -vsync/-fps_mode. This is contradictory.\n");
            exit_program(1);
        }

        /* Resolve VSYNC_AUTO to a concrete method from the options, the
         * container's capabilities and the input timing situation. */
        if (ost->vsync_method == VSYNC_AUTO) {
            if (ost->frame_rate.num || ost->max_frame_rate.num) {
                ost->vsync_method = VSYNC_CFR;
            } else if (!strcmp(oc->oformat->name, "avi")) {
                ost->vsync_method = VSYNC_VFR;
            } else {
                ost->vsync_method = (oc->oformat->flags & AVFMT_VARIABLE_FPS) ?
                                    ((oc->oformat->flags & AVFMT_NOTIMESTAMPS) ?
                                     VSYNC_PASSTHROUGH : VSYNC_VFR) :
                                    VSYNC_CFR;
            }

            if (ost->ist && ost->vsync_method == VSYNC_CFR) {
                const InputFile *ifile = input_files[ost->ist->file_index];
                if (ifile->nb_streams == 1 && ifile->input_ts_offset == 0)
                    ost->vsync_method = VSYNC_VSCFR;
            }

            if (ost->vsync_method == VSYNC_CFR && copy_ts) {
                ost->vsync_method = VSYNC_VSCFR;
            }
        }
        ost->is_cfr = (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR);

        ost->avfilter = get_ost_filters(o, oc, ost);
        if (!ost->avfilter)
            exit_program(1);

        ost->last_frame = av_frame_alloc();
        if (!ost->last_frame)
            report_and_exit(AVERROR(ENOMEM));
    } else
        /* Streamcopy: filters cannot apply; diagnose if any were requested. */
        check_streamcopy_filters(o, oc, ost, AVMEDIA_TYPE_VIDEO);

    return ost;
}
/* Create and configure an audio output stream for muxer 'mux', optionally fed
 * by input stream 'ist' (NULL when fed from a complex filtergraph). Parses
 * per-stream audio options (-ac, channel layout, -sample_fmt, -ar, -apad) and
 * legacy -map_channel mappings; for encoded streams sets up the output
 * filtergraph. Exits the program on any invalid option value. */
static OutputStream *new_audio_stream(Muxer *mux, const OptionsContext *o, InputStream *ist)
{
    AVFormatContext *oc = mux->fc;
    AVStream *st;
    OutputStream *ost;

    ost = new_output_stream(mux, o, AVMEDIA_TYPE_AUDIO, ist);
    st = ost->st;

    MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
    MATCH_PER_STREAM_OPT(filters, str, ost->filters, oc, st);

    /* Everything below only applies when this stream is being encoded. */
    if (ost->enc_ctx) {
        AVCodecContext *audio_enc = ost->enc_ctx;
        int channels = 0;
        char *layout = NULL;
        char *sample_fmt = NULL;

        /* -ac: request a channel count with an unspecified layout order */
        MATCH_PER_STREAM_OPT(audio_channels, i, channels, oc, st);
        if (channels) {
            audio_enc->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
            audio_enc->ch_layout.nb_channels = channels;
        }

        /* Channel layout string: try the new AVChannelLayout syntax first;
         * fall back to the deprecated mask-based parser when enabled. */
        MATCH_PER_STREAM_OPT(audio_ch_layouts, str, layout, oc, st);
        if (layout) {
            if (av_channel_layout_from_string(&audio_enc->ch_layout, layout) < 0) {
#if FF_API_OLD_CHANNEL_LAYOUT
                uint64_t mask;
                AV_NOWARN_DEPRECATED({
                mask = av_get_channel_layout(layout);
                })
                if (!mask) {
#endif
                    av_log(ost, AV_LOG_FATAL, "Unknown channel layout: %s\n", layout);
                    exit_program(1);
#if FF_API_OLD_CHANNEL_LAYOUT
                }
                av_log(ost, AV_LOG_WARNING, "Channel layout '%s' uses a deprecated syntax.\n",
                       layout);
                av_channel_layout_from_mask(&audio_enc->ch_layout, mask);
#endif
            }
        }

        /* -sample_fmt */
        MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
        if (sample_fmt &&
            (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
            av_log(ost, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
            exit_program(1);
        }

        /* -ar */
        MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);

        /* -apad: duplicated so it can outlive the OptionsContext. */
        MATCH_PER_STREAM_OPT(apad, str, ost->apad, oc, st);
        ost->apad = av_strdup(ost->apad);

        ost->avfilter = get_ost_filters(o, oc, ost);
        if (!ost->avfilter)
            exit_program(1);

#if FFMPEG_OPT_MAP_CHANNEL
        /* check for channel mapping for this audio stream */
        for (int n = 0; n < o->nb_audio_channel_maps; n++) {
            AudioChannelMap *map = &o->audio_channel_maps[n];
            if ((map->ofile_idx == -1 || ost->file_index == map->ofile_idx) &&
                (map->ostream_idx == -1 || ost->st->index == map->ostream_idx)) {
                InputStream *ist;

                /* channel_idx == -1 marks a muted channel (no input needed). */
                if (map->channel_idx == -1) {
                    ist = NULL;
                } else if (!ost->ist) {
                    av_log(ost, AV_LOG_FATAL, "Cannot determine input stream for channel mapping %d.%d\n",
                           ost->file_index, ost->st->index);
                    continue;
                } else {
                    ist = ost->ist;
                }

                if (!ist || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) {
                    if (av_reallocp_array(&ost->audio_channels_map,
                                          ost->audio_channels_mapped + 1,
                                          sizeof(*ost->audio_channels_map)
                                          ) < 0 )
                        report_and_exit(AVERROR(ENOMEM));

                    ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
                }
            }
        }
#endif
    } else
        /* Streamcopy: filters cannot apply; diagnose if any were requested. */
        check_streamcopy_filters(o, oc, ost, AVMEDIA_TYPE_AUDIO);

    return ost;
}
/* Create a data output stream. Data streams can only be streamcopied;
 * aborts if an encoder context was set up for the stream. */
static OutputStream *new_data_stream(Muxer *mux, const OptionsContext *o, InputStream *ist)
{
    OutputStream *ost = new_output_stream(mux, o, AVMEDIA_TYPE_DATA, ist);

    if (ost->enc_ctx) {
        av_log(ost, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
        exit_program(1);
    }

    return ost;
}
/* Create an output stream of unknown media type. Such streams can only be
 * streamcopied; aborts if an encoder context was set up for the stream. */
static OutputStream *new_unknown_stream(Muxer *mux, const OptionsContext *o, InputStream *ist)
{
    OutputStream *ost = new_output_stream(mux, o, AVMEDIA_TYPE_UNKNOWN, ist);

    if (ost->enc_ctx) {
        av_log(ost, AV_LOG_FATAL, "Unknown stream encoding not supported yet (only streamcopy)\n");
        exit_program(1);
    }

    return ost;
}
/* Create an attachment output stream. Attachments carry no packets of their
 * own, so the stream is marked finished right away. */
static OutputStream *new_attachment_stream(Muxer *mux, const OptionsContext *o, InputStream *ist)
{
    OutputStream *ost = new_output_stream(mux, o, AVMEDIA_TYPE_ATTACHMENT, ist);

    ost->finished = 1;

    return ost;
}
/* Create a subtitle output stream. For encoded subtitles, an optional -s
 * option sets the canvas size used by renderers. */
static OutputStream *new_subtitle_stream(Muxer *mux, const OptionsContext *o, InputStream *ist)
{
    OutputStream *ost = new_output_stream(mux, o, AVMEDIA_TYPE_SUBTITLE, ist);
    AVStream *st = ost->st;

    if (ost->enc_ctx) {
        AVCodecContext *subtitle_enc = ost->enc_ctx;
        char *frame_size = NULL;

        MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, mux->fc, st);
        if (frame_size && av_parse_video_size(&subtitle_enc->width, &subtitle_enc->height, frame_size) < 0) {
            av_log(ost, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
            exit_program(1);
        }
    }

    return ost;
}
/* Bind a complex-filtergraph output pad 'ofilter' to a newly created output
 * stream in 'mux'. Only video and audio pads are supported. Streams fed from
 * a complex filtergraph must be encoded (streamcopy is impossible) and must
 * not also carry a simple -vf/-af/-filter_script filtergraph. */
static void init_output_filter(OutputFilter *ofilter, const OptionsContext *o,
                               Muxer *mux)
{
    OutputStream *ost;

    switch (ofilter->type) {
    case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(mux, o, NULL); break;
    case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(mux, o, NULL); break;
    default:
        av_log(mux, AV_LOG_FATAL, "Only video and audio filters are supported "
               "currently.\n");
        exit_program(1);
    }

    /* Cross-link the stream and the filtergraph output pad. */
    ost->filter = ofilter;
    ofilter->ost = ost;
    ofilter->format = -1;

    if (!ost->enc_ctx) {
        av_log(ost, AV_LOG_ERROR, "Streamcopy requested for output stream fed "
               "from a complex filtergraph. Filtering and streamcopy "
               "cannot be used together.\n");
        exit_program(1);
    }

    if (ost->avfilter && (ost->filters || ost->filters_script)) {
        const char *opt = ost->filters ? "-vf/-af/-filter" : "-filter_script";
        av_log(ost, AV_LOG_ERROR,
               "%s '%s' was specified through the %s option "
               "for output stream %d:%d, which is fed from a complex filtergraph.\n"
               "%s and -filter_complex cannot be used together for the same stream.\n",
               ost->filters ? "Filtergraph" : "Filtergraph script",
               ost->filters ? ost->filters : ost->filters_script,
               opt, ost->file_index, ost->index, opt);
        exit_program(1);
    }

    /* The pad is now consumed; free its temporary inout descriptor. */
    avfilter_inout_free(&ofilter->out_tmp);
}
/* Automatically pick the "best" video input stream for this output file and
 * create an output stream fed by it. Scoring prefers streams with recent
 * packets, then default disposition, then the highest resolution; attached
 * pictures are only eligible when the muxer's codec query returns APIC.
 * Does nothing when the container has no video codec. */
static void map_auto_video(Muxer *mux, const OptionsContext *o)
{
    AVFormatContext *oc = mux->fc;
    InputStream *best_ist = NULL;
    int best_score = 0;
    int qcr;

    /* video: highest resolution */
    if (av_guess_codec(oc->oformat, NULL, oc->url, NULL, AVMEDIA_TYPE_VIDEO) == AV_CODEC_ID_NONE)
        return;

    qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
    for (int j = 0; j < nb_input_files; j++) {
        InputFile *ifile = input_files[j];
        InputStream *file_best_ist = NULL;
        int file_best_score = 0;
        for (int i = 0; i < ifile->nb_streams; i++) {
            InputStream *ist = ifile->streams[i];
            int score;

            if (ist->user_set_discard == AVDISCARD_ALL ||
                ist->st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
                continue;

            /* Large weights ensure new-packets > default-disposition >
             * resolution as the ordering criteria. */
            score = ist->st->codecpar->width * ist->st->codecpar->height
                    + 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS)
                    + 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
            /* Attached pictures score lowest unless the muxer asks for APIC. */
            if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
                score = 1;

            if (score > file_best_score) {
                if((qcr==MKTAG('A', 'P', 'I', 'C')) && !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
                    continue;
                file_best_score = score;
                file_best_ist = ist;
            }
        }
        if (file_best_ist) {
            /* The default-disposition bonus only applies within one file. */
            if((qcr == MKTAG('A', 'P', 'I', 'C')) ||
               !(file_best_ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
                file_best_score -= 5000000*!!(file_best_ist->st->disposition & AV_DISPOSITION_DEFAULT);
            if (file_best_score > best_score) {
                best_score = file_best_score;
                best_ist = file_best_ist;
            }
        }
    }
    if (best_ist)
        new_video_stream(mux, o, best_ist);
}
/* Automatically pick the "best" audio input stream for this output file and
 * create an output stream fed by it. Scoring prefers streams with recent
 * packets, then default disposition, then the highest channel count.
 * Does nothing when the container has no audio codec. */
static void map_auto_audio(Muxer *mux, const OptionsContext *o)
{
    AVFormatContext *oc = mux->fc;
    InputStream *chosen = NULL;
    int chosen_score = 0;

    /* audio: most channels */
    if (av_guess_codec(oc->oformat, NULL, oc->url, NULL, AVMEDIA_TYPE_AUDIO) == AV_CODEC_ID_NONE)
        return;

    for (int f = 0; f < nb_input_files; f++) {
        InputFile *ifile = input_files[f];
        InputStream *local_best = NULL;
        int local_score = 0;

        for (int s = 0; s < ifile->nb_streams; s++) {
            InputStream *ist = ifile->streams[s];
            int score;

            if (ist->user_set_discard == AVDISCARD_ALL ||
                ist->st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
                continue;

            /* Large weights keep the criteria strictly ordered:
             * new packets > default disposition > channel count. */
            score = ist->st->codecpar->ch_layout.nb_channels;
            if (ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS)
                score += 100000000;
            if (ist->st->disposition & AV_DISPOSITION_DEFAULT)
                score += 5000000;

            if (score > local_score) {
                local_score = score;
                local_best = ist;
            }
        }

        if (!local_best)
            continue;

        /* The default-disposition bonus only applies within one file. */
        if (local_best->st->disposition & AV_DISPOSITION_DEFAULT)
            local_score -= 5000000;
        if (local_score > chosen_score) {
            chosen_score = local_score;
            chosen = local_best;
        }
    }

    if (chosen)
        new_audio_stream(mux, o, chosen);
}
/* Automatically map the first compatible subtitle input stream into this
 * output file. A stream is mapped when a subtitle encoder was forced by the
 * user, when the text/bitmap subtitle kinds of input and output overlap, or
 * when either side has no kind at all (e.g. dvb teletext). */
static void map_auto_subtitle(Muxer *mux, const OptionsContext *o)
{
    AVFormatContext *oc = mux->fc;
    char *subtitle_codec_name = NULL;

    /* subtitles: pick first */
    MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, oc, "s");
    if (!avcodec_find_encoder(oc->oformat->subtitle_codec) && !subtitle_codec_name)
        return;

    for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
        const AVCodecDescriptor *input_descriptor;
        const AVCodecDescriptor *output_descriptor = NULL;
        const AVCodec *output_codec;
        int input_props = 0, output_props = 0;
        int compatible;

        if (ist->st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE)
            continue;
        if (ist->user_set_discard == AVDISCARD_ALL)
            continue;

        input_descriptor = avcodec_descriptor_get(ist->st->codecpar->codec_id);
        output_codec = avcodec_find_encoder(oc->oformat->subtitle_codec);
        if (output_codec)
            output_descriptor = avcodec_descriptor_get(output_codec->id);
        if (input_descriptor)
            input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
        if (output_descriptor)
            output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);

        compatible = subtitle_codec_name ||
                     (input_props & output_props) ||
                     /* Map dvb teletext which has neither property to any
                      * output subtitle encoder. */
                     (input_descriptor && output_descriptor &&
                      (!input_descriptor->props ||
                       !output_descriptor->props));
        if (compatible) {
            new_subtitle_stream(mux, o, ist);
            break;
        }
    }
}
/* Automatically map data input streams into this output file. A data stream
 * is mapped only when its codec id matches the container's guessed data
 * codec; every matching stream is mapped (not just the first). */
static void map_auto_data(Muxer *mux, const OptionsContext *o)
{
    AVFormatContext *oc = mux->fc;
    enum AVCodecID codec_id = av_guess_codec(oc->oformat, NULL, oc->url, NULL, AVMEDIA_TYPE_DATA);

    if (codec_id == AV_CODEC_ID_NONE)
        return;

    for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
        if (ist->user_set_discard == AVDISCARD_ALL)
            continue;
        if (ist->st->codecpar->codec_type != AVMEDIA_TYPE_DATA)
            continue;
        if (ist->st->codecpar->codec_id == codec_id)
            new_data_stream(mux, o, ist);
    }
}
/* Apply one explicit -map entry. A linklabel map binds a named complex
 * filtergraph output; otherwise the referenced input stream is mapped to a
 * new output stream of the matching type (respecting -vn/-an/-sn/-dn). */
static void map_manual(Muxer *mux, const OptionsContext *o, const StreamMap *map)
{
    InputStream *ist;

    if (map->disabled)
        return;

    if (map->linklabel) {
        /* Mapping a named output pad of a complex filtergraph. */
        FilterGraph *fg;
        OutputFilter *ofilter = NULL;
        int j, k;

        for (j = 0; j < nb_filtergraphs; j++) {
            fg = filtergraphs[j];
            for (k = 0; k < fg->nb_outputs; k++) {
                AVFilterInOut *out = fg->outputs[k]->out_tmp;
                if (out && !strcmp(out->name, map->linklabel)) {
                    ofilter = fg->outputs[k];
                    goto loop_end;
                }
            }
        }
loop_end:
        if (!ofilter) {
            av_log(mux, AV_LOG_FATAL, "Output with label '%s' does not exist "
                   "in any defined filter graph, or was already used elsewhere.\n", map->linklabel);
            exit_program(1);
        }
        init_output_filter(ofilter, o, mux);
    } else {
        ist = input_files[map->file_index]->streams[map->stream_index];
        if (ist->user_set_discard == AVDISCARD_ALL) {
            av_log(mux, AV_LOG_FATAL, "Stream #%d:%d is disabled and cannot be mapped.\n",
                   map->file_index, map->stream_index);
            exit_program(1);
        }
        /* Per-type disable options silently drop the mapping. */
        if(o->subtitle_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
            return;
        if(o->audio_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
            return;
        if(o->video_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            return;
        if(o->data_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_DATA)
            return;

        switch (ist->st->codecpar->codec_type) {
        case AVMEDIA_TYPE_VIDEO: new_video_stream (mux, o, ist); break;
        case AVMEDIA_TYPE_AUDIO: new_audio_stream (mux, o, ist); break;
        case AVMEDIA_TYPE_SUBTITLE: new_subtitle_stream (mux, o, ist); break;
        case AVMEDIA_TYPE_DATA: new_data_stream (mux, o, ist); break;
        case AVMEDIA_TYPE_ATTACHMENT: new_attachment_stream(mux, o, ist); break;
        case AVMEDIA_TYPE_UNKNOWN:
            if (copy_unknown_streams) {
                new_unknown_stream (mux, o, ist);
                break;
            }
            /* fall through to the unsupported-type diagnostics */
        default:
            av_log(mux, ignore_unknown_streams ? AV_LOG_WARNING : AV_LOG_FATAL,
                   "Cannot map stream #%d:%d - unsupported type.\n",
                   map->file_index, map->stream_index);
            if (!ignore_unknown_streams) {
                av_log(mux, AV_LOG_FATAL,
                       "If you want unsupported types ignored instead "
                       "of failing, please use the -ignore_unknown option\n"
                       "If you want them copied, please use -copy_unknown\n");
                exit_program(1);
            }
        }
    }
}
/* Read every -attach file into memory and attach it to the output file as an
 * attachment stream, storing the file's bytes as codec extradata and its
 * basename as "filename" metadata. Exits the program on any I/O failure.
 *
 * Fix: the return value of avio_read() was previously ignored, so a short or
 * failed read would silently attach truncated/uninitialized data. The read
 * length is now checked against the file size. */
static void of_add_attachments(Muxer *mux, const OptionsContext *o)
{
    OutputStream *ost;
    int err;

    for (int i = 0; i < o->nb_attachments; i++) {
        AVIOContext *pb;
        uint8_t *attachment;
        const char *p;
        int64_t len, read;

        if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
            av_log(mux, AV_LOG_FATAL, "Could not open attachment file %s.\n",
                   o->attachments[i]);
            exit_program(1);
        }
        if ((len = avio_size(pb)) <= 0) {
            av_log(mux, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
                   o->attachments[i]);
            exit_program(1);
        }
        /* Extradata size is an int; also reserve room for zero padding. */
        if (len > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE ||
            !(attachment = av_malloc(len + AV_INPUT_BUFFER_PADDING_SIZE))) {
            av_log(mux, AV_LOG_FATAL, "Attachment %s too large.\n",
                   o->attachments[i]);
            exit_program(1);
        }

        /* Verify the whole file was actually read. */
        read = avio_read(pb, attachment, len);
        if (read != len) {
            av_log(mux, AV_LOG_FATAL, "Error reading attachment file %s.\n",
                   o->attachments[i]);
            exit_program(1);
        }
        memset(attachment + len, 0, AV_INPUT_BUFFER_PADDING_SIZE);

        ost = new_attachment_stream(mux, o, NULL);
        ost->attachment_filename = o->attachments[i];
        ost->st->codecpar->extradata = attachment;
        ost->st->codecpar->extradata_size = len;

        p = strrchr(o->attachments[i], '/');
        av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
        avio_closep(&pb);
    }
}
/* Create all output streams for this muxer: first bind unlabeled complex
 * filtergraph outputs, then either auto-map the best stream of each type
 * (no -map given) or apply the explicit -map entries, and finally add
 * -attach attachments. Fails if the result contains no streams at all. */
static void create_streams(Muxer *mux, const OptionsContext *o)
{
    AVFormatContext *oc = mux->fc;
    int auto_disable_v = o->video_disable;
    int auto_disable_a = o->audio_disable;
    int auto_disable_s = o->subtitle_disable;
    int auto_disable_d = o->data_disable;

    /* create streams for all unlabeled output pads */
    for (int i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (int j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            if (!ofilter->out_tmp || ofilter->out_tmp->name)
                continue;

            /* A filtergraph-fed stream of a type suppresses auto-mapping
             * of that type. */
            switch (ofilter->type) {
            case AVMEDIA_TYPE_VIDEO: auto_disable_v = 1; break;
            case AVMEDIA_TYPE_AUDIO: auto_disable_a = 1; break;
            case AVMEDIA_TYPE_SUBTITLE: auto_disable_s = 1; break;
            }
            init_output_filter(ofilter, o, mux);
        }
    }

    if (!o->nb_stream_maps) {
        /* pick the "best" stream of each type */
        if (!auto_disable_v)
            map_auto_video(mux, o);
        if (!auto_disable_a)
            map_auto_audio(mux, o);
        if (!auto_disable_s)
            map_auto_subtitle(mux, o);
        if (!auto_disable_d)
            map_auto_data(mux, o);
    } else {
        for (int i = 0; i < o->nb_stream_maps; i++)
            map_manual(mux, o, &o->stream_maps[i]);
    }

    of_add_attachments(mux, o);

    if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
        av_dump_format(oc, nb_output_files - 1, oc->url, 1);
        av_log(mux, AV_LOG_ERROR, "Output file does not contain any stream\n");
        exit_program(1);
    }
}
/* Set up the encoding- and muxing-side sync queues for this output file.
 * An encode-side queue is needed when -shortest applies to more than one
 * encoded A/V stream or when any encoded A/V stream is frame-limited; a
 * mux-side queue additionally covers interleaved streams that are not
 * encoded A/V. Returns 0 on success or a negative AVERROR code. */
static int setup_sync_queues(Muxer *mux, AVFormatContext *oc, int64_t buf_size_us)
{
    OutputFile *of = &mux->of;
    int nb_av_enc = 0, nb_interleaved = 0;
    int limit_frames = 0, limit_frames_av_enc = 0;

#define IS_AV_ENC(ost, type) \
    (ost->enc_ctx && (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO))
#define IS_INTERLEAVED(type) (type != AVMEDIA_TYPE_ATTACHMENT)

    /* First pass: classify the streams and detect frame limits. */
    for (int i = 0; i < oc->nb_streams; i++) {
        OutputStream *ost = of->streams[i];
        MuxStream *ms = ms_from_ost(ost);
        enum AVMediaType type = ost->st->codecpar->codec_type;

        ost->sq_idx_encode = -1;
        ost->sq_idx_mux = -1;

        nb_interleaved += IS_INTERLEAVED(type);
        nb_av_enc += IS_AV_ENC(ost, type);

        limit_frames |= ms->max_frames < INT64_MAX;
        limit_frames_av_enc |= (ms->max_frames < INT64_MAX) && IS_AV_ENC(ost, type);
    }

    if (!((nb_interleaved > 1 && of->shortest) ||
          (nb_interleaved > 0 && limit_frames)))
        return 0;

    /* if we have more than one encoded audio/video streams, or at least
     * one encoded audio/video stream is frame-limited, then we
     * synchronize them before encoding */
    if ((of->shortest && nb_av_enc > 1) || limit_frames_av_enc) {
        of->sq_encode = sq_alloc(SYNC_QUEUE_FRAMES, buf_size_us);
        if (!of->sq_encode)
            return AVERROR(ENOMEM);

        for (int i = 0; i < oc->nb_streams; i++) {
            OutputStream *ost = of->streams[i];
            MuxStream *ms = ms_from_ost(ost);
            enum AVMediaType type = ost->st->codecpar->codec_type;

            if (!IS_AV_ENC(ost, type))
                continue;

            ost->sq_idx_encode = sq_add_stream(of->sq_encode,
                                               of->shortest || ms->max_frames < INT64_MAX);
            if (ost->sq_idx_encode < 0)
                return ost->sq_idx_encode;

            ost->sq_frame = av_frame_alloc();
            if (!ost->sq_frame)
                return AVERROR(ENOMEM);

            if (ms->max_frames != INT64_MAX)
                sq_limit_frames(of->sq_encode, ost->sq_idx_encode, ms->max_frames);
        }
    }

    /* if there are any additional interleaved streams, then ALL the streams
     * are also synchronized before sending them to the muxer */
    if (nb_interleaved > nb_av_enc) {
        mux->sq_mux = sq_alloc(SYNC_QUEUE_PACKETS, buf_size_us);
        if (!mux->sq_mux)
            return AVERROR(ENOMEM);

        mux->sq_pkt = av_packet_alloc();
        if (!mux->sq_pkt)
            return AVERROR(ENOMEM);

        for (int i = 0; i < oc->nb_streams; i++) {
            OutputStream *ost = of->streams[i];
            MuxStream *ms = ms_from_ost(ost);
            enum AVMediaType type = ost->st->codecpar->codec_type;

            if (!IS_INTERLEAVED(type))
                continue;

            ost->sq_idx_mux = sq_add_stream(mux->sq_mux,
                                            of->shortest || ms->max_frames < INT64_MAX);
            if (ost->sq_idx_mux < 0)
                return ost->sq_idx_mux;

            if (ms->max_frames != INT64_MAX)
                sq_limit_frames(mux->sq_mux, ost->sq_idx_mux, ms->max_frames);
        }
    }

#undef IS_AV_ENC
#undef IS_INTERLEAVED

    return 0;
}
/* Process the -program options: each option string is a ':'-separated list
 * of key=value pairs. A first scan extracts "program_num" (defaulting to
 * i+1) so the AVProgram can be created; a second scan applies "title" and
 * the "st" stream indices. Unknown keys abort the program. */
static void of_add_programs(Muxer *mux, const OptionsContext *o)
{
    AVFormatContext *oc = mux->fc;
    /* process manually set programs */
    for (int i = 0; i < o->nb_program; i++) {
        const char *p = o->program[i].u.str;
        int progid = i+1;
        AVProgram *program;

        /* First scan: only look for an explicit program_num. */
        while(*p) {
            const char *p2 = av_get_token(&p, ":");
            const char *to_dealloc = p2;
            char *key;
            if (!p2)
                break;

            if(*p) p++;

            key = av_get_token(&p2, "=");
            if (!key || !*p2) {
                av_freep(&to_dealloc);
                av_freep(&key);
                break;
            }
            p2++;

            if (!strcmp(key, "program_num"))
                progid = strtol(p2, NULL, 0);
            av_freep(&to_dealloc);
            av_freep(&key);
        }

        program = av_new_program(oc, progid);
        if (!program)
            report_and_exit(AVERROR(ENOMEM));

        /* Second scan: apply the remaining keys to the created program. */
        p = o->program[i].u.str;
        while(*p) {
            const char *p2 = av_get_token(&p, ":");
            const char *to_dealloc = p2;
            char *key;
            if (!p2)
                break;
            if(*p) p++;

            key = av_get_token(&p2, "=");
            if (!key) {
                av_log(mux, AV_LOG_FATAL,
                       "No '=' character in program string %s.\n",
                       p2);
                exit_program(1);
            }
            if (!*p2)
                exit_program(1);
            p2++;

            if (!strcmp(key, "title")) {
                av_dict_set(&program->metadata, "title", p2, 0);
            } else if (!strcmp(key, "program_num")) {
                /* already handled in the first scan */
            } else if (!strcmp(key, "st")) {
                int st_num = strtol(p2, NULL, 0);
                av_program_add_stream_index(oc, progid, st_num);
            } else {
                av_log(mux, AV_LOG_FATAL, "Unknown program key %s.\n", key);
                exit_program(1);
            }
            av_freep(&to_dealloc);
            av_freep(&key);
        }
    }
}
/**
 * Parse a metadata specifier passed as 'arg' parameter.
 * @param arg  metadata string to parse
 * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
 * @param index for type c/p, chapter/program index is written here
 * @param stream_spec for type s, the stream specifier is written here
 */
static void parse_meta_type(void *logctx, const char *arg,
                            char *type, int *index, const char **stream_spec)
{
    /* An empty specifier selects global metadata. */
    if (!*arg) {
        *type = 'g';
        return;
    }

    *type = *arg;
    switch (*arg) {
    case 'g':
        break;
    case 's':
        /* 's' may only be followed by ":<stream specifier>" or nothing. */
        if (*(++arg) && *arg != ':') {
            av_log(logctx, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
            exit_program(1);
        }
        *stream_spec = *arg == ':' ? arg + 1 : "";
        break;
    case 'c':
    case 'p':
        /* Optional ":<index>" selects a particular chapter/program. */
        if (*(++arg) == ':')
            *index = strtol(++arg, NULL, 0);
        break;
    default:
        av_log(logctx, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
        exit_program(1);
    }
}
/* Apply the -metadata options to the output: each entry is "key=value" with
 * a specifier selecting global/stream/chapter/program metadata. Stream
 * specifiers may match several streams; an empty value deletes the key.
 * Exits the program on malformed entries or out-of-range indices. */
static void of_add_metadata(OutputFile *of, AVFormatContext *oc,
                            const OptionsContext *o)
{
    for (int i = 0; i < o->nb_metadata; i++) {
        AVDictionary **m;
        char type, *val;
        const char *stream_spec;
        int index = 0, ret = 0;

        val = strchr(o->metadata[i].u.str, '=');
        if (!val) {
            av_log(of, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
                   o->metadata[i].u.str);
            exit_program(1);
        }
        /* Split the string in place: key ends at '=', value starts after. */
        *val++ = 0;

        parse_meta_type(of, o->metadata[i].specifier, &type, &index, &stream_spec);
        if (type == 's') {
            for (int j = 0; j < oc->nb_streams; j++) {
                OutputStream *ost = of->streams[j];
                if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
#if FFMPEG_ROTATION_METADATA
                    /* Legacy: translate a numeric "rotate" tag into a
                     * display-matrix rotation override. */
                    if (!strcmp(o->metadata[i].u.str, "rotate")) {
                        char *tail;
                        double theta = av_strtod(val, &tail);
                        if (!*tail) {
                            ost->rotate_overridden = 1;
                            ost->rotate_override_value = theta;
                        }

                        av_log(ost, AV_LOG_WARNING,
                               "Conversion of a 'rotate' metadata key to a "
                               "proper display matrix rotation is deprecated. "
                               "See -display_rotation for setting rotation "
                               "instead.");
                    } else {
#endif
                        av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
#if FFMPEG_ROTATION_METADATA
                    }
#endif
                } else if (ret < 0)
                    exit_program(1);
            }
        } else {
            switch (type) {
            case 'g':
                m = &oc->metadata;
                break;
            case 'c':
                if (index < 0 || index >= oc->nb_chapters) {
                    av_log(of, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
                    exit_program(1);
                }
                m = &oc->chapters[index]->metadata;
                break;
            case 'p':
                if (index < 0 || index >= oc->nb_programs) {
                    av_log(of, AV_LOG_FATAL, "Invalid program index %d in metadata specifier.\n", index);
                    exit_program(1);
                }
                m = &oc->programs[index]->metadata;
                break;
            default:
                av_log(of, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
                exit_program(1);
            }
            av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
        }
    }
}
/* Choose the channel layout the output filter 'f' should produce for the
 * encoder of 'ost'. A fully specified layout is copied through unchanged;
 * a layout of order UNSPEC (only a channel count) is resolved against the
 * encoder's supported-layout list, falling back to the default native
 * layout for that channel count. Aborts on allocation failure. */
static void set_channel_layout(OutputFilter *f, OutputStream *ost)
{
    const AVCodec *c = ost->enc_ctx->codec;
    int ret;

    /* Pass the layout through for all orders but UNSPEC. */
    if (ost->enc_ctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
        ret = av_channel_layout_copy(&f->ch_layout, &ost->enc_ctx->ch_layout);
        if (ret < 0)
            report_and_exit(AVERROR(ENOMEM));
        return;
    }

    /* Requested layout is of order UNSPEC: only a channel count is known.
     * If the encoder advertises supported layouts, pick the first one with
     * a matching channel count. */
    if (c->ch_layouts) {
        const AVChannelLayout *sel = c->ch_layouts;
        while (sel->nb_channels &&
               sel->nb_channels != ost->enc_ctx->ch_layout.nb_channels)
            sel++;
        if (sel->nb_channels) {
            ret = av_channel_layout_copy(&f->ch_layout, sel);
            if (ret < 0)
                report_and_exit(AVERROR(ENOMEM));
            return;
        }
    }

    /* No list, or no matching entry: use the default native layout for the
     * requested amount of channels. */
    av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
}
/* Copy the chapters of input file 'ifile' into output context 'os',
 * shifting them by the output start time and clipping them to the
 * recording time. Chapters ending before the output window are skipped;
 * copying stops at the first chapter starting after it. Chapter metadata
 * is copied only when 'copy_metadata' is set.
 * Returns 0 on success or AVERROR(ENOMEM). */
static int copy_chapters(InputFile *ifile, OutputFile *ofile, AVFormatContext *os,
                         int copy_metadata)
{
    AVFormatContext *is = ifile->ctx;
    AVChapter **tmp;
    int i;

    tmp = av_realloc_f(os->chapters, is->nb_chapters + os->nb_chapters, sizeof(*os->chapters));
    if (!tmp)
        return AVERROR(ENOMEM);
    os->chapters = tmp;

    for (i = 0; i < is->nb_chapters; i++) {
        AVChapter *in_ch = is->chapters[i], *out_ch;
        int64_t start_time = (ofile->start_time == AV_NOPTS_VALUE) ? 0 : ofile->start_time;
        /* Offset and limit expressed in the chapter's own time base. */
        int64_t ts_off = av_rescale_q(start_time - ifile->ts_offset,
                                      AV_TIME_BASE_Q, in_ch->time_base);
        int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
                     av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);

        if (in_ch->end < ts_off)
            continue;
        if (rt != INT64_MAX && in_ch->start > rt + ts_off)
            break;

        out_ch = av_mallocz(sizeof(AVChapter));
        if (!out_ch)
            return AVERROR(ENOMEM);

        out_ch->id = in_ch->id;
        out_ch->time_base = in_ch->time_base;
        out_ch->start = FFMAX(0, in_ch->start - ts_off);
        out_ch->end = FFMIN(rt, in_ch->end - ts_off);

        if (copy_metadata)
            av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);

        os->chapters[os->nb_chapters++] = out_ch;
    }
    return 0;
}
/* Apply one -map_metadata mapping from input context 'ic' (NULL to merely
 * disable the corresponding automatic mapping) into the output. 'inspec' and
 * 'outspec' are metadata specifiers (see parse_meta_type). The manual flags
 * are set so automatic copying is suppressed for the touched scopes.
 * Returns 0; exits the program on invalid specifiers.
 * NOTE(review): the 'o' parameter appears unused in this body — confirm it
 * is kept only for signature uniformity with the other helpers. */
static int copy_metadata(Muxer *mux, AVFormatContext *ic,
                         const char *outspec, const char *inspec,
                         int *metadata_global_manual, int *metadata_streams_manual,
                         int *metadata_chapters_manual, const OptionsContext *o)
{
    AVFormatContext *oc = mux->fc;
    AVDictionary **meta_in = NULL;
    AVDictionary **meta_out = NULL;
    int i, ret = 0;
    char type_in, type_out;
    const char *istream_spec = NULL, *ostream_spec = NULL;
    int idx_in = 0, idx_out = 0;

    parse_meta_type(mux, inspec, &type_in, &idx_in, &istream_spec);
    parse_meta_type(mux, outspec, &type_out, &idx_out, &ostream_spec);

    /* Mark the touched scopes as manually mapped. */
    if (type_in == 'g' || type_out == 'g')
        *metadata_global_manual = 1;
    if (type_in == 's' || type_out == 's')
        *metadata_streams_manual = 1;
    if (type_in == 'c' || type_out == 'c')
        *metadata_chapters_manual = 1;

    /* ic is NULL when just disabling automatic mappings */
    if (!ic)
        return 0;

#define METADATA_CHECK_INDEX(index, nb_elems, desc)\
    if ((index) < 0 || (index) >= (nb_elems)) {\
        av_log(mux, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
               (desc), (index));\
        exit_program(1);\
    }

#define SET_DICT(type, meta, context, index)\
        switch (type) {\
        case 'g':\
            meta = &context->metadata;\
            break;\
        case 'c':\
            METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
            meta = &context->chapters[index]->metadata;\
            break;\
        case 'p':\
            METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
            meta = &context->programs[index]->metadata;\
            break;\
        case 's':\
            break; /* handled separately below */ \
        default: av_assert0(0);\
        }\

    SET_DICT(type_in, meta_in, ic, idx_in);
    SET_DICT(type_out, meta_out, oc, idx_out);

    /* for input streams choose first matching stream */
    if (type_in == 's') {
        for (i = 0; i < ic->nb_streams; i++) {
            if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
                meta_in = &ic->streams[i]->metadata;
                break;
            } else if (ret < 0)
                exit_program(1);
        }
        if (!meta_in) {
            av_log(mux, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
            exit_program(1);
        }
    }

    /* For output streams, copy into every stream matching the specifier. */
    if (type_out == 's') {
        for (i = 0; i < oc->nb_streams; i++) {
            if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
                meta_out = &oc->streams[i]->metadata;
                av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
            } else if (ret < 0)
                exit_program(1);
        }
    } else
        av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);

    return 0;
}
/* Apply all metadata- and chapter-copying for this output file: process the
 * explicit -map_metadata mappings, copy chapters from the selected (or the
 * first chapter-bearing) input file, then fall back to automatic copying of
 * global and per-stream metadata where no manual mapping was given. */
static void copy_meta(Muxer *mux, const OptionsContext *o)
{
    OutputFile *of = &mux->of;
    AVFormatContext *oc = mux->fc;
    int chapters_input_file = o->chapters_input_file;
    int metadata_global_manual = 0;
    int metadata_streams_manual = 0;
    int metadata_chapters_manual = 0;

    /* copy metadata */
    for (int i = 0; i < o->nb_metadata_map; i++) {
        char *p;
        /* The option value is "<file index>[:<specifier>]". */
        int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);

        if (in_file_index >= nb_input_files) {
            av_log(mux, AV_LOG_FATAL, "Invalid input file index %d while "
                   "processing metadata maps\n", in_file_index);
            exit_program(1);
        }
        /* A negative index only disables the automatic mapping. */
        copy_metadata(mux,
                      in_file_index >= 0 ? input_files[in_file_index]->ctx : NULL,
                      o->metadata_map[i].specifier, *p ? p + 1 : p,
                      &metadata_global_manual, &metadata_streams_manual,
                      &metadata_chapters_manual, o);
    }

    /* copy chapters */
    if (chapters_input_file >= nb_input_files) {
        if (chapters_input_file == INT_MAX) {
            /* copy chapters from the first input file that has them*/
            chapters_input_file = -1;
            for (int i = 0; i < nb_input_files; i++)
                if (input_files[i]->ctx->nb_chapters) {
                    chapters_input_file = i;
                    break;
                }
        } else {
            av_log(mux, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
                   chapters_input_file);
            exit_program(1);
        }
    }
    if (chapters_input_file >= 0)
        copy_chapters(input_files[chapters_input_file], of, oc,
                      !metadata_chapters_manual);

    /* copy global metadata by default */
    if (!metadata_global_manual && nb_input_files){
        av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
                     AV_DICT_DONT_OVERWRITE);
        /* Drop tags that would be stale or misleading on the output. */
        if (of->recording_time != INT64_MAX)
            av_dict_set(&oc->metadata, "duration", NULL, 0);
        av_dict_set(&oc->metadata, "creation_time", NULL, 0);
        av_dict_set(&oc->metadata, "company_name", NULL, 0);
        av_dict_set(&oc->metadata, "product_name", NULL, 0);
        av_dict_set(&oc->metadata, "product_version", NULL, 0);
    }
    if (!metadata_streams_manual)
        for (int i = 0; i < of->nb_streams; i++) {
            OutputStream *ost = of->streams[i];

            if (!ost->ist) /* this is true e.g. for attached files */
                continue;
            av_dict_copy(&ost->st->metadata, ost->ist->st->metadata, AV_DICT_DONT_OVERWRITE);
            if (ost->enc_ctx) {
                /* The encoder tag will be rewritten by the new encoder. */
                av_dict_set(&ost->st->metadata, "encoder", NULL, 0);
            }
        }
}
/* Set stream dispositions on the output: copy them from the mapped input
 * streams, then either apply the user's explicit -disposition options, or —
 * when none were given — pick a default stream per media type.
 * Returns 0 on success, a negative AVERROR code on failure. */
static int set_dispositions(Muxer *mux, const OptionsContext *o)
{
    OutputFile *of = &mux->of;
    AVFormatContext *ctx = mux->fc;
    int nb_streams[AVMEDIA_TYPE_NB] = { 0 };   // stream count per media type
    int have_default[AVMEDIA_TYPE_NB] = { 0 }; // a default stream exists per type
    int have_manual = 0;                       // any -disposition was supplied
    int ret = 0;
    const char **dispositions;
    dispositions = av_calloc(ctx->nb_streams, sizeof(*dispositions));
    if (!dispositions)
        return AVERROR(ENOMEM);
    // first, copy the input dispositions
    for (int i = 0; i < ctx->nb_streams; i++) {
        OutputStream *ost = of->streams[i];
        nb_streams[ost->st->codecpar->codec_type]++;
        MATCH_PER_STREAM_OPT(disposition, str, dispositions[i], ctx, ost->st);
        have_manual |= !!dispositions[i];
        if (ost->ist) {
            ost->st->disposition = ost->ist->st->disposition;
            if (ost->st->disposition & AV_DISPOSITION_DEFAULT)
                have_default[ost->st->codecpar->codec_type] = 1;
        }
    }
    if (have_manual) {
        // process manually set dispositions - they override the above copy
        for (int i = 0; i < ctx->nb_streams; i++) {
            OutputStream *ost = of->streams[i];
            const char *disp = dispositions[i];
            if (!disp)
                continue;
            ret = av_opt_set(ost->st, "disposition", disp, 0);
            if (ret < 0)
                goto finish;
        }
    } else {
        // For each media type with more than one stream, find a suitable stream to
        // mark as default, unless one is already marked default.
        // "Suitable" means the first of that type, skipping attached pictures.
        for (int i = 0; i < ctx->nb_streams; i++) {
            OutputStream *ost = of->streams[i];
            enum AVMediaType type = ost->st->codecpar->codec_type;
            if (nb_streams[type] < 2 || have_default[type] ||
                ost->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
                continue;
            ost->st->disposition |= AV_DISPOSITION_DEFAULT;
            have_default[type] = 1;
        }
    }
finish:
    av_freep(&dispositions);
    return ret;
}
/* Variable names usable in -force_key_frames "expr:" expressions; the order
 * corresponds to the FKF_* indices referenced in process_forced_keyframes().
 * NULL-terminated for av_expr_parse(). */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
/* qsort() comparator ordering int64_t values ascending: returns a negative
 * value, zero, or a positive value as *a is less than, equal to, or greater
 * than *b. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
/* Parse a comma-separated -force_key_frames timestamp list into a sorted
 * array of AV_TIME_BASE_Q timestamps stored in kf->pts / kf->nb_pts.
 * Each element is either a time string or "chapters[delta]", which expands
 * to one entry per chapter of the output file (shifted by delta).
 * Exits the program on allocation failure. */
static void parse_forced_key_frames(KeyframeForceCtx *kf, const Muxer *mux,
                                    const char *spec)
{
    const char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;
    /* count the comma-separated elements to size the initial array */
    for (p = spec; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts)
        report_and_exit(AVERROR(ENOMEM));
    p = spec;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');
        if (next)
            *next++ = 0; /* spec is mutated in place to split elements */
        if (!memcmp(p, "chapters", 8)) {
            AVChapter * const *ch = mux->fc->chapters;
            unsigned int nb_ch = mux->fc->nb_chapters;
            int j;
            /* grow the array: one "chapters" element becomes nb_ch entries */
            if (nb_ch > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += nb_ch - 1,
                                     sizeof(*pts))))
                report_and_exit(AVERROR(ENOMEM));
            /* optional offset after the "chapters" keyword, e.g. "chapters-0.1" */
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            for (j = 0; j < nb_ch; j++) {
                const AVChapter *c = ch[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            AV_TIME_BASE_Q) + t;
            }
        } else {
            av_assert1(index < size);
            pts[index++] = parse_time_or_die("force_key_frames", p, 1);
        }
        p = next;
    }
    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    kf->nb_pts = size;
    kf->pts = pts;
}
/* Configure forced-keyframe handling for every encoded video stream of the
 * output file from its -force_key_frames option: either an "expr:" expression,
 * the "source"/"source_no_drop" modes, or a static timestamp list.
 * Returns 0 on success, a negative AVERROR code on an invalid expression. */
static int process_forced_keyframes(Muxer *mux, const OptionsContext *o)
{
    for (int i = 0; i < mux->of.nb_streams; i++) {
        OutputStream *ost = mux->of.streams[i];
        const char *forced_keyframes = NULL;
        MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_keyframes, mux->fc, ost->st);
        /* only relevant for re-encoded video streams with the option set */
        if (!(ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
              ost->enc_ctx && forced_keyframes))
            continue;
        if (!strncmp(forced_keyframes, "expr:", 5)) {
            int ret = av_expr_parse(&ost->kf.pexpr, forced_keyframes + 5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
            if (ret < 0) {
                av_log(ost, AV_LOG_ERROR,
                       "Invalid force_key_frames expression '%s'\n", forced_keyframes + 5);
                return ret;
            }
            ost->kf.expr_const_values[FKF_N] = 0;
            ost->kf.expr_const_values[FKF_N_FORCED] = 0;
            ost->kf.expr_const_values[FKF_PREV_FORCED_N] = NAN;
            ost->kf.expr_const_values[FKF_PREV_FORCED_T] = NAN;
            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
        } else if (!strcmp(forced_keyframes, "source")) {
            ost->kf.type = KF_FORCE_SOURCE;
        } else if (!strcmp(forced_keyframes, "source_no_drop")) {
            ost->kf.type = KF_FORCE_SOURCE_NO_DROP;
        } else {
            parse_forced_key_frames(&ost->kf, mux, forced_keyframes);
        }
    }
    return 0;
}
/* Check that all user-supplied codec AVOptions were consumed by some stream's
 * encoder; warn about unused ones and abort on options that are not encoding
 * options at all. Options recognized by the (de)muxer layer are ignored. */
static void validate_enc_avopt(Muxer *mux, const AVDictionary *codec_avopt)
{
    const AVClass *class = avcodec_get_class();
    const AVClass *fclass = avformat_get_class();
    const OutputFile *of = &mux->of;
    AVDictionary *unused_opts;
    const AVDictionaryEntry *e;
    /* start from all given options, then remove every option some stream used */
    unused_opts = strip_specifiers(codec_avopt);
    for (int i = 0; i < of->nb_streams; i++) {
        e = NULL;
        while ((e = av_dict_iterate(of->streams[i]->encoder_opts, e)))
            av_dict_set(&unused_opts, e->key, NULL, 0);
    }
    e = NULL;
    while ((e = av_dict_iterate(unused_opts, e))) {
        const AVOption *option = av_opt_find(&class, e->key, NULL, 0,
                                             AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
        const AVOption *foption = av_opt_find(&fclass, e->key, NULL, 0,
                                              AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
        /* unknown options and options handled by libavformat are not errors here */
        if (!option || foption)
            continue;
        if (!(option->flags & AV_OPT_FLAG_ENCODING_PARAM)) {
            av_log(mux, AV_LOG_ERROR, "Codec AVOption %s (%s) is not an "
                   "encoding option.\n", e->key, option->help ? option->help : "");
            exit_program(1);
        }
        // gop_timecode is injected by generic code but not always used
        if (!strcmp(e->key, "gop_timecode"))
            continue;
        av_log(mux, AV_LOG_WARNING, "Codec AVOption %s (%s) has not been used "
               "for any stream. The most likely reason is either wrong type "
               "(e.g. a video option with no video streams) or that it is a "
               "private option of some encoder which was not actually used for "
               "any stream.\n", e->key, option->help ? option->help : "");
    }
    av_dict_free(&unused_opts);
}
/* AVClass item_name callback: returns the muxer's log name, built in
 * mux_alloc()/of_open() (e.g. "out#0/mp4"). */
static const char *output_file_item_name(void *obj)
{
    return ((const Muxer *)obj)->log_name;
}
/* AVClass describing Muxer/OutputFile objects, used for av_log() context. */
static const AVClass output_file_class = {
    .class_name = "OutputFile",
    .version = LIBAVUTIL_VERSION_INT,
    .item_name = output_file_item_name,
    .category = AV_CLASS_CATEGORY_MUXER,
};
/* Append a new Muxer to the global output_files array and give it a log
 * name of the form "out#<index>"; of_open() later appends the format name. */
static Muxer *mux_alloc(void)
{
    Muxer *m = allocate_array_elem(&output_files, sizeof(*m), &nb_output_files);

    m->of.clazz = &output_file_class;
    m->of.index = nb_output_files - 1;

    snprintf(m->log_name, sizeof(m->log_name), "out#%d", m->of.index);

    return m;
}
/* Create and initialize one output file from the parsed options:
 * allocates the Muxer and AVFormatContext, resolves -t/-to into a recording
 * time, creates all output streams and their simple filtergraphs, opens the
 * actual file/protocol, copies metadata/chapters, and sets dispositions,
 * forced keyframes and sync queues. Returns 0 on success, a negative AVERROR
 * code on failure (most errors terminate the process via exit_program()). */
int of_open(const OptionsContext *o, const char *filename)
{
    Muxer *mux;
    AVFormatContext *oc;
    int err;
    OutputFile *of;
    int64_t recording_time = o->recording_time;
    int64_t stop_time = o->stop_time;
    mux = mux_alloc();
    of = &mux->of;
    /* -t wins when both -t and -to were given */
    if (stop_time != INT64_MAX && recording_time != INT64_MAX) {
        stop_time = INT64_MAX;
        av_log(mux, AV_LOG_WARNING, "-t and -to cannot be used together; using -t.\n");
    }
    /* translate an absolute -to into a duration relative to -ss */
    if (stop_time != INT64_MAX && recording_time == INT64_MAX) {
        int64_t start_time = o->start_time == AV_NOPTS_VALUE ? 0 : o->start_time;
        if (stop_time <= start_time) {
            av_log(mux, AV_LOG_ERROR, "-to value smaller than -ss; aborting.\n");
            exit_program(1);
        } else {
            recording_time = stop_time - start_time;
        }
    }
    of->recording_time = recording_time;
    of->start_time = o->start_time;
    of->shortest = o->shortest;
    mux->thread_queue_size = o->thread_queue_size > 0 ? o->thread_queue_size : 8;
    mux->limit_filesize = o->limit_filesize;
    av_dict_copy(&mux->opts, o->g->format_opts, 0);
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    err = avformat_alloc_output_context2(&oc, NULL, o->format, filename);
    if (!oc) {
        print_error(filename, err);
        exit_program(1);
    }
    mux->fc = oc;
    /* extend the log name with the container name, e.g. "out#0/mp4" */
    av_strlcat(mux->log_name, "/", sizeof(mux->log_name));
    av_strlcat(mux->log_name, oc->oformat->name, sizeof(mux->log_name));
    /* an SDP is only printed when every output is RTP */
    if (strcmp(oc->oformat->name, "rtp"))
        want_sdp = 0;
    of->format = oc->oformat;
    if (recording_time != INT64_MAX)
        oc->duration = recording_time;
    oc->interrupt_callback = int_cb;
    if (o->bitexact) {
        oc->flags |= AVFMT_FLAG_BITEXACT;
        of->bitexact = 1;
    } else {
        /* bitexact may also have been requested via -fflags */
        of->bitexact = check_opt_bitexact(oc, mux->opts, "fflags",
                                          AVFMT_FLAG_BITEXACT);
    }
    /* create all output streams for this file */
    create_streams(mux, o);
    /* check if all codec options have been used */
    validate_enc_avopt(mux, o->g->codec_opts);
    /* set the decoding_needed flags and create simple filtergraphs */
    for (int i = 0; i < of->nb_streams; i++) {
        OutputStream *ost = of->streams[i];
        if (ost->enc_ctx && ost->ist) {
            InputStream *ist = ost->ist;
            ist->decoding_needed |= DECODING_FOR_OST;
            ist->processing_needed = 1;
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
                ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                err = init_simple_filtergraph(ist, ost);
                if (err < 0) {
                    av_log(ost, AV_LOG_ERROR,
                           "Error initializing a simple filtergraph\n");
                    exit_program(1);
                }
            }
        } else if (ost->ist) {
            ost->ist->processing_needed = 1;
        }
        /* set the filter output constraints */
        if (ost->filter) {
            const AVCodec *c = ost->enc_ctx->codec;
            OutputFilter *f = ost->filter;
            switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                f->frame_rate = ost->frame_rate;
                f->width = ost->enc_ctx->width;
                f->height = ost->enc_ctx->height;
                if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
                    /* a fixed pixel format was requested */
                    f->format = ost->enc_ctx->pix_fmt;
                } else {
                    /* otherwise anything the encoder supports */
                    f->formats = c->pix_fmts;
                }
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (ost->enc_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
                    f->format = ost->enc_ctx->sample_fmt;
                } else {
                    f->formats = c->sample_fmts;
                }
                if (ost->enc_ctx->sample_rate) {
                    f->sample_rate = ost->enc_ctx->sample_rate;
                } else {
                    f->sample_rates = c->supported_samplerates;
                }
                if (ost->enc_ctx->ch_layout.nb_channels) {
                    set_channel_layout(f, ost);
                } else if (c->ch_layouts) {
                    f->ch_layouts = c->ch_layouts;
                }
                break;
            }
        }
    }
    /* check filename in case of an image number is expected */
    if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(oc->url)) {
            print_error(oc->url, AVERROR(EINVAL));
            exit_program(1);
        }
    }
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        /* test if it already exists to avoid losing precious files */
        assert_file_overwrite(filename);
        /* open the file */
        if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
                              &oc->interrupt_callback,
                              &mux->opts)) < 0) {
            print_error(filename, err);
            exit_program(1);
        }
    } else if (strcmp(oc->oformat->name, "image2")==0 && !av_filename_number_test(filename))
        assert_file_overwrite(filename);
    if (o->mux_preload) {
        av_dict_set_int(&mux->opts, "preload", o->mux_preload*AV_TIME_BASE, 0);
    }
    oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
    /* copy metadata and chapters from input files */
    copy_meta(mux, o);
    of_add_programs(mux, o);
    of_add_metadata(of, oc, o);
    err = set_dispositions(mux, o);
    if (err < 0) {
        av_log(mux, AV_LOG_FATAL, "Error setting output stream dispositions\n");
        exit_program(1);
    }
    // parse forced keyframe specifications;
    // must be done after chapters are created
    err = process_forced_keyframes(mux, o);
    if (err < 0) {
        av_log(mux, AV_LOG_FATAL, "Error processing forced keyframes\n");
        exit_program(1);
    }
    err = setup_sync_queues(mux, oc, o->shortest_buf_duration * AV_TIME_BASE);
    if (err < 0) {
        av_log(mux, AV_LOG_FATAL, "Error setting up output sync queues\n");
        exit_program(1);
    }
    of->url = filename;
    /* write the header for files with no streams */
    if (of->format->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
        int ret = mux_check_init(mux);
        if (ret < 0)
            return ret;
    }
    return 0;
}
This source diff could not be displayed because it is too large. You can view the blob instead.
/*
* Copyright (c) 2007-2010 Stefano Sabatini
* Copyright (c) 2020 Taner Sener ( tanersener gmail com )
* Copyright (c) 2020-2022 Taner Sener
* Copyright (c) 2023 ARTHENICA LTD
*
* This file is part of FFmpeg.
*
......@@ -29,6 +30,13 @@
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop mobile-ffmpeg and later ffmpeg-kit libraries.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
* - fftools header names updated
*
* mobile-ffmpeg / ffmpeg-kit changes by Taner Sener
*
* 09.2022
......@@ -52,9 +60,11 @@
#include "libavutil/ffversion.h"
#include <string.h>
#include <math.h>
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/ambient_viewing_environment.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
......@@ -163,6 +173,8 @@ typedef struct ReadInterval {
__thread ReadInterval *read_intervals;
__thread int read_intervals_nb = 0;
__thread int find_stream_info = 1;
/* section structure definition */
#define SECTION_MAX_NB_CHILDREN 10
......@@ -626,6 +638,7 @@ static inline void writer_put_str_printf(WriterContext *wctx, const char *str)
static inline void writer_printf_printf(WriterContext *wctx, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
av_vlog(NULL, AV_LOG_STDERR, fmt, ap);
va_end(ap);
......@@ -671,7 +684,7 @@ static int writer_open(WriterContext **wctx, const Writer *writer, const char *a
goto fail;
}
while ((opt = av_dict_get(opts, "", opt, AV_DICT_IGNORE_SUFFIX))) {
while ((opt = av_dict_iterate(opts, opt))) {
if ((ret = av_opt_set(*wctx, opt->key, opt->value, AV_OPT_SEARCH_CHILDREN)) < 0) {
av_log(*wctx, AV_LOG_ERROR, "Failed to set option '%s' with value '%s' provided to writer context\n",
opt->key, opt->value);
......@@ -1907,12 +1920,14 @@ static void writer_register_all(void)
writer_print_string(w, k, pbuf.str, 0); \
} while (0)
#define print_list_fmt(k, f, n, ...) do { \
#define print_list_fmt(k, f, n, m, ...) do { \
av_bprint_clear(&pbuf); \
for (int idx = 0; idx < n; idx++) { \
if (idx > 0) \
av_bprint_chars(&pbuf, ' ', 1); \
av_bprintf(&pbuf, f, __VA_ARGS__); \
for (int idx2 = 0; idx2 < m; idx2++) { \
if (idx > 0 || idx2 > 0) \
av_bprint_chars(&pbuf, ' ', 1); \
av_bprintf(&pbuf, f, __VA_ARGS__); \
} \
} \
writer_print_string(w, k, pbuf.str, 0); \
} while (0)
......@@ -1953,7 +1968,7 @@ static inline int show_tags(WriterContext *w, AVDictionary *tags, int section_id
return 0;
writer_print_section_header(w, section_id);
while ((tag = av_dict_get(tags, "", tag, AV_DICT_IGNORE_SUFFIX))) {
while ((tag = av_dict_iterate(tags, tag))) {
if ((ret = print_str_validate(tag->key, tag->value)) < 0)
break;
}
......@@ -2023,7 +2038,7 @@ static void print_dovi_metadata(WriterContext *w, const AVDOVIMetadata *dovi)
const AVDOVIReshapingCurve *curve = &mapping->curves[c];
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_COMPONENT);
print_list_fmt("pivots", "%"PRIu16, curve->num_pivots, curve->pivots[idx]);
print_list_fmt("pivots", "%"PRIu16, curve->num_pivots, 1, curve->pivots[idx]);
writer_print_section_header(w, SECTION_ID_FRAME_SIDE_DATA_PIECE_LIST);
for (int i = 0; i < curve->num_pivots - 1; i++) {
......@@ -2035,7 +2050,7 @@ static void print_dovi_metadata(WriterContext *w, const AVDOVIMetadata *dovi)
print_str("mapping_idc_name", "polynomial");
print_int("poly_order", curve->poly_order[i]);
print_list_fmt("poly_coef", "%"PRIi64,
curve->poly_order[i] + 1,
curve->poly_order[i] + 1, 1,
curve->poly_coef[i][idx]);
break;
case AV_DOVI_MAPPING_MMR:
......@@ -2043,8 +2058,8 @@ static void print_dovi_metadata(WriterContext *w, const AVDOVIMetadata *dovi)
print_int("mmr_order", curve->mmr_order[i]);
print_int("mmr_constant", curve->mmr_constant[i]);
print_list_fmt("mmr_coef", "%"PRIi64,
curve->mmr_order[i] * 7,
curve->mmr_coef[i][0][idx]);
curve->mmr_order[i], 7,
curve->mmr_coef[i][idx][idx2]);
break;
default:
print_str("mapping_idc_name", "unknown");
......@@ -2082,15 +2097,15 @@ static void print_dovi_metadata(WriterContext *w, const AVDOVIMetadata *dovi)
print_int("dm_metadata_id", color->dm_metadata_id);
print_int("scene_refresh_flag", color->scene_refresh_flag);
print_list_fmt("ycc_to_rgb_matrix", "%d/%d",
FF_ARRAY_ELEMS(color->ycc_to_rgb_matrix),
FF_ARRAY_ELEMS(color->ycc_to_rgb_matrix), 1,
color->ycc_to_rgb_matrix[idx].num,
color->ycc_to_rgb_matrix[idx].den);
print_list_fmt("ycc_to_rgb_offset", "%d/%d",
FF_ARRAY_ELEMS(color->ycc_to_rgb_offset),
FF_ARRAY_ELEMS(color->ycc_to_rgb_offset), 1,
color->ycc_to_rgb_offset[idx].num,
color->ycc_to_rgb_offset[idx].den);
print_list_fmt("rgb_to_lms_matrix", "%d/%d",
FF_ARRAY_ELEMS(color->rgb_to_lms_matrix),
FF_ARRAY_ELEMS(color->rgb_to_lms_matrix), 1,
color->rgb_to_lms_matrix[idx].num,
color->rgb_to_lms_matrix[idx].den);
print_int("signal_eotf", color->signal_eotf);
......@@ -2276,6 +2291,17 @@ static void print_dynamic_hdr_vivid(WriterContext *w, const AVDynamicHDRVivid *m
}
}
static void print_ambient_viewing_environment(WriterContext *w,
const AVAmbientViewingEnvironment *env)
{
if (!env)
return;
print_q("ambient_illuminance", env->ambient_illuminance, '/');
print_q("ambient_light_x", env->ambient_light_x, '/');
print_q("ambient_light_y", env->ambient_light_y, '/');
}
static void print_pkt_side_data(WriterContext *w,
AVCodecParameters *par,
const AVPacketSideData *side_data,
......@@ -2293,8 +2319,11 @@ static void print_pkt_side_data(WriterContext *w,
writer_print_section_header(w, id_data);
print_str("side_data_type", name ? name : "unknown");
if (sd->type == AV_PKT_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
double rotation = av_display_rotation_get((int32_t *)sd->data);
if (isnan(rotation))
rotation = 0;
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
print_int("rotation", rotation);
} else if (sd->type == AV_PKT_DATA_STEREO3D) {
const AVStereo3D *stereo = (AVStereo3D *)sd->data;
print_str("type", av_stereo3d_type_name(stereo->type));
......@@ -2506,8 +2535,12 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
print_val("size", pkt->size, unit_byte_str);
if (pkt->pos != -1) print_fmt ("pos", "%"PRId64, pkt->pos);
else print_str_opt("pos", "N/A");
print_fmt("flags", "%c%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_',
pkt->flags & AV_PKT_FLAG_DISCARD ? 'D' : '_');
print_fmt("flags", "%c%c%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_',
pkt->flags & AV_PKT_FLAG_DISCARD ? 'D' : '_',
pkt->flags & AV_PKT_FLAG_CORRUPT ? 'C' : '_');
if (do_show_data)
writer_print_data(w, "data", pkt->data, pkt->size);
writer_print_data_hash(w, "data_hash", pkt->data, pkt->size);
if (pkt->side_data_elems) {
size_t size;
......@@ -2526,9 +2559,6 @@ static void show_packet(WriterContext *w, InputFile *ifile, AVPacket *pkt, int p
SECTION_ID_PACKET_SIDE_DATA);
}
if (do_show_data)
writer_print_data(w, "data", pkt->data, pkt->size);
writer_print_data_hash(w, "data_hash", pkt->data, pkt->size);
writer_print_section_footer(w);
av_bprint_finalize(&pbuf, NULL);
......@@ -2581,8 +2611,14 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
print_time("pkt_dts_time", frame->pkt_dts, &stream->time_base);
print_ts ("best_effort_timestamp", frame->best_effort_timestamp);
print_time("best_effort_timestamp_time", frame->best_effort_timestamp, &stream->time_base);
#if LIBAVUTIL_VERSION_MAJOR < 59
AV_NOWARN_DEPRECATED(
print_duration_ts ("pkt_duration", frame->pkt_duration);
print_duration_time("pkt_duration_time", frame->pkt_duration, &stream->time_base);
)
#endif
print_duration_ts ("duration", frame->duration);
print_duration_time("duration_time", frame->duration, &stream->time_base);
if (frame->pkt_pos != -1) print_fmt ("pkt_pos", "%"PRId64, frame->pkt_pos);
else print_str_opt("pkt_pos", "N/A");
if (frame->pkt_size != -1) print_val ("pkt_size", frame->pkt_size, unit_byte_str);
......@@ -2604,8 +2640,12 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
print_str_opt("sample_aspect_ratio", "N/A");
}
print_fmt("pict_type", "%c", av_get_picture_type_char(frame->pict_type));
#if LIBAVUTIL_VERSION_MAJOR < 59
AV_NOWARN_DEPRECATED(
print_int("coded_picture_number", frame->coded_picture_number);
print_int("display_picture_number", frame->display_picture_number);
)
#endif
print_int("interlaced_frame", frame->interlaced_frame);
print_int("top_field_first", frame->top_field_first);
print_int("repeat_pict", frame->repeat_pict);
......@@ -2644,8 +2684,11 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
name = av_frame_side_data_name(sd->type);
print_str("side_data_type", name ? name : "unknown");
if (sd->type == AV_FRAME_DATA_DISPLAYMATRIX && sd->size >= 9*4) {
double rotation = av_display_rotation_get((int32_t *)sd->data);
if (isnan(rotation))
rotation = 0;
writer_print_integers(w, "displaymatrix", sd->data, 9, " %11d", 3, 4, 1);
print_int("rotation", av_display_rotation_get((int32_t *)sd->data));
print_int("rotation", rotation);
} else if (sd->type == AV_FRAME_DATA_AFD && sd->size > 0) {
print_int("active_format", *sd->data);
} else if (sd->type == AV_FRAME_DATA_GOP_TIMECODE && sd->size >= 8) {
......@@ -2700,6 +2743,9 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
} else if (sd->type == AV_FRAME_DATA_DYNAMIC_HDR_VIVID) {
AVDynamicHDRVivid *metadata = (AVDynamicHDRVivid *)sd->data;
print_dynamic_hdr_vivid(w, metadata);
} else if (sd->type == AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT) {
print_ambient_viewing_environment(
w, (const AVAmbientViewingEnvironment *)sd->data);
}
writer_print_section_footer(w);
}
......@@ -2714,7 +2760,7 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
static av_always_inline int process_frame(WriterContext *w,
InputFile *ifile,
AVFrame *frame, AVPacket *pkt,
AVFrame *frame, const AVPacket *pkt,
int *packet_new)
{
AVFormatContext *fmt_ctx = ifile->fmt_ctx;
......@@ -2858,9 +2904,10 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
}
if (selected_streams[pkt->stream_index]) {
AVRational tb = ifile->streams[pkt->stream_index].st->time_base;
int64_t pts = pkt->pts != AV_NOPTS_VALUE ? pkt->pts : pkt->dts;
if (pkt->pts != AV_NOPTS_VALUE)
*cur_ts = av_rescale_q(pkt->pts, tb, AV_TIME_BASE_Q);
if (pts != AV_NOPTS_VALUE)
*cur_ts = av_rescale_q(pts, tb, AV_TIME_BASE_Q);
if (!has_start && *cur_ts != AV_NOPTS_VALUE) {
start = *cur_ts;
......@@ -2894,7 +2941,7 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
}
av_packet_unref(pkt);
//Flush remaining frames that are cached in the decoder
for (i = 0; i < fmt_ctx->nb_streams; i++) {
for (i = 0; i < ifile->nb_streams; i++) {
pkt->stream_index = i;
if (do_read_frames) {
while (process_frame(w, ifile, frame, pkt, &(int){1}) > 0);
......@@ -3052,6 +3099,8 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
}
print_int("bits_per_sample", av_get_bits_per_sample(par->codec_id));
print_int("initial_padding", par->initial_padding);
break;
case AVMEDIA_TYPE_SUBTITLE:
......@@ -3278,15 +3327,9 @@ static int show_format(WriterContext *w, InputFile *ifile)
static void show_error(WriterContext *w, int err)
{
char errbuf[128];
const char *errbuf_ptr = errbuf;
if (av_strerror(err, errbuf, sizeof(errbuf)) < 0)
errbuf_ptr = strerror(AVUNERROR(err));
writer_print_section_header(w, SECTION_ID_ERROR);
print_int("code", err);
print_str("string", errbuf_ptr);
print_str("string", av_err2str(err));
writer_print_section_footer(w);
}
......@@ -3299,10 +3342,8 @@ static int open_input_file(InputFile *ifile, const char *filename,
int scan_all_pmts_set = 0;
fmt_ctx = avformat_alloc_context();
if (!fmt_ctx) {
print_error(filename, AVERROR(ENOMEM));
exit_program(1);
}
if (!fmt_ctx)
report_and_exit(AVERROR(ENOMEM));
if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
......@@ -3320,7 +3361,7 @@ static int open_input_file(InputFile *ifile, const char *filename,
ifile->fmt_ctx = fmt_ctx;
if (scan_all_pmts_set)
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
while ((t = av_dict_get(format_opts, "", t, AV_DICT_IGNORE_SUFFIX)))
while ((t = av_dict_iterate(format_opts, t)))
av_log(NULL, AV_LOG_WARNING, "Option %s skipped - not known to demuxer.\n", t->key);
if (find_stream_info) {
......@@ -3718,7 +3759,7 @@ static void opt_input_file(void *optctx, const char *arg)
exit_program(1);
}
if (!strcmp(arg, "-"))
arg = "pipe:";
arg = "fd:";
input_filename = arg;
}
......@@ -3737,7 +3778,7 @@ static void opt_output_file(void *optctx, const char *arg)
exit_program(1);
}
if (!strcmp(arg, "-"))
arg = "pipe:";
arg = "fd:";
output_filename = arg;
}
......
/*
* This file is part of FFmpeg.
* Copyright (c) 2023 ARTHENICA LTD
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file is the modified version of objpool.c file living in ffmpeg source code under the fftools folder. We
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
* - fftools header names updated
*/
#include <stdint.h>
#include "libavcodec/packet.h"
#include "libavutil/common.h"
#include "libavutil/error.h"
#include "libavutil/frame.h"
#include "libavutil/mem.h"
#include "fftools_objpool.h"
/* A fixed-capacity pool of reusable objects. Up to 32 released objects are
 * cached for reuse in objpool_get(); any further releases free the object
 * outright. */
struct ObjPool {
    void *pool[32];          // cached, currently-unused objects
    unsigned int pool_count; // number of valid entries in pool[]

    ObjPoolCBAlloc alloc;    // allocates a brand-new object
    ObjPoolCBReset reset;    // resets an object to a clean, reusable state
    ObjPoolCBFree free;      // fully deallocates an object, NULLing the pointer
};
/* Create an empty object pool using the given allocate/reset/free callbacks.
 * Returns the new pool, or NULL on allocation failure. */
ObjPool *objpool_alloc(ObjPoolCBAlloc cb_alloc, ObjPoolCBReset cb_reset,
                       ObjPoolCBFree cb_free)
{
    ObjPool *pool = av_mallocz(sizeof(*pool));

    if (!pool)
        return NULL;

    pool->alloc = cb_alloc;
    pool->reset = cb_reset;
    pool->free  = cb_free;

    return pool;
}
/* Destroy the pool pointed to by *pop, freeing every cached object, and set
 * *pop to NULL. A NULL *pop is a no-op. */
void objpool_free(ObjPool **pop)
{
    ObjPool *pool = *pop;
    unsigned int i;

    if (!pool)
        return;

    for (i = 0; i < pool->pool_count; i++)
        pool->free(&pool->pool[i]);

    av_freep(pop);
}
/* Hand out an object: reuse a cached one when available, otherwise allocate
 * a fresh one via the alloc callback. Stores the object in *obj and returns
 * 0 on success, AVERROR(ENOMEM) when allocation fails. */
int objpool_get(ObjPool *op, void **obj)
{
    if (op->pool_count > 0) {
        op->pool_count--;
        *obj = op->pool[op->pool_count];
        op->pool[op->pool_count] = NULL;
    } else {
        *obj = op->alloc();
    }

    return *obj ? 0 : AVERROR(ENOMEM);
}
/* Return *obj to the pool: reset it and cache it for reuse, or free it when
 * the cache is already full. *obj is NULL on return; a NULL *obj is a no-op. */
void objpool_release(ObjPool *op, void **obj)
{
    if (!*obj)
        return;

    op->reset(*obj);

    if (op->pool_count >= FF_ARRAY_ELEMS(op->pool))
        op->free(obj);
    else
        op->pool[op->pool_count++] = *obj;

    *obj = NULL;
}
/* ObjPoolCBAlloc callback for AVPacket pools. */
static void *alloc_packet(void)
{
    return av_packet_alloc();
}
/* ObjPoolCBAlloc callback for AVFrame pools. */
static void *alloc_frame(void)
{
    return av_frame_alloc();
}
/* ObjPoolCBReset callback: unreference a packet so it can be reused. */
static void reset_packet(void *obj)
{
    av_packet_unref(obj);
}
/* ObjPoolCBReset callback: unreference a frame so it can be reused. */
static void reset_frame(void *obj)
{
    av_frame_unref(obj);
}
/* ObjPoolCBFree callback: free the AVPacket at *obj and NULL the pointer. */
static void free_packet(void **obj)
{
    AVPacket *pkt = *obj;

    *obj = NULL;
    av_packet_free(&pkt);
}
/* ObjPoolCBFree callback: free the AVFrame at *obj and NULL the pointer. */
static void free_frame(void **obj)
{
    AVFrame *frame = *obj;

    *obj = NULL;
    av_frame_free(&frame);
}
/* Convenience constructor for an AVPacket pool. */
ObjPool *objpool_alloc_packets(void)
{
    return objpool_alloc(alloc_packet, reset_packet, free_packet);
}
/* Convenience constructor for an AVFrame pool. */
ObjPool *objpool_alloc_frames(void)
{
    return objpool_alloc(alloc_frame, reset_frame, free_frame);
}
/*
* This file is part of FFmpeg.
* Copyright (c) 2023 ARTHENICA LTD
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file is the modified version of objpool.h file living in ffmpeg source code under the fftools folder. We
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
*/
#ifndef FFTOOLS_OBJPOOL_H
#define FFTOOLS_OBJPOOL_H

/* Opaque pool of reusable objects (see fftools_objpool.c). */
typedef struct ObjPool ObjPool;

/* Callback types: allocate a new object, reset one for reuse,
 * and free one (NULLing the caller's pointer). */
typedef void* (*ObjPoolCBAlloc)(void);
typedef void (*ObjPoolCBReset)(void *);
typedef void (*ObjPoolCBFree)(void **);

/* Destroy *op (freeing all cached objects) and set it to NULL. */
void objpool_free(ObjPool **op);
/* Create a pool with the given callbacks; returns NULL on failure. */
ObjPool *objpool_alloc(ObjPoolCBAlloc cb_alloc, ObjPoolCBReset cb_reset,
                       ObjPoolCBFree cb_free);
/* Convenience constructors for AVPacket / AVFrame pools. */
ObjPool *objpool_alloc_packets(void);
ObjPool *objpool_alloc_frames(void);

/* Fetch an object into *obj; returns 0 or AVERROR(ENOMEM). */
int objpool_get(ObjPool *op, void **obj);
/* Return *obj to the pool (or free it) and set *obj to NULL. */
void objpool_release(ObjPool *op, void **obj);

#endif // FFTOOLS_OBJPOOL_H
/*
* Option handlers shared between the tools.
* copyright (c) 2022 Taner Sener ( tanersener gmail com )
* Copyright (c) 2022 Taner Sener
* Copyright (c) 2023 ARTHENICA LTD
*
* This file is part of FFmpeg.
*
......@@ -24,6 +25,12 @@
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop the ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - time field in report_callback updated as double
*
* ffmpeg-kit changes by Taner Sener
*
* 09.2022
......@@ -88,7 +95,7 @@ static __thread FILE *report_file = NULL;
static __thread int report_file_level = AV_LOG_DEBUG;
extern void ffmpegkit_log_callback_function(void *ptr, int level, const char* format, va_list vargs);
extern void (*report_callback)(int, float, float, int64_t, int, double, double);
extern void (*report_callback)(int, float, float, int64_t, double, double, double);
extern __thread char *program_name;
int show_license(void *optctx, const char *opt, const char *arg)
......
/*
* Option handlers shared between the tools.
* copyright (c) 2022 Taner Sener ( tanersener gmail com )
* Copyright (c) 2022 Taner Sener
*
* This file is part of FFmpeg.
*
......
/*
* This file is part of FFmpeg.
* Copyright (c) 2023 ARTHENICA LTD
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file is the modified version of sync_queue.c file living in ffmpeg source code under the fftools folder. We
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
* - fftools header names updated
*/
#include <stdint.h>
#include <string.h>
#include "libavutil/avassert.h"
#include "libavutil/error.h"
#include "libavutil/fifo.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
#include "fftools_objpool.h"
#include "fftools_sync_queue.h"
/* Per-stream state inside a SyncQueue. */
typedef struct SyncQueueStream {
    /* buffered elements for this stream, in submission order */
    AVFifo          *fifo;
    /* timebase in which head_ts and buffered timestamps are expressed */
    AVRational       tb;

    /* stream head: largest timestamp seen */
    int64_t          head_ts;
    /* nonzero if this stream participates in choosing the queue head
     * (cf. sq_add_stream(): no other stream may run longer than a
     * limiting one) */
    int              limiting;
    /* no more frames will be sent for this stream */
    int              finished;

    /* number of elements submitted so far */
    uint64_t         frames_sent;
    /* cap on frames_sent; reaching it finishes the stream */
    uint64_t         frames_max;
} SyncQueueStream;
/* A queue that synchronizes multiple streams so no stream runs ahead of
 * the others by more than the buffering window allows. */
struct SyncQueue {
    /* whether elements are AVPackets or AVFrames */
    enum SyncQueueType type;

    /* no more frames will be sent for any stream */
    int finished;
    /* sync head: the stream with the _smallest_ head timestamp
     * this stream determines which frames can be output */
    int head_stream;
    /* the finished stream with the smallest finish timestamp or -1 */
    int head_finished_stream;

    // maximum buffering duration in microseconds
    int64_t buf_size_us;

    SyncQueueStream *streams;
    unsigned int  nb_streams;

    // pool of preallocated frames to avoid constant allocations
    ObjPool *pool;
};
/* Transfer the payload of src into dst, dispatching on the queue's
 * element type; ownership moves with it. */
static void frame_move(const SyncQueue *sq, SyncQueueFrame dst,
                       SyncQueueFrame src)
{
    switch (sq->type) {
    case SYNC_QUEUE_PACKETS:
        av_packet_move_ref(dst.p, src.p);
        break;
    default:
        av_frame_move_ref(dst.f, src.f);
        break;
    }
}
/* End timestamp (pts + duration) of a queued element, expressed in the
 * owning stream's timebase. */
static int64_t frame_ts(const SyncQueue *sq, SyncQueueFrame frame)
{
    if (sq->type == SYNC_QUEUE_PACKETS)
        return frame.p->pts + frame.p->duration;

    return frame.f->pts + frame.f->duration;
}
/* Whether the element carries no payload pointer; a null element is how
 * callers mark a stream as finished (cf. sq_send()). */
static int frame_null(const SyncQueue *sq, SyncQueueFrame frame)
{
    if (sq->type == SYNC_QUEUE_PACKETS)
        return frame.p == NULL;

    return frame.f == NULL;
}
/* Mark stream stream_idx as finished and propagate the consequences:
 * - it may become the new "head finished stream" (earliest finish point);
 * - any stream already at/past that finish point can never release more
 *   data, so it is finished too;
 * - if every stream is now finished, the whole queue is. */
static void finish_stream(SyncQueue *sq, unsigned int stream_idx)
{
    SyncQueueStream *st = &sq->streams[stream_idx];

    st->finished = 1;

    if (st->limiting && st->head_ts != AV_NOPTS_VALUE) {
        /* check if this stream is the new finished head */
        if (sq->head_finished_stream < 0 ||
            av_compare_ts(st->head_ts, st->tb,
                          sq->streams[sq->head_finished_stream].head_ts,
                          sq->streams[sq->head_finished_stream].tb) < 0) {
            sq->head_finished_stream = stream_idx;
        }

        /* mark as finished all streams that should no longer receive new frames,
         * due to them being ahead of some finished stream */
        /* NOTE: st is re-pointed at the head finished stream below */
        st = &sq->streams[sq->head_finished_stream];
        for (unsigned int i = 0; i < sq->nb_streams; i++) {
            SyncQueueStream *st1 = &sq->streams[i];
            if (st != st1 && st1->head_ts != AV_NOPTS_VALUE &&
                av_compare_ts(st->head_ts, st->tb, st1->head_ts, st1->tb) <= 0)
                st1->finished = 1;
        }
    }

    /* mark the whole queue as finished if all streams are finished */
    for (unsigned int i = 0; i < sq->nb_streams; i++) {
        if (!sq->streams[i].finished)
            return;
    }
    sq->finished = 1;
}
/* Recompute sq->head_stream: the limiting stream with the smallest head
 * timestamp. No-op until every limiting stream has seen at least one
 * timestamp. */
static void queue_head_update(SyncQueue *sq)
{
    if (sq->head_stream < 0) {
        /* wait for one timestamp in each stream before determining
         * the queue head */
        for (unsigned int i = 0; i < sq->nb_streams; i++) {
            SyncQueueStream *st = &sq->streams[i];
            if (st->limiting && st->head_ts == AV_NOPTS_VALUE)
                return;
        }

        // placeholder value, correct one will be found below
        sq->head_stream = 0;
    }

    /* linear scan for the smallest head timestamp among limiting streams */
    for (unsigned int i = 0; i < sq->nb_streams; i++) {
        SyncQueueStream *st_head  = &sq->streams[sq->head_stream];
        SyncQueueStream *st_other = &sq->streams[i];
        if (st_other->limiting && st_other->head_ts != AV_NOPTS_VALUE &&
            av_compare_ts(st_other->head_ts, st_other->tb,
                          st_head->head_ts, st_head->tb) < 0)
            sq->head_stream = i;
    }
}
/* Advance this stream's head timestamp to ts (monotonically - smaller or
 * AV_NOPTS_VALUE timestamps are ignored), then propagate: the stream may
 * become finished if it overtook a finished stream, and the overall queue
 * head may move. */
static void stream_update_ts(SyncQueue *sq, unsigned int stream_idx, int64_t ts)
{
    SyncQueueStream *st = &sq->streams[stream_idx];

    if (ts == AV_NOPTS_VALUE ||
        (st->head_ts != AV_NOPTS_VALUE && st->head_ts >= ts))
        return;

    st->head_ts = ts;

    /* if this stream is now ahead of some finished stream, then
     * this stream is also finished */
    if (sq->head_finished_stream >= 0 &&
        av_compare_ts(sq->streams[sq->head_finished_stream].head_ts,
                      sq->streams[sq->head_finished_stream].tb,
                      ts, st->tb) <= 0)
        finish_stream(sq, stream_idx);

    /* update the overall head timestamp if it could have changed */
    if (st->limiting &&
        (sq->head_stream < 0 || sq->head_stream == stream_idx))
        queue_head_update(sq);
}
/* If the queue for the given stream (or all streams when stream_idx=-1)
 * is overflowing, trigger a fake heartbeat on lagging streams.
 *
 * Overflow means the oldest buffered element trails the stream head by more
 * than buf_size_us; the fake timestamps let receive_for_stream() release it.
 *
 * @return 1 if heartbeat triggered, 0 otherwise
 */
static int overflow_heartbeat(SyncQueue *sq, int stream_idx)
{
    SyncQueueStream *st;
    SyncQueueFrame frame;
    int64_t tail_ts = AV_NOPTS_VALUE;

    /* if no stream specified, pick the one that is most ahead */
    if (stream_idx < 0) {
        int64_t ts = AV_NOPTS_VALUE;

        /* on the first candidate ts is still AV_NOPTS_VALUE, so the
         * short-circuit below never indexes streams[] with a negative
         * stream_idx; afterwards stream_idx tracks the current best */
        for (unsigned int i = 0; i < sq->nb_streams; i++) {
            st = &sq->streams[i];
            if (st->head_ts != AV_NOPTS_VALUE &&
                (ts == AV_NOPTS_VALUE ||
                 av_compare_ts(ts, sq->streams[stream_idx].tb,
                               st->head_ts, st->tb) < 0)) {
                ts = st->head_ts;
                stream_idx = i;
            }
        }
        /* no stream has a timestamp yet -> nothing to do */
        if (stream_idx < 0)
            return 0;
    }

    st = &sq->streams[stream_idx];

    /* get the chosen stream's tail timestamp: end ts of the oldest
     * buffered element that has one */
    for (size_t i = 0; tail_ts == AV_NOPTS_VALUE &&
                       av_fifo_peek(st->fifo, &frame, 1, i) >= 0; i++)
        tail_ts = frame_ts(sq, frame);

    /* overflow triggers when the tail is over specified duration behind the head */
    if (tail_ts == AV_NOPTS_VALUE || tail_ts >= st->head_ts ||
        av_rescale_q(st->head_ts - tail_ts, st->tb, AV_TIME_BASE_Q) < sq->buf_size_us)
        return 0;

    /* signal a fake timestamp for all streams that prevent tail_ts from being output */
    tail_ts++;
    for (unsigned int i = 0; i < sq->nb_streams; i++) {
        SyncQueueStream *st1 = &sq->streams[i];
        int64_t ts;

        if (st == st1 || st1->finished ||
            (st1->head_ts != AV_NOPTS_VALUE &&
             av_compare_ts(tail_ts, st->tb, st1->head_ts, st1->tb) <= 0))
            continue;

        ts = av_rescale_q(tail_ts, st->tb, st1->tb);
        /* never move a head timestamp backwards */
        if (st1->head_ts != AV_NOPTS_VALUE)
            ts = FFMAX(st1->head_ts + 1, ts);

        stream_update_ts(sq, i, ts);
    }

    return 1;
}
/* Submit one element for stream stream_idx.
 *
 * On success the queue takes ownership of frame's contents (moved into a
 * pooled element). A null frame marks the stream as finished.
 *
 * Returns 0 on success, AVERROR_EOF if the stream is already finished, or
 * another negative error code; on failure frame is handed back to the
 * caller. */
int sq_send(SyncQueue *sq, unsigned int stream_idx, SyncQueueFrame frame)
{
    SyncQueueStream *st;
    SyncQueueFrame dst;
    int64_t ts;
    int ret;

    av_assert0(stream_idx < sq->nb_streams);
    st = &sq->streams[stream_idx];

    /* a real timebase must have been set via sq_set_tb() by now */
    av_assert0(st->tb.num > 0 && st->tb.den > 0);

    if (frame_null(sq, frame)) {
        finish_stream(sq, stream_idx);
        return 0;
    }
    if (st->finished)
        return AVERROR_EOF;

    ret = objpool_get(sq->pool, (void**)&dst);
    if (ret < 0)
        return ret;

    frame_move(sq, dst, frame);

    ts = frame_ts(sq, dst);

    ret = av_fifo_write(st->fifo, &dst, 1);
    if (ret < 0) {
        /* undo the move so the caller keeps ownership */
        frame_move(sq, frame, dst);
        objpool_release(sq->pool, (void**)&dst);
        return ret;
    }

    stream_update_ts(sq, stream_idx, ts);

    st->frames_sent++;
    if (st->frames_sent >= st->frames_max)
        finish_stream(sq, stream_idx);

    return 0;
}
/* Try to output the oldest buffered element of stream stream_idx.
 *
 * An element may be released only if it does not end after the overall
 * queue head; elements without timestamps pass unconditionally.
 *
 * Returns 0 on success; AVERROR_EOF when nothing more will ever come for
 * this stream; AVERROR(EAGAIN) when more input is needed first. */
static int receive_for_stream(SyncQueue *sq, unsigned int stream_idx,
                              SyncQueueFrame frame)
{
    SyncQueueStream *st_head = sq->head_stream >= 0 ?
                               &sq->streams[sq->head_stream] : NULL;
    SyncQueueStream *st;

    av_assert0(stream_idx < sq->nb_streams);
    st = &sq->streams[stream_idx];

    if (av_fifo_can_read(st->fifo)) {
        SyncQueueFrame peek;
        int64_t ts;
        int cmp = 1;

        av_fifo_peek(st->fifo, &peek, 1, 0);
        ts = frame_ts(sq, peek);

        /* check if this stream's tail timestamp does not overtake
         * the overall queue head */
        if (ts != AV_NOPTS_VALUE && st_head)
            cmp = av_compare_ts(ts, st->tb, st_head->head_ts, st_head->tb);

        /* We can release frames that do not end after the queue head.
         * Frames with no timestamps are just passed through with no conditions.
         */
        if (cmp <= 0 || ts == AV_NOPTS_VALUE) {
            frame_move(sq, frame, peek);
            objpool_release(sq->pool, (void**)&peek);
            av_fifo_drain2(st->fifo, 1);
            return 0;
        }
    }

    return (sq->finished || (st->finished && !av_fifo_can_read(st->fifo))) ?
            AVERROR_EOF : AVERROR(EAGAIN);
}
/* Read one element: for a specific stream when stream_idx >= 0, otherwise
 * for the first stream that has output ready.
 *
 * Returns the index of the stream the element belongs to, AVERROR_EOF when
 * all (or the requested) streams are drained, AVERROR(EAGAIN) when more
 * input is needed, or another negative error code. */
static int receive_internal(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
{
    /* unsigned to match nb_streams and avoid a sign-compare below */
    unsigned int nb_eof = 0;
    int ret;

    /* read a frame for a specific stream */
    if (stream_idx >= 0) {
        ret = receive_for_stream(sq, stream_idx, frame);
        return (ret < 0) ? ret : stream_idx;
    }

    /* read a frame for any stream with available output */
    for (unsigned int i = 0; i < sq->nb_streams; i++) {
        ret = receive_for_stream(sq, i, frame);
        if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
            nb_eof += (ret == AVERROR_EOF);
            continue;
        }
        /* explicit cast: otherwise the ternary would promote a negative
         * ret to unsigned before converting back */
        return (ret < 0) ? ret : (int)i;
    }

    return (nb_eof == sq->nb_streams) ? AVERROR_EOF : AVERROR(EAGAIN);
}
/* Read one element from the queue (see sync_queue.h for the contract).
 * If the first attempt would block because the queue overflowed, a fake
 * heartbeat is fired on lagging streams and the read is retried once. */
int sq_receive(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
{
    int ret = receive_internal(sq, stream_idx, frame);

    if (ret != AVERROR(EAGAIN))
        return ret;

    /* the queue overflowed and triggered a fake heartbeat
     * for lagging streams - retry */
    if (overflow_heartbeat(sq, stream_idx))
        ret = receive_internal(sq, stream_idx, frame);

    return ret;
}
/* Add a stream to the queue (see sync_queue.h).
 *
 * Returns the new stream's index (>= 0) or AVERROR(ENOMEM).
 * NOTE: if the FIFO allocation fails after the streams array was grown,
 * nb_streams stays unchanged, so the half-initialized slot is never
 * observed by other code. */
int sq_add_stream(SyncQueue *sq, int limiting)
{
    SyncQueueStream *tmp, *st;

    tmp = av_realloc_array(sq->streams, sq->nb_streams + 1, sizeof(*sq->streams));
    if (!tmp)
        return AVERROR(ENOMEM);
    sq->streams = tmp;

    st = &sq->streams[sq->nb_streams];
    memset(st, 0, sizeof(*st));

    st->fifo = av_fifo_alloc2(1, sizeof(SyncQueueFrame), AV_FIFO_FLAG_AUTO_GROW);
    if (!st->fifo)
        return AVERROR(ENOMEM);

    /* we set a valid default, so that a pathological stream that never
     * receives even a real timebase (and no frames) won't stall all other
     * streams forever; cf. overflow_heartbeat() */
    st->tb = (AVRational){ 1, 1 };
    st->head_ts = AV_NOPTS_VALUE;
    st->frames_max = UINT64_MAX;
    st->limiting = limiting;

    return sq->nb_streams++;
}
/* Set the timebase for stream stream_idx, rescaling any head timestamp
 * already recorded. Only legal while the stream's FIFO is empty. */
void sq_set_tb(SyncQueue *sq, unsigned int stream_idx, AVRational tb)
{
    av_assert0(stream_idx < sq->nb_streams);

    SyncQueueStream *const st = &sq->streams[stream_idx];

    /* buffered elements would carry timestamps in the old timebase */
    av_assert0(!av_fifo_can_read(st->fifo));

    if (st->head_ts != AV_NOPTS_VALUE)
        st->head_ts = av_rescale_q(st->head_ts, st->tb, tb);
    st->tb = tb;
}
/* Cap how many elements stream stream_idx may send; if the cap is already
 * reached the stream is finished immediately. */
void sq_limit_frames(SyncQueue *sq, unsigned int stream_idx, uint64_t frames)
{
    av_assert0(stream_idx < sq->nb_streams);

    SyncQueueStream *const st = &sq->streams[stream_idx];

    st->frames_max = frames;
    if (st->frames_sent >= st->frames_max)
        finish_stream(sq, stream_idx);
}
/* Allocate a sync queue of the given element type with a buffering window
 * of buf_size_us microseconds. Returns NULL on allocation failure. */
SyncQueue *sq_alloc(enum SyncQueueType type, int64_t buf_size_us)
{
    SyncQueue *sq = av_mallocz(sizeof(*sq));

    if (!sq)
        return NULL;

    sq->type                 = type;
    sq->buf_size_us          = buf_size_us;

    /* head streams are unknown until timestamps arrive */
    sq->head_stream          = -1;
    sq->head_finished_stream = -1;

    if (type == SYNC_QUEUE_PACKETS)
        sq->pool = objpool_alloc_packets();
    else
        sq->pool = objpool_alloc_frames();

    if (!sq->pool) {
        av_freep(&sq);
        return NULL;
    }

    return sq;
}
/* Free the queue, returning every still-buffered element to the pool
 * first; *psq is reset to NULL. Safe to call with *psq == NULL. */
void sq_free(SyncQueue **psq)
{
    SyncQueue *sq = *psq;

    if (!sq)
        return;

    for (unsigned int i = 0; i < sq->nb_streams; i++) {
        SyncQueueStream *st = &sq->streams[i];
        SyncQueueFrame   f;

        /* drain leftover elements back into the pool before freeing */
        while (av_fifo_read(st->fifo, &f, 1) >= 0)
            objpool_release(sq->pool, (void**)&f);

        av_fifo_freep2(&st->fifo);
    }

    av_freep(&sq->streams);
    objpool_free(&sq->pool);

    av_freep(psq);
}
/*
* This file is part of FFmpeg.
* Copyright (c) 2023 ARTHENICA LTD
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file is the modified version of sync_queue.h file living in ffmpeg source code under the fftools folder. We
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
*/
#ifndef FFTOOLS_SYNC_QUEUE_H
#define FFTOOLS_SYNC_QUEUE_H

#include <stdint.h>

#include "libavcodec/packet.h"
#include "libavutil/frame.h"

/* Determines whether a queue carries AVPackets or AVFrames. */
enum SyncQueueType {
    SYNC_QUEUE_PACKETS,
    SYNC_QUEUE_FRAMES,
};

/* A queue element: which member is valid is given by the queue's type. */
typedef union SyncQueueFrame {
    AVFrame  *f;
    AVPacket *p;
} SyncQueueFrame;

/* convenience constructors for the two element kinds */
#define SQFRAME(frame) ((SyncQueueFrame){ .f = (frame) })
#define SQPKT(pkt)     ((SyncQueueFrame){ .p = (pkt) })

typedef struct SyncQueue SyncQueue;

/**
 * Allocate a sync queue of the given type.
 *
 * @param buf_size_us maximum duration that will be buffered in microseconds
 */
SyncQueue *sq_alloc(enum SyncQueueType type, int64_t buf_size_us);
void       sq_free(SyncQueue **sq);

/**
 * Add a new stream to the sync queue.
 *
 * @param limiting whether the stream is limiting, i.e. no other stream can be
 *                 longer than this one
 * @return
 * - a non-negative stream index on success
 * - a negative error code on error
 */
int sq_add_stream(SyncQueue *sq, int limiting);

/**
 * Set the timebase for the stream with index stream_idx. Should be called
 * before sending any frames for this stream.
 */
void sq_set_tb(SyncQueue *sq, unsigned int stream_idx, AVRational tb);

/**
 * Limit the number of output frames for stream with index stream_idx
 * to max_frames.
 */
void sq_limit_frames(SyncQueue *sq, unsigned int stream_idx,
                     uint64_t max_frames);

/**
 * Submit a frame for the stream with index stream_idx.
 *
 * On success, the sync queue takes ownership of the frame and will reset the
 * contents of the supplied frame. On failure, the frame remains owned by the
 * caller.
 *
 * Sending a frame with NULL contents marks the stream as finished.
 *
 * @return
 * - 0 on success
 * - AVERROR_EOF when no more frames should be submitted for this stream
 * - another negative error code on failure
 */
int sq_send(SyncQueue *sq, unsigned int stream_idx, SyncQueueFrame frame);

/**
 * Read a frame from the queue.
 *
 * @param stream_idx index of the stream to read a frame for. May be -1, then
 *                   try to read a frame from any stream that is ready for
 *                   output.
 * @param frame output frame will be written here on success. The frame is owned
 *              by the caller.
 *
 * @return
 * - a non-negative index of the stream to which the returned frame belongs
 * - AVERROR(EAGAIN) when more frames need to be submitted to the queue
 * - AVERROR_EOF when no more frames will be available for this stream (for any
 *   stream if stream_idx is -1)
 * - another negative error code on failure
 */
int sq_receive(SyncQueue *sq, int stream_idx, SyncQueueFrame frame);

#endif // FFTOOLS_SYNC_QUEUE_H
/*
* This file is part of FFmpeg.
* Copyright (c) 2023 ARTHENICA LTD
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file is the modified version of thread_queue.c file living in ffmpeg source code under the fftools folder. We
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
* - fftools header names updated
*/
#include <stdint.h>
#include <string.h>
#include "libavutil/avassert.h"
#include "libavutil/error.h"
#include "libavutil/fifo.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"
#include "libavutil/thread.h"
#include "fftools_objpool.h"
#include "fftools_thread_queue.h"
/* per-stream EOF flags kept in ThreadQueue.finished[] */
enum {
    FINISHED_SEND = (1 << 0),   // sender will submit no more items
    FINISHED_RECV = (1 << 1),   // receiver will accept no more items
};

/* one queued item together with the stream it belongs to */
typedef struct FifoElem {
    void        *obj;
    unsigned int stream_idx;
} FifoElem;

/* A bounded FIFO for passing objects between threads with per-stream EOF
 * bookkeeping. All fields are protected by lock; cond is used for both
 * "item available" and "space available" wakeups. */
struct ThreadQueue {
    int              *finished;
    unsigned int    nb_streams;

    AVFifo  *fifo;

    /* pool backing the queued objects; obj_move transfers contents
     * between caller data and pooled objects */
    ObjPool *obj_pool;
    void   (*obj_move)(void *dst, void *src);

    pthread_mutex_t lock;
    pthread_cond_t  cond;
};
/* Free the queue and everything it owns (queued items go back to the pool,
 * then the pool itself is freed); *ptq is reset to NULL.
 * Safe to call with *ptq == NULL. */
void tq_free(ThreadQueue **ptq)
{
    ThreadQueue *tq = *ptq;

    if (!tq)
        return;

    /* drain any items still queued back into the pool */
    if (tq->fifo) {
        FifoElem e;
        while (av_fifo_read(tq->fifo, &e, 1) >= 0)
            objpool_release(tq->obj_pool, &e.obj);
    }

    av_fifo_freep2(&tq->fifo);
    objpool_free(&tq->obj_pool);
    av_freep(&tq->finished);

    pthread_cond_destroy(&tq->cond);
    pthread_mutex_destroy(&tq->lock);

    av_freep(ptq);
}
/* Allocate a thread queue for nb_streams streams whose FIFO holds
 * queue_size items. On success the queue takes ownership of obj_pool.
 *
 * NOTE(review): if pthread_cond_init/pthread_mutex_init fails, obj_pool is
 * NOT freed here - confirm callers account for that; later failures go
 * through tq_free(), which does free it. */
ThreadQueue *tq_alloc(unsigned int nb_streams, size_t queue_size,
                      ObjPool *obj_pool, void (*obj_move)(void *dst, void *src))
{
    ThreadQueue *tq;
    int ret;

    tq = av_mallocz(sizeof(*tq));
    if (!tq)
        return NULL;

    ret = pthread_cond_init(&tq->cond, NULL);
    if (ret) {
        av_freep(&tq);
        return NULL;
    }

    ret = pthread_mutex_init(&tq->lock, NULL);
    if (ret) {
        /* undo the cond init; tq_free() cannot be used before both
         * primitives exist */
        pthread_cond_destroy(&tq->cond);
        av_freep(&tq);
        return NULL;
    }

    tq->finished = av_calloc(nb_streams, sizeof(*tq->finished));
    if (!tq->finished)
        goto fail;
    tq->nb_streams = nb_streams;

    tq->fifo = av_fifo_alloc2(queue_size, sizeof(FifoElem), 0);
    if (!tq->fifo)
        goto fail;

    tq->obj_pool = obj_pool;
    tq->obj_move = obj_move;

    return tq;

fail:
    tq_free(&tq);
    return NULL;
}
/* Send one item for stream stream_idx, blocking while the FIFO is full.
 *
 * Returns 0 on success; AVERROR(EINVAL) if this sender already finished
 * the stream; AVERROR_EOF if the receiver gave up on it; or an objpool
 * allocation error. On success the item's contents are moved out of data
 * via the obj_move callback. */
int tq_send(ThreadQueue *tq, unsigned int stream_idx, void *data)
{
    int *finished;
    int ret;

    av_assert0(stream_idx < tq->nb_streams);
    finished = &tq->finished[stream_idx];

    pthread_mutex_lock(&tq->lock);

    /* sending after tq_send_finish() on the same stream is a caller bug */
    if (*finished & FINISHED_SEND) {
        ret = AVERROR(EINVAL);
        goto finish;
    }

    /* wait for space, unless the receiver has abandoned this stream */
    while (!(*finished & FINISHED_RECV) && !av_fifo_can_write(tq->fifo))
        pthread_cond_wait(&tq->cond, &tq->lock);

    if (*finished & FINISHED_RECV) {
        ret = AVERROR_EOF;
        *finished |= FINISHED_SEND;
    } else {
        FifoElem elem = { .stream_idx = stream_idx };

        ret = objpool_get(tq->obj_pool, &elem.obj);
        if (ret < 0)
            goto finish;

        tq->obj_move(elem.obj, data);

        /* cannot fail - we waited for writable space while holding the lock */
        ret = av_fifo_write(tq->fifo, &elem, 1);
        av_assert0(ret >= 0);
        pthread_cond_broadcast(&tq->cond);
    }

finish:
    pthread_mutex_unlock(&tq->lock);

    return ret;
}
/* Pop one item; the caller must hold tq->lock.
 *
 * Returns 0 with *stream_idx set when an item was read; AVERROR_EOF with
 * *stream_idx set the first time a send-finished stream is seen empty (at
 * most once per stream), or with *stream_idx untouched when every stream
 * is fully drained; AVERROR(EAGAIN) when the FIFO is empty but some stream
 * may still produce data. */
static int receive_locked(ThreadQueue *tq, int *stream_idx,
                          void *data)
{
    FifoElem elem;
    unsigned int nb_finished = 0;

    if (av_fifo_read(tq->fifo, &elem, 1) >= 0) {
        tq->obj_move(data, elem.obj);
        objpool_release(tq->obj_pool, &elem.obj);
        *stream_idx = elem.stream_idx;
        return 0;
    }

    for (unsigned int i = 0; i < tq->nb_streams; i++) {
        if (!(tq->finished[i] & FINISHED_SEND))
            continue;

        /* return EOF to the consumer at most once for each stream */
        if (!(tq->finished[i] & FINISHED_RECV)) {
            tq->finished[i] |= FINISHED_RECV;
            *stream_idx = i;
            return AVERROR_EOF;
        }

        nb_finished++;
    }

    return nb_finished == tq->nb_streams ? AVERROR_EOF : AVERROR(EAGAIN);
}
/* Read the next item from the queue, blocking until one (or a per-stream
 * EOF) becomes available. See thread_queue.h for the full contract. */
int tq_receive(ThreadQueue *tq, int *stream_idx, void *data)
{
    int ret;

    *stream_idx = -1;

    pthread_mutex_lock(&tq->lock);

    for (;;) {
        ret = receive_locked(tq, stream_idx, data);
        if (ret != AVERROR(EAGAIN))
            break;
        /* nothing available yet - wait for a producer to signal */
        pthread_cond_wait(&tq->cond, &tq->lock);
    }

    /* a FIFO slot was freed - wake any senders blocked on a full queue */
    if (ret == 0)
        pthread_cond_broadcast(&tq->cond);

    pthread_mutex_unlock(&tq->lock);

    return ret;
}
/* Mark stream stream_idx finished from the sending side and wake any
 * waiting consumer. */
void tq_send_finish(ThreadQueue *tq, unsigned int stream_idx)
{
    av_assert0(stream_idx < tq->nb_streams);

    pthread_mutex_lock(&tq->lock);

    /* mark the stream as send-finished;
     * next time the consumer thread tries to read this stream it will get
     * an EOF and recv-finished flag will be set */
    tq->finished[stream_idx] |= FINISHED_SEND;
    pthread_cond_broadcast(&tq->cond);

    pthread_mutex_unlock(&tq->lock);
}
/* Mark stream stream_idx finished from the receiving side and wake any
 * waiting producer. */
void tq_receive_finish(ThreadQueue *tq, unsigned int stream_idx)
{
    av_assert0(stream_idx < tq->nb_streams);

    pthread_mutex_lock(&tq->lock);

    /* mark the stream as recv-finished;
     * next time the producer thread tries to send for this stream, it will
     * get an EOF and send-finished flag will be set */
    tq->finished[stream_idx] |= FINISHED_RECV;
    pthread_cond_broadcast(&tq->cond);

    pthread_mutex_unlock(&tq->lock);
}
/*
* This file is part of FFmpeg.
* Copyright (c) 2023 ARTHENICA LTD
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* This file is the modified version of thread_queue.h file living in ffmpeg source code under the fftools folder. We
* manually update it each time we depend on a new ffmpeg version. Below you can see the list of changes applied
* by us to develop ffmpeg-kit library.
*
* ffmpeg-kit changes by ARTHENICA LTD
*
* 07.2023
* --------------------------------------------------------
* - FFmpeg 6.0 changes migrated
*/
#ifndef FFTOOLS_THREAD_QUEUE_H
#define FFTOOLS_THREAD_QUEUE_H

#include <string.h>

#include "fftools_objpool.h"

typedef struct ThreadQueue ThreadQueue;

/**
 * Allocate a queue for sending data between threads.
 *
 * @param nb_streams number of streams for which a distinct EOF state is
 *                   maintained
 * @param queue_size number of items that can be stored in the queue without
 *                   blocking
 * @param obj_pool object pool that will be used to allocate items stored in the
 *                 queue; the pool becomes owned by the queue
 * @param obj_move callback that moves the contents between two data pointers
 */
ThreadQueue *tq_alloc(unsigned int nb_streams, size_t queue_size,
                      ObjPool *obj_pool, void (*obj_move)(void *dst, void *src));
void         tq_free(ThreadQueue **tq);

/**
 * Send an item for the given stream to the queue.
 *
 * @param data the item to send, its contents will be moved using the callback
 *             provided to tq_alloc(); on failure the item will be left
 *             untouched
 * @return
 * - 0 the item was successfully sent
 * - AVERROR(ENOMEM) could not allocate an item for writing to the FIFO
 * - AVERROR(EINVAL) the sending side has previously been marked as finished
 * - AVERROR_EOF the receiving side has marked the given stream as finished
 */
int tq_send(ThreadQueue *tq, unsigned int stream_idx, void *data);

/**
 * Mark the given stream finished from the sending side.
 */
void tq_send_finish(ThreadQueue *tq, unsigned int stream_idx);

/**
 * Read the next item from the queue.
 *
 * @param stream_idx the index of the stream that was processed or -1 will be
 *                   written here
 * @param data the data item will be written here on success using the
 *             callback provided to tq_alloc()
 * @return
 * - 0 a data item was successfully read; *stream_idx contains a non-negative
 *   stream index
 * - AVERROR_EOF When *stream_idx is non-negative, this signals that the sending
 *   side has marked the given stream as finished. This will happen at most once
 *   for each stream. When *stream_idx is -1, all streams are done.
 */
int tq_receive(ThreadQueue *tq, int *stream_idx, void *data);

/**
 * Mark the given stream finished from the receiving side.
 */
void tq_receive_finish(ThreadQueue *tq, unsigned int stream_idx);

#endif // FFTOOLS_THREAD_QUEUE_H
......@@ -144,8 +144,6 @@ public class FFmpegKitConfig {
NativeLoader.loadFFmpegKit(nativeFFmpegTriedAndFailed);
android.util.Log.i(FFmpegKitConfig.TAG, String.format("Loaded ffmpeg-kit-%s-%s-%s-%s.", NativeLoader.loadPackageName(), NativeLoader.loadAbi(), NativeLoader.loadVersion(), NativeLoader.loadBuildDate()));
uniqueIdGenerator = new AtomicInteger(1);
/* NATIVE LOG LEVEL IS RECEIVED ONLY ON STARTUP */
......@@ -175,7 +173,7 @@ public class FFmpegKitConfig {
safFileDescriptorMap = new SparseArray<>();
globalLogRedirectionStrategy = LogRedirectionStrategy.PRINT_LOGS_WHEN_NO_CALLBACKS_DEFINED;
NativeLoader.enableRedirection();
android.util.Log.i(FFmpegKitConfig.TAG, String.format("Loaded ffmpeg-kit-%s-%s-%s-%s.", NativeLoader.loadPackageName(), NativeLoader.loadAbi(), NativeLoader.loadVersion(), NativeLoader.loadBuildDate()));
}
/**
......@@ -336,7 +334,7 @@ public class FFmpegKitConfig {
*/
private static void statistics(final long sessionId, final int videoFrameNumber,
final float videoFps, final float videoQuality, final long size,
final int time, final double bitrate, final double speed) {
final double time, final double bitrate, final double speed) {
final Statistics statistics = new Statistics(sessionId, videoFrameNumber, videoFps, videoQuality, size, time, bitrate, speed);
final Session session = getSession(sessionId);
......
......@@ -36,7 +36,7 @@ public class NativeLoader {
static final String[] FFMPEG_LIBRARIES = {"avutil", "swscale", "swresample", "avcodec", "avformat", "avfilter", "avdevice"};
static final String[] LIBRARIES_LINKED_WITH_CXX = {"chromaprint", "openh264", "rubberband", "snappy", "srt", "tesseract", "x265", "zimg"};
static final String[] LIBRARIES_LINKED_WITH_CXX = {"chromaprint", "openh264", "rubberband", "snappy", "srt", "tesseract", "x265", "zimg", "libilbc"};
static boolean isTestModeDisabled() {
return (System.getProperty("enable.ffmpeg.kit.test.mode") == null);
......@@ -85,7 +85,7 @@ public class NativeLoader {
}
static String loadVersion() {
final String version = "5.1";
final String version = "6.0";
if (isTestModeDisabled()) {
return FFmpegKitConfig.getVersion();
......
......@@ -28,11 +28,11 @@ public class Statistics {
private float videoFps;
private float videoQuality;
private long size;
private int time;
private double time;
private double bitrate;
private double speed;
public Statistics(final long sessionId, final int videoFrameNumber, final float videoFps, final float videoQuality, final long size, final int time, final double bitrate, final double speed) {
public Statistics(final long sessionId, final int videoFrameNumber, final float videoFps, final float videoQuality, final long size, final double time, final double bitrate, final double speed) {
this.sessionId = sessionId;
this.videoFrameNumber = videoFrameNumber;
this.videoFps = videoFps;
......@@ -83,11 +83,11 @@ public class Statistics {
this.size = size;
}
public int getTime() {
public double getTime() {
return time;
}
public void setTime(int time) {
public void setTime(double time) {
this.time = time;
}
......
......@@ -69,7 +69,7 @@ include $(BUILD_SHARED_LIBRARY)
$(call import-module, cpu-features)
MY_SRC_FILES := ffmpegkit.c ffprobekit.c ffmpegkit_exception.c fftools_cmdutils.c fftools_ffmpeg.c fftools_ffprobe.c fftools_ffmpeg_mux.c fftools_ffmpeg_opt.c fftools_opt_common.c fftools_ffmpeg_hw.c fftools_ffmpeg_filter.c
MY_SRC_FILES := ffmpegkit.c ffprobekit.c ffmpegkit_exception.c fftools_cmdutils.c fftools_ffmpeg.c fftools_ffprobe.c fftools_ffmpeg_mux.c fftools_ffmpeg_mux_init.c fftools_ffmpeg_demux.c fftools_ffmpeg_opt.c fftools_opt_common.c fftools_ffmpeg_hw.c fftools_ffmpeg_filter.c fftools_objpool.c fftools_sync_queue.c fftools_thread_queue.c
ifeq ($(TARGET_PLATFORM),android-16)
MY_SRC_FILES += android_lts_support.c
......
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 后发表评论