aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    dec05eba <dec05eba@protonmail.com>    2022-11-24 20:43:25 +0100
committer dec05eba <dec05eba@protonmail.com>    2022-11-24 20:43:25 +0100
commit 397d3cb91968fdd7f9a4cf2ac74b4d8a4e5a208e (patch)
tree   926eb92879830ff47564b95728a0e1a22dcf1162
parent 152ae1b7b4ae5681cddfa8fd208a84ebd3dba235 (diff)
Make -c optional, select container format from file extension by default
-rw-r--r--  src/main.cpp | 73
1 file changed, 41 insertions(+), 32 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 85e25d2..aac777e 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -932,12 +932,12 @@ static void close_video(AVStream *video_stream, AVFrame *frame) {
}
static void usage() {
- fprintf(stderr, "usage: gpu-screen-recorder -w <window_id> -c <container_format> -f <fps> [-a <audio_input>...] [-q <quality>] [-r <replay_buffer_size_sec>] [-o <output_file>]\n");
+ fprintf(stderr, "usage: gpu-screen-recorder -w <window_id> [-c <container_format>] -f <fps> [-a <audio_input>...] [-q <quality>] [-r <replay_buffer_size_sec>] [-o <output_file>]\n");
fprintf(stderr, "OPTIONS:\n");
fprintf(stderr, " -w Window to record or a display, \"screen\" or \"screen-direct\". The display is the display name in xrandr and if \"screen\" or \"screen-direct\" is selected then all displays are recorded and they are recorded in h265 (aka hevc)."
"\"screen-direct\" skips one texture copy for fullscreen applications so it may lead to better performance and it works with VRR monitors when recording fullscreen application but may break some applications, such as mpv in fullscreen mode. Recording a display requires a gpu with NvFBC support.\n");
fprintf(stderr, " -s The size (area) to record at in the format WxH, for example 1920x1080. Usually you want to set this to the size of the window. Optional, by default the size of the window (which is passed to -w). This option is only supported when recording a window, not a screen/monitor.\n");
- fprintf(stderr, " -c Container format for output file, for example mp4, or flv.\n");
+ fprintf(stderr, " -c Container format for output file, for example mp4, or flv. Only required if no output file is specified or if recording in replay buffer mode. If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n");
fprintf(stderr, " -f Framerate to record at.\n");
fprintf(stderr, " -a Audio device to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device. A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\". Optional, no audio track is added by default.\n");
fprintf(stderr, " -q Video quality. Should be either 'medium', 'high', 'very_high' or 'ultra'. 'high' is the recommended option when live streaming or when you have a slower harddrive. Optional, set to 'very_high' be default.\n");
@@ -1032,7 +1032,7 @@ static std::future<void> save_replay_thread;
static std::vector<AVPacket> save_replay_packets;
static std::string save_replay_output_filepath;
-static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, const std::deque<AVPacket> &frame_data_queue, bool frames_erased, std::string output_dir, std::string container_format, std::mutex &write_output_mutex) {
+static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, const std::deque<AVPacket> &frame_data_queue, bool frames_erased, std::string output_dir, const char *container_format, const std::string &file_extension, std::mutex &write_output_mutex) {
if(save_replay_thread.valid())
return;
@@ -1075,11 +1075,10 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
}
}
- save_replay_output_filepath = output_dir + "/Replay_" + get_date_str() + "." + container_format;
+ save_replay_output_filepath = output_dir + "/Replay_" + get_date_str() + "." + file_extension;
save_replay_thread = std::async(std::launch::async, [video_stream_index, container_format, start_index, video_pts_offset, audio_pts_offset, video_codec_context, &audio_tracks]() mutable {
AVFormatContext *av_format_context;
- // The output format is automatically guessed from the file extension
- avformat_alloc_output_context2(&av_format_context, nullptr, container_format.c_str(), nullptr);
+ avformat_alloc_output_context2(&av_format_context, nullptr, container_format, nullptr);
av_format_context->flags |= AVFMT_FLAG_GENPTS;
av_format_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
@@ -1174,7 +1173,7 @@ int main(int argc, char **argv) {
std::map<std::string, Arg> args = {
{ "-w", Arg { {}, false, false } },
//{ "-s", Arg { nullptr, true } },
- { "-c", Arg { {}, false, false } },
+ { "-c", Arg { {}, true, false } },
{ "-f", Arg { {}, false, false } },
{ "-s", Arg { {}, true, false } },
{ "-a", Arg { {}, true, true } },
@@ -1397,6 +1396,11 @@ int main(int argc, char **argv) {
const char *filename = args["-o"].value();
if(filename) {
if(replay_buffer_size_secs != -1) {
+ if(!container_format) {
+ fprintf(stderr, "Error: option -c is required when using option -r\n");
+ usage();
+ }
+
struct stat buf;
if(stat(filename, &buf) == -1 || !S_ISDIR(buf.st_mode)) {
fprintf(stderr, "Error: directory \"%s\" does not exist or is not a directory\n", filename);
@@ -1407,7 +1411,12 @@ int main(int argc, char **argv) {
if(replay_buffer_size_secs == -1) {
filename = "/dev/stdout";
} else {
- fprintf(stderr, "Option -o is required when using option -r\n");
+ fprintf(stderr, "Error: Option -o is required when using option -r\n");
+ usage();
+ }
+
+ if(!container_format) {
+ fprintf(stderr, "Error: option -c is required when not using option -o\n");
usage();
}
}
@@ -1494,6 +1503,25 @@ int main(int argc, char **argv) {
if(!gl_loaded)
gl.unload();
+ AVFormatContext *av_format_context;
+ // The output format is automatically guessed by the file extension
+ avformat_alloc_output_context2(&av_format_context, nullptr, container_format, filename);
+ if (!av_format_context) {
+ fprintf(stderr, "Error: Failed to deduce container format from file extension\n");
+ return 1;
+ }
+
+ av_format_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+ av_format_context->flags |= AVFMT_FLAG_GENPTS;
+ const AVOutputFormat *output_format = av_format_context->oformat;
+
+ std::string file_extension = output_format->extensions;
+ {
+ size_t comma_index = file_extension.find(',');
+ if(comma_index != std::string::npos)
+ file_extension = file_extension.substr(0, comma_index);
+ }
+
if(strcmp(codec_to_use, "auto") == 0) {
const AVCodec *h265_codec = find_h265_encoder();
@@ -1516,7 +1544,7 @@ int main(int argc, char **argv) {
}
//bool use_hevc = strcmp(window_str, "screen") == 0 || strcmp(window_str, "screen-direct") == 0;
- if(video_codec != VideoCodec::H264 && strcmp(container_format, "flv") == 0) {
+ if(video_codec != VideoCodec::H264 && strcmp(file_extension.c_str(), "flv") == 0) {
video_codec = VideoCodec::H264;
fprintf(stderr, "Warning: h265 is not compatible with flv, falling back to h264 instead.\n");
}
@@ -1536,22 +1564,6 @@ int main(int argc, char **argv) {
exit(2);
}
- // Video start
- AVFormatContext *av_format_context;
- // The output format is automatically guessed by the file extension
- avformat_alloc_output_context2(&av_format_context, nullptr, container_format,
- nullptr);
- if (!av_format_context) {
- fprintf(
- stderr,
- "Error: Failed to deduce output format from file extension\n");
- return 1;
- }
-
- av_format_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
- av_format_context->flags |= AVFMT_FLAG_GENPTS;
- const AVOutputFormat *output_format = av_format_context->oformat;
-
const bool is_livestream = is_livestream_path(filename);
// (Some?) livestreaming services require at least one audio track to work.
// If not audio is provided then create one silent audio track.
@@ -1654,7 +1666,6 @@ int main(int argc, char **argv) {
double window_resize_timer = start_time;
bool window_resized = false;
int fps_counter = 0;
- int current_fps = 30;
AVFrame *frame = av_frame_alloc();
if (!frame) {
@@ -1912,7 +1923,6 @@ int main(int argc, char **argv) {
if (elapsed >= 1.0) {
fprintf(stderr, "update fps: %d\n", fps_counter);
start_time = time_now;
- current_fps = fps_counter;
fps_counter = 0;
}
@@ -2042,15 +2052,12 @@ int main(int argc, char **argv) {
if(save_replay_thread.valid() && save_replay_thread.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
save_replay_thread.get();
puts(save_replay_output_filepath.c_str());
- for(size_t i = 0; i < save_replay_packets.size(); ++i) {
- av_packet_unref(&save_replay_packets[i]);
- }
save_replay_packets.clear();
}
if(save_replay == 1 && !save_replay_thread.valid() && replay_buffer_size_secs != -1) {
save_replay = 0;
- save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, write_output_mutex);
+ save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex);
}
// av_frame_free(&frame);
@@ -2063,8 +2070,10 @@ int main(int argc, char **argv) {
running = 0;
- if(save_replay_thread.valid())
+ if(save_replay_thread.valid()) {
save_replay_thread.get();
+ puts(save_replay_output_filepath.c_str());
+ }
for(AudioTrack &audio_track : audio_tracks) {
audio_track.thread.join();