-rw-r--r--  README.md                        2
-rw-r--r--  TODO                             1
-rw-r--r--  extra/gpu-screen-recorder.env    2
-rw-r--r--  src/main.cpp                    43
4 files changed, 31 insertions, 17 deletions
diff --git a/README.md b/README.md
index 52cb1ee..508460b 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ This software works with x11 and wayland, but when using Wayland then only monit
4) FLAC audio codec is disabled at the moment because of temporary issues.
### AMD/Intel/Wayland root permission
When recording a window under AMD/Intel no special user permission is required, however when recording a monitor (or when using wayland) the program needs root permission (to access KMS).\
-To make this safer, the part that needs root access has been moved to its own executable (to make it as small as possible).\
+This is safe in GPU Screen Recorder because the part that needs root access has been moved into its own small program that does only one thing.\
For you as a user this only means that if you installed GPU Screen Recorder as a flatpak then a prompt asking for root password will show up when you start recording.
# Performance
On a system with a i5 4690k CPU and a GTX 1080 GPU:\
diff --git a/TODO b/TODO
index 39c8c54..7301a03 100644
--- a/TODO
+++ b/TODO
@@ -133,6 +133,7 @@ Add 10-bit capture option. This is good because it reduces banding and quality i
Enable b-frames.
Support vfr matching games exact fps all the time. On x11 use damage tracking, on wayland? maybe there is drm plane damage tracking. But that may not be accurate as the compositor may update it every monitor hz anyways. On wayland maybe only support it for desktop portal + pipewire capture.
+ Another method to track damage that works regardless of the display server would be to do a diff between frames with a shader.
Support selecting which gpu to use. This can be done in egl with eglQueryDevicesEXT and then eglGetPlatformDisplayEXT. This will automatically work on AMD and Intel as vaapi uses the same device. On nvidia we need to use eglQueryDeviceAttribEXT with EGL_CUDA_DEVICE_NV.
Maybe on glx (nvidia x11 nvfbc) we need to use __NV_PRIME_RENDER_OFFLOAD_PROVIDER and __GLX_VENDOR_LIBRARY_NAME instead.
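The GPU-selection TODO above names the relevant EGL entry points. Below is a minimal sketch of how that enumeration could look, assuming the EGL_EXT_device_enumeration, EGL_EXT_platform_device and EGL_NV_device_cuda extensions are available; the function name and the device-index handling are illustrative, not code from this repository.

    #include <EGL/egl.h>
    #include <EGL/eglext.h>
    #include <stdio.h>

    // Sketch: enumerate EGL devices and create a display for a chosen one.
    // Requires EGL_EXT_device_enumeration + EGL_EXT_platform_device.
    static EGLDisplay get_display_for_device_index(int wanted_index) {
        PFNEGLQUERYDEVICESEXTPROC eglQueryDevicesEXT =
            (PFNEGLQUERYDEVICESEXTPROC)eglGetProcAddress("eglQueryDevicesEXT");
        PFNEGLGETPLATFORMDISPLAYEXTPROC eglGetPlatformDisplayEXT =
            (PFNEGLGETPLATFORMDISPLAYEXTPROC)eglGetProcAddress("eglGetPlatformDisplayEXT");
        PFNEGLQUERYDEVICEATTRIBEXTPROC eglQueryDeviceAttribEXT =
            (PFNEGLQUERYDEVICEATTRIBEXTPROC)eglGetProcAddress("eglQueryDeviceAttribEXT");
        if(!eglQueryDevicesEXT || !eglGetPlatformDisplayEXT || !eglQueryDeviceAttribEXT)
            return EGL_NO_DISPLAY;

        EGLDeviceEXT devices[32];
        EGLint num_devices = 0;
        if(!eglQueryDevicesEXT(32, devices, &num_devices) || num_devices == 0)
            return EGL_NO_DISPLAY;
        if(wanted_index < 0 || wanted_index >= num_devices)
            return EGL_NO_DISPLAY;

        // On nvidia the CUDA device index can be queried per EGL device
        // (EGL_NV_device_cuda), which is what the TODO refers to. The call
        // simply fails on non-nvidia drivers.
        EGLAttrib cuda_device = -1;
        eglQueryDeviceAttribEXT(devices[wanted_index], EGL_CUDA_DEVICE_NV, &cuda_device);
        fprintf(stderr, "egl device %d, cuda device: %d\n", wanted_index, (int)cuda_device);

        return eglGetPlatformDisplayEXT(EGL_PLATFORM_DEVICE_EXT, devices[wanted_index], NULL);
    }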
diff --git a/extra/gpu-screen-recorder.env b/extra/gpu-screen-recorder.env
index e776bd6..ce9f223 100644
--- a/extra/gpu-screen-recorder.env
+++ b/extra/gpu-screen-recorder.env
@@ -1,7 +1,7 @@
WINDOW=screen
CONTAINER=mp4
QUALITY=very_high
-CODEC=hevc
+CODEC=h264
AUDIO_CODEC=opus
AUDIO_DEVICE=alsa_output.pci-0000_0a_00.4.iec958-stereo.monitor
SECONDARY_AUDIO_DEVICE=alsa_input.some-mic.mono-fallback
diff --git a/src/main.cpp b/src/main.cpp
index bd4be62..1d479fe 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -328,7 +328,7 @@ static AVCodecContext* create_audio_codec_context(int fps, AudioCodec audio_code
static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
VideoQuality video_quality,
- int fps, const AVCodec *codec, bool is_livestream, gsr_gpu_vendor vendor, FramerateMode framerate_mode,
+ int fps, const AVCodec *codec, bool low_latency, gsr_gpu_vendor vendor, FramerateMode framerate_mode,
bool hdr, gsr_color_range color_range, float keyint) {
AVCodecContext *codec_context = avcodec_alloc_context3(codec);
@@ -347,14 +347,14 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
codec_context->framerate.den = 1;
codec_context->sample_aspect_ratio.num = 0;
codec_context->sample_aspect_ratio.den = 0;
- // High values reduce file size but increases time it takes to seek
- if(is_livestream) {
+ if(low_latency) {
codec_context->flags |= (AV_CODEC_FLAG_CLOSED_GOP | AV_CODEC_FLAG_LOW_DELAY);
codec_context->flags2 |= AV_CODEC_FLAG2_FAST;
//codec_context->gop_size = std::numeric_limits<int>::max();
//codec_context->keyint_min = std::numeric_limits<int>::max();
codec_context->gop_size = fps * keyint;
} else {
+ // High values reduce file size but increase the time it takes to seek
codec_context->gop_size = fps * keyint;
}
codec_context->max_b_frames = 0;
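A note on the gop_size expression kept in both branches above: keyint appears to be a keyframe interval in seconds (the float passed via -keyint), so multiplying it by fps gives the GOP length in frames. A worked illustration, not code from this diff:

    // Illustration only: how keyint (seconds) maps to gop_size (frames).
    // e.g. fps = 60, keyint = 2.0f -> gop_size = 120, i.e. one keyframe
    // every 2 seconds regardless of framerate.
    const int fps = 60;
    const float keyint = 2.0f;
    const int gop_size = fps * keyint; // = 120 frames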
@@ -824,7 +824,7 @@ static void open_video(AVCodecContext *codec_context, VideoQuality video_quality
static void usage_header() {
const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
- fprintf(stderr, "usage: %s -w <window_id|monitor|focused> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>] [-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|hevc_hdr|av1|av1_hdr] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-cr limited|full] [-v yes|no] [-h|--help] [-o <output_file>] [-mf yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>]\n", program_name);
+ fprintf(stderr, "usage: %s -w <window_id|monitor|focused> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>] [-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|hevc_hdr|av1|av1_hdr] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-cr limited|full] [-mf yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>] [-o <output_file>] [-v yes|no] [-h|--help]\n", program_name);
}
static void usage_full() {
@@ -887,11 +887,6 @@ static void usage_full() {
fprintf(stderr, " Limited color range means that colors are in range 16-235 (4112-60395 for hdr) while full color range means that colors are in range 0-255 (0-65535 for hdr).\n");
fprintf(stderr, " Note that some buggy video players (such as vlc) are unable to correctly display videos in full color range.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -v Prints per second, fps updates. Optional, set to 'yes' by default.\n");
- fprintf(stderr, "\n");
- fprintf(stderr, " -h, --help\n");
- fprintf(stderr, " Show this help.\n");
- fprintf(stderr, "\n");
fprintf(stderr, " -mf Organise replays in folders based on the current date.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -sc Run a script on the saved video file (non-blocking). The first argument to the script is the filepath to the saved video file and the second argument is the recording type (either \"regular\" or \"replay\").\n");
@@ -913,6 +908,11 @@ static void usage_full() {
fprintf(stderr, " In replay mode this has to be a directory instead of a file.\n");
fprintf(stderr, " The directory to the file is created (recursively) if it doesn't already exist.\n");
fprintf(stderr, "\n");
+ fprintf(stderr, " -v Prints per second, fps updates. Optional, set to 'yes' by default.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " -h, --help\n");
+ fprintf(stderr, " Show this help.\n");
+ fprintf(stderr, "\n");
fprintf(stderr, "NOTES:\n");
fprintf(stderr, " Send signal SIGINT to gpu-screen-recorder (Ctrl+C, or killall -SIGINT gpu-screen-recorder) to stop and save the recording. When in replay mode this stops recording without saving.\n");
fprintf(stderr, " Send signal SIGUSR1 to gpu-screen-recorder (killall -SIGUSR1 gpu-screen-recorder) to save a replay (when in replay mode).\n");
@@ -1298,6 +1298,14 @@ static bool is_livestream_path(const char *str) {
return true;
else if((len >= 7 && memcmp(str, "rtmp://", 7) == 0) || (len >= 8 && memcmp(str, "rtmps://", 8) == 0))
return true;
+ else if((len >= 7 && memcmp(str, "rtsp://", 7) == 0))
+ return true;
+ else if((len >= 6 && memcmp(str, "srt://", 6) == 0))
+ return true;
+ else if((len >= 6 && memcmp(str, "tcp://", 6) == 0))
+ return true;
+ else if((len >= 6 && memcmp(str, "udp://", 6) == 0))
+ return true;
else
return false;
}
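The prefix checks added above (rtsp://, srt://, tcp:// and udp://, alongside the existing rtmp:// and rtmps://) could equivalently be written table-driven. A hedged sketch of such a helper; the earlier checks in is_livestream_path that fall outside this hunk are deliberately not reproduced here, and the helper name is hypothetical:

    #include <cstring>

    // Sketch: same behaviour as the memcmp prefix chain above, expressed as
    // a lookup table over the streaming URL schemes handled in this diff.
    static bool has_livestream_prefix(const char *str) {
        static const char *prefixes[] = {
            "rtmp://", "rtmps://", "rtsp://", "srt://", "tcp://", "udp://"
        };
        const size_t len = strlen(str);
        for(const char *prefix : prefixes) {
            const size_t prefix_len = strlen(prefix);
            if(len >= prefix_len && memcmp(str, prefix, prefix_len) == 0)
                return true;
        }
        return false;
    }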
@@ -1736,7 +1744,7 @@ int main(int argc, char **argv) {
}
}
- VideoCodec video_codec = VideoCodec::HEVC;
+ VideoCodec video_codec = VideoCodec::H264;
const char *video_codec_to_use = args["-k"].value();
if(!video_codec_to_use)
video_codec_to_use = "auto";
@@ -2122,14 +2130,18 @@ int main(int argc, char **argv) {
}
}
+ const bool is_output_piped = strcmp(filename, "/dev/stdout") == 0;
+
AVFormatContext *av_format_context;
// The output format is automatically guessed by the file extension
avformat_alloc_output_context2(&av_format_context, nullptr, container_format, filename);
if (!av_format_context) {
- if(container_format)
+ if(container_format) {
fprintf(stderr, "Error: Container format '%s' (argument -c) is not valid\n", container_format);
- else
- fprintf(stderr, "Error: Failed to deduce container format from file extension\n");
+ } else {
+ fprintf(stderr, "Error: Failed to deduce container format from file extension. Use the '-c' option to specify container format\n");
+ usage();
+ }
_exit(1);
}
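For background on the error path changed above: avformat_alloc_output_context2 guesses the muxer from the file name when container_format is null, and fails when neither an explicit format nor the extension gives a match. A minimal sketch of the same deduction using FFmpeg's av_guess_format; the helper name and messages are illustrative, not this project's code:

    extern "C" {
    #include <libavformat/avformat.h>
    }
    #include <cstdio>

    // Sketch: check whether FFmpeg can find a muxer either from an explicit
    // container name (the -c argument) or from the output file's extension.
    static bool container_is_known(const char *container_format, const char *filename) {
        const AVOutputFormat *output_format = av_guess_format(container_format, filename, nullptr);
        if(!output_format) {
            if(container_format)
                fprintf(stderr, "Unknown container format: %s\n", container_format);
            else
                fprintf(stderr, "Could not deduce container format from: %s\n", filename);
            return false;
        }
        fprintf(stderr, "Using muxer: %s\n", output_format->name);
        return true;
    }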
@@ -2142,7 +2154,7 @@ int main(int argc, char **argv) {
file_extension = file_extension.substr(0, comma_index);
}
- const bool force_no_audio_offset = is_livestream || (file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm");
+ const bool force_no_audio_offset = is_livestream || is_output_piped || (file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm");
switch(audio_codec) {
case AudioCodec::AAC: {
@@ -2322,8 +2334,9 @@ int main(int argc, char **argv) {
AVStream *video_stream = nullptr;
std::vector<AudioTrack> audio_tracks;
const bool hdr = video_codec_is_hdr(video_codec);
+ const bool low_latency_recording = is_livestream || is_output_piped;
- AVCodecContext *video_codec_context = create_video_codec_context(egl.gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_CUDA : AV_PIX_FMT_VAAPI, quality, fps, video_codec_f, is_livestream, egl.gpu_info.vendor, framerate_mode, hdr, color_range, keyint);
+ AVCodecContext *video_codec_context = create_video_codec_context(egl.gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_CUDA : AV_PIX_FMT_VAAPI, quality, fps, video_codec_f, low_latency_recording, egl.gpu_info.vendor, framerate_mode, hdr, color_range, keyint);
if(replay_buffer_size_secs == -1)
video_stream = create_stream(av_format_context, video_codec_context);