aboutsummaryrefslogtreecommitdiff
path: root/src/main.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/main.cpp')
-rw-r--r--src/main.cpp3313
1 files changed, 1976 insertions, 1337 deletions
diff --git a/src/main.cpp b/src/main.cpp
index aac777e..d00a9be 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1,19 +1,13 @@
-/*
- Copyright (C) 2020 dec05eba
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-*/
+extern "C" {
+#include "../include/capture/nvfbc.h"
+#include "../include/capture/xcomposite_cuda.h"
+#include "../include/capture/xcomposite_vaapi.h"
+#include "../include/capture/kms_vaapi.h"
+#include "../include/capture/kms_cuda.h"
+#include "../include/egl.h"
+#include "../include/utils.h"
+#include "../include/color_conversion.h"
+}
#include <assert.h>
#include <stdio.h>
@@ -26,45 +20,50 @@
#include <map>
#include <signal.h>
#include <sys/stat.h>
-
#include <unistd.h>
-#include <fcntl.h>
+#include <sys/wait.h>
+#include <libgen.h>
#include "../include/sound.hpp"
-#include "../include/NvFBCLibrary.hpp"
-#include "../include/CudaLibrary.hpp"
-#include "../include/GlLibrary.hpp"
-
-#include <X11/extensions/Xcomposite.h>
-//#include <X11/Xatom.h>
extern "C" {
#include <libavutil/pixfmt.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
-#include <libavutil/hwcontext.h>
-#include <libavutil/hwcontext_cuda.h>
#include <libavutil/opt.h>
#include <libswresample/swresample.h>
#include <libavutil/avutil.h>
#include <libavutil/time.h>
-}
-
-extern "C" {
-#include <libavutil/hwcontext.h>
+#include <libavfilter/avfilter.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
}
#include <deque>
#include <future>
+// TODO: If options are not supported then they are returned (allocated) in the options. This should be free'd.
+
// TODO: Remove LIBAVUTIL_VERSION_MAJOR checks in the future when ubuntu, pop os LTS etc update ffmpeg to >= 5.0
static const int VIDEO_STREAM_INDEX = 0;
static thread_local char av_error_buffer[AV_ERROR_MAX_STRING_SIZE];
-static Cuda cuda;
-static GlLibrary gl;
+static void monitor_output_callback_print(const gsr_monitor *monitor, void *userdata) {
+ (void)userdata;
+ fprintf(stderr, " \"%.*s\" (%dx%d+%d+%d)\n", monitor->name_len, monitor->name, monitor->size.x, monitor->size.y, monitor->pos.x, monitor->pos.y);
+}
+
+typedef struct {
+ const char *output_name;
+} FirstOutputCallback;
+
+static void get_first_output(const gsr_monitor *monitor, void *userdata) {
+ FirstOutputCallback *first_output = (FirstOutputCallback*)userdata;
+ if(!first_output->output_name)
+ first_output->output_name = strndup(monitor->name, monitor->name_len + 1);
+}
static char* av_error_to_string(int err) {
if(av_strerror(err, av_error_buffer, sizeof(av_error_buffer)) < 0)
@@ -72,30 +71,6 @@ static char* av_error_to_string(int err) {
return av_error_buffer;
}
-struct ScopedGLXFBConfig {
- ~ScopedGLXFBConfig() {
- if (configs)
- XFree(configs);
- }
-
- GLXFBConfig *configs = nullptr;
-};
-
-struct WindowPixmap {
- Pixmap pixmap = None;
- GLXPixmap glx_pixmap = None;
- unsigned int texture_id = 0;
- unsigned int target_texture_id = 0;
-
- int texture_width = 0;
- int texture_height = 0;
-
- int texture_real_width = 0;
- int texture_real_height = 0;
-
- Window composite_window = None;
-};
-
enum class VideoQuality {
MEDIUM,
HIGH,
@@ -105,482 +80,234 @@ enum class VideoQuality {
enum class VideoCodec {
H264,
- H265
+ HEVC,
+ HEVC_HDR,
+ AV1,
+ AV1_HDR
};
-static double clock_get_monotonic_seconds() {
- struct timespec ts;
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- clock_gettime(CLOCK_MONOTONIC, &ts);
- return (double)ts.tv_sec + (double)ts.tv_nsec * 0.000000001;
-}
+enum class AudioCodec {
+ AAC,
+ OPUS,
+ FLAC
+};
-static bool x11_supports_composite_named_window_pixmap(Display *dpy) {
- int extension_major;
- int extension_minor;
- if (!XCompositeQueryExtension(dpy, &extension_major, &extension_minor))
- return false;
+enum class PixelFormat {
+ YUV420,
+ YUV444
+};
- int major_version;
- int minor_version;
- return XCompositeQueryVersion(dpy, &major_version, &minor_version) &&
- (major_version > 0 || minor_version >= 2);
-}
+enum class FramerateMode {
+ CONSTANT,
+ VARIABLE
+};
-static int x11_error_handler(Display *dpy, XErrorEvent *ev) {
-#if 0
- char type_str[128];
- XGetErrorText(dpy, ev->type, type_str, sizeof(type_str));
-
- char major_opcode_str[128];
- XGetErrorText(dpy, ev->type, major_opcode_str, sizeof(major_opcode_str));
-
- char minor_opcode_str[128];
- XGetErrorText(dpy, ev->type, minor_opcode_str, sizeof(minor_opcode_str));
-
- fprintf(stderr,
- "X Error of failed request: %s\n"
- "Major opcode of failed request: %d (%s)\n"
- "Minor opcode of failed request: %d (%s)\n"
- "Serial number of failed request: %d\n",
- type_str,
- ev->request_code, major_opcode_str,
- ev->minor_code, minor_opcode_str);
-#endif
+static int x11_error_handler(Display*, XErrorEvent*) {
return 0;
}
-static int x11_io_error_handler(Display *dpy) {
+static int x11_io_error_handler(Display*) {
return 0;
}
-static Window get_compositor_window(Display *display) {
- Window overlay_window = XCompositeGetOverlayWindow(display, DefaultRootWindow(display));
- XCompositeReleaseOverlayWindow(display, DefaultRootWindow(display));
-
- /*
- Atom xdnd_proxy = XInternAtom(display, "XdndProxy", False);
- if(!xdnd_proxy)
- return None;
-
- Atom type = None;
- int format = 0;
- unsigned long nitems = 0, after = 0;
- unsigned char *data = nullptr;
- if(XGetWindowProperty(display, overlay_window, xdnd_proxy, 0, 1, False, XA_WINDOW, &type, &format, &nitems, &after, &data) != Success)
- return None;
-
- fprintf(stderr, "type: %ld, format: %d, num items: %lu\n", type, format, nitems);
- if(type == XA_WINDOW && format == 32 && nitems == 1)
- fprintf(stderr, "Proxy window: %ld\n", *(Window*)data);
-
- if(data)
- XFree(data);
- */
-
- Window root_window, parent_window;
- Window *children = nullptr;
- unsigned int num_children = 0;
- if(XQueryTree(display, overlay_window, &root_window, &parent_window, &children, &num_children) == 0)
- return None;
-
- Window compositor_window = None;
- if(num_children == 1) {
- compositor_window = children[0];
- const int screen_width = XWidthOfScreen(DefaultScreenOfDisplay(display));
- const int screen_height = XHeightOfScreen(DefaultScreenOfDisplay(display));
-
- XWindowAttributes attr;
- if(!XGetWindowAttributes(display, compositor_window, &attr) || attr.width != screen_width || attr.height != screen_height)
- compositor_window = None;
- }
-
- if(children)
- XFree(children);
-
- return compositor_window;
-}
-
-static void cleanup_window_pixmap(Display *dpy, WindowPixmap &pixmap) {
- if (pixmap.target_texture_id) {
- gl.glDeleteTextures(1, &pixmap.target_texture_id);
- pixmap.target_texture_id = 0;
- }
-
- if (pixmap.texture_id) {
- gl.glDeleteTextures(1, &pixmap.texture_id);
- pixmap.texture_id = 0;
- pixmap.texture_width = 0;
- pixmap.texture_height = 0;
- pixmap.texture_real_width = 0;
- pixmap.texture_real_height = 0;
- }
-
- if (pixmap.glx_pixmap) {
- gl.glXDestroyPixmap(dpy, pixmap.glx_pixmap);
- gl.glXReleaseTexImageEXT(dpy, pixmap.glx_pixmap, GLX_FRONT_EXT);
- pixmap.glx_pixmap = None;
- }
-
- if (pixmap.pixmap) {
- XFreePixmap(dpy, pixmap.pixmap);
- pixmap.pixmap = None;
- }
-
- if(pixmap.composite_window) {
- XCompositeUnredirectWindow(dpy, pixmap.composite_window, CompositeRedirectAutomatic);
- pixmap.composite_window = None;
- }
-}
-
-static bool recreate_window_pixmap(Display *dpy, Window window_id,
- WindowPixmap &pixmap, bool fallback_composite_window = true) {
- cleanup_window_pixmap(dpy, pixmap);
-
- XWindowAttributes attr;
- if (!XGetWindowAttributes(dpy, window_id, &attr)) {
- fprintf(stderr, "Failed to get window attributes\n");
- return false;
- }
-
- const int pixmap_config[] = {
- GLX_BIND_TO_TEXTURE_RGB_EXT, True,
- GLX_DRAWABLE_TYPE, GLX_PIXMAP_BIT | GLX_WINDOW_BIT,
- GLX_BIND_TO_TEXTURE_TARGETS_EXT, GLX_TEXTURE_2D_BIT_EXT,
- GLX_BUFFER_SIZE, 24,
- GLX_RED_SIZE, 8,
- GLX_GREEN_SIZE, 8,
- GLX_BLUE_SIZE, 8,
- GLX_ALPHA_SIZE, 0,
- // GLX_Y_INVERTED_EXT, (int)GLX_DONT_CARE,
- None};
-
- const int pixmap_attribs[] = {GLX_TEXTURE_TARGET_EXT,
- GLX_TEXTURE_2D_EXT,
- GLX_TEXTURE_FORMAT_EXT,
- GLX_TEXTURE_FORMAT_RGB_EXT,
- None};
-
- int c;
- GLXFBConfig *configs = gl.glXChooseFBConfig(dpy, 0, pixmap_config, &c);
- if (!configs) {
- fprintf(stderr, "Failed too choose fb config\n");
- return false;
- }
- ScopedGLXFBConfig scoped_configs;
- scoped_configs.configs = configs;
-
- bool found = false;
- GLXFBConfig config;
- for (int i = 0; i < c; i++) {
- config = configs[i];
- XVisualInfo *visual = gl.glXGetVisualFromFBConfig(dpy, config);
- if (!visual)
- continue;
-
- if (attr.depth != visual->depth) {
- XFree(visual);
- continue;
- }
- XFree(visual);
- found = true;
- break;
- }
-
- if(!found) {
- fprintf(stderr, "No matching fb config found\n");
- return false;
- }
-
- Pixmap new_window_pixmap = XCompositeNameWindowPixmap(dpy, window_id);
- if (!new_window_pixmap) {
- fprintf(stderr, "Failed to get pixmap for window %ld\n", window_id);
- return false;
- }
-
- GLXPixmap glx_pixmap = gl.glXCreatePixmap(dpy, config, new_window_pixmap, pixmap_attribs);
- if (!glx_pixmap) {
- fprintf(stderr, "Failed to create glx pixmap\n");
- XFreePixmap(dpy, new_window_pixmap);
- return false;
- }
-
- pixmap.pixmap = new_window_pixmap;
- pixmap.glx_pixmap = glx_pixmap;
-
- //glEnable(GL_TEXTURE_2D);
- gl.glGenTextures(1, &pixmap.texture_id);
- gl.glBindTexture(GL_TEXTURE_2D, pixmap.texture_id);
-
- // glEnable(GL_BLEND);
- // glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
-
- gl.glXBindTexImageEXT(dpy, pixmap.glx_pixmap, GLX_FRONT_EXT, NULL);
- gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
- GL_NEAREST); // GL_LINEAR );
- gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
- GL_NEAREST); // GL_LINEAR);//GL_LINEAR_MIPMAP_LINEAR );
- //glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
-
- gl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH,
- &pixmap.texture_width);
- gl.glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT,
- &pixmap.texture_height);
-
- pixmap.texture_real_width = pixmap.texture_width;
- pixmap.texture_real_height = pixmap.texture_height;
-
- if(pixmap.texture_width == 0 || pixmap.texture_height == 0) {
- gl.glBindTexture(GL_TEXTURE_2D, 0);
- pixmap.texture_width = attr.width;
- pixmap.texture_height = attr.height;
-
- pixmap.texture_real_width = pixmap.texture_width;
- pixmap.texture_real_height = pixmap.texture_height;
-
- if(fallback_composite_window) {
- Window compositor_window = get_compositor_window(dpy);
- if(!compositor_window) {
- fprintf(stderr, "Warning: failed to get texture size. You are probably running an unsupported compositor and recording the selected window doesn't work at the moment. This could also happen if you are trying to record a window with client-side decorations. A black window will be displayed instead. A workaround is to record the whole monitor (which uses NvFBC).\n");
- return false;
- }
-
- fprintf(stderr, "Warning: failed to get texture size. You are probably trying to record a window with client-side decorations (using GNOME?). Trying to fallback to recording the compositor proxy window\n");
- XCompositeRedirectWindow(dpy, compositor_window, CompositeRedirectAutomatic);
-
- // TODO: Target texture should be the same size as the target window, not the size of the composite window
- if(recreate_window_pixmap(dpy, compositor_window, pixmap, false)) {
- pixmap.composite_window = compositor_window;
- pixmap.texture_width = attr.width;
- pixmap.texture_height = attr.height;
- return true;
- }
-
- pixmap.texture_width = attr.width;
- pixmap.texture_height = attr.height;
-
+static bool video_codec_is_hdr(VideoCodec video_codec) {
+ switch(video_codec) {
+ case VideoCodec::HEVC_HDR:
+ case VideoCodec::AV1_HDR:
+ return true;
+ default:
return false;
- } else {
- fprintf(stderr, "Warning: failed to get texture size. You are probably running an unsupported compositor and recording the selected window doesn't work at the moment. This could also happen if you are trying to record a window with client-side decorations. A black window will be displayed instead. A workaround is to record the whole monitor (which uses NvFBC).\n");
- }
- }
-
- fprintf(stderr, "texture width: %d, height: %d\n", pixmap.texture_width,
- pixmap.texture_height);
-
- // Generating this second texture is needed because
- // cuGraphicsGLRegisterImage cant be used with the texture that is mapped
- // directly to the pixmap.
- // TODO: Investigate if it's somehow possible to use the pixmap texture
- // directly, this should improve performance since only less image copy is
- // then needed every frame.
- gl.glGenTextures(1, &pixmap.target_texture_id);
- gl.glBindTexture(GL_TEXTURE_2D, pixmap.target_texture_id);
- gl.glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pixmap.texture_width,
- pixmap.texture_height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
- unsigned int err2 = gl.glGetError();
- //fprintf(stderr, "error: %d\n", err2);
- // glXBindTexImageEXT(dpy, pixmap.glx_pixmap, GLX_FRONT_EXT, NULL);
- // glGenerateTextureMipmapEXT(glxpixmap, GL_TEXTURE_2D);
-
- // glGenerateMipmap(GL_TEXTURE_2D);
-
- // gl.glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
- // gl.glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
-
- gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
- GL_NEAREST); // GL_LINEAR );
- gl.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
- GL_NEAREST); // GL_LINEAR);//GL_LINEAR_MIPMAP_LINEAR );
- //glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
-
- gl.glBindTexture(GL_TEXTURE_2D, 0);
-
- return pixmap.texture_id != 0 && pixmap.target_texture_id != 0;
-}
-
-static Window create_opengl_window(Display *display) {
- const int attr[] = {
- GLX_RENDER_TYPE, GLX_RGBA_BIT,
- GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT,
- GLX_DOUBLEBUFFER, True,
- GLX_RED_SIZE, 8,
- GLX_GREEN_SIZE, 8,
- GLX_BLUE_SIZE, 8,
- GLX_ALPHA_SIZE, 8,
- GLX_DEPTH_SIZE, 0,
- None
- };
-
- XVisualInfo *visual_info = NULL;
- GLXFBConfig fbconfig = NULL;
-
- int numfbconfigs = 0;
- GLXFBConfig *fbconfigs = gl.glXChooseFBConfig(display, DefaultScreen(display), attr, &numfbconfigs);
- for(int i = 0; i < numfbconfigs; i++) {
- visual_info = gl.glXGetVisualFromFBConfig(display, fbconfigs[i]);
- if(!visual_info)
- continue;
-
- fbconfig = fbconfigs[i];
- break;
- }
-
- if(!visual_info) {
- fprintf(stderr, "mgl error: no appropriate visual found\n");
- return -1;
- }
-
- // TODO: Core profile? GLX_CONTEXT_CORE_PROFILE_BIT_ARB.
- // TODO: Remove need for 4.2 when copy texture function has been removed
- int context_attribs[] = {
- GLX_CONTEXT_MAJOR_VERSION_ARB, 4,
- GLX_CONTEXT_MINOR_VERSION_ARB, 2,
- GLX_CONTEXT_FLAGS_ARB, GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB,
- None
- };
-
- GLXContext gl_context = gl.glXCreateContextAttribsARB(display, fbconfig, nullptr, True, context_attribs);
- if(!gl_context) {
- fprintf(stderr, "Error: failed to create gl context\n");
- return None;
- }
-
- Colormap colormap = XCreateColormap(display, DefaultRootWindow(display), visual_info->visual, AllocNone);
- if(!colormap) {
- fprintf(stderr, "Error: failed to create x11 colormap\n");
- gl.glXDestroyContext(display, gl_context);
- }
-
- XSetWindowAttributes window_attr;
- window_attr.colormap = colormap;
-
- // TODO: Is there a way to remove the need to create a window?
- Window window = XCreateWindow(display, DefaultRootWindow(display), 0, 0, 1, 1, 0, visual_info->depth, InputOutput, visual_info->visual, CWColormap, &window_attr);
-
- if(!window) {
- fprintf(stderr, "Error: failed to create gl window\n");
- goto fail;
- }
-
- if(!gl.glXMakeContextCurrent(display, window, window, gl_context)) {
- fprintf(stderr, "Error: failed to make gl context current\n");
- goto fail;
}
-
- return window;
-
- fail:
- XFreeColormap(display, colormap);
- gl.glXDestroyContext(display, gl_context);
- return None;
}
-/* TODO: check for glx swap control extension string (GLX_EXT_swap_control, etc) */
-static void set_vertical_sync_enabled(Display *display, Window window, bool enabled) {
- int result = 0;
+struct PacketData {
+ PacketData() {}
+ PacketData(const PacketData&) = delete;
+ PacketData& operator=(const PacketData&) = delete;
- if(gl.glXSwapIntervalEXT) {
- gl.glXSwapIntervalEXT(display, window, enabled ? 1 : 0);
- } else if(gl.glXSwapIntervalMESA) {
- result = gl.glXSwapIntervalMESA(enabled ? 1 : 0);
- } else if(gl.glXSwapIntervalSGI) {
- result = gl.glXSwapIntervalSGI(enabled ? 1 : 0);
- } else {
- static int warned = 0;
- if (!warned) {
- warned = 1;
- fprintf(stderr, "Warning: setting vertical sync not supported\n");
- }
+ ~PacketData() {
+ av_free(data.data);
}
- if(result != 0)
- fprintf(stderr, "Warning: setting vertical sync failed\n");
-}
+ AVPacket data;
+};
// |stream| is only required for non-replay mode
-static void receive_frames(AVCodecContext *av_codec_context, int stream_index, AVStream *stream, AVFrame *frame,
+static void receive_frames(AVCodecContext *av_codec_context, int stream_index, AVStream *stream, int64_t pts,
AVFormatContext *av_format_context,
double replay_start_time,
- std::deque<AVPacket> &frame_data_queue,
+ std::deque<std::shared_ptr<PacketData>> &frame_data_queue,
int replay_buffer_size_secs,
bool &frames_erased,
- std::mutex &write_output_mutex) {
+ std::mutex &write_output_mutex,
+ double paused_time_offset) {
for (;;) {
- // TODO: Use av_packet_alloc instead because sizeof(av_packet) might not be future proof(?)
- AVPacket av_packet;
- memset(&av_packet, 0, sizeof(av_packet));
- av_packet.data = NULL;
- av_packet.size = 0;
- int res = avcodec_receive_packet(av_codec_context, &av_packet);
- if (res == 0) { // we have a packet, send the packet to the muxer
- av_packet.stream_index = stream_index;
- av_packet.pts = av_packet.dts = frame->pts;
+ AVPacket *av_packet = av_packet_alloc();
+ if(!av_packet)
+ break;
- if(frame->flags & AV_FRAME_FLAG_DISCARD)
- av_packet.flags |= AV_PKT_FLAG_DISCARD;
+ av_packet->data = NULL;
+ av_packet->size = 0;
+ int res = avcodec_receive_packet(av_codec_context, av_packet);
+ if (res == 0) { // we have a packet, send the packet to the muxer
+ av_packet->stream_index = stream_index;
+ av_packet->pts = pts;
+ av_packet->dts = pts;
std::lock_guard<std::mutex> lock(write_output_mutex);
if(replay_buffer_size_secs != -1) {
- double time_now = clock_get_monotonic_seconds();
+ // TODO: Preallocate all frames data and use those instead.
+ // Why are we doing this you ask? there is a new ffmpeg bug that causes cpu usage to increase over time when you have
+ // packets that are not being free'd until later. So we copy the packet data, free the packet and then reconstruct
+ // the packet later on when we need it, to keep packets alive only for a short period.
+ auto new_packet = std::make_shared<PacketData>();
+ new_packet->data = *av_packet;
+ new_packet->data.data = (uint8_t*)av_malloc(av_packet->size);
+ memcpy(new_packet->data.data, av_packet->data, av_packet->size);
+
+ double time_now = clock_get_monotonic_seconds() - paused_time_offset;
double replay_time_elapsed = time_now - replay_start_time;
- AVPacket new_pack;
- av_packet_move_ref(&new_pack, &av_packet);
- frame_data_queue.push_back(std::move(new_pack));
+ frame_data_queue.push_back(std::move(new_packet));
if(replay_time_elapsed >= replay_buffer_size_secs) {
- av_packet_unref(&frame_data_queue.front());
frame_data_queue.pop_front();
frames_erased = true;
}
- av_packet_unref(&av_packet);
} else {
- av_packet_rescale_ts(&av_packet, av_codec_context->time_base, stream->time_base);
- av_packet.stream_index = stream->index;
- int ret = av_interleaved_write_frame(av_format_context, &av_packet);
+ av_packet_rescale_ts(av_packet, av_codec_context->time_base, stream->time_base);
+ av_packet->stream_index = stream->index;
+ // TODO: Is av_interleaved_write_frame needed?
+ int ret = av_write_frame(av_format_context, av_packet);
if(ret < 0) {
- fprintf(stderr, "Error: Failed to write frame index %d to muxer, reason: %s (%d)\n", av_packet.stream_index, av_error_to_string(ret), ret);
+ fprintf(stderr, "Error: Failed to write frame index %d to muxer, reason: %s (%d)\n", av_packet->stream_index, av_error_to_string(ret), ret);
}
}
+ av_packet_free(&av_packet);
} else if (res == AVERROR(EAGAIN)) { // we have no packet
// fprintf(stderr, "No packet!\n");
- av_packet_unref(&av_packet);
+ av_packet_free(&av_packet);
break;
} else if (res == AVERROR_EOF) { // this is the end of the stream
+ av_packet_free(&av_packet);
fprintf(stderr, "End of stream!\n");
- av_packet_unref(&av_packet);
break;
} else {
+ av_packet_free(&av_packet);
fprintf(stderr, "Unexpected error: %d\n", res);
- av_packet_unref(&av_packet);
break;
}
}
}
-static AVCodecContext* create_audio_codec_context(int fps) {
- const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
+static const char* audio_codec_get_name(AudioCodec audio_codec) {
+ switch(audio_codec) {
+ case AudioCodec::AAC: return "aac";
+ case AudioCodec::OPUS: return "opus";
+ case AudioCodec::FLAC: return "flac";
+ }
+ assert(false);
+ return "";
+}
+
+static AVCodecID audio_codec_get_id(AudioCodec audio_codec) {
+ switch(audio_codec) {
+ case AudioCodec::AAC: return AV_CODEC_ID_AAC;
+ case AudioCodec::OPUS: return AV_CODEC_ID_OPUS;
+ case AudioCodec::FLAC: return AV_CODEC_ID_FLAC;
+ }
+ assert(false);
+ return AV_CODEC_ID_AAC;
+}
+
+static AVSampleFormat audio_codec_get_sample_format(AudioCodec audio_codec, const AVCodec *codec, bool mix_audio) {
+ switch(audio_codec) {
+ case AudioCodec::AAC: {
+ return AV_SAMPLE_FMT_FLTP;
+ }
+ case AudioCodec::OPUS: {
+ bool supports_s16 = false;
+ bool supports_flt = false;
+
+ for(size_t i = 0; codec->sample_fmts && codec->sample_fmts[i] != -1; ++i) {
+ if(codec->sample_fmts[i] == AV_SAMPLE_FMT_S16) {
+ supports_s16 = true;
+ } else if(codec->sample_fmts[i] == AV_SAMPLE_FMT_FLT) {
+ supports_flt = true;
+ }
+ }
+
+ // Amix only works with float audio
+ if(mix_audio)
+ supports_s16 = false;
+
+ if(!supports_s16 && !supports_flt) {
+ fprintf(stderr, "Warning: opus audio codec is chosen but your ffmpeg version does not support s16/flt sample format and performance might be slightly worse.\n");
+ fprintf(stderr, " You can either rebuild ffmpeg with libopus instead of the built-in opus, use the flatpak version of gpu screen recorder or record with aac audio codec instead (-ac aac).\n");
+ fprintf(stderr, " Falling back to fltp audio sample format instead.\n");
+ }
+
+ if(supports_s16)
+ return AV_SAMPLE_FMT_S16;
+ else if(supports_flt)
+ return AV_SAMPLE_FMT_FLT;
+ else
+ return AV_SAMPLE_FMT_FLTP;
+ }
+ case AudioCodec::FLAC: {
+ return AV_SAMPLE_FMT_S32;
+ }
+ }
+ assert(false);
+ return AV_SAMPLE_FMT_FLTP;
+}
+
+static int64_t audio_codec_get_get_bitrate(AudioCodec audio_codec) {
+ switch(audio_codec) {
+ case AudioCodec::AAC: return 160000;
+ case AudioCodec::OPUS: return 128000;
+ case AudioCodec::FLAC: return 128000;
+ }
+ assert(false);
+ return 128000;
+}
+
+static AudioFormat audio_codec_context_get_audio_format(const AVCodecContext *audio_codec_context) {
+ switch(audio_codec_context->sample_fmt) {
+ case AV_SAMPLE_FMT_FLT: return F32;
+ case AV_SAMPLE_FMT_FLTP: return S32;
+ case AV_SAMPLE_FMT_S16: return S16;
+ case AV_SAMPLE_FMT_S32: return S32;
+ default: return S16;
+ }
+}
+
+static AVSampleFormat audio_format_to_sample_format(const AudioFormat audio_format) {
+ switch(audio_format) {
+ case S16: return AV_SAMPLE_FMT_S16;
+ case S32: return AV_SAMPLE_FMT_S32;
+ case F32: return AV_SAMPLE_FMT_FLT;
+ }
+ assert(false);
+ return AV_SAMPLE_FMT_S16;
+}
+
+static AVCodecContext* create_audio_codec_context(int fps, AudioCodec audio_codec, bool mix_audio, int audio_bitrate) {
+ (void)fps;
+ const AVCodec *codec = avcodec_find_encoder(audio_codec_get_id(audio_codec));
if (!codec) {
- fprintf(
- stderr,
- "Error: Could not find aac encoder\n");
- exit(1);
+ fprintf(stderr, "Error: Could not find %s audio encoder\n", audio_codec_get_name(audio_codec));
+ _exit(1);
}
AVCodecContext *codec_context = avcodec_alloc_context3(codec);
assert(codec->type == AVMEDIA_TYPE_AUDIO);
- /*
- codec_context->sample_fmt = (*codec)->sample_fmts
- ? (*codec)->sample_fmts[0]
- : AV_SAMPLE_FMT_FLTP;
- */
- codec_context->codec_id = AV_CODEC_ID_AAC;
- codec_context->sample_fmt = AV_SAMPLE_FMT_FLTP;
- //codec_context->bit_rate = 64000;
+ codec_context->codec_id = codec->id;
+ codec_context->sample_fmt = audio_codec_get_sample_format(audio_codec, codec, mix_audio);
+ codec_context->bit_rate = audio_bitrate == 0 ? audio_codec_get_get_bitrate(audio_codec) : audio_bitrate;
codec_context->sample_rate = 48000;
- codec_context->profile = FF_PROFILE_AAC_LOW;
+ if(audio_codec == AudioCodec::AAC)
+ codec_context->profile = FF_PROFILE_AAC_LOW;
#if LIBAVCODEC_VERSION_MAJOR < 60
codec_context->channel_layout = AV_CH_LAYOUT_STEREO;
codec_context->channels = 2;
@@ -589,10 +316,8 @@ static AVCodecContext* create_audio_codec_context(int fps) {
#endif
codec_context->time_base.num = 1;
- codec_context->time_base.den = codec_context->sample_rate;
- codec_context->framerate.num = fps;
- codec_context->framerate.den = 1;
-
+ codec_context->time_base.den = AV_TIME_BASE;
+ codec_context->thread_count = 1;
codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
return codec_context;
@@ -600,8 +325,8 @@ static AVCodecContext* create_audio_codec_context(int fps) {
static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
VideoQuality video_quality,
- int record_width, int record_height,
- int fps, const AVCodec *codec, bool is_livestream) {
+ int fps, const AVCodec *codec, bool is_livestream, gsr_gpu_vendor vendor, FramerateMode framerate_mode,
+ bool hdr, gsr_color_range color_range) {
AVCodecContext *codec_context = avcodec_alloc_context3(codec);
@@ -609,19 +334,17 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
assert(codec->type == AVMEDIA_TYPE_VIDEO);
codec_context->codec_id = codec->id;
- codec_context->width = record_width & ~1;
- codec_context->height = record_height & ~1;
// Timebase: This is the fundamental unit of time (in seconds) in terms
// of which frame timestamps are represented. For fixed-fps content,
// timebase should be 1/framerate and timestamp increments should be
// identical to 1
codec_context->time_base.num = 1;
- codec_context->time_base.den = fps;
+ codec_context->time_base.den = framerate_mode == FramerateMode::CONSTANT ? fps : AV_TIME_BASE;
codec_context->framerate.num = fps;
codec_context->framerate.den = 1;
codec_context->sample_aspect_ratio.num = 0;
codec_context->sample_aspect_ratio.den = 0;
- // High values reeduce file size but increases time it takes to seek
+ // High values reduce file size but increases time it takes to seek
if(is_livestream) {
codec_context->flags |= (AV_CODEC_FLAG_CLOSED_GOP | AV_CODEC_FLAG_LOW_DELAY);
codec_context->flags2 |= AV_CODEC_FLAG2_FAST;
@@ -633,7 +356,17 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
}
codec_context->max_b_frames = 0;
codec_context->pix_fmt = pix_fmt;
- codec_context->color_range = AVCOL_RANGE_JPEG;
+ codec_context->color_range = color_range == GSR_COLOR_RANGE_LIMITED ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
+ if(hdr) {
+ codec_context->color_primaries = AVCOL_PRI_BT2020;
+ codec_context->color_trc = AVCOL_TRC_SMPTE2084;
+ codec_context->colorspace = AVCOL_SPC_BT2020_NCL;
+ } else {
+ codec_context->color_primaries = AVCOL_PRI_BT709;
+ codec_context->color_trc = AVCOL_TRC_BT709;
+ codec_context->colorspace = AVCOL_SPC_BT709;
+ }
+ //codec_context->chroma_sample_location = AVCHROMA_LOC_CENTER;
if(codec->id == AV_CODEC_ID_HEVC)
codec_context->codec_tag = MKTAG('h', 'v', 'c', '1');
switch(video_quality) {
@@ -681,74 +414,197 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
codec_context->bit_rate = 0;
#endif
+ if(vendor != GSR_GPU_VENDOR_NVIDIA) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ codec_context->global_quality = 180;
+ break;
+ case VideoQuality::HIGH:
+ codec_context->global_quality = 140;
+ break;
+ case VideoQuality::VERY_HIGH:
+ codec_context->global_quality = 120;
+ break;
+ case VideoQuality::ULTRA:
+ codec_context->global_quality = 100;
+ break;
+ }
+ }
+
+ av_opt_set_int(codec_context->priv_data, "b_ref_mode", 0, 0);
+ //av_opt_set_int(codec_context->priv_data, "cbr", true, 0);
+
+ if(vendor != GSR_GPU_VENDOR_NVIDIA) {
+ // TODO: More options, better options
+ //codec_context->bit_rate = codec_context->width * codec_context->height;
+ av_opt_set(codec_context->priv_data, "rc_mode", "CQP", 0);
+ //codec_context->global_quality = 4;
+ //codec_context->compression_level = 2;
+ }
+
+ //av_opt_set(codec_context->priv_data, "bsf", "hevc_metadata=colour_primaries=9:transfer_characteristics=16:matrix_coefficients=9", 0);
+
//codec_context->rc_max_rate = codec_context->bit_rate;
//codec_context->rc_min_rate = codec_context->bit_rate;
//codec_context->rc_buffer_size = codec_context->bit_rate / 10;
+ // TODO: Do this when not using cqp
+ //codec_context->rc_initial_buffer_occupancy = codec_context->bit_rate * 1000;
codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
return codec_context;
}
-static const AVCodec* find_h264_encoder() {
- const AVCodec *codec = avcodec_find_encoder_by_name("h264_nvenc");
+static bool vaapi_create_codec_context(AVCodecContext *video_codec_context, const char *card_path) {
+ char render_path[128];
+ if(!gsr_card_path_get_render_path(card_path, render_path)) {
+ fprintf(stderr, "gsr error: failed to get /dev/dri/renderDXXX file from %s\n", card_path);
+ return false;
+ }
+
+ AVBufferRef *device_ctx;
+ if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
+ fprintf(stderr, "Error: Failed to create hardware device context\n");
+ return false;
+ }
+
+ AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
+ if(!frame_context) {
+ fprintf(stderr, "Error: Failed to create hwframe context\n");
+ av_buffer_unref(&device_ctx);
+ return false;
+ }
+
+ AVHWFramesContext *hw_frame_context =
+ (AVHWFramesContext *)frame_context->data;
+ hw_frame_context->width = video_codec_context->width;
+ hw_frame_context->height = video_codec_context->height;
+ hw_frame_context->sw_format = AV_PIX_FMT_NV12;
+ hw_frame_context->format = video_codec_context->pix_fmt;
+ hw_frame_context->device_ref = device_ctx;
+ hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
+
+ //hw_frame_context->initial_pool_size = 1;
+
+ if (av_hwframe_ctx_init(frame_context) < 0) {
+ fprintf(stderr, "Error: Failed to initialize hardware frame context "
+ "(note: ffmpeg version needs to be > 4.0)\n");
+ av_buffer_unref(&device_ctx);
+ //av_buffer_unref(&frame_context);
+ return false;
+ }
+
+ video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
+ video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
+ return true;
+}
+
+static bool check_if_codec_valid_for_hardware(const AVCodec *codec, gsr_gpu_vendor vendor, const char *card_path) {
+ // Do not use AV_PIX_FMT_CUDA because we dont want to do full check with hardware context
+ AVCodecContext *codec_context = create_video_codec_context(vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_VAAPI, VideoQuality::VERY_HIGH, 60, codec, false, vendor, FramerateMode::CONSTANT, false, GSR_COLOR_RANGE_LIMITED);
+ if(!codec_context)
+ return false;
+
+ codec_context->width = 512;
+ codec_context->height = 512;
+
+ if(vendor != GSR_GPU_VENDOR_NVIDIA) {
+ if(!vaapi_create_codec_context(codec_context, card_path)) {
+ avcodec_free_context(&codec_context);
+ return false;
+ }
+ }
+
+ bool success = false;
+ success = avcodec_open2(codec_context, codec_context->codec, NULL) == 0;
+ if(codec_context->hw_device_ctx)
+ av_buffer_unref(&codec_context->hw_device_ctx);
+ if(codec_context->hw_frames_ctx)
+ av_buffer_unref(&codec_context->hw_frames_ctx);
+ avcodec_free_context(&codec_context);
+ return success;
+}
+
+// Returns the vendor's h264 hardware encoder, or nullptr if unavailable.
+// Tries the modern encoder name first, then the legacy ffmpeg alias.
+// The expensive open-probe runs only once per process (cached in statics).
+static const AVCodec* find_h264_encoder(gsr_gpu_vendor vendor, const char *card_path) {
+    const AVCodec *codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "h264_nvenc" : "h264_vaapi");
+    if(!codec)
+        codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "nvenc_h264" : "vaapi_h264");
+
    if(!codec)
-        codec = avcodec_find_encoder_by_name("nvenc_h264");
+        return nullptr;
    static bool checked = false;
+    static bool checked_success = true;
    if(!checked) {
        checked = true;
-        // Do not use AV_PIX_FMT_CUDA because we dont want to do full check with hardware context
-        AVCodecContext *codec_context = create_video_codec_context(AV_PIX_FMT_YUV420P, VideoQuality::VERY_HIGH, 1920, 1080, 60, codec, false);
-        if(codec_context) {
-            if (avcodec_open2(codec_context, codec_context->codec, NULL) < 0) {
-                avcodec_free_context(&codec_context);
-                return nullptr;
-            }
-            avcodec_free_context(&codec_context);
-        }
+        if(!check_if_codec_valid_for_hardware(codec, vendor, card_path))
+            checked_success = false;
    }
-    return codec;
+    return checked_success ? codec : nullptr;
}
+// hevc analogue of find_h264_encoder: look up the vendor's hevc hardware
+// encoder (modern name, then legacy alias) and probe it once per process.
-static const AVCodec* find_h265_encoder() {
-    const AVCodec *codec = avcodec_find_encoder_by_name("hevc_nvenc");
+static const AVCodec* find_h265_encoder(gsr_gpu_vendor vendor, const char *card_path) {
+    const AVCodec *codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "hevc_nvenc" : "hevc_vaapi");
    if(!codec)
-        codec = avcodec_find_encoder_by_name("nvenc_hevc");
+        codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "nvenc_hevc" : "vaapi_hevc");
    if(!codec)
        return nullptr;
    static bool checked = false;
+    static bool checked_success = true;
    if(!checked) {
        checked = true;
-        // Do not use AV_PIX_FMT_CUDA because we dont want to do full check with hardware context
-        AVCodecContext *codec_context = create_video_codec_context(AV_PIX_FMT_YUV420P, VideoQuality::VERY_HIGH, 1920, 1080, 60, codec, false);
-        if(codec_context) {
-            if (avcodec_open2(codec_context, codec_context->codec, NULL) < 0) {
-                avcodec_free_context(&codec_context);
-                return nullptr;
-            }
-            avcodec_free_context(&codec_context);
-        }
+        if(!check_if_codec_valid_for_hardware(codec, vendor, card_path))
+            checked_success = false;
+    }
+    return checked_success ? codec : nullptr;
+}
+
+// Returns the vendor's av1 hardware encoder, or nullptr if unavailable.
+// Same lookup+probe pattern as find_h264_encoder/find_h265_encoder.
+static const AVCodec* find_av1_encoder(gsr_gpu_vendor vendor, const char *card_path) {
+    // Workaround bug with av1 nvidia in older ffmpeg versions that causes the whole application to crash
+    // when avcodec_open2 is opened with av1_nvenc
+    if(vendor == GSR_GPU_VENDOR_NVIDIA && LIBAVCODEC_BUILD < AV_VERSION_INT(60, 30, 100)) {
+        return nullptr;
+    }
+
+    const AVCodec *codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "av1_nvenc" : "av1_vaapi");
+    if(!codec)
+        codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "nvenc_av1" : "vaapi_av1");
+
+    if(!codec)
+        return nullptr;
+
+    static bool checked = false;
+    static bool checked_success = true;
+    if(!checked) {
+        checked = true;
+        if(!check_if_codec_valid_for_hardware(codec, vendor, card_path))
+            checked_success = false;
    }
-    return codec;
+    return checked_success ? codec : nullptr;
}
+// Opens the audio encoder. "strict experimental" is required by some codec
+// combinations (e.g. opus-in-mp4 on older ffmpeg). Exits the process on failure.
-static AVFrame* open_audio(AVCodecContext *audio_codec_context) {
+static void open_audio(AVCodecContext *audio_codec_context) {
+    AVDictionary *options = nullptr;
+    av_dict_set(&options, "strict", "experimental", 0);
+
    int ret;
-    ret = avcodec_open2(audio_codec_context, audio_codec_context->codec, nullptr);
+    ret = avcodec_open2(audio_codec_context, audio_codec_context->codec, &options);
    if(ret < 0) {
        fprintf(stderr, "failed to open codec, reason: %s\n", av_error_to_string(ret));
-        exit(1);
+        _exit(1);
    }
+}
+// Allocates an AVFrame sized/formatted for exactly one encoder frame of audio
+// (sample rate, frame_size, sample format and channel layout copied from the
+// codec context). Exits the process on allocation failure.
+static AVFrame* create_audio_frame(AVCodecContext *audio_codec_context) {
    AVFrame *frame = av_frame_alloc();
    if(!frame) {
        fprintf(stderr, "failed to allocate audio frame\n");
-        exit(1);
+        _exit(1);
    }
+    frame->sample_rate = audio_codec_context->sample_rate;
    frame->nb_samples = audio_codec_context->frame_size;
    frame->format = audio_codec_context->sample_fmt;
#if LIBAVCODEC_VERSION_MAJOR < 60
@@ -758,204 +614,308 @@ static AVFrame* open_audio(AVCodecContext *audio_codec_context) {
    av_channel_layout_copy(&frame->ch_layout, &audio_codec_context->ch_layout);
#endif
-    ret = av_frame_get_buffer(frame, 0);
+    int ret = av_frame_get_buffer(frame, 0);
    if(ret < 0) {
        fprintf(stderr, "failed to allocate audio data buffers, reason: %s\n", av_error_to_string(ret));
-        exit(1);
+        _exit(1);
    }
    return frame;
}
-#if LIBAVUTIL_VERSION_MAJOR < 57
-static AVBufferRef* dummy_hw_frame_init(int size) {
- return av_buffer_alloc(size);
-}
-#else
-static AVBufferRef* dummy_hw_frame_init(size_t size) {
- return av_buffer_alloc(size);
-}
-#endif
-
-static void open_video(AVCodecContext *codec_context,
- WindowPixmap &window_pixmap, AVBufferRef **device_ctx,
- CUgraphicsResource *cuda_graphics_resource, CUcontext cuda_context, bool use_nvfbc, VideoQuality video_quality, bool is_livestream, bool very_old_gpu) {
- int ret;
-
- *device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
- if(!*device_ctx) {
- fprintf(stderr, "Error: Failed to create hardware device context\n");
- exit(1);
- }
-
- AVHWDeviceContext *hw_device_context = (AVHWDeviceContext *)(*device_ctx)->data;
- AVCUDADeviceContext *cuda_device_context = (AVCUDADeviceContext *)hw_device_context->hwctx;
- cuda_device_context->cuda_ctx = cuda_context;
- if(av_hwdevice_ctx_init(*device_ctx) < 0) {
- fprintf(stderr, "Error: Failed to create hardware device context\n");
- exit(1);
- }
-
- AVBufferRef *frame_context = av_hwframe_ctx_alloc(*device_ctx);
- if (!frame_context) {
- fprintf(stderr, "Error: Failed to create hwframe context\n");
- exit(1);
- }
-
- AVHWFramesContext *hw_frame_context =
- (AVHWFramesContext *)frame_context->data;
- hw_frame_context->width = codec_context->width;
- hw_frame_context->height = codec_context->height;
- hw_frame_context->sw_format = AV_PIX_FMT_0RGB32;
- hw_frame_context->format = codec_context->pix_fmt;
- hw_frame_context->device_ref = *device_ctx;
- hw_frame_context->device_ctx = (AVHWDeviceContext *)(*device_ctx)->data;
-
- if(use_nvfbc) {
- hw_frame_context->pool = av_buffer_pool_init(1, dummy_hw_frame_init);
- hw_frame_context->initial_pool_size = 1;
- }
-
- if (av_hwframe_ctx_init(frame_context) < 0) {
- fprintf(stderr, "Error: Failed to initialize hardware frame context "
- "(note: ffmpeg version needs to be > 4.0\n");
- exit(1);
- }
-
- codec_context->hw_device_ctx = *device_ctx;
- codec_context->hw_frames_ctx = frame_context;
-
- bool supports_p4 = false;
- bool supports_p6 = false;
+// Configures encoder-specific AVDictionary options for the chosen
+// vendor/codec/quality and opens the video codec. Nvidia (nvenc) uses
+// "qp" + "rc=constqp"; other vendors (vaapi) use "qp" + "rc_mode=CQP".
+// Exits the process if the codec cannot be opened.
+static void open_video(AVCodecContext *codec_context, VideoQuality video_quality, bool very_old_gpu, gsr_gpu_vendor vendor, PixelFormat pixel_format, bool hdr) {
+    AVDictionary *options = nullptr;
+    if(vendor == GSR_GPU_VENDOR_NVIDIA) {
+        // Disable setting preset since some nvidia gpus cant handle it nicely and greatly reduce encoding performance (from more than 60 fps to less than 45 fps) (such as Nvidia RTX A2000)
+        #if 0
+        bool supports_p4 = false;
+        bool supports_p5 = false;
+
+        const AVOption *opt = nullptr;
+        while((opt = av_opt_next(codec_context->priv_data, opt))) {
+            if(opt->type == AV_OPT_TYPE_CONST) {
+                if(strcmp(opt->name, "p4") == 0)
+                    supports_p4 = true;
+                else if(strcmp(opt->name, "p5") == 0)
+                    supports_p5 = true;
+            }
+        }
+        #endif
-    const AVOption *opt = nullptr;
-    while((opt = av_opt_next(codec_context->priv_data, opt))) {
-        if(opt->type == AV_OPT_TYPE_CONST) {
-            if(strcmp(opt->name, "p4") == 0)
-                supports_p4 = true;
-            else if(strcmp(opt->name, "p6") == 0)
-                supports_p6 = true;
+        // Constant-QP values per codec; a lower qp means higher quality/bitrate.
+        if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+            switch(video_quality) {
+                case VideoQuality::MEDIUM:
+                    av_dict_set_int(&options, "qp", 37, 0);
+                    break;
+                case VideoQuality::HIGH:
+                    av_dict_set_int(&options, "qp", 32, 0);
+                    break;
+                case VideoQuality::VERY_HIGH:
+                    av_dict_set_int(&options, "qp", 28, 0);
+                    break;
+                case VideoQuality::ULTRA:
+                    av_dict_set_int(&options, "qp", 24, 0);
+                    break;
+            }
+        } else if(very_old_gpu || codec_context->codec_id == AV_CODEC_ID_H264) {
+            switch(video_quality) {
+                case VideoQuality::MEDIUM:
+                    av_dict_set_int(&options, "qp", 37, 0);
+                    break;
+                case VideoQuality::HIGH:
+                    av_dict_set_int(&options, "qp", 32, 0);
+                    break;
+                case VideoQuality::VERY_HIGH:
+                    av_dict_set_int(&options, "qp", 27, 0);
+                    break;
+                case VideoQuality::ULTRA:
+                    av_dict_set_int(&options, "qp", 21, 0);
+                    break;
+            }
+        } else {
+            switch(video_quality) {
+                case VideoQuality::MEDIUM:
+                    av_dict_set_int(&options, "qp", 37, 0);
+                    break;
+                case VideoQuality::HIGH:
+                    av_dict_set_int(&options, "qp", 32, 0);
+                    break;
+                case VideoQuality::VERY_HIGH:
+                    av_dict_set_int(&options, "qp", 28, 0);
+                    break;
+                case VideoQuality::ULTRA:
+                    av_dict_set_int(&options, "qp", 24, 0);
+                    break;
+            }
        }
-    }
-    AVDictionary *options = nullptr;
-    if(very_old_gpu) {
-        switch(video_quality) {
-            case VideoQuality::MEDIUM:
-                av_dict_set_int(&options, "qp", 37, 0);
-                break;
-            case VideoQuality::HIGH:
-                av_dict_set_int(&options, "qp", 32, 0);
-                break;
-            case VideoQuality::VERY_HIGH:
-                av_dict_set_int(&options, "qp", 27, 0);
-                break;
-            case VideoQuality::ULTRA:
-                av_dict_set_int(&options, "qp", 21, 0);
-                break;
+        #if 0
+        if(!supports_p4 && !supports_p5)
+            fprintf(stderr, "Info: your ffmpeg version is outdated. It's recommended that you use the flatpak version of gpu-screen-recorder version instead, which you can find at https://flathub.org/apps/details/com.dec05eba.gpu_screen_recorder\n");
+
+        //if(is_livestream) {
+        //    av_dict_set_int(&options, "zerolatency", 1, 0);
+        //    //av_dict_set(&options, "preset", "llhq", 0);
+        //}
+
+        // I want to use a good preset for the gpu but all gpus prefer different
+        // presets. Nvidia and ffmpeg used to support "hq" preset that chose the best preset for the gpu
+        // with pretty good performance but you now have to choose p1-p7, which are gpu agnostic and on
+        // older gpus p5-p7 slow the gpu down to a crawl...
+        // "hq" is now just an alias for p7 in ffmpeg :(
+        // TODO: Temporary disable because of stuttering?
+
+        // TODO: Preset is set to p5 for now but it should ideally be p6 or p7.
+        // This change is needed because for certain sizes of a window (or monitor?) such as 971x780 causes encoding to freeze
+        // when using h264 codec. This is a new(?) nvidia driver bug.
+        if(very_old_gpu)
+            av_dict_set(&options, "preset", supports_p4 ? "p4" : "medium", 0);
+        else
+            av_dict_set(&options, "preset", supports_p5 ? "p5" : "slow", 0);
+        #endif
+
+        av_dict_set(&options, "tune", "hq", 0);
+        av_dict_set(&options, "rc", "constqp", 0);
+
+        if(codec_context->codec_id == AV_CODEC_ID_H264) {
+            switch(pixel_format) {
+                case PixelFormat::YUV420:
+                    av_dict_set(&options, "profile", "high", 0);
+                    break;
+                case PixelFormat::YUV444:
+                    av_dict_set(&options, "profile", "high444p", 0);
+                    break;
+            }
+        } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+            switch(pixel_format) {
+                case PixelFormat::YUV420:
+                    av_dict_set(&options, "rgb_mode", "yuv420", 0);
+                    break;
+                case PixelFormat::YUV444:
+                    av_dict_set(&options, "rgb_mode", "yuv444", 0);
+                    break;
+            }
+        } else {
+            //av_dict_set(&options, "profile", "main10", 0);
+            //av_dict_set(&options, "pix_fmt", "yuv420p16le", 0);
+            if(hdr) {
+                av_dict_set(&options, "profile", "main10", 0);
+            } else {
+                av_dict_set(&options, "profile", "main", 0);
+            }
        }
    } else {
-        switch(video_quality) {
-            case VideoQuality::MEDIUM:
-                av_dict_set_int(&options, "qp", 40, 0);
-                break;
-            case VideoQuality::HIGH:
-                av_dict_set_int(&options, "qp", 35, 0);
-                break;
-            case VideoQuality::VERY_HIGH:
-                av_dict_set_int(&options, "qp", 30, 0);
-                break;
-            case VideoQuality::ULTRA:
-                av_dict_set_int(&options, "qp", 24, 0);
-                break;
+        // VAAPI path (AMD/Intel). AV1 quality is driven by the global_quality
+        // codec-context field instead of a "qp" dictionary option.
+        if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+            // Using global_quality option
+        } else if(codec_context->codec_id == AV_CODEC_ID_H264) {
+            switch(video_quality) {
+                case VideoQuality::MEDIUM:
+                    av_dict_set_int(&options, "qp", 34, 0);
+                    break;
+                case VideoQuality::HIGH:
+                    av_dict_set_int(&options, "qp", 30, 0);
+                    break;
+                case VideoQuality::VERY_HIGH:
+                    av_dict_set_int(&options, "qp", 26, 0);
+                    break;
+                case VideoQuality::ULTRA:
+                    av_dict_set_int(&options, "qp", 22, 0);
+                    break;
+            }
+        } else {
+            switch(video_quality) {
+                case VideoQuality::MEDIUM:
+                    av_dict_set_int(&options, "qp", 37, 0);
+                    break;
+                case VideoQuality::HIGH:
+                    av_dict_set_int(&options, "qp", 32, 0);
+                    break;
+                case VideoQuality::VERY_HIGH:
+                    av_dict_set_int(&options, "qp", 28, 0);
+                    break;
+                case VideoQuality::ULTRA:
+                    av_dict_set_int(&options, "qp", 24, 0);
+                    break;
+            }
        }
-    }
-    if(!supports_p4 && !supports_p6) {
-        fprintf(stderr, "Info: your ffmpeg version is outdated. It's recommended that you use the flatpak version of gpu-screen-recorder version instead, which you can find at https://flathub.org/apps/details/com.dec05eba.gpu_screen_recorder\n");
-    }
-
-    //if(is_livestream) {
-    //    av_dict_set_int(&options, "zerolatency", 1, 0);
-    //    //av_dict_set(&options, "preset", "llhq", 0);
-    //}
+        // TODO: More quality options
+        av_dict_set(&options, "rc_mode", "CQP", 0);
+        //av_dict_set_int(&options, "low_power", 1, 0);
-    // Fuck nvidia and ffmpeg, I want to use a good preset for the gpu but all gpus prefer different
-    // presets. Nvidia and ffmpeg used to support "hq" preset that chose the best preset for the gpu
-    // with pretty good performance but you now have to choose p1-p7, which are gpu agnostic and on
-    // older gpus p5-p7 slow the gpu down to a crawl...
-    // "hq" is now just an alias for p7 in ffmpeg :(
-    // TODO: Temporary disable because of stuttering?
-    if(very_old_gpu)
-        av_dict_set(&options, "preset", supports_p4 ? "p4" : "medium", 0);
-    else
-        av_dict_set(&options, "preset", supports_p6 ? "p6" : "slow", 0);
+        if(codec_context->codec_id == AV_CODEC_ID_H264) {
+            av_dict_set(&options, "profile", "high", 0);
+            //av_dict_set_int(&options, "quality", 5, 0); // quality preset
+        } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+            av_dict_set(&options, "profile", "main", 0); // TODO: use professional instead?
+            av_dict_set(&options, "tier", "main", 0);
+        } else {
+            if(hdr) {
+                av_dict_set(&options, "profile", "main10", 0);
+                av_dict_set(&options, "sei", "hdr", 0);
+            } else {
+                av_dict_set(&options, "profile", "main", 0);
+            }
+        }
+    }
-    av_dict_set(&options, "tune", "hq", 0);
-    av_dict_set(&options, "rc", "constqp", 0);
+    if(codec_context->codec_id == AV_CODEC_ID_H264) {
+        av_dict_set(&options, "coder", "cabac", 0); // TODO: cavlc is faster than cabac but worse compression. Which to use?
+    }
-    if(codec_context->codec_id == AV_CODEC_ID_H264)
-        av_dict_set(&options, "profile", "high", 0);
+    av_dict_set(&options, "strict", "experimental", 0);
-    ret = avcodec_open2(codec_context, codec_context->codec, &options);
+    int ret = avcodec_open2(codec_context, codec_context->codec, &options);
    if (ret < 0) {
-        fprintf(stderr, "Error: Could not open video codec: %s\n",
-                "blabla"); // av_err2str(ret));
-        exit(1);
-    }
-
-    if(window_pixmap.target_texture_id != 0) {
-        CUresult res;
-        CUcontext old_ctx;
-        res = cuda.cuCtxPopCurrent_v2(&old_ctx);
-        res = cuda.cuCtxPushCurrent_v2(cuda_context);
-        res = cuda.cuGraphicsGLRegisterImage(
-            cuda_graphics_resource, window_pixmap.target_texture_id, GL_TEXTURE_2D,
-            CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY);
-        // cuda.cuGraphicsUnregisterResource(*cuda_graphics_resource);
-        if (res != CUDA_SUCCESS) {
-            const char *err_str;
-            cuda.cuGetErrorString(res, &err_str);
-            fprintf(stderr,
-                    "Error: cuda.cuGraphicsGLRegisterImage failed, error %s, texture "
-                    "id: %u\n",
-                    err_str, window_pixmap.target_texture_id);
-            exit(1);
-        }
-        res = cuda.cuCtxPopCurrent_v2(&old_ctx);
+        fprintf(stderr, "Error: Could not open video codec: %s\n", av_error_to_string(ret));
+        _exit(1);
    }
}
-static void close_video(AVStream *video_stream, AVFrame *frame) {
- // avcodec_close(video_stream->codec);
- // av_frame_free(&frame);
+// Prints the one-line usage synopsis to stderr, using the flatpak-aware
+// program name so copy-pasted examples work inside flatpak too.
+static void usage_header() {
+    const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
+    const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
+    fprintf(stderr, "usage: %s -w <window_id|monitor|focused> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>] [-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|hevc_hdr|av1|av1_hdr] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr] [-cr limited|full] [-v yes|no] [-h|--help] [-o <output_file>] [-mf yes|no] [-sc <script_path>] [-cursor yes|no]\n", program_name);
}
-static void usage() {
-    fprintf(stderr, "usage: gpu-screen-recorder -w <window_id> [-c <container_format>] -f <fps> [-a <audio_input>...] [-q <quality>] [-r <replay_buffer_size_sec>] [-o <output_file>]\n");
+// Prints the complete help text (synopsis, all options, notes, examples) to
+// stderr and exits with status 1. Fixed typos in the user-facing strings:
+// "be default" -> "by default", "has be" -> "has to be", "is only supported"
+// -> "are only supported", "your have" -> "you have".
+static void usage_full() {
+    const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
+    const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
+    usage_header();
+    fprintf(stderr, "\n");
    fprintf(stderr, "OPTIONS:\n");
-    fprintf(stderr, "  -w    Window to record or a display, \"screen\" or \"screen-direct\". The display is the display name in xrandr and if \"screen\" or \"screen-direct\" is selected then all displays are recorded and they are recorded in h265 (aka hevc)."
-        "\"screen-direct\" skips one texture copy for fullscreen applications so it may lead to better performance and it works with VRR monitors when recording fullscreen application but may break some applications, such as mpv in fullscreen mode. Recording a display requires a gpu with NvFBC support.\n");
-    fprintf(stderr, "  -s    The size (area) to record at in the format WxH, for example 1920x1080. Usually you want to set this to the size of the window. Optional, by default the size of the window (which is passed to -w). This option is only supported when recording a window, not a screen/monitor.\n");
-    fprintf(stderr, "  -c    Container format for output file, for example mp4, or flv. Only required if no output file is specified or if recording in replay buffer mode. If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n");
+    fprintf(stderr, "  -w    Window id to record, a display (monitor name), \"screen\", \"screen-direct-force\" or \"focused\".\n");
+    fprintf(stderr, "        If this is \"screen\" or \"screen-direct-force\" then all monitors are recorded.\n");
+    fprintf(stderr, "        \"screen-direct-force\" is not recommended unless you use a VRR (G-SYNC) monitor on Nvidia X11 and you are aware that using this option can cause games to freeze/crash or other issues because of Nvidia driver issues.\n");
+    fprintf(stderr, "        \"screen-direct-force\" option is only available on Nvidia X11. VRR works without this option on other systems.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -c    Container format for output file, for example mp4, or flv. Only required if no output file is specified or if recording in replay buffer mode.\n");
+    fprintf(stderr, "        If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n");
+    fprintf(stderr, "        Only containers that support h264, hevc or av1 are supported, which means that only mp4, mkv, flv (and some others) are supported.\n");
+    fprintf(stderr, "        WebM is not supported yet (most hardware doesn't support WebM video encoding).\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -s    The size (area) to record at in the format WxH, for example 1920x1080. This option is only supported (and required) when -w is \"focused\".\n");
+    fprintf(stderr, "\n");
    fprintf(stderr, "  -f    Framerate to record at.\n");
-    fprintf(stderr, "  -a    Audio device to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device. A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\". Optional, no audio track is added by default.\n");
-    fprintf(stderr, "  -q    Video quality. Should be either 'medium', 'high', 'very_high' or 'ultra'. 'high' is the recommended option when live streaming or when you have a slower harddrive. Optional, set to 'very_high' be default.\n");
-    fprintf(stderr, "  -r    Replay buffer size in seconds. If this is set, then only the last seconds as set by this option will be stored"
-        " and the video will only be saved when the gpu-screen-recorder is closed. This feature is similar to Nvidia's instant replay feature."
-        " This option has be between 5 and 1200. Note that the replay buffer size will not always be precise, because of keyframes. Optional, disabled by default.\n");
-    fprintf(stderr, "  -k    Codec to use. Should be either 'auto', 'h264' or 'h265'. Defaults to 'auto' which defaults to 'h265' unless recording at a higher resolution than 60. Forcefully set to 'h264' if -c is 'flv'.\n");
-    fprintf(stderr, "  -o    The output file path. If omitted then the encoded data is sent to stdout. Required in replay mode (when using -r). In replay mode this has to be an existing directory instead of a file.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -a    Audio device to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device.\n");
+    fprintf(stderr, "        A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\".\n");
+    fprintf(stderr, "        Multiple audio devices can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"alsa_output1|alsa_output2\".\n");
+    fprintf(stderr, "        Optional, no audio track is added by default.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -q    Video quality. Should be either 'medium', 'high', 'very_high' or 'ultra'. 'high' is the recommended option when live streaming or when you have a slower harddrive.\n");
+    fprintf(stderr, "        Optional, set to 'very_high' by default.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -r    Replay buffer size in seconds. If this is set, then only the last seconds as set by this option will be stored\n");
+    fprintf(stderr, "        and the video will only be saved when the gpu-screen-recorder is closed. This feature is similar to Nvidia's instant replay feature.\n");
+    fprintf(stderr, "        This option has to be between 5 and 1200. Note that the replay buffer size will not always be precise, because of keyframes. Optional, disabled by default.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -k    Video codec to use. Should be either 'auto', 'h264', 'hevc', 'av1', 'hevc_hdr' or 'av1_hdr'. Defaults to 'auto' which defaults to 'hevc' on AMD/Nvidia and 'h264' on intel.\n");
+    fprintf(stderr, "        Forcefully set to 'h264' if the file container type is 'flv'.\n");
+    fprintf(stderr, "        Forcefully set to 'hevc' on AMD/intel if video codec is 'h264' and if the file container type is 'mkv'.\n");
+    fprintf(stderr, "        'hevc_hdr' and 'av1_hdr' option is not available on X11.\n");
+    fprintf(stderr, "        Note: hdr metadata is not included in the video when recording with 'hevc_hdr'/'av1_hdr' because of bugs in AMD, Intel and NVIDIA drivers (amazin', they are all bugged).\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -ac   Audio codec to use. Should be either 'aac', 'opus' or 'flac'. Defaults to 'opus' for .mp4/.mkv files, otherwise defaults to 'aac'.\n");
+    fprintf(stderr, "        'opus' and 'flac' are only supported by .mp4/.mkv files. 'opus' is recommended for best performance and smallest audio size.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -ab   Audio bitrate to use. Optional, by default the bitrate is 128000 for opus and flac and 160000 for aac.\n");
+    fprintf(stderr, "        If this is set to 0 then it's the same as if it's absent, in which case the bitrate is determined automatically depending on the audio codec.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -oc   Overclock memory transfer rate to the maximum performance level. This only applies to NVIDIA on X11 and exists to overcome a bug in NVIDIA driver where performance level\n");
+    fprintf(stderr, "        is dropped when you record a game. Only needed if you are recording a game that is bottlenecked by GPU. The same issue exists on Wayland but overclocking is not possible on Wayland.\n");
+    fprintf(stderr, "        Works only if you have \"Coolbits\" set to \"12\" in NVIDIA X settings, see README for more information. Note! use at your own risk! Optional, disabled by default.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -fm   Framerate mode. Should be either 'cfr' or 'vfr'. Defaults to 'vfr'.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -cr   Color range. Should be either 'limited' (aka mpeg) or 'full' (aka jpeg). Defaults to 'limited'.\n");
+    fprintf(stderr, "        Limited color range means that colors are in range 16-235 while full color range means that colors are in range 0-255 (when not recording with hdr).\n");
+    fprintf(stderr, "        Note that some buggy video players (such as vlc) are unable to correctly display videos in full color range.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -v    Prints per second, fps updates. Optional, set to 'yes' by default.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -h, --help\n");
+    fprintf(stderr, "        Show this help.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -mf   Organise replays in folders based on the current date.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -sc   Run a script on the saved video file (non-blocking). The first argument to the script is the filepath to the saved video file and the second argument is the recording type (either \"regular\" or \"replay\").\n");
+    fprintf(stderr, "        Not applicable for live streams.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  -cursor\n");
+    fprintf(stderr, "        Record cursor. Defaults to 'yes'.\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "  --list-supported-video-codecs\n");
+    fprintf(stderr, "        List supported video codecs and exits. Prints h264, hevc, hevc_hdr, av1 and av1_hdr (if supported).\n");
+    fprintf(stderr, "\n");
+    //fprintf(stderr, "  -pixfmt  The pixel format to use for the output video. yuv420 is the most common format and is best supported, but the color is compressed, so colors can look washed out and certain colors of text can look bad. Use yuv444 for no color compression, but the video may not work everywhere and it may not work with hardware video decoding. Optional, defaults to yuv420\n");
+    fprintf(stderr, "  -o    The output file path. If omitted then the encoded data is sent to stdout. Required in replay mode (when using -r).\n");
+    fprintf(stderr, "        In replay mode this has to be a directory instead of a file.\n");
+    fprintf(stderr, "        The directory to the file is created (recursively) if it doesn't already exist.\n");
+    fprintf(stderr, "\n");
    fprintf(stderr, "NOTES:\n");
-    fprintf(stderr, "  Send signal SIGINT (Ctrl+C) to gpu-screen-recorder to stop and save the recording (when not using replay mode).\n");
-    fprintf(stderr, "  Send signal SIGUSR1 (killall -SIGUSR1 gpu-screen-recorder) to gpu-screen-recorder to save a replay.\n");
-    exit(1);
+    fprintf(stderr, "  Send signal SIGINT to gpu-screen-recorder (Ctrl+C, or killall -SIGINT gpu-screen-recorder) to stop and save the recording. When in replay mode this stops recording without saving.\n");
+    fprintf(stderr, "  Send signal SIGUSR1 to gpu-screen-recorder (killall -SIGUSR1 gpu-screen-recorder) to save a replay (when in replay mode).\n");
+    fprintf(stderr, "  Send signal SIGUSR2 to gpu-screen-recorder (killall -SIGUSR2 gpu-screen-recorder) to pause/unpause recording. Only applicable and useful when recording (not streaming nor replay).\n");
+    fprintf(stderr, "\n");
+    fprintf(stderr, "EXAMPLES:\n");
+    fprintf(stderr, "  %s -w screen -f 60 -a \"$(pactl get-default-sink).monitor\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+    fprintf(stderr, "  %s -w screen -f 60 -a \"$(pactl get-default-sink).monitor|$(pactl get-default-source)\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+    fprintf(stderr, "  %s -w screen -f 60 -a \"$(pactl get-default-sink).monitor\" -c mkv -r 60 -o \"$HOME/Videos\"\n", program_name);
+    //fprintf(stderr, "  gpu-screen-recorder -w screen -f 60 -q ultra -pixfmt yuv444 -o video.mp4\n");
+    _exit(1);
+}
+
+// Short usage: prints only the synopsis, then exits with status 1.
+static void usage() {
+    usage_header();
+    _exit(1);
+}
// Flags flipped from signal handlers; polled by the main record loop.
static sig_atomic_t running = 1;
static sig_atomic_t save_replay = 0;
+static sig_atomic_t toggle_pause = 0;
+// SIGINT/SIGTERM handler: request a clean stop of the record loop.
-static void int_handler(int) {
+static void stop_handler(int) {
    running = 0;
}
@@ -963,37 +923,35 @@ static void save_replay_handler(int) {
save_replay = 1;
}
-struct Arg {
- std::vector<const char*> values;
- bool optional = false;
- bool list = false;
-
- const char* value() const {
- if(values.empty())
- return nullptr;
- return values.front();
- }
-};
+// SIGUSR2 handler: request a pause/unpause of the recording.
+static void toggle_pause_handler(int) {
+    toggle_pause = 1;
+}
/* True when c is a hexadecimal digit: 0-9, a-f or A-F. */
static bool is_hex_num(char c) {
    if(c >= '0' && c <= '9')
        return true;
    if(c >= 'a' && c <= 'f')
        return true;
    return c >= 'A' && c <= 'F';
}
// Heuristic used when parsing the window-id argument: decides whether `str`
// should NOT be treated as a hexadecimal number. Returns true if any character
// (after an optional "0x" prefix) is not a hex digit.
// NOTE(review): a string of digits plus hex letters WITHOUT a "0x" prefix also
// returns true (treated as ambiguous/not-a-number) — confirm against callers.
static bool contains_non_hex_number(const char *str) {
+    bool hex_start = false;
    size_t len = strlen(str);
    if(len >= 2 && memcmp(str, "0x", 2) == 0) {
        str += 2;
        len -= 2;
+        hex_start = true;
    }
+    bool is_hex = false;
    for(size_t i = 0; i < len; ++i) {
        char c = str[i];
        if(c == '\0')
            return false;
        if(!is_hex_num(c))
            return true;
+        // Track whether any alphabetic hex digit (a-f/A-F) was seen.
+        if((c >= 'A' && c <= 'F') || (c >= 'a' && c <= 'f'))
+            is_hex = true;
    }
-    return false;
+
+    return is_hex && !hex_start;
}
static std::string get_date_str() {
@@ -1004,11 +962,27 @@ static std::string get_date_str() {
return str;
}
+// Current local date formatted as "YYYY-MM-DD" (used for -mf folder names).
+static std::string get_date_only_str() {
+    char str[128];
+    time_t now = time(NULL);
+    struct tm *t = localtime(&now);
+    // NOTE(review): sizeof(str)-1 is passed where strftime expects the full
+    // buffer size; harmless here since the output is far smaller than 128.
+    strftime(str, sizeof(str)-1, "%Y-%m-%d", t);
+    return str;
+}
+
+// Current local time formatted as "HH-MM-SS" ('-' separators keep it filename-safe).
+static std::string get_time_only_str() {
+    char str[128];
+    time_t now = time(NULL);
+    struct tm *t = localtime(&now);
+    strftime(str, sizeof(str)-1, "%H-%M-%S", t);
+    return str;
+}
+
// Adds a new stream to the muxer for the given codec, copying its time base.
// Exits the process if the stream cannot be allocated.
static AVStream* create_stream(AVFormatContext *av_format_context, AVCodecContext *codec_context) {
    AVStream *stream = avformat_new_stream(av_format_context, nullptr);
    if (!stream) {
        fprintf(stderr, "Error: Could not allocate stream\n");
-        exit(1);
+        _exit(1);
    }
    stream->id = av_format_context->nb_streams - 1;
    stream->time_base = codec_context->time_base;
@@ -1016,23 +990,109 @@ static AVStream* create_stream(AVFormatContext *av_format_context, AVCodecContex
    return stream;
}
-struct AudioTrack {
- AVCodecContext *codec_context = nullptr;
- AVFrame *frame = nullptr;
- AVStream *stream = nullptr;
+// Runs the user's -sc script with (video_file, type) arguments without
+// blocking the recorder. Uses the classic double-fork + setsid pattern so the
+// script is fully detached: the intermediate child exits immediately and is
+// reaped here via waitpid, so no zombie process is left behind.
+static void run_recording_saved_script_async(const char *script_file, const char *video_file, const char *type) {
+    char script_file_full[PATH_MAX];
+    script_file_full[0] = '\0';
+    if(!realpath(script_file, script_file_full)) {
+        fprintf(stderr, "Error: script file not found: %s\n", script_file);
+        return;
+    }
+
+    const char *args[6];
+    const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
+
+    // Inside flatpak the script has to run on the host, via flatpak-spawn.
+    if(inside_flatpak) {
+        args[0] = "flatpak-spawn";
+        args[1] = "--host";
+        args[2] = script_file_full;
+        args[3] = video_file;
+        args[4] = type;
+        args[5] = NULL;
+    } else {
+        args[0] = script_file_full;
+        args[1] = video_file;
+        args[2] = type;
+        args[3] = NULL;
+    }
+
+    pid_t pid = fork();
+    if(pid == -1) {
+        perror(script_file_full);
+        return;
+    } else if(pid == 0) { // child
+        setsid();
+        signal(SIGHUP, SIG_IGN);
+
+        pid_t second_child = fork();
+        if(second_child == 0) { // child
+            execvp(args[0], (char* const*)args);
+            perror(script_file_full);
+            _exit(127);
+        } else if(second_child != -1) { // parent
+            _exit(0);
+        }
+    } else { // parent
+        waitpid(pid, NULL, 0);
+    }
+}
+// One audio capture device feeding a track's filter graph.
+struct AudioDevice {
    SoundDevice sound_device;
+    AudioInput audio_input;
+    AVFilterContext *src_filter_ctx = nullptr; // buffer-source for this device in the track's graph
+    AVFrame *frame = nullptr;
    std::thread thread; // TODO: Instead of having a thread for each track, have one thread for all threads and read the data with non-blocking read
+};
+// TODO: Cleanup
+// One encoded audio stream; may merge several AudioDevices through a filter graph.
+struct AudioTrack {
    AVCodecContext *codec_context = nullptr;
+    AVStream *stream = nullptr;
+
+    std::vector<AudioDevice> audio_devices;
+    AVFilterGraph *graph = nullptr;
+    AVFilterContext *sink = nullptr;
    int stream_index = 0;
-    AudioInput audio_input;
+    int64_t pts = 0; // running presentation timestamp for this track
};
// Replay-save state: background worker plus the packets/path it writes out.
static std::future<void> save_replay_thread;
-static std::vector<AVPacket> save_replay_packets;
+static std::vector<std::shared_ptr<PacketData>> save_replay_packets;
static std::string save_replay_output_filepath;
-static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, const std::deque<AVPacket> &frame_data_queue, bool frames_erased, std::string output_dir, const char *container_format, const std::string &file_extension, std::mutex &write_output_mutex) {
+static int create_directory_recursive(char *path) {
+ int path_len = strlen(path);
+ char *p = path;
+ char *end = path + path_len;
+ for(;;) {
+ char *slash_p = strchr(p, '/');
+
+ // Skips first '/', we don't want to try and create the root directory
+ if(slash_p == path) {
+ ++p;
+ continue;
+ }
+
+ if(!slash_p)
+ slash_p = end;
+
+ char prev_char = *slash_p;
+ *slash_p = '\0';
+ int err = mkdir(path, S_IRWXU);
+ *slash_p = prev_char;
+
+ if(err == -1 && errno != EEXIST)
+ return err;
+
+ if(slash_p == end)
+ break;
+ else
+ p = slash_p + 1;
+ }
+ return 0;
+}
+
+static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, std::deque<std::shared_ptr<PacketData>> &frame_data_queue, bool frames_erased, std::string output_dir, const char *container_format, const std::string &file_extension, std::mutex &write_output_mutex, bool make_folders) {
if(save_replay_thread.valid())
return;
@@ -1044,7 +1104,7 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
std::lock_guard<std::mutex> lock(write_output_mutex);
start_index = (size_t)-1;
for(size_t i = 0; i < frame_data_queue.size(); ++i) {
- const AVPacket &av_packet = frame_data_queue[i];
+ const AVPacket &av_packet = frame_data_queue[i]->data;
if((av_packet.flags & AV_PKT_FLAG_KEY) && av_packet.stream_index == video_stream_index) {
start_index = i;
break;
@@ -1055,11 +1115,11 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
return;
if(frames_erased) {
- video_pts_offset = frame_data_queue[start_index].pts;
+ video_pts_offset = frame_data_queue[start_index]->data.pts;
// Find the next audio packet to use as audio pts offset
for(size_t i = start_index; i < frame_data_queue.size(); ++i) {
- const AVPacket &av_packet = frame_data_queue[i];
+ const AVPacket &av_packet = frame_data_queue[i]->data;
if(av_packet.stream_index != video_stream_index) {
audio_pts_offset = av_packet.pts;
break;
@@ -1071,18 +1131,23 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
save_replay_packets.resize(frame_data_queue.size());
for(size_t i = 0; i < frame_data_queue.size(); ++i) {
- av_packet_ref(&save_replay_packets[i], &frame_data_queue[i]);
+ save_replay_packets[i] = frame_data_queue[i];
}
}
- save_replay_output_filepath = output_dir + "/Replay_" + get_date_str() + "." + file_extension;
+ if (make_folders) {
+ std::string output_folder = output_dir + '/' + get_date_only_str();
+ create_directory_recursive(&output_folder[0]);
+ save_replay_output_filepath = output_folder + "/Replay_" + get_time_only_str() + "." + file_extension;
+ } else {
+ create_directory_recursive(&output_dir[0]);
+ save_replay_output_filepath = output_dir + "/Replay_" + get_date_str() + "." + file_extension;
+ }
+
save_replay_thread = std::async(std::launch::async, [video_stream_index, container_format, start_index, video_pts_offset, audio_pts_offset, video_codec_context, &audio_tracks]() mutable {
AVFormatContext *av_format_context;
avformat_alloc_output_context2(&av_format_context, nullptr, container_format, nullptr);
- av_format_context->flags |= AVFMT_FLAG_GENPTS;
- av_format_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
-
AVStream *video_stream = create_stream(av_format_context, video_codec_context);
avcodec_parameters_from_context(video_stream->codecpar, video_codec_context);
@@ -1100,14 +1165,26 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
return;
}
- ret = avformat_write_header(av_format_context, nullptr);
+ AVDictionary *options = nullptr;
+ av_dict_set(&options, "strict", "experimental", 0);
+
+ ret = avformat_write_header(av_format_context, &options);
if (ret < 0) {
fprintf(stderr, "Error occurred when writing header to output file: %s\n", av_error_to_string(ret));
return;
}
for(size_t i = start_index; i < save_replay_packets.size(); ++i) {
- AVPacket &av_packet = save_replay_packets[i];
+ // TODO: Check if successful
+ AVPacket av_packet;
+ memset(&av_packet, 0, sizeof(av_packet));
+ //av_packet_from_data(av_packet, save_replay_packets[i]->data.data, save_replay_packets[i]->data.size);
+ av_packet.data = save_replay_packets[i]->data.data;
+ av_packet.size = save_replay_packets[i]->data.size;
+ av_packet.stream_index = save_replay_packets[i]->data.stream_index;
+ av_packet.pts = save_replay_packets[i]->data.pts;
+ av_packet.dts = save_replay_packets[i]->data.pts;
+ av_packet.flags = save_replay_packets[i]->data.flags;
AVStream *stream = video_stream;
AVCodecContext *codec_context = video_codec_context;
@@ -1127,9 +1204,11 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
av_packet.stream_index = stream->index;
av_packet_rescale_ts(&av_packet, codec_context->time_base, stream->time_base);
- int ret = av_interleaved_write_frame(av_format_context, &av_packet);
+ ret = av_write_frame(av_format_context, &av_packet);
if(ret < 0)
fprintf(stderr, "Error: Failed to write frame index %d to muxer, reason: %s (%d)\n", stream->index, av_error_to_string(ret), ret);
+
+ //av_packet_free(&av_packet);
}
if (av_write_trailer(av_format_context) != 0)
@@ -1137,6 +1216,7 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
avio_close(av_format_context->pb);
avformat_free_context(av_format_context);
+ av_dict_free(&options);
for(AudioTrack &audio_track : audio_tracks) {
audio_track.stream = nullptr;
@@ -1144,15 +1224,34 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
});
}
-static AudioInput parse_audio_input_arg(const char *str) {
- AudioInput audio_input;
- audio_input.name = str;
- const size_t index = audio_input.name.find('/');
- if(index != std::string::npos) {
- audio_input.description = audio_input.name.substr(0, index);
- audio_input.name.erase(audio_input.name.begin(), audio_input.name.begin() + index + 1);
+static void split_string(const std::string &str, char delimiter, std::function<bool(const char*,size_t)> callback) {
+ size_t index = 0;
+ while(index < str.size()) {
+ size_t end_index = str.find(delimiter, index);
+ if(end_index == std::string::npos)
+ end_index = str.size();
+
+ if(!callback(&str[index], end_index - index))
+ break;
+
+ index = end_index + 1;
}
- return audio_input;
+}
+
+static std::vector<AudioInput> parse_audio_input_arg(const char *str) {
+ std::vector<AudioInput> audio_inputs;
+ split_string(str, '|', [&audio_inputs](const char *sub, size_t size) {
+ AudioInput audio_input;
+ audio_input.name.assign(sub, size);
+ const size_t index = audio_input.name.find('/');
+ if(index != std::string::npos) {
+ audio_input.description = audio_input.name.substr(0, index);
+ audio_input.name.erase(audio_input.name.begin(), audio_input.name.begin() + index + 1);
+ }
+ audio_inputs.push_back(std::move(audio_input));
+ return true;
+ });
+ return audio_inputs;
}
// TODO: Does this match all livestreaming cases?
@@ -1166,13 +1265,386 @@ static bool is_livestream_path(const char *str) {
return false;
}
+// TODO: Proper cleanup
+static int init_filter_graph(AVCodecContext *audio_codec_context, AVFilterGraph **graph, AVFilterContext **sink, std::vector<AVFilterContext*> &src_filter_ctx, size_t num_sources) {
+ char ch_layout[64];
+ int err = 0;
+ ch_layout[0] = '\0';
+
+ AVFilterGraph *filter_graph = avfilter_graph_alloc();
+ if (!filter_graph) {
+ fprintf(stderr, "Unable to create filter graph.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ for(size_t i = 0; i < num_sources; ++i) {
+ const AVFilter *abuffer = avfilter_get_by_name("abuffer");
+ if (!abuffer) {
+ fprintf(stderr, "Could not find the abuffer filter.\n");
+ return AVERROR_FILTER_NOT_FOUND;
+ }
+
+ AVFilterContext *abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, NULL);
+ if (!abuffer_ctx) {
+ fprintf(stderr, "Could not allocate the abuffer instance.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ #if LIBAVCODEC_VERSION_MAJOR < 60
+ av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, AV_CH_LAYOUT_STEREO);
+ #else
+ av_channel_layout_describe(&audio_codec_context->ch_layout, ch_layout, sizeof(ch_layout));
+ #endif
+ av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
+ av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(audio_codec_context->sample_fmt), AV_OPT_SEARCH_CHILDREN);
+ av_opt_set_q (abuffer_ctx, "time_base", audio_codec_context->time_base, AV_OPT_SEARCH_CHILDREN);
+ av_opt_set_int(abuffer_ctx, "sample_rate", audio_codec_context->sample_rate, AV_OPT_SEARCH_CHILDREN);
+ av_opt_set_int(abuffer_ctx, "bit_rate", audio_codec_context->bit_rate, AV_OPT_SEARCH_CHILDREN);
+
+ err = avfilter_init_str(abuffer_ctx, NULL);
+ if (err < 0) {
+ fprintf(stderr, "Could not initialize the abuffer filter.\n");
+ return err;
+ }
+
+ src_filter_ctx.push_back(abuffer_ctx);
+ }
+
+ const AVFilter *mix_filter = avfilter_get_by_name("amix");
+ if (!mix_filter) {
+ av_log(NULL, AV_LOG_ERROR, "Could not find the mix filter.\n");
+ return AVERROR_FILTER_NOT_FOUND;
+ }
+
+ char args[512];
+ snprintf(args, sizeof(args), "inputs=%d", (int)num_sources);
+
+ AVFilterContext *mix_ctx;
+ err = avfilter_graph_create_filter(&mix_ctx, mix_filter, "amix", args, NULL, filter_graph);
+ if (err < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio amix filter\n");
+ return err;
+ }
+
+ const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
+ if (!abuffersink) {
+ fprintf(stderr, "Could not find the abuffersink filter.\n");
+ return AVERROR_FILTER_NOT_FOUND;
+ }
+
+ AVFilterContext *abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
+ if (!abuffersink_ctx) {
+ fprintf(stderr, "Could not allocate the abuffersink instance.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ err = avfilter_init_str(abuffersink_ctx, NULL);
+ if (err < 0) {
+ fprintf(stderr, "Could not initialize the abuffersink instance.\n");
+ return err;
+ }
+
+ err = 0;
+ for(size_t i = 0; i < src_filter_ctx.size(); ++i) {
+ AVFilterContext *src_ctx = src_filter_ctx[i];
+ if (err >= 0)
+ err = avfilter_link(src_ctx, 0, mix_ctx, i);
+ }
+ if (err >= 0)
+ err = avfilter_link(mix_ctx, 0, abuffersink_ctx, 0);
+ if (err < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error connecting filters\n");
+ return err;
+ }
+
+ err = avfilter_graph_config(filter_graph, NULL);
+ if (err < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
+ return err;
+ }
+
+ *graph = filter_graph;
+ *sink = abuffersink_ctx;
+
+ return 0;
+}
+
+static void xwayland_check_callback(const gsr_monitor *monitor, void *userdata) {
+ bool *xwayland_found = (bool*)userdata;
+ if(monitor->name_len >= 8 && strncmp(monitor->name, "XWAYLAND", 8) == 0)
+ *xwayland_found = true;
+ else if(memmem(monitor->name, monitor->name_len, "X11", 3))
+ *xwayland_found = true;
+}
+
+static bool is_xwayland(Display *display) {
+ int opcode, event, error;
+ if(XQueryExtension(display, "XWAYLAND", &opcode, &event, &error))
+ return true;
+
+ bool xwayland_found = false;
+ for_each_active_monitor_output_x11(display, xwayland_check_callback, &xwayland_found);
+ return xwayland_found;
+}
+
+static void list_supported_video_codecs() {
+ bool wayland = false;
+ Display *dpy = XOpenDisplay(nullptr);
+ if (!dpy) {
+ wayland = true;
+ fprintf(stderr, "Warning: failed to connect to the X server. Assuming wayland is running without Xwayland\n");
+ }
+
+ XSetErrorHandler(x11_error_handler);
+ XSetIOErrorHandler(x11_io_error_handler);
+
+ if(!wayland)
+ wayland = is_xwayland(dpy);
+
+ gsr_egl egl;
+ if(!gsr_egl_load(&egl, dpy, wayland, false)) {
+ fprintf(stderr, "gsr error: failed to load opengl\n");
+ _exit(1);
+ }
+
+ char card_path[128];
+ card_path[0] = '\0';
+ if(wayland || egl.gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA) {
+ // TODO: Allow specifying another card, and in other places
+ if(!gsr_get_valid_card_path(&egl, card_path)) {
+ fprintf(stderr, "Error: no /dev/dri/cardX device found. If you are running GPU Screen Recorder with prime-run then try running without it\n");
+ _exit(2);
+ }
+ }
+
+ av_log_set_level(AV_LOG_FATAL);
+
+ // TODO: Output hdr
+ if(find_h264_encoder(egl.gpu_info.vendor, card_path))
+ puts("h264");
+ if(find_h265_encoder(egl.gpu_info.vendor, card_path))
+ puts("hevc");
+ if(find_av1_encoder(egl.gpu_info.vendor, card_path))
+ puts("av1");
+
+ fflush(stdout);
+
+ gsr_egl_unload(&egl);
+ if(dpy)
+ XCloseDisplay(dpy);
+}
+
+static gsr_capture* create_capture_impl(const char *window_str, const char *screen_region, bool wayland, gsr_gpu_info gpu_inf, gsr_egl &egl, int fps, bool overclock, VideoCodec video_codec, gsr_color_range color_range, bool record_cursor) {
+ vec2i region_size = { 0, 0 };
+ Window src_window_id = None;
+ bool follow_focused = false;
+
+ gsr_capture *capture = nullptr;
+ if(strcmp(window_str, "focused") == 0) {
+ if(wayland) {
+ fprintf(stderr, "Error: GPU Screen Recorder window capture only works in a pure X11 session. Xwayland is not supported. You can record a monitor instead on wayland\n");
+ _exit(2);
+ }
+
+ if(!screen_region) {
+ fprintf(stderr, "Error: option -s is required when using -w focused\n");
+ usage();
+ }
+
+ if(sscanf(screen_region, "%dx%d", &region_size.x, &region_size.y) != 2) {
+ fprintf(stderr, "Error: invalid value for option -s '%s', expected a value in format WxH\n", screen_region);
+ usage();
+ }
+
+ if(region_size.x <= 0 || region_size.y <= 0) {
+            fprintf(stderr, "Error: invalid value for option -s '%s', expected width and height to be greater than 0\n", screen_region);
+ usage();
+ }
+
+ follow_focused = true;
+ } else if(contains_non_hex_number(window_str)) {
+ if(wayland || egl.gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA) {
+ if(strcmp(window_str, "screen") == 0) {
+ FirstOutputCallback first_output;
+ first_output.output_name = NULL;
+ for_each_active_monitor_output(&egl, GSR_CONNECTION_DRM, get_first_output, &first_output);
+
+ if(first_output.output_name) {
+ window_str = first_output.output_name;
+ } else {
+ fprintf(stderr, "Error: no available output found\n");
+ }
+ }
+
+ gsr_monitor gmon;
+ if(!get_monitor_by_name(&egl, GSR_CONNECTION_DRM, window_str, &gmon)) {
+ fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str);
+ fprintf(stderr, " \"screen\"\n");
+ for_each_active_monitor_output(&egl, GSR_CONNECTION_DRM, monitor_output_callback_print, NULL);
+ _exit(1);
+ }
+ } else {
+ if(strcmp(window_str, "screen") != 0 && strcmp(window_str, "screen-direct") != 0 && strcmp(window_str, "screen-direct-force") != 0) {
+ gsr_monitor gmon;
+ if(!get_monitor_by_name(&egl, GSR_CONNECTION_X11, window_str, &gmon)) {
+ const int screens_width = XWidthOfScreen(DefaultScreenOfDisplay(egl.x11.dpy));
+                    const int screens_height = XHeightOfScreen(DefaultScreenOfDisplay(egl.x11.dpy));
+ fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str);
+ fprintf(stderr, " \"screen\" (%dx%d+%d+%d)\n", screens_width, screens_height, 0, 0);
+ fprintf(stderr, " \"screen-direct\" (%dx%d+%d+%d)\n", screens_width, screens_height, 0, 0);
+ fprintf(stderr, " \"screen-direct-force\" (%dx%d+%d+%d)\n", screens_width, screens_height, 0, 0);
+ for_each_active_monitor_output(&egl, GSR_CONNECTION_X11, monitor_output_callback_print, NULL);
+ _exit(1);
+ }
+ }
+ }
+
+ if(egl.gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA) {
+ if(wayland) {
+ gsr_capture_kms_cuda_params kms_params;
+ kms_params.egl = &egl;
+ kms_params.display_to_capture = window_str;
+ kms_params.gpu_inf = gpu_inf;
+ kms_params.hdr = video_codec_is_hdr(video_codec);
+ kms_params.color_range = color_range;
+ kms_params.record_cursor = record_cursor;
+ capture = gsr_capture_kms_cuda_create(&kms_params);
+ if(!capture)
+ _exit(1);
+ } else {
+ const char *capture_target = window_str;
+ bool direct_capture = strcmp(window_str, "screen-direct") == 0;
+ if(direct_capture) {
+ capture_target = "screen";
+ // TODO: Temporary disable direct capture because push model causes stuttering when it's direct capturing. This might be a nvfbc bug. This does not happen when using a compositor.
+ direct_capture = false;
+                    fprintf(stderr, "Warning: screen-direct has temporarily been disabled as it causes stuttering. This is likely a NvFBC bug. Falling back to \"screen\".\n");
+ }
+
+ if(strcmp(window_str, "screen-direct-force") == 0) {
+ direct_capture = true;
+ capture_target = "screen";
+ }
+
+ gsr_capture_nvfbc_params nvfbc_params;
+ nvfbc_params.egl = &egl;
+ nvfbc_params.display_to_capture = capture_target;
+ nvfbc_params.fps = fps;
+ nvfbc_params.pos = { 0, 0 };
+ nvfbc_params.size = { 0, 0 };
+ nvfbc_params.direct_capture = direct_capture;
+ nvfbc_params.overclock = overclock;
+ nvfbc_params.hdr = video_codec_is_hdr(video_codec);
+ nvfbc_params.color_range = color_range;
+ nvfbc_params.record_cursor = record_cursor;
+ capture = gsr_capture_nvfbc_create(&nvfbc_params);
+ if(!capture)
+ _exit(1);
+ }
+ } else {
+ gsr_capture_kms_vaapi_params kms_params;
+ kms_params.egl = &egl;
+ kms_params.display_to_capture = window_str;
+ kms_params.gpu_inf = gpu_inf;
+ kms_params.hdr = video_codec_is_hdr(video_codec);
+ kms_params.color_range = color_range;
+ kms_params.record_cursor = record_cursor;
+ capture = gsr_capture_kms_vaapi_create(&kms_params);
+ if(!capture)
+ _exit(1);
+ }
+ } else {
+ if(wayland) {
+ fprintf(stderr, "Error: GPU Screen Recorder window capture only works in a pure X11 session. Xwayland is not supported. You can record a monitor instead on wayland\n");
+ _exit(2);
+ }
+
+ errno = 0;
+ src_window_id = strtol(window_str, nullptr, 0);
+ if(src_window_id == None || errno == EINVAL) {
+ fprintf(stderr, "Invalid window number %s\n", window_str);
+ usage();
+ }
+ }
+
+ if(!capture) {
+ switch(egl.gpu_info.vendor) {
+ case GSR_GPU_VENDOR_AMD:
+ case GSR_GPU_VENDOR_INTEL: {
+ gsr_capture_xcomposite_vaapi_params xcomposite_params;
+ xcomposite_params.base.egl = &egl;
+ xcomposite_params.base.window = src_window_id;
+ xcomposite_params.base.follow_focused = follow_focused;
+ xcomposite_params.base.region_size = region_size;
+ xcomposite_params.base.color_range = color_range;
+ xcomposite_params.base.record_cursor = record_cursor;
+ capture = gsr_capture_xcomposite_vaapi_create(&xcomposite_params);
+ if(!capture)
+ _exit(1);
+ break;
+ }
+ case GSR_GPU_VENDOR_NVIDIA: {
+ gsr_capture_xcomposite_cuda_params xcomposite_params;
+ xcomposite_params.base.egl = &egl;
+ xcomposite_params.base.window = src_window_id;
+ xcomposite_params.base.follow_focused = follow_focused;
+ xcomposite_params.base.region_size = region_size;
+ xcomposite_params.base.color_range = color_range;
+ xcomposite_params.base.record_cursor = record_cursor;
+ xcomposite_params.overclock = overclock;
+ capture = gsr_capture_xcomposite_cuda_create(&xcomposite_params);
+ if(!capture)
+ _exit(1);
+ break;
+ }
+ }
+ }
+
+ return capture;
+}
+
+struct Arg {
+ std::vector<const char*> values;
+ bool optional = false;
+ bool list = false;
+
+ const char* value() const {
+ if(values.empty())
+ return nullptr;
+ return values.front();
+ }
+};
+
int main(int argc, char **argv) {
- signal(SIGINT, int_handler);
+ signal(SIGINT, stop_handler);
signal(SIGUSR1, save_replay_handler);
+ signal(SIGUSR2, toggle_pause_handler);
+
+ // Stop nvidia driver from buffering frames
+ setenv("__GL_MaxFramesAllowed", "1", true);
+ // If this is set to 1 then cuGraphicsGLRegisterImage will fail for egl context with error: invalid OpenGL or DirectX context,
+ // so we overwrite it
+ setenv("__GL_THREADED_OPTIMIZATIONS", "0", true);
+ // Some people set this to nvidia (for nvdec) or vdpau (for nvidia vdpau), which breaks gpu screen recorder since
+ // nvidia doesn't support vaapi and nvidia-vaapi-driver doesn't support encoding yet.
+    // Let vaapi find the matching vaapi driver instead of forcing a specific one.
+ unsetenv("LIBVA_DRIVER_NAME");
+
+ if(argc <= 1)
+ usage_full();
+
+ if(argc == 2 && (strcmp(argv[1], "-h") == 0 || strcmp(argv[1], "--help") == 0))
+ usage_full();
+
+ if(argc == 2 && strcmp(argv[1], "--list-supported-video-codecs") == 0) {
+ list_supported_video_codecs();
+ _exit(0);
+ }
+
+ //av_log_set_level(AV_LOG_TRACE);
std::map<std::string, Arg> args = {
{ "-w", Arg { {}, false, false } },
- //{ "-s", Arg { nullptr, true } },
{ "-c", Arg { {}, true, false } },
{ "-f", Arg { {}, false, false } },
{ "-s", Arg { {}, true, false } },
@@ -1180,10 +1652,20 @@ int main(int argc, char **argv) {
{ "-q", Arg { {}, true, false } },
{ "-o", Arg { {}, true, false } },
{ "-r", Arg { {}, true, false } },
- { "-k", Arg { {}, true, false } }
+ { "-k", Arg { {}, true, false } },
+ { "-ac", Arg { {}, true, false } },
+ { "-ab", Arg { {}, true, false } },
+ { "-oc", Arg { {}, true, false } },
+ { "-fm", Arg { {}, true, false } },
+ { "-pixfmt", Arg { {}, true, false } },
+ { "-v", Arg { {}, true, false } },
+ { "-mf", Arg { {}, true, false } },
+ { "-sc", Arg { {}, true, false } },
+ { "-cr", Arg { {}, true, false } },
+ { "-cursor", Arg { {}, true, false } },
};
- for(int i = 1; i < argc - 1; i += 2) {
+ for(int i = 1; i < argc; i += 2) {
auto it = args.find(argv[i]);
if(it == args.end()) {
fprintf(stderr, "Invalid argument '%s'\n", argv[i]);
@@ -1195,6 +1677,11 @@ int main(int argc, char **argv) {
usage();
}
+ if(i + 1 >= argc) {
+ fprintf(stderr, "Missing value for argument '%s'\n", argv[i]);
+ usage();
+ }
+
it->second.values.push_back(argv[i + 1]);
}
@@ -1205,72 +1692,189 @@ int main(int argc, char **argv) {
}
}
- VideoCodec video_codec;
- const char *codec_to_use = args["-k"].value();
- if(!codec_to_use)
- codec_to_use = "auto";
+ VideoCodec video_codec = VideoCodec::HEVC;
+ const char *video_codec_to_use = args["-k"].value();
+ if(!video_codec_to_use)
+ video_codec_to_use = "auto";
- if(strcmp(codec_to_use, "h264") == 0) {
+ if(strcmp(video_codec_to_use, "h264") == 0) {
video_codec = VideoCodec::H264;
- } else if(strcmp(codec_to_use, "h265") == 0) {
- video_codec = VideoCodec::H265;
- } else if(strcmp(codec_to_use, "auto") != 0) {
- fprintf(stderr, "Error: -k should either be either 'auto', 'h264' or 'h265', got: '%s'\n", codec_to_use);
+ } else if(strcmp(video_codec_to_use, "h265") == 0 || strcmp(video_codec_to_use, "hevc") == 0) {
+ video_codec = VideoCodec::HEVC;
+ } else if(strcmp(video_codec_to_use, "hevc_hdr") == 0) {
+ video_codec = VideoCodec::HEVC_HDR;
+ } else if(strcmp(video_codec_to_use, "av1") == 0) {
+ video_codec = VideoCodec::AV1;
+ } else if(strcmp(video_codec_to_use, "av1_hdr") == 0) {
+ video_codec = VideoCodec::AV1_HDR;
+ } else if(strcmp(video_codec_to_use, "auto") != 0) {
+        fprintf(stderr, "Error: -k should be either 'auto', 'h264', 'hevc', 'hevc_hdr', 'av1' or 'av1_hdr', got: '%s'\n", video_codec_to_use);
+ usage();
+ }
+
+ AudioCodec audio_codec = AudioCodec::AAC;
+ const char *audio_codec_to_use = args["-ac"].value();
+ if(!audio_codec_to_use)
+ audio_codec_to_use = "aac";
+
+ if(strcmp(audio_codec_to_use, "aac") == 0) {
+ audio_codec = AudioCodec::AAC;
+ } else if(strcmp(audio_codec_to_use, "opus") == 0) {
+ audio_codec = AudioCodec::OPUS;
+ } else if(strcmp(audio_codec_to_use, "flac") == 0) {
+ audio_codec = AudioCodec::FLAC;
+ } else {
+        fprintf(stderr, "Error: -ac should be either 'aac', 'opus' or 'flac', got: '%s'\n", audio_codec_to_use);
+ usage();
+ }
+
+ if(audio_codec == AudioCodec::OPUS || audio_codec == AudioCodec::FLAC) {
+        fprintf(stderr, "Warning: opus and flac audio codecs are temporarily disabled, using aac audio codec instead\n");
+ audio_codec_to_use = "aac";
+ audio_codec = AudioCodec::AAC;
+ }
+
+ int audio_bitrate = 0;
+ const char *audio_bitrate_str = args["-ab"].value();
+ if(audio_bitrate_str) {
+ if(sscanf(audio_bitrate_str, "%d", &audio_bitrate) != 1) {
+ fprintf(stderr, "Error: -ab argument \"%s\" is not an integer\n", audio_bitrate_str);
+ usage();
+ }
+ }
+
+ bool overclock = false;
+ const char *overclock_str = args["-oc"].value();
+ if(!overclock_str)
+ overclock_str = "no";
+
+ if(strcmp(overclock_str, "yes") == 0) {
+ overclock = true;
+ } else if(strcmp(overclock_str, "no") == 0) {
+ overclock = false;
+ } else {
+        fprintf(stderr, "Error: -oc should be either 'yes' or 'no', got: '%s'\n", overclock_str);
+ usage();
+ }
+
+ bool verbose = true;
+ const char *verbose_str = args["-v"].value();
+ if(!verbose_str)
+ verbose_str = "yes";
+
+ if(strcmp(verbose_str, "yes") == 0) {
+ verbose = true;
+ } else if(strcmp(verbose_str, "no") == 0) {
+ verbose = false;
+ } else {
+        fprintf(stderr, "Error: -v should be either 'yes' or 'no', got: '%s'\n", verbose_str);
+ usage();
+ }
+
+ bool record_cursor = true;
+ const char *record_cursor_str = args["-cursor"].value();
+ if(!record_cursor_str)
+ record_cursor_str = "yes";
+
+ if(strcmp(record_cursor_str, "yes") == 0) {
+ record_cursor = true;
+ } else if(strcmp(record_cursor_str, "no") == 0) {
+ record_cursor = false;
+ } else {
+        fprintf(stderr, "Error: -cursor should be either 'yes' or 'no', got: '%s'\n", record_cursor_str);
+ usage();
+ }
+
+ bool make_folders = false;
+ const char *make_folders_str = args["-mf"].value();
+ if(!make_folders_str)
+ make_folders_str = "no";
+
+ if(strcmp(make_folders_str, "yes") == 0) {
+ make_folders = true;
+ } else if(strcmp(make_folders_str, "no") == 0) {
+ make_folders = false;
+ } else {
+        fprintf(stderr, "Error: -mf should be either 'yes' or 'no', got: '%s'\n", make_folders_str);
+ usage();
+ }
+
+ const char *recording_saved_script = args["-sc"].value();
+ if(recording_saved_script) {
+ struct stat buf;
+ if(stat(recording_saved_script, &buf) == -1 || !S_ISREG(buf.st_mode)) {
+ fprintf(stderr, "Error: Script \"%s\" either doesn't exist or it's not a file\n", recording_saved_script);
+ usage();
+ }
+
+ if(!(buf.st_mode & S_IXUSR)) {
+ fprintf(stderr, "Error: Script \"%s\" is not executable\n", recording_saved_script);
+ usage();
+ }
+ }
+
+ PixelFormat pixel_format = PixelFormat::YUV420;
+ const char *pixfmt = args["-pixfmt"].value();
+ if(!pixfmt)
+ pixfmt = "yuv420";
+
+ if(strcmp(pixfmt, "yuv420") == 0) {
+ pixel_format = PixelFormat::YUV420;
+ } else if(strcmp(pixfmt, "yuv444") == 0) {
+ pixel_format = PixelFormat::YUV444;
+ } else {
+        fprintf(stderr, "Error: -pixfmt should be either 'yuv420' or 'yuv444', got: '%s'\n", pixfmt);
usage();
}
const Arg &audio_input_arg = args["-a"];
- const std::vector<AudioInput> audio_inputs = get_pulseaudio_inputs();
- std::vector<AudioInput> requested_audio_inputs;
+ std::vector<AudioInput> audio_inputs;
+ if(!audio_input_arg.values.empty())
+ audio_inputs = get_pulseaudio_inputs();
+ std::vector<MergedAudioInputs> requested_audio_inputs;
+ bool uses_amix = false;
// Manually check if the audio inputs we give exist. This is only needed for pipewire, not pulseaudio.
// Pipewire instead DEFAULTS TO THE DEFAULT AUDIO INPUT. THAT'S RETARDED.
// OH, YOU MISSPELLED THE AUDIO INPUT? FUCK YOU
for(const char *audio_input : audio_input_arg.values) {
- requested_audio_inputs.push_back(parse_audio_input_arg(audio_input));
- AudioInput &request_audio_input = requested_audio_inputs.back();
-
- bool match = false;
- for(const auto &existing_audio_input : audio_inputs) {
- if(strcmp(request_audio_input.name.c_str(), existing_audio_input.name.c_str()) == 0) {
- if(request_audio_input.description.empty())
- request_audio_input.description = "gsr-" + existing_audio_input.description;
+ if(!audio_input || audio_input[0] == '\0')
+ continue;
- match = true;
- break;
- }
- }
+ requested_audio_inputs.push_back({parse_audio_input_arg(audio_input)});
+ if(requested_audio_inputs.back().audio_inputs.size() > 1)
+ uses_amix = true;
- if(!match) {
- fprintf(stderr, "Error: Audio input device '%s' is not a valid audio device. Expected one of:\n", request_audio_input.name.c_str());
+ for(AudioInput &request_audio_input : requested_audio_inputs.back().audio_inputs) {
+ bool match = false;
for(const auto &existing_audio_input : audio_inputs) {
- fprintf(stderr, " %s\n", existing_audio_input.name.c_str());
- }
- exit(2);
- }
- }
+ if(strcmp(request_audio_input.name.c_str(), existing_audio_input.name.c_str()) == 0) {
+ if(request_audio_input.description.empty())
+ request_audio_input.description = "gsr-" + existing_audio_input.description;
- uint32_t region_x = 0;
- uint32_t region_y = 0;
- uint32_t region_width = 0;
- uint32_t region_height = 0;
+ match = true;
+ break;
+ }
+ }
- /*
- TODO: Fix this. Doesn't work for some reason
- const char *screen_region = args["-s"].value();
- if(screen_region) {
- if(sscanf(screen_region, "%ux%u+%u+%u", &region_x, &region_y, &region_width, &region_height) != 4) {
- fprintf(stderr, "Invalid value for -s '%s', expected a value in format WxH+X+Y\n", screen_region);
- return 1;
+ if(!match) {
+ fprintf(stderr, "Error: Audio input device '%s' is not a valid audio device, expected one of:\n", request_audio_input.name.c_str());
+ for(const auto &existing_audio_input : audio_inputs) {
+ fprintf(stderr, " %s (%s)\n", existing_audio_input.name.c_str(), existing_audio_input.description.c_str());
+ }
+ _exit(2);
+ }
}
}
- */
const char *container_format = args["-c"].value();
+ if(container_format && strcmp(container_format, "mkv") == 0)
+ container_format = "matroska";
+
int fps = atoi(args["-f"].value());
if(fps == 0) {
fprintf(stderr, "Invalid fps argument: %s\n", args["-f"].value());
- return 1;
+ _exit(1);
}
if(fps < 1)
fps = 1;
@@ -1299,112 +1903,131 @@ int main(int argc, char **argv) {
replay_buffer_size_secs = atoi(replay_buffer_size_secs_str);
if(replay_buffer_size_secs < 5 || replay_buffer_size_secs > 1200) {
fprintf(stderr, "Error: option -r has to be between 5 and 1200, was: %s\n", replay_buffer_size_secs_str);
- return 1;
+ _exit(1);
}
- replay_buffer_size_secs += 5; // Add a few seconds to account of lost packets because of non-keyframe packets skipped
+ replay_buffer_size_secs += 3; // Add a few seconds to account of lost packets because of non-keyframe packets skipped
}
- if(!cuda.load()) {
- fprintf(stderr, "Error: failed to load cuda\n");
- return 2;
+ const char *window_str = strdup(args["-w"].value());
+
+ bool wayland = false;
+ Display *dpy = XOpenDisplay(nullptr);
+ if (!dpy) {
+ wayland = true;
+ fprintf(stderr, "Warning: failed to connect to the X server. Assuming wayland is running without Xwayland\n");
}
- CUresult res;
+ XSetErrorHandler(x11_error_handler);
+ XSetIOErrorHandler(x11_io_error_handler);
- res = cuda.cuInit(0);
- if(res != CUDA_SUCCESS) {
- const char *err_str;
- cuda.cuGetErrorString(res, &err_str);
- fprintf(stderr, "Error: cuInit failed, error %s (result: %d)\n", err_str, res);
- return 1;
- }
+ if(!wayland)
+ wayland = is_xwayland(dpy);
- int nGpu = 0;
- cuda.cuDeviceGetCount(&nGpu);
- if (nGpu <= 0) {
- fprintf(stderr, "Error: no cuda supported devices found\n");
- return 1;
+ if(video_codec_is_hdr(video_codec) && !wayland) {
+ fprintf(stderr, "Error: hdr video codec option %s is not available on X11\n", video_codec_to_use);
+ _exit(1);
}
- CUdevice cu_dev;
- res = cuda.cuDeviceGet(&cu_dev, 0);
- if(res != CUDA_SUCCESS) {
- const char *err_str;
- cuda.cuGetErrorString(res, &err_str);
- fprintf(stderr, "Error: unable to get CUDA device, error: %s (result: %d)\n", err_str, res);
- return 1;
+ const bool is_monitor_capture = strcmp(window_str, "focused") != 0 && contains_non_hex_number(window_str);
+ gsr_egl egl;
+ if(!gsr_egl_load(&egl, dpy, wayland, is_monitor_capture)) {
+ fprintf(stderr, "gsr error: failed to load opengl\n");
+ _exit(1);
}
- CUcontext cu_ctx;
- res = cuda.cuCtxCreate_v2(&cu_ctx, CU_CTX_SCHED_AUTO, cu_dev);
- if(res != CUDA_SUCCESS) {
- const char *err_str;
- cuda.cuGetErrorString(res, &err_str);
- fprintf(stderr, "Error: unable to create CUDA context, error: %s (result: %d)\n", err_str, res);
- return 1;
- }
+ bool very_old_gpu = false;
- const char *record_area = args["-s"].value();
+ if(egl.gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA && egl.gpu_info.gpu_version != 0 && egl.gpu_info.gpu_version < 900) {
+ fprintf(stderr, "Info: your gpu appears to be very old (older than maxwell architecture). Switching to lower preset\n");
+ very_old_gpu = true;
+ }
- uint32_t window_width = 0;
- uint32_t window_height = 0;
- int window_x = 0;
- int window_y = 0;
+ if(egl.gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA && overclock) {
+ fprintf(stderr, "Info: overclock option has no effect on amd/intel, ignoring option\n");
+ }
- NvFBCLibrary nv_fbc_library;
+ if(egl.gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA && overclock && wayland) {
+ fprintf(stderr, "Info: overclocking is not possible on nvidia on wayland, ignoring option\n");
+ }
- const char *window_str = args["-w"].value();
- Window src_window_id = None;
- if(contains_non_hex_number(window_str)) {
- if(record_area) {
- fprintf(stderr, "Option -s is not supported when recording a monitor/screen\n");
- usage();
+ egl.card_path[0] = '\0';
+ if(wayland || egl.gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA) {
+ // TODO: Allow specifying another card, and in other places
+ if(!gsr_get_valid_card_path(&egl, egl.card_path)) {
+ fprintf(stderr, "Error: no /dev/dri/cardX device found. If you are running GPU Screen Recorder with prime-run then try running without it\n");
+ _exit(2);
}
+ }
- if(!nv_fbc_library.load())
- return 1;
+ // TODO: Fix constant framerate not working properly on amd/intel because capture framerate gets locked to the same framerate as
+ // game framerate, which doesn't work well when you need to encode multiple duplicate frames (AMD/Intel is slow at encoding!).
+ // It also appears to skip audio frames on nvidia wayland? why? that should be fine, but it causes video stuttering because of audio/video sync.
+ FramerateMode framerate_mode;
+ const char *framerate_mode_str = args["-fm"].value();
+ if(!framerate_mode_str)
+ framerate_mode_str = "vfr";
- const char *capture_target = window_str;
- bool direct_capture = strcmp(window_str, "screen-direct") == 0;
- if(direct_capture) {
- capture_target = "screen";
- // TODO: Temporary disable direct capture because push model causes stuttering when it's direct capturing. This might be a nvfbc bug. This does not happen when using a compositor.
- direct_capture = false;
- fprintf(stderr, "Warning: screen-direct has temporary been disabled as it causes stuttering. This is likely a NvFBC bug. Falling back to \"screen\".\n");
- }
+ if(strcmp(framerate_mode_str, "cfr") == 0) {
+ framerate_mode = FramerateMode::CONSTANT;
+ } else if(strcmp(framerate_mode_str, "vfr") == 0) {
+ framerate_mode = FramerateMode::VARIABLE;
+ } else {
+ fprintf(stderr, "Error: -fm should either be either 'cfr' or 'vfr', got: '%s'\n", framerate_mode_str);
+ usage();
+ }
+
+ gsr_color_range color_range;
+ const char *color_range_str = args["-cr"].value();
+ if(!color_range_str)
+ color_range_str = "limited";
- if(!nv_fbc_library.create(capture_target, fps, &window_width, &window_height, region_x, region_y, region_width, region_height, direct_capture))
- return 1;
+ if(strcmp(color_range_str, "limited") == 0) {
+ color_range = GSR_COLOR_RANGE_LIMITED;
+ } else if(strcmp(color_range_str, "full") == 0) {
+ color_range = GSR_COLOR_RANGE_FULL;
} else {
- errno = 0;
- src_window_id = strtol(window_str, nullptr, 0);
- if(src_window_id == None || errno == EINVAL) {
- fprintf(stderr, "Invalid window number %s\n", window_str);
- usage();
- }
+ fprintf(stderr, "Error: -cr should either be either 'limited' or 'full', got: '%s'\n", color_range_str);
+ usage();
}
- int record_width = window_width;
- int record_height = window_height;
- if(record_area) {
- if(sscanf(record_area, "%dx%d", &record_width, &record_height) != 2) {
- fprintf(stderr, "Invalid value for -s '%s', expected a value in format WxH\n", record_area);
- return 1;
- }
+ const char *screen_region = args["-s"].value();
+
+ if(screen_region && strcmp(window_str, "focused") != 0) {
+ fprintf(stderr, "Error: option -s is only available when using -w focused\n");
+ usage();
}
+ bool is_livestream = false;
const char *filename = args["-o"].value();
if(filename) {
- if(replay_buffer_size_secs != -1) {
- if(!container_format) {
- fprintf(stderr, "Error: option -c is required when using option -r\n");
- usage();
+ is_livestream = is_livestream_path(filename);
+ if(is_livestream) {
+ if(replay_buffer_size_secs != -1) {
+ fprintf(stderr, "Error: replay mode is not applicable to live streaming\n");
+ _exit(1);
}
+ } else {
+ if(replay_buffer_size_secs == -1) {
+ char directory_buf[PATH_MAX];
+ strcpy(directory_buf, filename);
+ char *directory = dirname(directory_buf);
+ if(strcmp(directory, ".") != 0 && strcmp(directory, "/") != 0) {
+ if(create_directory_recursive(directory) != 0) {
+ fprintf(stderr, "Error: failed to create directory for output file: %s\n", filename);
+ _exit(1);
+ }
+ }
+ } else {
+ if(!container_format) {
+ fprintf(stderr, "Error: option -c is required when using option -r\n");
+ usage();
+ }
- struct stat buf;
- if(stat(filename, &buf) == -1 || !S_ISDIR(buf.st_mode)) {
- fprintf(stderr, "Error: directory \"%s\" does not exist or is not a directory\n", filename);
- usage();
+ struct stat buf;
+ if(stat(filename, &buf) != -1 && !S_ISDIR(buf.st_mode)) {
+ fprintf(stderr, "Error: File \"%s\" exists but it's not a directory\n", filename);
+ usage();
+ }
}
}
} else {
@@ -1421,98 +2044,17 @@ int main(int argc, char **argv) {
}
}
- const double target_fps = 1.0 / (double)fps;
-
- Display *dpy = XOpenDisplay(nullptr);
- if (!dpy) {
- fprintf(stderr, "Error: Failed to open display\n");
- return 1;
- }
-
- XSetErrorHandler(x11_error_handler);
- XSetIOErrorHandler(x11_io_error_handler);
-
- WindowPixmap window_pixmap;
- Window window = None;
- if(src_window_id) {
- bool has_name_pixmap = x11_supports_composite_named_window_pixmap(dpy);
- if (!has_name_pixmap) {
- fprintf(stderr, "Error: XCompositeNameWindowPixmap is not supported by "
- "your X11 server\n");
- return 1;
- }
-
- XWindowAttributes attr;
- if (!XGetWindowAttributes(dpy, src_window_id, &attr)) {
- fprintf(stderr, "Error: Invalid window id: %lu\n", src_window_id);
- return 1;
- }
-
- window_width = std::max(0, attr.width);
- window_height = std::max(0, attr.height);
- window_x = attr.x;
- window_y = attr.y;
- Window c;
- XTranslateCoordinates(dpy, src_window_id, DefaultRootWindow(dpy), 0, 0, &window_x, &window_y, &c);
-
- XCompositeRedirectWindow(dpy, src_window_id, CompositeRedirectAutomatic);
-
- if(!gl.load()) {
- fprintf(stderr, "Error: Failed to load opengl\n");
- return 1;
- }
-
- window = create_opengl_window(dpy);
- if(!window)
- return 1;
-
- set_vertical_sync_enabled(dpy, window, false);
- recreate_window_pixmap(dpy, src_window_id, window_pixmap);
-
- if(!record_area) {
- record_width = window_pixmap.texture_width;
- record_height = window_pixmap.texture_height;
- fprintf(stderr, "Record size: %dx%d\n", record_width, record_height);
- }
- } else {
- window_pixmap.texture_id = 0;
- window_pixmap.target_texture_id = 0;
- window_pixmap.texture_width = window_width;
- window_pixmap.texture_height = window_height;
- }
-
- bool very_old_gpu = false;
- bool gl_loaded = window;
- if(!gl_loaded) {
- if(!gl.load()) {
- fprintf(stderr, "Error: Failed to load opengl\n");
- return 1;
- }
- }
-
- const unsigned char *gl_renderer = gl.glGetString(GL_RENDERER);
- if(gl_renderer) {
- int gpu_num = 1000;
- sscanf((const char*)gl_renderer, "%*s %*s %*s %d", &gpu_num);
- if(gpu_num < 900) {
- fprintf(stderr, "Info: your gpu appears to be very old (older than maxwell architecture). Switching to lower preset\n");
- very_old_gpu = true;
- }
- }
-
- if(!gl_loaded)
- gl.unload();
-
AVFormatContext *av_format_context;
// The output format is automatically guessed by the file extension
avformat_alloc_output_context2(&av_format_context, nullptr, container_format, filename);
if (!av_format_context) {
- fprintf(stderr, "Error: Failed to deduce container format from file extension\n");
- return 1;
+ if(container_format)
+ fprintf(stderr, "Error: Container format '%s' (argument -c) is not valid\n", container_format);
+ else
+ fprintf(stderr, "Error: Failed to deduce container format from file extension\n");
+ _exit(1);
}
- av_format_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
- av_format_context->flags |= AVFMT_FLAG_GENPTS;
const AVOutputFormat *output_format = av_format_context->oformat;
std::string file_extension = output_format->extensions;
@@ -1522,82 +2064,282 @@ int main(int argc, char **argv) {
file_extension = file_extension.substr(0, comma_index);
}
- if(strcmp(codec_to_use, "auto") == 0) {
- const AVCodec *h265_codec = find_h265_encoder();
-
- // h265 generally allows recording at a higher resolution than h264 on nvidia cards. On a gtx 1080 4k is the max resolution for h264 but for h265 it's 8k.
- // Another important info is that when recording at a higher fps than.. 60? h265 has very bad performance. For example when recording at 144 fps the fps drops to 1
- // while with h264 the fps doesn't drop.
- if(!h265_codec) {
- fprintf(stderr, "Info: using h264 encoder because a codec was not specified and your gpu does not support h265\n");
- codec_to_use = "h264";
- video_codec = VideoCodec::H264;
- } else if(fps > 60) {
- fprintf(stderr, "Info: using h264 encoder because a codec was not specified and fps is more than 60\n");
- codec_to_use = "h264";
- video_codec = VideoCodec::H264;
+ if(egl.gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA && file_extension == "mkv" && strcmp(video_codec_to_use, "h264") == 0) {
+ video_codec_to_use = "hevc";
+ video_codec = VideoCodec::HEVC;
+ fprintf(stderr, "Warning: video codec was forcefully set to hevc because mkv container is used and mesa (AMD and Intel driver) does not support h264 in mkv files\n");
+ }
+
+ switch(audio_codec) {
+ case AudioCodec::AAC: {
+ break;
+ }
+ case AudioCodec::OPUS: {
+ // TODO: Also check mpegts?
+ if(file_extension != "mp4" && file_extension != "mkv") {
+ audio_codec_to_use = "aac";
+ audio_codec = AudioCodec::AAC;
+ fprintf(stderr, "Warning: opus audio codec is only supported by .mp4 and .mkv files, falling back to aac instead\n");
+ }
+ break;
+ }
+ case AudioCodec::FLAC: {
+ // TODO: Also check mpegts?
+ if(file_extension != "mp4" && file_extension != "mkv") {
+ audio_codec_to_use = "aac";
+ audio_codec = AudioCodec::AAC;
+ fprintf(stderr, "Warning: flac audio codec is only supported by .mp4 and .mkv files, falling back to aac instead\n");
+ } else if(uses_amix) {
+ audio_codec_to_use = "opus";
+ audio_codec = AudioCodec::OPUS;
+ fprintf(stderr, "Warning: flac audio codec is not supported when mixing audio sources, falling back to opus instead\n");
+ }
+ break;
+ }
+ }
+
+ const double target_fps = 1.0 / (double)fps;
+
+ const bool video_codec_auto = strcmp(video_codec_to_use, "auto") == 0;
+ if(video_codec_auto) {
+ if(egl.gpu_info.vendor == GSR_GPU_VENDOR_INTEL) {
+ const AVCodec *h264_codec = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
+ if(!h264_codec) {
+ fprintf(stderr, "Info: using hevc encoder because a codec was not specified and your gpu does not support h264\n");
+ video_codec_to_use = "hevc";
+ video_codec = VideoCodec::HEVC;
+ } else {
+ fprintf(stderr, "Info: using h264 encoder because a codec was not specified\n");
+ video_codec_to_use = "h264";
+ video_codec = VideoCodec::H264;
+ }
} else {
- fprintf(stderr, "Info: using h265 encoder because a codec was not specified\n");
- codec_to_use = "h265";
- video_codec = VideoCodec::H265;
+ const AVCodec *h265_codec = find_h265_encoder(egl.gpu_info.vendor, egl.card_path);
+
+ if(h265_codec && fps > 60) {
+ fprintf(stderr, "Warning: recording at higher fps than 60 with hevc might result in recording at a very low fps. If this happens, switch to h264 or av1\n");
+ }
+
+ // hevc generally allows recording at a higher resolution than h264 on nvidia cards. On a gtx 1080 4k is the max resolution for h264 but for hevc it's 8k.
+ // Another important info is that when recording at a higher fps than.. 60? hevc has very bad performance. For example when recording at 144 fps the fps drops to 1
+ // while with h264 the fps doesn't drop.
+ if(!h265_codec) {
+ fprintf(stderr, "Info: using h264 encoder because a codec was not specified and your gpu does not support hevc\n");
+ video_codec_to_use = "h264";
+ video_codec = VideoCodec::H264;
+ } else {
+ fprintf(stderr, "Info: using hevc encoder because a codec was not specified\n");
+ video_codec_to_use = "hevc";
+ video_codec = VideoCodec::HEVC;
+ }
}
}
- //bool use_hevc = strcmp(window_str, "screen") == 0 || strcmp(window_str, "screen-direct") == 0;
- if(video_codec != VideoCodec::H264 && strcmp(file_extension.c_str(), "flv") == 0) {
+ // TODO: Allow hevc, vp9 and av1 in (enhanced) flv (supported since ffmpeg 6.1)
+ const bool is_flv = strcmp(file_extension.c_str(), "flv") == 0;
+ if(video_codec != VideoCodec::H264 && is_flv) {
+ video_codec_to_use = "h264";
video_codec = VideoCodec::H264;
- fprintf(stderr, "Warning: h265 is not compatible with flv, falling back to h264 instead.\n");
+ fprintf(stderr, "Warning: hevc/av1 is not compatible with flv, falling back to h264 instead.\n");
}
const AVCodec *video_codec_f = nullptr;
switch(video_codec) {
case VideoCodec::H264:
- video_codec_f = find_h264_encoder();
+ video_codec_f = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
+ break;
+ case VideoCodec::HEVC:
+ case VideoCodec::HEVC_HDR:
+ video_codec_f = find_h265_encoder(egl.gpu_info.vendor, egl.card_path);
break;
- case VideoCodec::H265:
- video_codec_f = find_h265_encoder();
+ case VideoCodec::AV1:
+ case VideoCodec::AV1_HDR:
+ video_codec_f = find_av1_encoder(egl.gpu_info.vendor, egl.card_path);
break;
}
+ if(!video_codec_auto && !video_codec_f && !is_flv) {
+ switch(video_codec) {
+ case VideoCodec::H264: {
+ fprintf(stderr, "Warning: selected video codec h264 is not supported, trying hevc instead\n");
+ video_codec_to_use = "hevc";
+ video_codec = VideoCodec::HEVC;
+ video_codec_f = find_h265_encoder(egl.gpu_info.vendor, egl.card_path);
+ break;
+ }
+ case VideoCodec::HEVC:
+ case VideoCodec::HEVC_HDR: {
+ fprintf(stderr, "Warning: selected video codec hevc is not supported, trying h264 instead\n");
+ video_codec_to_use = "h264";
+ video_codec = VideoCodec::H264;
+ video_codec_f = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
+ break;
+ }
+ case VideoCodec::AV1:
+ case VideoCodec::AV1_HDR: {
+ fprintf(stderr, "Warning: selected video codec av1 is not supported, trying h264 instead\n");
+ video_codec_to_use = "h264";
+ video_codec = VideoCodec::H264;
+ video_codec_f = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
+ break;
+ }
+ }
+ }
+
if(!video_codec_f) {
- fprintf(stderr, "Error: your gpu does not support '%s' video codec\n", video_codec == VideoCodec::H264 ? "h264" : "h265");
- exit(2);
+ const char *video_codec_name = "";
+ switch(video_codec) {
+ case VideoCodec::H264: {
+ video_codec_name = "h264";
+ break;
+ }
+ case VideoCodec::HEVC:
+ case VideoCodec::HEVC_HDR: {
+ video_codec_name = "hevc";
+ break;
+ }
+ case VideoCodec::AV1:
+ case VideoCodec::AV1_HDR: {
+ video_codec_name = "av1";
+ break;
+ }
+ }
+
+ fprintf(stderr, "Error: your gpu does not support '%s' video codec. If you are sure that your gpu does support '%s' video encoding and you are using an AMD/Intel GPU,\n"
+ " then make sure you have installed the GPU specific vaapi packages (intel-media-driver, libva-intel-driver or libva-mesa-driver).\n"
+ " It's also possible that your distro has disabled hardware accelerated video encoding for '%s' video codec.\n"
+ " This may be the case on corporate distros such as Manjaro, Fedora or OpenSUSE.\n"
+ " You can test this by running 'vainfo | grep VAEntrypointEncSlice' to see if it matches any H264/HEVC profile.\n"
+ " On such distros, you need to manually install mesa from source to enable H264/HEVC hardware acceleration, or use a more user friendly distro. Alternatively record with AV1 if supported by your GPU.\n"
+ " You can alternatively use the flatpak version of GPU Screen Recorder (https://flathub.org/apps/com.dec05eba.gpu_screen_recorder) which bypasses system issues with patented H264/HEVC codecs.\n"
+ " Make sure you have mesa-extra freedesktop runtime installed when using the flatpak (this should be the default), which can be installed with this command:\n"
+ " flatpak install --system org.freedesktop.Platform.GL.default//23.08-extra", video_codec_name, video_codec_name, video_codec_name);
+ _exit(2);
}
- const bool is_livestream = is_livestream_path(filename);
+ gsr_capture *capture = create_capture_impl(window_str, screen_region, wayland, egl.gpu_info, egl, fps, overclock, video_codec, color_range, record_cursor);
+
// (Some?) livestreaming services require at least one audio track to work.
// If not audio is provided then create one silent audio track.
if(is_livestream && requested_audio_inputs.empty()) {
fprintf(stderr, "Info: live streaming but no audio track was added. Adding a silent audio track\n");
- requested_audio_inputs.push_back({ "", "gsr-silent" });
+ MergedAudioInputs mai;
+ mai.audio_inputs.push_back({ "", "gsr-silent" });
+ requested_audio_inputs.push_back(std::move(mai));
+ }
+
+ if(is_livestream && framerate_mode != FramerateMode::CONSTANT) {
+ fprintf(stderr, "Info: framerate mode was forcefully set to \"cfr\" because live streaming was detected\n");
+ framerate_mode = FramerateMode::CONSTANT;
+ framerate_mode_str = "cfr";
+ }
+
+ if(is_livestream && recording_saved_script) {
+ fprintf(stderr, "Warning: live stream detected, -sc script is ignored\n");
+ recording_saved_script = nullptr;
}
AVStream *video_stream = nullptr;
std::vector<AudioTrack> audio_tracks;
+ const bool hdr = video_codec_is_hdr(video_codec);
- AVCodecContext *video_codec_context = create_video_codec_context(AV_PIX_FMT_CUDA, quality, record_width, record_height, fps, video_codec_f, is_livestream);
+ AVCodecContext *video_codec_context = create_video_codec_context(egl.gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_CUDA : AV_PIX_FMT_VAAPI, quality, fps, video_codec_f, is_livestream, egl.gpu_info.vendor, framerate_mode, hdr, color_range);
if(replay_buffer_size_secs == -1)
video_stream = create_stream(av_format_context, video_codec_context);
- AVBufferRef *device_ctx;
- CUgraphicsResource cuda_graphics_resource;
- open_video(video_codec_context, window_pixmap, &device_ctx, &cuda_graphics_resource, cu_ctx, !src_window_id, quality, is_livestream, very_old_gpu);
+ AVFrame *video_frame = av_frame_alloc();
+ if(!video_frame) {
+ fprintf(stderr, "Error: Failed to allocate video frame\n");
+ _exit(1);
+ }
+ video_frame->format = video_codec_context->pix_fmt;
+ video_frame->width = video_codec_context->width;
+ video_frame->height = video_codec_context->height;
+ video_frame->color_range = video_codec_context->color_range;
+ video_frame->color_primaries = video_codec_context->color_primaries;
+ video_frame->color_trc = video_codec_context->color_trc;
+ video_frame->colorspace = video_codec_context->colorspace;
+ video_frame->chroma_location = video_codec_context->chroma_sample_location;
+
+ int capture_result = gsr_capture_start(capture, video_codec_context, video_frame);
+ if(capture_result != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_start failed\n");
+ _exit(capture_result);
+ }
+
+ open_video(video_codec_context, quality, very_old_gpu, egl.gpu_info.vendor, pixel_format, hdr);
if(video_stream)
avcodec_parameters_from_context(video_stream->codecpar, video_codec_context);
int audio_stream_index = VIDEO_STREAM_INDEX + 1;
- for(const AudioInput &audio_input : requested_audio_inputs) {
- AVCodecContext *audio_codec_context = create_audio_codec_context(fps);
+ for(const MergedAudioInputs &merged_audio_inputs : requested_audio_inputs) {
+ const bool use_amix = merged_audio_inputs.audio_inputs.size() > 1;
+ AVCodecContext *audio_codec_context = create_audio_codec_context(fps, audio_codec, use_amix, audio_bitrate);
AVStream *audio_stream = nullptr;
if(replay_buffer_size_secs == -1)
audio_stream = create_stream(av_format_context, audio_codec_context);
- AVFrame *audio_frame = open_audio(audio_codec_context);
+ open_audio(audio_codec_context);
if(audio_stream)
avcodec_parameters_from_context(audio_stream->codecpar, audio_codec_context);
- audio_tracks.push_back({ audio_codec_context, audio_frame, audio_stream, {}, {}, audio_stream_index, audio_input });
+ #if LIBAVCODEC_VERSION_MAJOR < 60
+ const int num_channels = audio_codec_context->channels;
+ #else
+ const int num_channels = audio_codec_context->ch_layout.nb_channels;
+ #endif
+
+ //audio_frame->sample_rate = audio_codec_context->sample_rate;
+
+ std::vector<AVFilterContext*> src_filter_ctx;
+ AVFilterGraph *graph = nullptr;
+ AVFilterContext *sink = nullptr;
+ if(use_amix) {
+ int err = init_filter_graph(audio_codec_context, &graph, &sink, src_filter_ctx, merged_audio_inputs.audio_inputs.size());
+ if(err < 0) {
+ fprintf(stderr, "Error: failed to create audio filter\n");
+ _exit(1);
+ }
+ }
+
+ // TODO: Cleanup above
+
+ std::vector<AudioDevice> audio_devices;
+ for(size_t i = 0; i < merged_audio_inputs.audio_inputs.size(); ++i) {
+ auto &audio_input = merged_audio_inputs.audio_inputs[i];
+ AVFilterContext *src_ctx = nullptr;
+ if(use_amix)
+ src_ctx = src_filter_ctx[i];
+
+ AudioDevice audio_device;
+ audio_device.audio_input = audio_input;
+ audio_device.src_filter_ctx = src_ctx;
+
+ if(audio_input.name.empty()) {
+ audio_device.sound_device.handle = NULL;
+ audio_device.sound_device.frames = 0;
+ } else {
+ if(sound_device_get_by_name(&audio_device.sound_device, audio_input.name.c_str(), audio_input.description.c_str(), num_channels, audio_codec_context->frame_size, audio_codec_context_get_audio_format(audio_codec_context)) != 0) {
+ fprintf(stderr, "Error: failed to get \"%s\" sound device\n", audio_input.name.c_str());
+ _exit(1);
+ }
+ }
+
+ audio_device.frame = create_audio_frame(audio_codec_context);
+ audio_device.frame->pts = 0;
+
+ audio_devices.push_back(std::move(audio_device));
+ }
+
+ AudioTrack audio_track;
+ audio_track.codec_context = audio_codec_context;
+ audio_track.stream = audio_stream;
+ audio_track.audio_devices = std::move(audio_devices);
+ audio_track.graph = graph;
+ audio_track.sink = sink;
+ audio_track.stream_index = audio_stream_index;
+ audio_tracks.push_back(std::move(audio_track));
++audio_stream_index;
}
@@ -1607,460 +2349,331 @@ int main(int argc, char **argv) {
int ret = avio_open(&av_format_context->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Error: Could not open '%s': %s\n", filename, av_error_to_string(ret));
- return 1;
+ _exit(1);
}
}
- //video_stream->duration = AV_TIME_BASE * 15;
- //audio_stream->duration = AV_TIME_BASE * 15;
- //av_format_context->duration = AV_TIME_BASE * 15;
if(replay_buffer_size_secs == -1) {
- int ret = avformat_write_header(av_format_context, nullptr);
+ AVDictionary *options = nullptr;
+ av_dict_set(&options, "strict", "experimental", 0);
+ //av_dict_set_int(&av_format_context->metadata, "video_full_range_flag", 1, 0);
+
+ int ret = avformat_write_header(av_format_context, &options);
if (ret < 0) {
fprintf(stderr, "Error occurred when writing header to output file: %s\n", av_error_to_string(ret));
- return 1;
+ _exit(1);
}
- }
-
- // av_frame_free(&rgb_frame);
- // avcodec_close(av_codec_context);
-
- if(src_window_id)
- XSelectInput(dpy, src_window_id, StructureNotifyMask | ExposureMask);
- /*
- int damage_event;
- int damage_error;
- if (!XDamageQueryExtension(dpy, &damage_event, &damage_error)) {
- fprintf(stderr, "Error: XDamage is not supported by your X11 server\n");
- return 1;
+ av_dict_free(&options);
}
- Damage damage = XDamageCreate(dpy, src_window_id, XDamageReportNonEmpty);
- XDamageSubtract(dpy, damage,None,None);
- */
-
const double start_time_pts = clock_get_monotonic_seconds();
- CUcontext old_ctx;
- CUarray mapped_array;
- if(src_window_id) {
- res = cuda.cuCtxPopCurrent_v2(&old_ctx);
- res = cuda.cuCtxPushCurrent_v2(cu_ctx);
-
- // Get texture
- res = cuda.cuGraphicsResourceSetMapFlags(
- cuda_graphics_resource, CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY);
- res = cuda.cuGraphicsMapResources(1, &cuda_graphics_resource, 0);
-
- // Map texture to cuda array
- res = cuda.cuGraphicsSubResourceGetMappedArray(&mapped_array,
- cuda_graphics_resource, 0, 0);
- }
-
- // Release texture
- // res = cuGraphicsUnmapResources(1, &cuda_graphics_resource, 0);
-
double start_time = clock_get_monotonic_seconds();
- double frame_timer_start = start_time;
- double window_resize_timer = start_time;
- bool window_resized = false;
+ double frame_timer_start = start_time - target_fps; // We want to capture the first frame immediately
int fps_counter = 0;
- AVFrame *frame = av_frame_alloc();
- if (!frame) {
- fprintf(stderr, "Error: Failed to allocate frame\n");
- exit(1);
- }
- frame->format = video_codec_context->pix_fmt;
- frame->width = video_codec_context->width;
- frame->height = video_codec_context->height;
-
- if(src_window_id) {
- if (av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, frame, 0) < 0) {
- fprintf(stderr, "Error: av_hwframe_get_buffer failed\n");
- exit(1);
- }
- } else {
- frame->hw_frames_ctx = av_buffer_ref(video_codec_context->hw_frames_ctx);
- frame->buf[0] = av_buffer_pool_get(((AVHWFramesContext*)video_codec_context->hw_frames_ctx->data)->pool);
- frame->extended_data = frame->data;
- }
-
- frame->color_range = AVCOL_RANGE_JPEG;
-
- if(window_pixmap.texture_width < record_width)
- frame->width = window_pixmap.texture_width & ~1;
- else
- frame->width = record_width & ~1;
-
- if(window_pixmap.texture_height < record_height)
- frame->height = window_pixmap.texture_height & ~1;
- else
- frame->height = record_height & ~1;
+ bool paused = false;
+ double paused_time_offset = 0.0;
+ double paused_time_start = 0.0;
std::mutex write_output_mutex;
+ std::mutex audio_filter_mutex;
const double record_start_time = clock_get_monotonic_seconds();
- std::deque<AVPacket> frame_data_queue;
+ std::deque<std::shared_ptr<PacketData>> frame_data_queue;
bool frames_erased = false;
- const size_t audio_buffer_size = 1024 * 2 * 2; // 2 bytes/sample, 2 channels
+ const size_t audio_buffer_size = 1024 * 4 * 2; // max 4 bytes/sample, 2 channels
uint8_t *empty_audio = (uint8_t*)malloc(audio_buffer_size);
if(!empty_audio) {
fprintf(stderr, "Error: failed to create empty audio\n");
- exit(1);
+ _exit(1);
}
memset(empty_audio, 0, audio_buffer_size);
- for(AudioTrack &audio_track : audio_tracks) {
- audio_track.thread = std::thread([record_start_time, replay_buffer_size_secs, &frame_data_queue, &frames_erased, &audio_track, empty_audio](AVFormatContext *av_format_context, std::mutex *write_output_mutex) mutable {
- #if LIBAVCODEC_VERSION_MAJOR < 60
- const int num_channels = audio_track.codec_context->channels;
- #else
- const int num_channels = audio_track.codec_context->ch_layout.nb_channels;
- #endif
-
- if(audio_track.audio_input.name.empty()) {
- audio_track.sound_device.handle = NULL;
- audio_track.sound_device.frames = 0;
- } else {
- if(sound_device_get_by_name(&audio_track.sound_device, audio_track.audio_input.name.c_str(), audio_track.audio_input.description.c_str(), num_channels, audio_track.codec_context->frame_size) != 0) {
- fprintf(stderr, "failed to get 'pulse' sound device\n");
- exit(1);
- }
- }
+ const double audio_startup_time_seconds = std::max(0.0, 0.089166 - target_fps);
- SwrContext *swr = swr_alloc();
- if(!swr) {
- fprintf(stderr, "Failed to create SwrContext\n");
- exit(1);
- }
- av_opt_set_int(swr, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
- av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
- av_opt_set_int(swr, "in_sample_rate", audio_track.codec_context->sample_rate, 0);
- av_opt_set_int(swr, "out_sample_rate", audio_track.codec_context->sample_rate, 0);
- av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
- swr_init(swr);
-
- int64_t pts = 0;
- const double target_audio_hz = 1.0 / (double)audio_track.codec_context->sample_rate;
- double received_audio_time = clock_get_monotonic_seconds();
- const int64_t timeout_ms = std::round((1000.0 / (double)audio_track.codec_context->sample_rate) * 1000.0);
-
- while(running) {
- void *sound_buffer;
- int sound_buffer_size = -1;
- if(audio_track.sound_device.handle)
- sound_buffer_size = sound_device_read_next_chunk(&audio_track.sound_device, &sound_buffer);
- const bool got_audio_data = sound_buffer_size >= 0;
-
- const double this_audio_frame_time = clock_get_monotonic_seconds();
- if(got_audio_data)
- received_audio_time = this_audio_frame_time;
-
- int ret = av_frame_make_writable(audio_track.frame);
- if (ret < 0) {
- fprintf(stderr, "Failed to make audio frame writable\n");
- break;
+ for(AudioTrack &audio_track : audio_tracks) {
+ for(AudioDevice &audio_device : audio_track.audio_devices) {
+ audio_device.thread = std::thread([&]() mutable {
+ const AVSampleFormat sound_device_sample_format = audio_format_to_sample_format(audio_codec_context_get_audio_format(audio_track.codec_context));
+ // TODO: Always do conversion for now. This fixes issue with stuttering audio on pulseaudio with opus + multiple audio sources merged
+ const bool needs_audio_conversion = true;//audio_track.codec_context->sample_fmt != sound_device_sample_format;
+ SwrContext *swr = nullptr;
+ if(needs_audio_conversion) {
+ swr = swr_alloc();
+ if(!swr) {
+ fprintf(stderr, "Failed to create SwrContext\n");
+ _exit(1);
+ }
+ #if LIBAVUTIL_VERSION_MAJOR <= 56
+ av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
+ av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
+ #else
+ av_opt_set_chlayout(swr, "in_channel_layout", &audio_track.codec_context->ch_layout, 0);
+ av_opt_set_chlayout(swr, "out_channel_layout", &audio_track.codec_context->ch_layout, 0);
+ #endif
+ av_opt_set_int(swr, "in_sample_rate", audio_track.codec_context->sample_rate, 0);
+ av_opt_set_int(swr, "out_sample_rate", audio_track.codec_context->sample_rate, 0);
+ av_opt_set_sample_fmt(swr, "in_sample_fmt", sound_device_sample_format, 0);
+ av_opt_set_sample_fmt(swr, "out_sample_fmt", audio_track.codec_context->sample_fmt, 0);
+ swr_init(swr);
}
- int64_t num_missing_frames = std::round((this_audio_frame_time - received_audio_time) / target_audio_hz / (int64_t)audio_track.frame->nb_samples);
- if(got_audio_data)
- num_missing_frames = std::max((int64_t)0, num_missing_frames - 1);
-
- if(!audio_track.sound_device.handle)
- num_missing_frames = std::max((int64_t)1, num_missing_frames);
-
- // Jesus is there a better way to do this? I JUST WANT TO KEEP VIDEO AND AUDIO SYNCED HOLY FUCK I WANT TO KILL MYSELF NOW.
- // THIS PIECE OF SHIT WANTS EMPTY FRAMES OTHERWISE VIDEO PLAYS TOO FAST TO KEEP UP WITH AUDIO OR THE AUDIO PLAYS TOO EARLY.
- // BUT WE CANT USE DELAYS TO GIVE DUMMY DATA BECAUSE PULSEAUDIO MIGHT GIVE AUDIO A BIG DELAYED!!!
- if(num_missing_frames >= 5 || !audio_track.sound_device.handle) {
- // TODO:
- //audio_track.frame->data[0] = empty_audio;
- received_audio_time = this_audio_frame_time;
- swr_convert(swr, &audio_track.frame->data[0], audio_track.frame->nb_samples, (const uint8_t**)&empty_audio, audio_track.codec_context->frame_size);
- // TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
- for(int i = 0; i < num_missing_frames; ++i) {
- audio_track.frame->pts = pts;
- pts += audio_track.frame->nb_samples;
- ret = avcodec_send_frame(audio_track.codec_context, audio_track.frame);
- if(ret >= 0){
- receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, audio_track.frame, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, *write_output_mutex);
- } else {
- fprintf(stderr, "Failed to encode audio!\n");
- }
+ double received_audio_time = clock_get_monotonic_seconds();
+ const double timeout_sec = 1000.0 / (double)audio_track.codec_context->sample_rate;
+ const int64_t timeout_ms = std::round(timeout_sec * 1000.0);
+
+ while(running) {
+ void *sound_buffer;
+ int sound_buffer_size = -1;
+ //const double time_before_read_seconds = clock_get_monotonic_seconds();
+ if(audio_device.sound_device.handle) {
+ // TODO: use this instead of calculating time to read. But this can fluctuate and we dont want to go back in time,
+ // also it's 0.0 for some users???
+ double latency_seconds = 0.0;
+ sound_buffer_size = sound_device_read_next_chunk(&audio_device.sound_device, &sound_buffer, timeout_sec, &latency_seconds);
}
- }
- if(!audio_track.sound_device.handle)
- usleep(timeout_ms * 1000);
+ const bool got_audio_data = sound_buffer_size >= 0;
+ //const double time_after_read_seconds = clock_get_monotonic_seconds();
+ //const double time_to_read_seconds = time_after_read_seconds - time_before_read_seconds;
+ const double this_audio_frame_time = (clock_get_monotonic_seconds() - audio_startup_time_seconds) - paused_time_offset;
- if(got_audio_data) {
- // TODO: Instead of converting audio, get float audio from alsa. Or does alsa do conversion internally to get this format?
- swr_convert(swr, &audio_track.frame->data[0], audio_track.frame->nb_samples, (const uint8_t**)&sound_buffer, audio_track.codec_context->frame_size);
+ if(paused) {
+ if(got_audio_data)
+ received_audio_time = this_audio_frame_time;
- audio_track.frame->pts = pts;
- pts += audio_track.frame->nb_samples;
+ if(!audio_device.sound_device.handle)
+ usleep(timeout_ms * 1000);
- ret = avcodec_send_frame(audio_track.codec_context, audio_track.frame);
- if(ret >= 0){
- receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, audio_track.frame, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, *write_output_mutex);
- } else {
- fprintf(stderr, "Failed to encode audio!\n");
+ continue;
+ }
+
+ int ret = av_frame_make_writable(audio_device.frame);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to make audio frame writable\n");
+ break;
+ }
+
+ // TODO: Is this |received_audio_time| really correct?
+ const double prev_audio_time = received_audio_time;
+ const double audio_receive_time_diff = this_audio_frame_time - received_audio_time;
+ int64_t num_missing_frames = std::round(audio_receive_time_diff / timeout_sec);
+ if(got_audio_data)
+ num_missing_frames = std::max((int64_t)0, num_missing_frames - 1);
+
+ if(!audio_device.sound_device.handle)
+ num_missing_frames = std::max((int64_t)1, num_missing_frames);
+
+ if(got_audio_data)
+ received_audio_time = this_audio_frame_time;
+
+ // Fucking hell is there a better way to do this? I JUST WANT TO KEEP VIDEO AND AUDIO SYNCED HOLY FUCK I WANT TO KILL MYSELF NOW.
+ // THIS PIECE OF SHIT WANTS EMPTY FRAMES OTHERWISE VIDEO PLAYS TOO FAST TO KEEP UP WITH AUDIO OR THE AUDIO PLAYS TOO EARLY.
+        // BUT WE CANT USE DELAYS TO GIVE DUMMY DATA BECAUSE PULSEAUDIO MIGHT GIVE AUDIO WITH A BIG DELAY!!!
+ // This garbage is needed because we want to produce constant frame rate videos instead of variable frame rate
+        // videos because bad software such as video editing software and VLC do not support variable frame rate videos,
+ // despite nvidia shadowplay and xbox game bar producing variable frame rate videos.
+ // So we have to make sure we produce frames at the same relative rate as the video.
+ if(num_missing_frames >= 5 || !audio_device.sound_device.handle) {
+ // TODO:
+ //audio_track.frame->data[0] = empty_audio;
+ received_audio_time = this_audio_frame_time;
+ if(needs_audio_conversion)
+ swr_convert(swr, &audio_device.frame->data[0], audio_track.codec_context->frame_size, (const uint8_t**)&empty_audio, audio_track.codec_context->frame_size);
+ else
+ audio_device.frame->data[0] = empty_audio;
+
+ // TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
+ std::lock_guard<std::mutex> lock(audio_filter_mutex);
+ for(int i = 0; i < num_missing_frames; ++i) {
+ const int64_t new_pts = ((prev_audio_time - record_start_time) + timeout_sec * i) * AV_TIME_BASE;
+ if(new_pts == audio_device.frame->pts)
+ continue;
+
+ audio_device.frame->pts = new_pts;
+ if(audio_track.graph) {
+ // TODO: av_buffersrc_add_frame
+ if(av_buffersrc_write_frame(audio_device.src_filter_ctx, audio_device.frame) < 0) {
+ fprintf(stderr, "Error: failed to add audio frame to filter\n");
+ }
+ } else {
+ ret = avcodec_send_frame(audio_track.codec_context, audio_device.frame);
+ if(ret >= 0) {
+ // TODO: Move to separate thread because this could write to network (for example when livestreaming)
+ receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, audio_device.frame->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex, paused_time_offset);
+ } else {
+ fprintf(stderr, "Failed to encode audio!\n");
+ }
+ }
+ }
+ }
+
+ if(!audio_device.sound_device.handle)
+ usleep(timeout_ms * 1000);
+
+ if(got_audio_data) {
+ // TODO: Instead of converting audio, get float audio from alsa. Or does alsa do conversion internally to get this format?
+ if(needs_audio_conversion)
+ swr_convert(swr, &audio_device.frame->data[0], audio_track.codec_context->frame_size, (const uint8_t**)&sound_buffer, audio_track.codec_context->frame_size);
+ else
+ audio_device.frame->data[0] = (uint8_t*)sound_buffer;
+
+ const int64_t new_pts = (this_audio_frame_time - record_start_time) * AV_TIME_BASE;
+ if(new_pts != audio_device.frame->pts) {
+ audio_device.frame->pts = new_pts;
+
+ if(audio_track.graph) {
+ std::lock_guard<std::mutex> lock(audio_filter_mutex);
+ // TODO: av_buffersrc_add_frame
+ if(av_buffersrc_write_frame(audio_device.src_filter_ctx, audio_device.frame) < 0) {
+ fprintf(stderr, "Error: failed to add audio frame to filter\n");
+ }
+ } else {
+ ret = avcodec_send_frame(audio_track.codec_context, audio_device.frame);
+ if(ret >= 0) {
+ // TODO: Move to separate thread because this could write to network (for example when livestreaming)
+ receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, audio_device.frame->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex, paused_time_offset);
+ } else {
+ fprintf(stderr, "Failed to encode audio!\n");
+ }
+ }
+ }
}
}
- }
- sound_device_close(&audio_track.sound_device);
- swr_free(&swr);
- }, av_format_context, &write_output_mutex);
+ if(swr)
+ swr_free(&swr);
+ });
+ }
}
// Set update_fps to 24 to test if duplicate/delayed frames cause video/audio desync or too fast/slow video.
const double update_fps = fps + 190;
- int64_t video_pts_counter = 0;
-
- XEvent e;
- while (running) {
- double frame_start = clock_get_monotonic_seconds();
- if(window)
- gl.glClear(GL_COLOR_BUFFER_BIT);
+ bool should_stop_error = false;
- if(src_window_id) {
- if (XCheckTypedWindowEvent(dpy, src_window_id, DestroyNotify, &e)) {
- running = 0;
- }
-
- if (XCheckTypedWindowEvent(dpy, src_window_id, Expose, &e) && e.xexpose.count == 0) {
- window_resize_timer = clock_get_monotonic_seconds();
- window_resized = true;
- }
+ AVFrame *aframe = av_frame_alloc();
- if (XCheckTypedWindowEvent(dpy, src_window_id, ConfigureNotify, &e) && e.xconfigure.window == src_window_id) {
- while(XCheckTypedWindowEvent(dpy, src_window_id, ConfigureNotify, &e)) {}
- window_x = e.xconfigure.x;
- window_y = e.xconfigure.y;
- Window c;
- XTranslateCoordinates(dpy, src_window_id, DefaultRootWindow(dpy), 0, 0, &window_x, &window_y, &c);
- // Window resize
- if(e.xconfigure.width != (int)window_width || e.xconfigure.height != (int)window_height) {
- window_width = std::max(0, e.xconfigure.width);
- window_height = std::max(0, e.xconfigure.height);
- window_resize_timer = clock_get_monotonic_seconds();
- window_resized = true;
- }
- }
-
- const double window_resize_timeout = 1.0; // 1 second
- if(window_resized && clock_get_monotonic_seconds() - window_resize_timer >= window_resize_timeout) {
- window_resized = false;
- fprintf(stderr, "Resize window!\n");
- recreate_window_pixmap(dpy, src_window_id, window_pixmap);
- // Resolution must be a multiple of two
- //video_stream->codec->width = window_pixmap.texture_width & ~1;
- //video_stream->codec->height = window_pixmap.texture_height & ~1;
-
- cuda.cuGraphicsUnregisterResource(cuda_graphics_resource);
- res = cuda.cuGraphicsGLRegisterImage(
- &cuda_graphics_resource, window_pixmap.target_texture_id, GL_TEXTURE_2D,
- CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY);
- if (res != CUDA_SUCCESS) {
- const char *err_str;
- cuda.cuGetErrorString(res, &err_str);
- fprintf(stderr,
- "Error: cuda.cuGraphicsGLRegisterImage failed, error %s, texture "
- "id: %u\n",
- err_str, window_pixmap.target_texture_id);
- running = false;
- break;
- }
+ int64_t video_pts_counter = 0;
+ int64_t video_prev_pts = 0;
- res = cuda.cuGraphicsResourceSetMapFlags(
- cuda_graphics_resource, CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY);
- res = cuda.cuGraphicsMapResources(1, &cuda_graphics_resource, 0);
- res = cuda.cuGraphicsSubResourceGetMappedArray(&mapped_array, cuda_graphics_resource, 0, 0);
+ while(running) {
+ double frame_start = clock_get_monotonic_seconds();
- av_frame_free(&frame);
- frame = av_frame_alloc();
- if (!frame) {
- fprintf(stderr, "Error: Failed to allocate frame\n");
- running = false;
- break;
- }
- frame->format = video_codec_context->pix_fmt;
- frame->width = video_codec_context->width;
- frame->height = video_codec_context->height;
+ gsr_capture_tick(capture, video_codec_context);
+ should_stop_error = false;
+ if(gsr_capture_should_stop(capture, &should_stop_error)) {
+ running = 0;
+ break;
+ }
+ ++fps_counter;
- if (av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, frame, 0) < 0) {
- fprintf(stderr, "Error: av_hwframe_get_buffer failed\n");
- running = false;
- break;
+ {
+ std::lock_guard<std::mutex> lock(audio_filter_mutex);
+ for(AudioTrack &audio_track : audio_tracks) {
+ if(!audio_track.sink)
+ continue;
+
+ int err = 0;
+ while ((err = av_buffersink_get_frame(audio_track.sink, aframe)) >= 0) {
+ const double this_audio_frame_time = (clock_get_monotonic_seconds() - audio_startup_time_seconds) - paused_time_offset;
+ const int64_t new_pts = (this_audio_frame_time - record_start_time) * AV_TIME_BASE;
+ if(new_pts == aframe->pts)
+ continue;
+ aframe->pts = new_pts;
+ err = avcodec_send_frame(audio_track.codec_context, aframe);
+ if(err >= 0){
+ // TODO: Move to separate thread because this could write to network (for example when livestreaming)
+ receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, aframe->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex, paused_time_offset);
+ } else {
+ fprintf(stderr, "Failed to encode audio!\n");
+ }
+ av_frame_unref(aframe);
}
-
- if(window_pixmap.texture_width < record_width)
- frame->width = window_pixmap.texture_width & ~1;
- else
- frame->width = record_width & ~1;
-
- if(window_pixmap.texture_height < record_height)
- frame->height = window_pixmap.texture_height & ~1;
- else
- frame->height = record_height & ~1;
-
- // Make the new completely black to clear unused parts
- // TODO: cuMemsetD32?
- cuda.cuMemsetD8_v2((CUdeviceptr)frame->data[0], 0, record_width * record_height * 4);
}
}
- ++fps_counter;
-
double time_now = clock_get_monotonic_seconds();
double frame_timer_elapsed = time_now - frame_timer_start;
double elapsed = time_now - start_time;
if (elapsed >= 1.0) {
- fprintf(stderr, "update fps: %d\n", fps_counter);
+ if(verbose) {
+ fprintf(stderr, "update fps: %d\n", fps_counter);
+ }
start_time = time_now;
fps_counter = 0;
}
double frame_time_overflow = frame_timer_elapsed - target_fps;
if (frame_time_overflow >= 0.0) {
+ frame_time_overflow = std::min(frame_time_overflow, target_fps);
frame_timer_start = time_now - frame_time_overflow;
- if(src_window_id) {
- // TODO: Use a framebuffer instead. glCopyImageSubData requires
- // opengl 4.2
- int source_x = 0;
- int source_y = 0;
-
- int source_width = window_pixmap.texture_width;
- int source_height = window_pixmap.texture_height;
-
- bool clamped = false;
-
- if(window_pixmap.composite_window) {
- source_x = window_x;
- source_y = window_y;
-
- int underflow_x = 0;
- int underflow_y = 0;
-
- if(source_x < 0) {
- underflow_x = -source_x;
- source_x = 0;
- source_width += source_x;
- }
-
- if(source_y < 0) {
- underflow_y = -source_y;
- source_y = 0;
- source_height += source_y;
- }
-
- const int clamped_source_width = std::max(0, window_pixmap.texture_real_width - source_x - underflow_x);
- const int clamped_source_height = std::max(0, window_pixmap.texture_real_height - source_y - underflow_y);
+ const double this_video_frame_time = clock_get_monotonic_seconds() - paused_time_offset;
+ const int64_t expected_frames = std::round((this_video_frame_time - start_time_pts) / target_fps);
+ const int num_frames = framerate_mode == FramerateMode::CONSTANT ? std::max((int64_t)0LL, expected_frames - video_pts_counter) : 1;
- if(clamped_source_width < source_width) {
- source_width = clamped_source_width;
- clamped = true;
- }
+ if(num_frames > 0 && !paused) {
+ gsr_capture_capture(capture, video_frame);
- if(clamped_source_height < source_height) {
- source_height = clamped_source_height;
- clamped = true;
+ // TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
+ for(int i = 0; i < num_frames; ++i) {
+ if(framerate_mode == FramerateMode::CONSTANT) {
+ video_frame->pts = video_pts_counter + i;
+ } else {
+ video_frame->pts = (this_video_frame_time - record_start_time) * (double)AV_TIME_BASE;
+ const bool same_pts = video_frame->pts == video_prev_pts;
+ video_prev_pts = video_frame->pts;
+ if(same_pts)
+ continue;
}
- }
- if(clamped) {
- // Requires opengl 4.4... TODO: Replace with earlier opengl if opengl < 4.2
- if(gl.glClearTexImage)
- gl.glClearTexImage(window_pixmap.target_texture_id, 0, GL_RGB, GL_UNSIGNED_BYTE, nullptr);
- }
-
- // Requires opengl 4.2... TODO: Replace with earlier opengl if opengl < 4.2
- gl.glCopyImageSubData(
- window_pixmap.texture_id, GL_TEXTURE_2D, 0, source_x, source_y, 0,
- window_pixmap.target_texture_id, GL_TEXTURE_2D, 0, 0, 0, 0,
- source_width, source_height, 1);
- unsigned int err = gl.glGetError();
- if(err != 0) {
- static bool error_shown = false;
- if(!error_shown) {
- error_shown = true;
- fprintf(stderr, "Error: glCopyImageSubData failed, gl error: %d\n", err);
+ int ret = avcodec_send_frame(video_codec_context, video_frame);
+ if(ret == 0) {
+ // TODO: Move to separate thread because this could write to network (for example when livestreaming)
+ receive_frames(video_codec_context, VIDEO_STREAM_INDEX, video_stream, video_frame->pts, av_format_context,
+ record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex, paused_time_offset);
+ } else {
+ fprintf(stderr, "Error: avcodec_send_frame failed, error: %s\n", av_error_to_string(ret));
}
}
- gl.glXSwapBuffers(dpy, window);
- // int err = gl.glGetError();
- // fprintf(stderr, "error: %d\n", err);
-
- // TODO: Remove this copy, which is only possible by using nvenc directly and encoding window_pixmap.target_texture_id
-
- frame->linesize[0] = frame->width * 4;
- CUDA_MEMCPY2D memcpy_struct;
- memcpy_struct.srcXInBytes = 0;
- memcpy_struct.srcY = 0;
- memcpy_struct.srcMemoryType = CUmemorytype::CU_MEMORYTYPE_ARRAY;
-
- memcpy_struct.dstXInBytes = 0;
- memcpy_struct.dstY = 0;
- memcpy_struct.dstMemoryType = CUmemorytype::CU_MEMORYTYPE_DEVICE;
+ gsr_capture_end(capture, video_frame);
+ video_pts_counter += num_frames;
+ }
+ }
- memcpy_struct.srcArray = mapped_array;
- memcpy_struct.dstDevice = (CUdeviceptr)frame->data[0];
- memcpy_struct.dstPitch = frame->linesize[0];
- memcpy_struct.WidthInBytes = frame->width * 4;
- memcpy_struct.Height = frame->height;
- cuda.cuMemcpy2D_v2(&memcpy_struct);
+ if(toggle_pause == 1) {
+ const bool new_paused_state = !paused;
+ if(new_paused_state) {
+ paused_time_start = clock_get_monotonic_seconds();
+ fprintf(stderr, "Paused\n");
} else {
- // TODO: Check when src_cu_device_ptr changes and re-register resource
- frame->linesize[0] = frame->width * 4;
-
- uint32_t byte_size = 0;
- CUdeviceptr src_cu_device_ptr = 0;
- nv_fbc_library.capture(&src_cu_device_ptr, &byte_size);
- frame->data[0] = (uint8_t*)src_cu_device_ptr;
+ paused_time_offset += (clock_get_monotonic_seconds() - paused_time_start);
+ fprintf(stderr, "Unpaused\n");
}
- // res = cuda.cuCtxPopCurrent_v2(&old_ctx);
-
- const double this_video_frame_time = clock_get_monotonic_seconds();
- const int64_t expected_frames = std::round((this_video_frame_time - start_time_pts) / target_fps);
- const int num_frames = std::max(0L, expected_frames - video_pts_counter);
-
- frame->flags &= ~AV_FRAME_FLAG_DISCARD;
- // TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
- for(int i = 0; i < num_frames; ++i) {
- if(i > 0)
- frame->flags |= AV_FRAME_FLAG_DISCARD;
-
- frame->pts = video_pts_counter + i;
- if (avcodec_send_frame(video_codec_context, frame) >= 0) {
- receive_frames(video_codec_context, VIDEO_STREAM_INDEX, video_stream, frame, av_format_context,
- record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex);
- } else {
- fprintf(stderr, "Error: avcodec_send_frame failed\n");
- }
- }
- video_pts_counter += num_frames;
+ toggle_pause = 0;
+ paused = !paused;
}
if(save_replay_thread.valid() && save_replay_thread.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
save_replay_thread.get();
puts(save_replay_output_filepath.c_str());
+ fflush(stdout);
+ if(recording_saved_script)
+ run_recording_saved_script_async(recording_saved_script, save_replay_output_filepath.c_str(), "replay");
+ std::lock_guard<std::mutex> lock(write_output_mutex);
save_replay_packets.clear();
}
if(save_replay == 1 && !save_replay_thread.valid() && replay_buffer_size_secs != -1) {
save_replay = 0;
- save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex);
+ save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex, make_folders);
}
- // av_frame_free(&frame);
double frame_end = clock_get_monotonic_seconds();
double frame_sleep_fps = 1.0 / update_fps;
double sleep_time = frame_sleep_fps - (frame_end - frame_start);
@@ -2068,17 +2681,27 @@ int main(int argc, char **argv) {
usleep(sleep_time * 1000.0 * 1000.0);
}
- running = 0;
+ running = 0;
if(save_replay_thread.valid()) {
save_replay_thread.get();
puts(save_replay_output_filepath.c_str());
+ fflush(stdout);
+ if(recording_saved_script)
+ run_recording_saved_script_async(recording_saved_script, save_replay_output_filepath.c_str(), "replay");
+ std::lock_guard<std::mutex> lock(write_output_mutex);
+ save_replay_packets.clear();
}
for(AudioTrack &audio_track : audio_tracks) {
- audio_track.thread.join();
+ for(AudioDevice &audio_device : audio_track.audio_devices) {
+ audio_device.thread.join();
+ sound_device_close(&audio_device.sound_device);
+ }
}
+ av_frame_free(&aframe);
+
if (replay_buffer_size_secs == -1 && av_write_trailer(av_format_context) != 0) {
fprintf(stderr, "Failed to write trailer\n");
}
@@ -2086,8 +2709,24 @@ int main(int argc, char **argv) {
if(replay_buffer_size_secs == -1 && !(output_format->flags & AVFMT_NOFILE))
avio_close(av_format_context->pb);
- if(dpy)
- XCloseDisplay(dpy);
+ gsr_capture_destroy(capture, video_codec_context);
+
+ if(replay_buffer_size_secs == -1 && recording_saved_script)
+ run_recording_saved_script_async(recording_saved_script, filename, "regular");
+
+ if(dpy) {
+    // TODO: This causes a crash, why? Maybe some other library dlcloses xlib, and that also happened to unload this???
+ //XCloseDisplay(dpy);
+ }
+ //av_frame_free(&video_frame);
+ free((void*)window_str);
free(empty_audio);
+ // We do an _exit here because cuda uses at_exit to do _something_ that causes the program to freeze,
+ // but only on some nvidia driver versions on some gpus (RTX?), and _exit exits the program without calling
+ // the at_exit registered functions.
+ // Cuda (cuvid library in this case) seems to be waiting for a thread that never finishes execution.
+ // Maybe this happens because we dont clean up all ffmpeg resources?
+ // TODO: Investigate this.
+ _exit(should_stop_error ? 3 : 0);
}