aboutsummaryrefslogtreecommitdiff
path: root/src/capture
diff options
context:
space:
mode:
Diffstat (limited to 'src/capture')
-rw-r--r--src/capture/capture.c404
-rw-r--r--src/capture/kms.c369
-rw-r--r--src/capture/kms_cuda.c181
-rw-r--r--src/capture/kms_vaapi.c135
-rw-r--r--src/capture/nvfbc.c535
-rw-r--r--src/capture/xcomposite.c299
-rw-r--r--src/capture/xcomposite_cuda.c155
-rw-r--r--src/capture/xcomposite_vaapi.c109
8 files changed, 2187 insertions, 0 deletions
diff --git a/src/capture/capture.c b/src/capture/capture.c
new file mode 100644
index 0000000..670114e
--- /dev/null
+++ b/src/capture/capture.c
@@ -0,0 +1,404 @@
+#include "../../include/capture/capture.h"
+#include "../../include/egl.h"
+#include "../../include/cuda.h"
+#include "../../include/utils.h"
+#include <stdio.h>
+#include <stdint.h>
+#include <va/va.h>
+#include <va/va_drmcommon.h>
+#include <libavutil/frame.h>
+#include <libavutil/hwcontext_vaapi.h>
+#include <libavutil/hwcontext_cuda.h>
+#include <libavcodec/avcodec.h>
+
+#define FOURCC_NV12 842094158
+#define FOURCC_P010 808530000
+
+int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+ if(cap->started)
+ return -1;
+
+ int res = cap->start(cap, video_codec_context, frame);
+ if(res == 0)
+ cap->started = true;
+
+ return res;
+}
+
+void gsr_capture_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
+ if(!cap->started) {
+ fprintf(stderr, "gsr error: gsp_capture_tick failed: the gsr capture has not been started\n");
+ return;
+ }
+
+ if(cap->tick)
+ cap->tick(cap, video_codec_context);
+}
+
+bool gsr_capture_should_stop(gsr_capture *cap, bool *err) {
+ if(!cap->started) {
+ fprintf(stderr, "gsr error: gsr_capture_should_stop failed: the gsr capture has not been started\n");
+ return false;
+ }
+
+ if(!cap->should_stop)
+ return false;
+
+ return cap->should_stop(cap, err);
+}
+
+int gsr_capture_capture(gsr_capture *cap, AVFrame *frame) {
+ if(!cap->started) {
+ fprintf(stderr, "gsr error: gsr_capture_capture failed: the gsr capture has not been started\n");
+ return -1;
+ }
+ return cap->capture(cap, frame);
+}
+
+void gsr_capture_end(gsr_capture *cap, AVFrame *frame) {
+ if(!cap->started) {
+ fprintf(stderr, "gsr error: gsr_capture_end failed: the gsr capture has not been started\n");
+ return;
+ }
+
+ if(!cap->capture_end)
+ return;
+
+ cap->capture_end(cap, frame);
+}
+
/* Unconditionally forwards to the backend's destroy callback (no started
 * check, unlike the other wrappers in this file). */
void gsr_capture_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
    cap->destroy(cap, video_codec_context);
}
+
/* Packs four characters into a little-endian FOURCC code (first character in
 * the lowest byte), matching the DRM/VA-API fourcc encoding used by
 * FOURCC_NV12 and FOURCC_P010 above. */
static uint32_t fourcc(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
    uint32_t code = a;
    code |= b << 8;
    code |= c << 16;
    code |= d << 24;
    return code;
}
+
/* Allocates a VAAPI hardware frame for |frame| from the codec's hw frame
 * pool, exports its DRM PRIME descriptor into |prime|, imports the two
 * NV12/P010 planes as OpenGL textures (self->target_textures) through
 * EGLImage dma-buf import, and initializes the color-conversion pipeline that
 * renders into those planes. Also creates the (initially empty) input and
 * cursor textures the capture code later samples from.
 * Returns false on any failure. NOTE(review): on failure, textures and the
 * exported prime fds created so far are not released here — presumably the
 * caller tears down via gsr_capture_base_stop; confirm that also closes the
 * prime object fds. */
bool gsr_capture_base_setup_vaapi_textures(gsr_capture_base *self, AVFrame *frame, VADisplay va_dpy, VADRMPRIMESurfaceDescriptor *prime, gsr_color_range color_range) {
    const int res = av_hwframe_get_buffer(self->video_codec_context->hw_frames_ctx, frame, 0);
    if(res < 0) {
        fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: av_hwframe_get_buffer failed: %d\n", res);
        return false;
    }

    // For VAAPI frames FFmpeg stores the VASurfaceID in data[3].
    VASurfaceID target_surface_id = (uintptr_t)frame->data[3];

    VAStatus va_status = vaExportSurfaceHandle(va_dpy, target_surface_id, VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, VA_EXPORT_SURFACE_WRITE_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, prime);
    if(va_status != VA_STATUS_SUCCESS) {
        fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: vaExportSurfaceHandle failed, error: %d\n", va_status);
        return false;
    }
    // NOTE(review): the vaSyncSurface result is ignored — a failure here is
    // not fatal for setup but may be worth logging.
    vaSyncSurface(va_dpy, target_surface_id);

    // Texture the captured screen content is uploaded into (RGB source).
    self->egl->glGenTextures(1, &self->input_texture);
    self->egl->glBindTexture(GL_TEXTURE_2D, self->input_texture);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->egl->glBindTexture(GL_TEXTURE_2D, 0);

    // Separate texture for the hardware cursor plane.
    self->egl->glGenTextures(1, &self->cursor_texture);
    self->egl->glBindTexture(GL_TEXTURE_2D, self->cursor_texture);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->egl->glBindTexture(GL_TEXTURE_2D, 0);

    // Per-plane dma-buf fourccs: NV12 = R8 (Y) + GR88 (interleaved UV),
    // P010 = R16 + GR32 (16-bit variants).
    const uint32_t formats_nv12[2] = { fourcc('R', '8', ' ', ' '), fourcc('G', 'R', '8', '8') };
    const uint32_t formats_p010[2] = { fourcc('R', '1', '6', ' '), fourcc('G', 'R', '3', '2') };

    if(prime->fourcc == FOURCC_NV12 || prime->fourcc == FOURCC_P010) {
        const uint32_t *formats = prime->fourcc == FOURCC_NV12 ? formats_nv12 : formats_p010;
        const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size

        self->egl->glGenTextures(2, self->target_textures);
        for(int i = 0; i < 2; ++i) {
            // VA_EXPORT_SURFACE_SEPARATE_LAYERS gives one layer per plane,
            // so layer index == plane index and plane-within-layer is 0.
            const int layer = i;
            const int plane = 0;

            //const uint64_t modifier = prime->objects[prime->layers[layer].object_index[plane]].drm_format_modifier;

            const intptr_t img_attr[] = {
                EGL_LINUX_DRM_FOURCC_EXT, formats[i],
                EGL_WIDTH, prime->width / div[i],
                EGL_HEIGHT, prime->height / div[i],
                EGL_DMA_BUF_PLANE0_FD_EXT, prime->objects[prime->layers[layer].object_index[plane]].fd,
                EGL_DMA_BUF_PLANE0_OFFSET_EXT, prime->layers[layer].offset[plane],
                EGL_DMA_BUF_PLANE0_PITCH_EXT, prime->layers[layer].pitch[plane],
                // TODO:
                //EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, modifier & 0xFFFFFFFFULL,
                //EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, modifier >> 32ULL,
                EGL_NONE
            };

            // Drain any stale EGL errors so the checks below only see errors
            // caused by this import.
            while(self->egl->eglGetError() != EGL_SUCCESS){}
            EGLImage image = self->egl->eglCreateImage(self->egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
            if(!image) {
                fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: failed to create egl image from drm fd for output drm fd, error: %d\n", self->egl->eglGetError());
                return false;
            }

            self->egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[i]);
            self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
            self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
            self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
            self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

            // Drain GL and EGL error queues before binding the image, then
            // check both to detect a failed bind.
            while(self->egl->glGetError()) {}
            while(self->egl->eglGetError() != EGL_SUCCESS){}
            self->egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
            if(self->egl->glGetError() != 0 || self->egl->eglGetError() != EGL_SUCCESS) {
                // TODO: Get the error properly
                fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: failed to bind egl image to gl texture, error: %d\n", self->egl->eglGetError());
                self->egl->eglDestroyImage(self->egl->egl_display, image);
                self->egl->glBindTexture(GL_TEXTURE_2D, 0);
                return false;
            }

            // The texture keeps the image's storage alive; the EGLImage
            // handle itself can be destroyed immediately after binding.
            self->egl->eglDestroyImage(self->egl->egl_display, image);
            self->egl->glBindTexture(GL_TEXTURE_2D, 0);
        }

        gsr_color_conversion_params color_conversion_params = {0};
        color_conversion_params.color_range = color_range;
        color_conversion_params.egl = self->egl;
        color_conversion_params.source_color = GSR_SOURCE_COLOR_RGB;
        if(prime->fourcc == FOURCC_NV12)
            color_conversion_params.destination_color = GSR_DESTINATION_COLOR_NV12;
        else
            color_conversion_params.destination_color = GSR_DESTINATION_COLOR_P010;

        color_conversion_params.destination_textures[0] = self->target_textures[0];
        color_conversion_params.destination_textures[1] = self->target_textures[1];
        color_conversion_params.num_destination_textures = 2;

        if(gsr_color_conversion_init(&self->color_conversion, &color_conversion_params) != 0) {
            fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: failed to create color conversion\n");
            return false;
        }

        gsr_color_conversion_clear(&self->color_conversion);

        return true;
    } else {
        fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: unexpected fourcc %u for output drm fd, expected nv12 or p010\n", prime->fourcc);
        return false;
    }
}
+
+static unsigned int gl_create_texture(gsr_egl *egl, int width, int height, int internal_format, unsigned int format) {
+ unsigned int texture_id = 0;
+ egl->glGenTextures(1, &texture_id);
+ egl->glBindTexture(GL_TEXTURE_2D, texture_id);
+ egl->glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, GL_UNSIGNED_BYTE, NULL);
+
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ egl->glBindTexture(GL_TEXTURE_2D, 0);
+ return texture_id;
+}
+
+static bool cuda_register_opengl_texture(gsr_cuda *cuda, CUgraphicsResource *cuda_graphics_resource, CUarray *mapped_array, unsigned int texture_id) {
+ CUresult res;
+ res = cuda->cuGraphicsGLRegisterImage(cuda_graphics_resource, texture_id, GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_NONE);
+ if (res != CUDA_SUCCESS) {
+ const char *err_str = "unknown";
+ cuda->cuGetErrorString(res, &err_str);
+ fprintf(stderr, "gsr error: cuda_register_opengl_texture: cuGraphicsGLRegisterImage failed, error: %s, texture " "id: %u\n", err_str, texture_id);
+ return false;
+ }
+
+ res = cuda->cuGraphicsResourceSetMapFlags(*cuda_graphics_resource, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
+ res = cuda->cuGraphicsMapResources(1, cuda_graphics_resource, 0);
+
+ res = cuda->cuGraphicsSubResourceGetMappedArray(mapped_array, *cuda_graphics_resource, 0, 0);
+ return true;
+}
+
/* Allocates a CUDA hardware frame for |frame|, creates the input and cursor
 * GL textures, creates the two target textures (Y plane + interleaved UV
 * plane; 8-bit for NV12, 16-bit for P010 when |hdr|), registers them with
 * CUDA via |cuda_context|, and initializes the color-conversion pipeline that
 * renders into them. Returns false on any failure (already-created resources
 * are not freed here; the caller tears down via gsr_capture_base_stop). */
bool gsr_capture_base_setup_cuda_textures(gsr_capture_base *self, AVFrame *frame, gsr_cuda_context *cuda_context, gsr_color_range color_range, gsr_source_color source_color, bool hdr) {
    // TODO:
    const int res = av_hwframe_get_buffer(self->video_codec_context->hw_frames_ctx, frame, 0);
    if(res < 0) {
        fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: av_hwframe_get_buffer failed: %d\n", res);
        return false;
    }

    // Texture the captured screen content is imported into.
    self->egl->glGenTextures(1, &self->input_texture);
    self->egl->glBindTexture(GL_TEXTURE_2D, self->input_texture);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->egl->glBindTexture(GL_TEXTURE_2D, 0);

    // Cursor texture uses the external-OES target here (unlike the VAAPI
    // path) — matches the cursor_texture_is_external case in the kms capture.
    self->egl->glGenTextures(1, &self->cursor_texture);
    self->egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->cursor_texture);
    self->egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    self->egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    self->egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    self->egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    self->egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);

    const unsigned int internal_formats_nv12[2] = { GL_R8, GL_RG8 };
    const unsigned int internal_formats_p010[2] = { GL_R16, GL_RG16 };
    const unsigned int formats[2] = { GL_RED, GL_RG };
    const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size

    for(int i = 0; i < 2; ++i) {
        self->target_textures[i] = gl_create_texture(self->egl, self->video_codec_context->width / div[i], self->video_codec_context->height / div[i], !hdr ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
        if(self->target_textures[i] == 0) {
            fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: failed to create opengl texture\n");
            return false;
        }

        if(!cuda_register_opengl_texture(cuda_context->cuda, &cuda_context->cuda_graphics_resources[i], &cuda_context->mapped_arrays[i], self->target_textures[i])) {
            return false;
        }
    }

    gsr_color_conversion_params color_conversion_params = {0};
    color_conversion_params.color_range = color_range;
    color_conversion_params.egl = self->egl;
    color_conversion_params.source_color = source_color;
    if(!hdr)
        color_conversion_params.destination_color = GSR_DESTINATION_COLOR_NV12;
    else
        color_conversion_params.destination_color = GSR_DESTINATION_COLOR_P010;

    color_conversion_params.destination_textures[0] = self->target_textures[0];
    color_conversion_params.destination_textures[1] = self->target_textures[1];
    color_conversion_params.num_destination_textures = 2;
    // The external-OES cursor texture needs the samplerExternalOES shader
    // variant.
    color_conversion_params.load_external_image_shader = true;

    if(gsr_color_conversion_init(&self->color_conversion, &color_conversion_params) != 0) {
        fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: failed to create color conversion\n");
        return false;
    }

    gsr_color_conversion_clear(&self->color_conversion);

    return true;
}
+
/* Releases resources created by the setup_*_textures functions: the color
 * conversion pipeline, all GL textures (only when an EGL context still
 * exists) and the FFmpeg hardware device/frames contexts. */
void gsr_capture_base_stop(gsr_capture_base *self) {
    gsr_color_conversion_deinit(&self->color_conversion);

    // GL calls are only valid while the EGL context is alive.
    if(self->egl->egl_context) {
        if(self->input_texture) {
            self->egl->glDeleteTextures(1, &self->input_texture);
            self->input_texture = 0;
        }

        if(self->cursor_texture) {
            self->egl->glDeleteTextures(1, &self->cursor_texture);
            self->cursor_texture = 0;
        }

        // glDeleteTextures silently ignores 0 ids, so no per-entry check.
        self->egl->glDeleteTextures(2, self->target_textures);
        self->target_textures[0] = 0;
        self->target_textures[1] = 0;
    }

    if(self->video_codec_context->hw_device_ctx)
        av_buffer_unref(&self->video_codec_context->hw_device_ctx);
    if(self->video_codec_context->hw_frames_ctx)
        av_buffer_unref(&self->video_codec_context->hw_frames_ctx);
}
+
/* Creates an FFmpeg VAAPI hardware device + frames context for encoding
 * |width| x |height| frames (P010 when |hdr|, NV12 otherwise), attaches both
 * to |video_codec_context| and returns the VADisplay through |va_dpy|.
 * Returns false on failure.
 * NOTE(review): hw_frame_context->device_ref is assigned directly without
 * av_buffer_ref, and frame_context is never unreffed (see the commented-out
 * unref below) — presumably to avoid a double-unref of device_ctx when the
 * frame context is freed. The net effect looks like a deliberate leak of one
 * reference each; confirm against the FFmpeg hwcontext documentation before
 * changing. */
bool drm_create_codec_context(const char *card_path, AVCodecContext *video_codec_context, int width, int height, bool hdr, VADisplay *va_dpy) {
    // Map e.g. /dev/dri/card0 to its render node (/dev/dri/renderD128).
    char render_path[128];
    if(!gsr_card_path_get_render_path(card_path, render_path)) {
        fprintf(stderr, "gsr error: failed to get /dev/dri/renderDXXX file from %s\n", card_path);
        return false;
    }

    AVBufferRef *device_ctx;
    if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
        fprintf(stderr, "Error: Failed to create hardware device context\n");
        return false;
    }

    AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
    if(!frame_context) {
        fprintf(stderr, "Error: Failed to create hwframe context\n");
        av_buffer_unref(&device_ctx);
        return false;
    }

    AVHWFramesContext *hw_frame_context =
        (AVHWFramesContext *)frame_context->data;
    hw_frame_context->width = width;
    hw_frame_context->height = height;
    hw_frame_context->sw_format = hdr ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
    hw_frame_context->format = video_codec_context->pix_fmt;
    hw_frame_context->device_ref = device_ctx;
    hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;

    //hw_frame_context->initial_pool_size = 20;

    // Extract the VADisplay so callers can export surfaces from it.
    AVVAAPIDeviceContext *vactx =((AVHWDeviceContext*)device_ctx->data)->hwctx;
    *va_dpy = vactx->display;

    if (av_hwframe_ctx_init(frame_context) < 0) {
        fprintf(stderr, "Error: Failed to initialize hardware frame context "
                        "(note: ffmpeg version needs to be > 4.0)\n");
        av_buffer_unref(&device_ctx);
        //av_buffer_unref(&frame_context);
        return false;
    }

    video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
    video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
    return true;
}
+
/* Creates an FFmpeg CUDA hardware device + frames context around the existing
 * CUDA context |cu_ctx| for encoding |width| x |height| frames (P010 when
 * |hdr|, NV12 otherwise), attaches both to |video_codec_context| and returns
 * the device's CUstream through |cuda_stream|. Returns false on failure.
 * NOTE(review): same reference-ownership pattern as drm_create_codec_context
 * (device_ref assigned without ref, frame_context never unreffed) — see the
 * note there. */
bool cuda_create_codec_context(CUcontext cu_ctx, AVCodecContext *video_codec_context, int width, int height, bool hdr, CUstream *cuda_stream) {
    AVBufferRef *device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
    if(!device_ctx) {
        fprintf(stderr, "gsr error: cuda_create_codec_context failed: failed to create hardware device context\n");
        return false;
    }

    // Reuse the caller's CUDA context instead of letting FFmpeg create one.
    AVHWDeviceContext *hw_device_context = (AVHWDeviceContext*)device_ctx->data;
    AVCUDADeviceContext *cuda_device_context = (AVCUDADeviceContext*)hw_device_context->hwctx;
    cuda_device_context->cuda_ctx = cu_ctx;
    if(av_hwdevice_ctx_init(device_ctx) < 0) {
        fprintf(stderr, "gsr error: cuda_create_codec_context failed: failed to create hardware device context\n");
        av_buffer_unref(&device_ctx);
        return false;
    }

    AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
    if(!frame_context) {
        fprintf(stderr, "gsr error: cuda_create_codec_context failed: failed to create hwframe context\n");
        av_buffer_unref(&device_ctx);
        return false;
    }

    AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
    hw_frame_context->width = width;
    hw_frame_context->height = height;
    hw_frame_context->sw_format = hdr ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
    hw_frame_context->format = video_codec_context->pix_fmt;
    hw_frame_context->device_ref = device_ctx;
    hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;

    if (av_hwframe_ctx_init(frame_context) < 0) {
        fprintf(stderr, "gsr error: cuda_create_codec_context failed: failed to initialize hardware frame context "
                        "(note: ffmpeg version needs to be > 4.0)\n");
        av_buffer_unref(&device_ctx);
        //av_buffer_unref(&frame_context);
        return false;
    }

    *cuda_stream = cuda_device_context->stream;
    video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
    video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
    return true;
}
diff --git a/src/capture/kms.c b/src/capture/kms.c
new file mode 100644
index 0000000..16b20b7
--- /dev/null
+++ b/src/capture/kms.c
@@ -0,0 +1,369 @@
+#include "../../include/capture/kms.h"
+#include "../../include/capture/capture.h"
+#include "../../include/utils.h"
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <libavcodec/avcodec.h>
+#include <libavutil/mastering_display_metadata.h>
+
+#define HDMI_STATIC_METADATA_TYPE1 0
+#define HDMI_EOTF_SMPTE_ST2084 2
+
+/* TODO: On monitor reconfiguration, find monitor x, y, width and height again. Do the same for nvfbc. */
+
/* Userdata passed to monitor_callback while enumerating DRM outputs. */
typedef struct {
    MonitorId *monitor_id;          // out: connector ids collected for the matching monitor
    const char *monitor_to_capture; // name of the monitor to capture (compared by explicit length)
    int monitor_to_capture_len;     // byte length of monitor_to_capture
    int num_monitors;               // out: total number of active monitors seen
} MonitorCallbackUserdata;
+
+static void monitor_callback(const gsr_monitor *monitor, void *userdata) {
+ MonitorCallbackUserdata *monitor_callback_userdata = userdata;
+ ++monitor_callback_userdata->num_monitors;
+
+ if(monitor_callback_userdata->monitor_to_capture_len != monitor->name_len || memcmp(monitor_callback_userdata->monitor_to_capture, monitor->name, monitor->name_len) != 0)
+ return;
+
+ if(monitor_callback_userdata->monitor_id->num_connector_ids < MAX_CONNECTOR_IDS) {
+ monitor_callback_userdata->monitor_id->connector_ids[monitor_callback_userdata->monitor_id->num_connector_ids] = monitor->connector_id;
+ ++monitor_callback_userdata->monitor_id->num_connector_ids;
+ }
+
+ if(monitor_callback_userdata->monitor_id->num_connector_ids == MAX_CONNECTOR_IDS)
+ fprintf(stderr, "gsr warning: reached max connector ids\n");
+}
+
/* Returns the larger of two ints. */
static int max_int(int a, int b) {
    if(a < b)
        return b;
    return a;
}
+
/* Initializes kms capture of the monitor named |display_to_capture|:
 * connects to the kms server, collects the monitor's DRM connector ids,
 * determines its position/size/rotation, and sets the codec context and
 * frame dimensions (even, at least 2x2) accordingly.
 * Returns 0 on success, the gsr_kms_client_init error code or -1 otherwise.
 * NOTE(review): on the monitor-not-found path the kms client is left
 * initialized — presumably the caller invokes gsr_capture_kms_stop on
 * failure; confirm. */
int gsr_capture_kms_start(gsr_capture_kms *self, const char *display_to_capture, gsr_egl *egl, AVCodecContext *video_codec_context, AVFrame *frame) {
    memset(self, 0, sizeof(*self));
    self->base.video_codec_context = video_codec_context;
    self->base.egl = egl;

    gsr_monitor monitor;
    self->monitor_id.num_connector_ids = 0;

    int kms_init_res = gsr_kms_client_init(&self->kms_client, egl->card_path);
    if(kms_init_res != 0)
        return kms_init_res;

    // Collect the connector ids of the target monitor (used later to match
    // kms planes to this monitor).
    MonitorCallbackUserdata monitor_callback_userdata = {
        &self->monitor_id,
        display_to_capture, strlen(display_to_capture),
        0,
    };
    for_each_active_monitor_output(egl, GSR_CONNECTION_DRM, monitor_callback, &monitor_callback_userdata);

    if(!get_monitor_by_name(egl, GSR_CONNECTION_DRM, display_to_capture, &monitor)) {
        fprintf(stderr, "gsr error: gsr_capture_kms_start: failed to find monitor by name \"%s\"\n", display_to_capture);
        return -1;
    }

    monitor.name = display_to_capture;
    self->monitor_rotation = drm_monitor_get_display_server_rotation(egl, &monitor);

    self->capture_pos = monitor.pos;
    // For 90/270 degree rotations the captured plane is stored unrotated, so
    // swap width and height.
    if(self->monitor_rotation == GSR_MONITOR_ROT_90 || self->monitor_rotation == GSR_MONITOR_ROT_270) {
        self->capture_size.x = monitor.size.y;
        self->capture_size.y = monitor.size.x;
    } else {
        self->capture_size = monitor.size;
    }

    /* Disable vsync */
    egl->eglSwapInterval(egl->egl_display, 0);

    // Encoders typically require even dimensions; clamp to at least 2.
    self->base.video_codec_context->width = max_int(2, even_number_ceil(self->capture_size.x));
    self->base.video_codec_context->height = max_int(2, even_number_ceil(self->capture_size.y));

    frame->width = self->base.video_codec_context->width;
    frame->height = self->base.video_codec_context->height;
    return 0;
}
+
/* Stops kms capture: closes any dma-buf fds still held from the last kms
 * response, disconnects from the kms server and releases the GL/FFmpeg
 * resources owned by the base capture. */
void gsr_capture_kms_stop(gsr_capture_kms *self) {
    gsr_capture_kms_cleanup_kms_fds(self);
    gsr_kms_client_deinit(&self->kms_client);
    gsr_capture_base_stop(&self->base);
}
+
+static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
+ switch(rot) {
+ case GSR_MONITOR_ROT_0: return 0.0f;
+ case GSR_MONITOR_ROT_90: return M_PI_2;
+ case GSR_MONITOR_ROT_180: return M_PI;
+ case GSR_MONITOR_ROT_270: return M_PI + M_PI_2;
+ }
+ return 0.0f;
+}
+
+/* Prefer non combined planes */
+static gsr_kms_response_fd* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
+ int index_combined = -1;
+ for(int i = 0; i < kms_response->num_fds; ++i) {
+ if(kms_response->fds[i].connector_id == connector_id && !kms_response->fds[i].is_cursor) {
+ if(kms_response->fds[i].is_combined_plane)
+ index_combined = i;
+ else
+ return &kms_response->fds[i];
+ }
+ }
+
+ if(index_combined != -1)
+ return &kms_response->fds[index_combined];
+ else
+ return NULL;
+}
+
+static gsr_kms_response_fd* find_first_combined_drm(gsr_kms_response *kms_response) {
+ for(int i = 0; i < kms_response->num_fds; ++i) {
+ if(kms_response->fds[i].is_combined_plane && !kms_response->fds[i].is_cursor)
+ return &kms_response->fds[i];
+ }
+ return NULL;
+}
+
+static gsr_kms_response_fd* find_largest_drm(gsr_kms_response *kms_response) {
+ if(kms_response->num_fds == 0)
+ return NULL;
+
+ int64_t largest_size = 0;
+ gsr_kms_response_fd *largest_drm = &kms_response->fds[0];
+ for(int i = 0; i < kms_response->num_fds; ++i) {
+ const int64_t size = (int64_t)kms_response->fds[i].width * (int64_t)kms_response->fds[i].height;
+ if(size > largest_size && !kms_response->fds[i].is_cursor) {
+ largest_size = size;
+ largest_drm = &kms_response->fds[i];
+ }
+ }
+ return largest_drm;
+}
+
+static gsr_kms_response_fd* find_cursor_drm(gsr_kms_response *kms_response) {
+ for(int i = 0; i < kms_response->num_fds; ++i) {
+ if(kms_response->fds[i].is_cursor)
+ return &kms_response->fds[i];
+ }
+ return NULL;
+}
+
+static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *hdr_metadata) {
+ return hdr_metadata->metadata_type == HDMI_STATIC_METADATA_TYPE1 &&
+ hdr_metadata->hdmi_metadata_type1.metadata_type == HDMI_STATIC_METADATA_TYPE1 &&
+ hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
+}
+
/* Translates the connector's HDMI HDR static metadata into FFmpeg
 * mastering-display and content-light side data on |frame|. The side data is
 * created once and cached on |self|; creation failures are tolerated (the
 * corresponding metadata is simply not written). */
static void gsr_kms_set_hdr_metadata(gsr_capture_kms *self, AVFrame *frame, gsr_kms_response_fd *drm_fd) {
    if(!self->mastering_display_metadata)
        self->mastering_display_metadata = av_mastering_display_metadata_create_side_data(frame);

    if(!self->light_metadata)
        self->light_metadata = av_content_light_metadata_create_side_data(frame);

    if(self->mastering_display_metadata) {
        // CTA-861 display primaries and white point are in units of 0.00002
        // (hence the 1/50000 rationals).
        for(int i = 0; i < 3; ++i) {
            self->mastering_display_metadata->display_primaries[i][0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
            self->mastering_display_metadata->display_primaries[i][1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
        }

        self->mastering_display_metadata->white_point[0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
        self->mastering_display_metadata->white_point[1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);

        // Min luminance is in units of 0.0001 cd/m2, max in 1 cd/m2.
        self->mastering_display_metadata->min_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
        self->mastering_display_metadata->max_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);

        // Only claim primaries/luminance when the source actually provided
        // non-zero values.
        self->mastering_display_metadata->has_primaries = self->mastering_display_metadata->display_primaries[0][0].num > 0;
        self->mastering_display_metadata->has_luminance = self->mastering_display_metadata->max_luminance.num > 0;
    }

    if(self->light_metadata) {
        self->light_metadata->MaxCLL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_cll;
        self->light_metadata->MaxFALL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_fall;
    }
}
+
+static vec2i swap_vec2i(vec2i value) {
+ int tmp = value.x;
+ value.x = value.y;
+ value.y = tmp;
+ return value;
+}
+
/* Captures one frame: requests the current kms planes from the kms server,
 * picks the plane belonging to the target monitor (falling back to a
 * combined/largest plane on X11 or if the monitor disconnected), imports it
 * as an EGLImage into the input texture, runs color conversion into the
 * target NV12/P010 textures with the monitor's rotation applied, and
 * optionally composites the hardware cursor plane on top. Sets HDR side data
 * on |frame| when available and requested. Returns false when no usable
 * plane could be obtained. The dma-buf fds received here stay open until the
 * next call (or stop) closes them via gsr_capture_kms_cleanup_kms_fds. */
bool gsr_capture_kms_capture(gsr_capture_kms *self, AVFrame *frame, bool hdr, bool screen_plane_use_modifiers, bool cursor_texture_is_external, bool record_cursor) {
    //egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    // NOTE(review): glClear(0) with an empty mask clears nothing — presumably
    // kept as a synchronization point; confirm whether it can be removed.
    self->base.egl->glClear(0);

    // Close the fds from the previous frame before fetching new ones.
    gsr_capture_kms_cleanup_kms_fds(self);

    gsr_kms_response_fd *drm_fd = NULL;
    gsr_kms_response_fd *cursor_drm_fd = NULL;
    bool capture_is_combined_plane = false;

    if(gsr_kms_client_get_kms(&self->kms_client, &self->kms_response) != 0) {
        fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to get kms, error: %d (%s)\n", self->kms_response.result, self->kms_response.err_msg);
        return false;
    }

    if(self->kms_response.num_fds == 0) {
        static bool error_shown = false;
        if(!error_shown) {
            error_shown = true;
            fprintf(stderr, "gsr error: no drm found, capture will fail\n");
        }
        return false;
    }

    // Prefer a plane that belongs to one of the target monitor's connectors.
    for(int i = 0; i < self->monitor_id.num_connector_ids; ++i) {
        drm_fd = find_drm_by_connector_id(&self->kms_response, self->monitor_id.connector_ids[i]);
        if(drm_fd)
            break;
    }

    // Will never happen on wayland unless the target monitor has been disconnected
    if(!drm_fd) {
        drm_fd = find_first_combined_drm(&self->kms_response);
        if(!drm_fd)
            drm_fd = find_largest_drm(&self->kms_response);
        capture_is_combined_plane = true;
    }

    cursor_drm_fd = find_cursor_drm(&self->kms_response);

    if(!drm_fd)
        return false;

    // A cursor plane on a different connector belongs to another monitor.
    if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != drm_fd->connector_id)
        cursor_drm_fd = NULL;

    if(drm_fd->has_hdr_metadata && hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
        gsr_kms_set_hdr_metadata(self, frame, drm_fd);

    // TODO: This causes a crash sometimes on steam deck, why? is it a driver bug? a vaapi pure version doesn't cause a crash.
    // Even ffmpeg kmsgrab causes this crash. The error is:
    // amdgpu: Failed to allocate a buffer:
    // amdgpu: size : 28508160 bytes
    // amdgpu: alignment : 2097152 bytes
    // amdgpu: domains : 4
    // amdgpu: flags : 4
    // amdgpu: Failed to allocate a buffer:
    // amdgpu: size : 28508160 bytes
    // amdgpu: alignment : 2097152 bytes
    // amdgpu: domains : 4
    // amdgpu: flags : 4
    // EE ../jupiter-mesa/src/gallium/drivers/radeonsi/radeon_vcn_enc.c:516 radeon_create_encoder UVD - Can't create CPB buffer.
    // [hevc_vaapi @ 0x55ea72b09840] Failed to upload encode parameters: 2 (resource allocation failed).
    // [hevc_vaapi @ 0x55ea72b09840] Encode failed: -5.
    // Error: avcodec_send_frame failed, error: Input/output error
    // Assertion pic->display_order == pic->encode_order failed at libavcodec/vaapi_encode_h265.c:765
    // kms server info: kms client shutdown, shutting down the server
    // Attribute list: 6 fixed pairs, then either the modifier pair + EGL_NONE
    // (modifier path) or EGL_NONE immediately (slots 12-17 filled below).
    intptr_t img_attr[18] = {
        EGL_LINUX_DRM_FOURCC_EXT, drm_fd->pixel_format,
        EGL_WIDTH, drm_fd->width,
        EGL_HEIGHT, drm_fd->height,
        EGL_DMA_BUF_PLANE0_FD_EXT, drm_fd->fd,
        EGL_DMA_BUF_PLANE0_OFFSET_EXT, drm_fd->offset,
        EGL_DMA_BUF_PLANE0_PITCH_EXT, drm_fd->pitch,
    };

    if(screen_plane_use_modifiers) {
        img_attr[12] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
        img_attr[13] = drm_fd->modifier & 0xFFFFFFFFULL;

        img_attr[14] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
        img_attr[15] = drm_fd->modifier >> 32ULL;

        img_attr[16] = EGL_NONE;
        img_attr[17] = EGL_NONE;
    } else {
        img_attr[12] = EGL_NONE;
        img_attr[13] = EGL_NONE;
    }

    // Import the screen plane into the input texture; the EGLImage handle can
    // be destroyed right after binding (the texture keeps the storage alive).
    // NOTE(review): eglCreateImage failure is not checked here, unlike the
    // setup path — an EGL_NO_IMAGE would silently produce a stale frame.
    EGLImage image = self->base.egl->eglCreateImage(self->base.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
    self->base.egl->glBindTexture(GL_TEXTURE_2D, self->base.input_texture);
    self->base.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
    self->base.egl->eglDestroyImage(self->base.egl->egl_display, image);
    self->base.egl->glBindTexture(GL_TEXTURE_2D, 0);

    // For a combined plane the monitor offset must be applied; a dedicated
    // plane already carries its own position.
    vec2i capture_pos = self->capture_pos;
    if(!capture_is_combined_plane)
        capture_pos = (vec2i){drm_fd->x, drm_fd->y};

    const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);

    gsr_color_conversion_draw(&self->base.color_conversion, self->base.input_texture,
        (vec2i){0, 0}, self->capture_size,
        capture_pos, self->capture_size,
        texture_rotation, false);

    if(record_cursor && cursor_drm_fd) {
        const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
        vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
        // Cursor coordinates are in the unrotated plane; map them into the
        // rotated capture space.
        switch(self->monitor_rotation) {
            case GSR_MONITOR_ROT_0:
                break;
            case GSR_MONITOR_ROT_90:
                cursor_pos = swap_vec2i(cursor_pos);
                cursor_pos.x = self->capture_size.x - cursor_pos.x;
                // TODO: Remove this horrible hack
                cursor_pos.x -= cursor_size.x;
                break;
            case GSR_MONITOR_ROT_180:
                cursor_pos.x = self->capture_size.x - cursor_pos.x;
                cursor_pos.y = self->capture_size.y - cursor_pos.y;
                // TODO: Remove this horrible hack
                cursor_pos.x -= cursor_size.x;
                cursor_pos.y -= cursor_size.y;
                break;
            case GSR_MONITOR_ROT_270:
                cursor_pos = swap_vec2i(cursor_pos);
                cursor_pos.y = self->capture_size.y - cursor_pos.y;
                // TODO: Remove this horrible hack
                cursor_pos.y -= cursor_size.y;
                break;
        }

        // Cursor planes always use explicit modifiers here.
        const intptr_t img_attr_cursor[] = {
            EGL_LINUX_DRM_FOURCC_EXT, cursor_drm_fd->pixel_format,
            EGL_WIDTH, cursor_drm_fd->width,
            EGL_HEIGHT, cursor_drm_fd->height,
            EGL_DMA_BUF_PLANE0_FD_EXT, cursor_drm_fd->fd,
            EGL_DMA_BUF_PLANE0_OFFSET_EXT, cursor_drm_fd->offset,
            EGL_DMA_BUF_PLANE0_PITCH_EXT, cursor_drm_fd->pitch,
            EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, cursor_drm_fd->modifier & 0xFFFFFFFFULL,
            EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, cursor_drm_fd->modifier >> 32ULL,
            EGL_NONE
        };

        EGLImage cursor_image = self->base.egl->eglCreateImage(self->base.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
        const int target = cursor_texture_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
        self->base.egl->glBindTexture(target, self->base.cursor_texture);
        self->base.egl->glEGLImageTargetTexture2DOES(target, cursor_image);
        self->base.egl->eglDestroyImage(self->base.egl->egl_display, cursor_image);
        self->base.egl->glBindTexture(target, 0);

        gsr_color_conversion_draw(&self->base.color_conversion, self->base.cursor_texture,
            cursor_pos, cursor_size,
            (vec2i){0, 0}, cursor_size,
            texture_rotation, cursor_texture_is_external);
    }

    self->base.egl->eglSwapBuffers(self->base.egl->egl_display, self->base.egl->egl_surface);
    //self->base.egl->glFlush();
    //self->base.egl->glFinish();

    return true;
}
+
+void gsr_capture_kms_cleanup_kms_fds(gsr_capture_kms *self) {
+ for(int i = 0; i < self->kms_response.num_fds; ++i) {
+ if(self->kms_response.fds[i].fd > 0)
+ close(self->kms_response.fds[i].fd);
+ self->kms_response.fds[i].fd = 0;
+ }
+ self->kms_response.num_fds = 0;
+}
diff --git a/src/capture/kms_cuda.c b/src/capture/kms_cuda.c
new file mode 100644
index 0000000..a9f1f8e
--- /dev/null
+++ b/src/capture/kms_cuda.c
@@ -0,0 +1,181 @@
+#include "../../include/capture/kms_cuda.h"
+#include "../../include/capture/kms.h"
+#include "../../include/cuda.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/hwcontext_cuda.h>
+#include <libavcodec/avcodec.h>
+
+/* Private state for the KMS capture backend that hands frames to the
+ * encoder through CUDA (NVENC path). */
+typedef struct {
+    gsr_capture_kms kms;                            // shared KMS capture state/logic
+
+    gsr_capture_kms_cuda_params params;             // owned copy; display_to_capture is strdup'd in create()
+
+    gsr_cuda cuda;                                  // loaded CUDA driver library + function pointers
+    CUgraphicsResource cuda_graphics_resources[2];  // GL textures registered with CUDA, one per plane
+    CUarray mapped_arrays[2];                       // CUDA arrays mapped from the resources above
+    CUstream cuda_stream;                           // stream used for the per-plane async copies
+} gsr_capture_kms_cuda;
+
+static void gsr_capture_kms_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
+
+/* Starts KMS capture and sets up the CUDA side: loads the CUDA driver,
+ * creates the CUDA hwframe/codec context and registers the GL textures
+ * with CUDA so captured frames can be copied into encoder frames.
+ * Returns 0 on success; on any failure everything set up so far is torn
+ * down via gsr_capture_kms_cuda_stop() and a non-zero value is returned. */
+static int gsr_capture_kms_cuda_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+    gsr_capture_kms_cuda *cap_kms = cap->priv;
+
+    const int res = gsr_capture_kms_start(&cap_kms->kms, cap_kms->params.display_to_capture, cap_kms->params.egl, video_codec_context, frame);
+    if(res != 0) {
+        gsr_capture_kms_cuda_stop(cap, video_codec_context);
+        return res;
+    }
+
+    // TODO: overclocking is not supported on wayland...
+    if(!gsr_cuda_load(&cap_kms->cuda, NULL, false)) { // NULL display: no X11 on the KMS path
+        fprintf(stderr, "gsr error: gsr_capture_kms_cuda_start: failed to load cuda\n");
+        gsr_capture_kms_cuda_stop(cap, video_codec_context);
+        return -1;
+    }
+
+    if(!cuda_create_codec_context(cap_kms->cuda.cu_ctx, video_codec_context, video_codec_context->width, video_codec_context->height, cap_kms->params.hdr, &cap_kms->cuda_stream)) {
+        gsr_capture_kms_cuda_stop(cap, video_codec_context);
+        return -1;
+    }
+
+    // Bundle the CUDA handles so the base capture can register/map the GL textures
+    gsr_cuda_context cuda_context = {
+        .cuda = &cap_kms->cuda,
+        .cuda_graphics_resources = cap_kms->cuda_graphics_resources,
+        .mapped_arrays = cap_kms->mapped_arrays
+    };
+
+    if(!gsr_capture_base_setup_cuda_textures(&cap_kms->kms.base, frame, &cuda_context, cap_kms->params.color_range, GSR_SOURCE_COLOR_RGB, cap_kms->params.hdr)) {
+        gsr_capture_kms_cuda_stop(cap, video_codec_context);
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Reports whether capture should stop, as flagged by the underlying KMS
+ * layer. If |err| is non-NULL it is set to whether the stop is due to an
+ * error (only meaningful when this returns true; otherwise set to false). */
+static bool gsr_capture_kms_cuda_should_stop(gsr_capture *cap, bool *err) {
+    gsr_capture_kms_cuda *cap_kms = cap->priv;
+    if(cap_kms->kms.should_stop) {
+        if(err)
+            *err = cap_kms->kms.stop_is_error;
+        return true;
+    }
+
+    if(err)
+        *err = false;
+    return false;
+}
+
+/* Unmaps and unregisters the GL textures from CUDA (reverse of the setup
+ * done in start). Safe to call when nothing was registered: it is a no-op
+ * without a CUDA context and skips NULL resource slots. */
+static void gsr_capture_kms_unload_cuda_graphics(gsr_capture_kms_cuda *cap_kms) {
+    if(cap_kms->cuda.cu_ctx) {
+        for(int i = 0; i < 2; ++i) {
+            if(cap_kms->cuda_graphics_resources[i]) {
+                cap_kms->cuda.cuGraphicsUnmapResources(1, &cap_kms->cuda_graphics_resources[i], 0);
+                cap_kms->cuda.cuGraphicsUnregisterResource(cap_kms->cuda_graphics_resources[i]);
+                cap_kms->cuda_graphics_resources[i] = 0;
+            }
+        }
+    }
+}
+
+/* Captures one frame: draws the latest KMS buffer into the GL textures,
+ * then copies each plane's mapped CUDA array into the encoder's device
+ * frame. Always returns 0.
+ * NOTE(review): the return value of gsr_capture_kms_capture is ignored —
+ * confirm a failed draw is intentionally still encoded. */
+static int gsr_capture_kms_cuda_capture(gsr_capture *cap, AVFrame *frame) {
+    gsr_capture_kms_cuda *cap_kms = cap->priv;
+
+    gsr_capture_kms_capture(&cap_kms->kms, frame, cap_kms->params.hdr, true, true, cap_kms->params.record_cursor);
+
+    const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
+    for(int i = 0; i < 2; ++i) {
+        CUDA_MEMCPY2D memcpy_struct;
+        memcpy_struct.srcXInBytes = 0;
+        memcpy_struct.srcY = 0;
+        memcpy_struct.srcMemoryType = CU_MEMORYTYPE_ARRAY;
+
+        memcpy_struct.dstXInBytes = 0;
+        memcpy_struct.dstY = 0;
+        memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+
+        memcpy_struct.srcArray = cap_kms->mapped_arrays[i];
+        memcpy_struct.srcPitch = frame->width / div[i];
+        memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
+        memcpy_struct.dstPitch = frame->linesize[i];
+        // 2 bytes per sample when hdr — presumably P010 vs NV12; confirm
+        memcpy_struct.WidthInBytes = frame->width * (cap_kms->params.hdr ? 2 : 1);
+        memcpy_struct.Height = frame->height / div[i];
+        // TODO: Remove this copy if possible
+        cap_kms->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, cap_kms->cuda_stream);
+    }
+
+    // TODO: needed?
+    cap_kms->cuda.cuStreamSynchronize(cap_kms->cuda_stream);
+
+    return 0;
+}
+
+/* Called after a frame has been consumed: releases the dmabuf fds for the
+ * frame so the KMS server side is not starved of buffers. */
+static void gsr_capture_kms_cuda_capture_end(gsr_capture *cap, AVFrame *frame) {
+    (void)frame;
+    gsr_capture_kms_cuda *cap_kms = cap->priv;
+    gsr_capture_kms_cleanup_kms_fds(&cap_kms->kms);
+}
+
+/* Stops capture and tears down CUDA state in reverse order of start:
+ * graphics resources, then the CUDA library, then the KMS layer.
+ * Safe to call on a partially started capture (all steps are no-ops on
+ * unset state), which is why start() uses it on every error path. */
+static void gsr_capture_kms_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    (void)video_codec_context;
+    gsr_capture_kms_cuda *cap_kms = cap->priv;
+    gsr_capture_kms_unload_cuda_graphics(cap_kms);
+    gsr_cuda_unload(&cap_kms->cuda);
+    gsr_capture_kms_stop(&cap_kms->kms);
+}
+
+/* Stops the capture (if priv still exists), frees the owned copy of
+ * display_to_capture, the private state and the capture object itself.
+ * Fix: removed the misleading `(void)video_codec_context;` cast — the
+ * parameter IS used (forwarded to gsr_capture_kms_cuda_stop below). */
+static void gsr_capture_kms_cuda_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    gsr_capture_kms_cuda *cap_kms = cap->priv;
+    if(cap->priv) {
+        gsr_capture_kms_cuda_stop(cap, video_codec_context);
+        free((void*)cap_kms->params.display_to_capture); // cast drops the const from the strdup'd copy
+        cap_kms->params.display_to_capture = NULL;
+        free(cap->priv);
+        cap->priv = NULL;
+    }
+    free(cap);
+}
+
+/* Creates a KMS->CUDA capture instance. Deep-copies |params| (including
+ * display_to_capture) so the caller's params may go out of scope.
+ * Returns NULL on invalid params or allocation failure.
+ * Fix: validate params->display_to_capture before strdup — strdup(NULL)
+ * is undefined behavior; gsr_capture_nvfbc_create already does this. */
+gsr_capture* gsr_capture_kms_cuda_create(const gsr_capture_kms_cuda_params *params) {
+    if(!params) {
+        fprintf(stderr, "gsr error: gsr_capture_kms_cuda_create params is NULL\n");
+        return NULL;
+    }
+
+    if(!params->display_to_capture) {
+        fprintf(stderr, "gsr error: gsr_capture_kms_cuda_create params.display_to_capture is NULL\n");
+        return NULL;
+    }
+
+    gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+    if(!cap)
+        return NULL;
+
+    gsr_capture_kms_cuda *cap_kms = calloc(1, sizeof(gsr_capture_kms_cuda));
+    if(!cap_kms) {
+        free(cap);
+        return NULL;
+    }
+
+    const char *display_to_capture = strdup(params->display_to_capture);
+    if(!display_to_capture) {
+        free(cap);
+        free(cap_kms);
+        return NULL;
+    }
+
+    cap_kms->params = *params;
+    cap_kms->params.display_to_capture = display_to_capture; // owned; freed in destroy()
+
+    *cap = (gsr_capture) {
+        .start = gsr_capture_kms_cuda_start,
+        .tick = NULL,
+        .should_stop = gsr_capture_kms_cuda_should_stop,
+        .capture = gsr_capture_kms_cuda_capture,
+        .capture_end = gsr_capture_kms_cuda_capture_end,
+        .destroy = gsr_capture_kms_cuda_destroy,
+        .priv = cap_kms
+    };
+
+    return cap;
+}
diff --git a/src/capture/kms_vaapi.c b/src/capture/kms_vaapi.c
new file mode 100644
index 0000000..a7e8182
--- /dev/null
+++ b/src/capture/kms_vaapi.c
@@ -0,0 +1,135 @@
+#include "../../include/capture/kms_vaapi.h"
+#include "../../include/capture/kms.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/hwcontext_vaapi.h>
+#include <libavcodec/avcodec.h>
+#include <va/va_drmcommon.h>
+
+/* Private state for the KMS capture backend that encodes through VAAPI. */
+typedef struct {
+    gsr_capture_kms kms;                 // shared KMS capture state/logic
+
+    gsr_capture_kms_vaapi_params params; // owned copy; display_to_capture is strdup'd in create()
+
+    VADisplay va_dpy;                    // VAAPI display created with the codec context
+    VADRMPRIMESurfaceDescriptor prime;   // exported VA surface; its object fds are closed in stop()
+} gsr_capture_kms_vaapi;
+
+static void gsr_capture_kms_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
+
+/* Starts KMS capture and sets up VAAPI: creates the DRM/VAAPI codec
+ * context and binds the exported VA surface planes to GL textures.
+ * Returns 0 on success; on failure tears down via
+ * gsr_capture_kms_vaapi_stop() and returns non-zero. */
+static int gsr_capture_kms_vaapi_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+    gsr_capture_kms_vaapi *cap_kms = cap->priv;
+
+    int res = gsr_capture_kms_start(&cap_kms->kms, cap_kms->params.display_to_capture, cap_kms->params.egl, video_codec_context, frame);
+    if(res != 0) {
+        gsr_capture_kms_vaapi_stop(cap, video_codec_context);
+        return res;
+    }
+
+    if(!drm_create_codec_context(cap_kms->params.egl->card_path, video_codec_context, video_codec_context->width, video_codec_context->height, cap_kms->params.hdr, &cap_kms->va_dpy)) {
+        gsr_capture_kms_vaapi_stop(cap, video_codec_context);
+        return -1;
+    }
+
+    if(!gsr_capture_base_setup_vaapi_textures(&cap_kms->kms.base, frame, cap_kms->va_dpy, &cap_kms->prime, cap_kms->params.color_range)) {
+        gsr_capture_kms_vaapi_stop(cap, video_codec_context);
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Reports whether capture should stop, as flagged by the underlying KMS
+ * layer. If |err| is non-NULL it is set to whether the stop is due to an
+ * error (only meaningful when this returns true; otherwise set to false). */
+static bool gsr_capture_kms_vaapi_should_stop(gsr_capture *cap, bool *err) {
+    gsr_capture_kms_vaapi *cap_kms = cap->priv;
+    if(cap_kms->kms.should_stop) {
+        if(err)
+            *err = cap_kms->kms.stop_is_error;
+        return true;
+    }
+
+    if(err)
+        *err = false;
+    return false;
+}
+
+/* Captures one frame by drawing the latest KMS buffer into the GL textures
+ * that back the VA surface; no explicit copy is needed on the VAAPI path.
+ * Always returns 0 (the draw's return value is ignored, as on the CUDA path). */
+static int gsr_capture_kms_vaapi_capture(gsr_capture *cap, AVFrame *frame) {
+    gsr_capture_kms_vaapi *cap_kms = cap->priv;
+    gsr_capture_kms_capture(&cap_kms->kms, frame, cap_kms->params.hdr, false, false, cap_kms->params.record_cursor);
+    return 0;
+}
+
+/* Called after a frame has been consumed: releases the dmabuf fds for the
+ * frame so the KMS server side is not starved of buffers. */
+static void gsr_capture_kms_vaapi_capture_end(gsr_capture *cap, AVFrame *frame) {
+    (void)frame;
+    gsr_capture_kms_vaapi *cap_kms = cap->priv;
+    gsr_capture_kms_cleanup_kms_fds(&cap_kms->kms);
+}
+
+/* Stops capture: closes the fds of the exported VA surface objects, then
+ * stops the KMS layer. Safe to call on a partially started capture (the
+ * prime descriptor is zero-initialized, so num_objects is 0).
+ * NOTE(review): fd 0 is treated as "unset" (fd > 0 test), same convention
+ * as gsr_capture_kms_cleanup_kms_fds. */
+static void gsr_capture_kms_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    (void)video_codec_context;
+    gsr_capture_kms_vaapi *cap_kms = cap->priv;
+
+    for(uint32_t i = 0; i < cap_kms->prime.num_objects; ++i) {
+        if(cap_kms->prime.objects[i].fd > 0) {
+            close(cap_kms->prime.objects[i].fd);
+            cap_kms->prime.objects[i].fd = 0;
+        }
+    }
+
+    gsr_capture_kms_stop(&cap_kms->kms);
+}
+
+/* Stops the capture (if priv still exists), frees the owned copy of
+ * display_to_capture, the private state and the capture object itself.
+ * Fix: removed the misleading `(void)video_codec_context;` cast — the
+ * parameter IS used (forwarded to gsr_capture_kms_vaapi_stop below). */
+static void gsr_capture_kms_vaapi_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    gsr_capture_kms_vaapi *cap_kms = cap->priv;
+    if(cap->priv) {
+        gsr_capture_kms_vaapi_stop(cap, video_codec_context);
+        free((void*)cap_kms->params.display_to_capture); // cast drops the const from the strdup'd copy
+        cap_kms->params.display_to_capture = NULL;
+        free(cap->priv);
+        cap->priv = NULL;
+    }
+    free(cap);
+}
+
+/* Creates a KMS->VAAPI capture instance. Deep-copies |params| (including
+ * display_to_capture) so the caller's params may go out of scope.
+ * Returns NULL on invalid params or allocation failure.
+ * Fix: validate params->display_to_capture before strdup — strdup(NULL)
+ * is undefined behavior; gsr_capture_nvfbc_create already does this. */
+gsr_capture* gsr_capture_kms_vaapi_create(const gsr_capture_kms_vaapi_params *params) {
+    if(!params) {
+        fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_create params is NULL\n");
+        return NULL;
+    }
+
+    if(!params->display_to_capture) {
+        fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_create params.display_to_capture is NULL\n");
+        return NULL;
+    }
+
+    gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+    if(!cap)
+        return NULL;
+
+    gsr_capture_kms_vaapi *cap_kms = calloc(1, sizeof(gsr_capture_kms_vaapi));
+    if(!cap_kms) {
+        free(cap);
+        return NULL;
+    }
+
+    const char *display_to_capture = strdup(params->display_to_capture);
+    if(!display_to_capture) {
+        /* TODO XCloseDisplay */
+        free(cap);
+        free(cap_kms);
+        return NULL;
+    }
+
+    cap_kms->params = *params;
+    cap_kms->params.display_to_capture = display_to_capture; // owned; freed in destroy()
+
+    *cap = (gsr_capture) {
+        .start = gsr_capture_kms_vaapi_start,
+        .tick = NULL,
+        .should_stop = gsr_capture_kms_vaapi_should_stop,
+        .capture = gsr_capture_kms_vaapi_capture,
+        .capture_end = gsr_capture_kms_vaapi_capture_end,
+        .destroy = gsr_capture_kms_vaapi_destroy,
+        .priv = cap_kms
+    };
+
+    return cap;
+}
diff --git a/src/capture/nvfbc.c b/src/capture/nvfbc.c
new file mode 100644
index 0000000..9eabb18
--- /dev/null
+++ b/src/capture/nvfbc.c
@@ -0,0 +1,535 @@
+#include "../../include/capture/nvfbc.h"
+#include "../../external/NvFBC.h"
+#include "../../include/cuda.h"
+#include "../../include/egl.h"
+#include "../../include/utils.h"
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+#include <X11/Xlib.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/hwcontext_cuda.h>
+#include <libavutil/frame.h>
+#include <libavutil/version.h>
+#include <libavcodec/avcodec.h>
+
+/* Private state for the NvFBC (X11, NVIDIA) capture backend. */
+typedef struct {
+    gsr_capture_base base;            // common capture state (codec context, EGL, textures)
+    gsr_capture_nvfbc_params params;  // owned copy; display_to_capture is strdup'd in create()
+    void *library;                    // dlopen handle for libnvidia-fbc.so.1
+
+    NVFBC_SESSION_HANDLE nv_fbc_handle;
+    PNVFBCCREATEINSTANCE nv_fbc_create_instance;
+    NVFBC_API_FUNCTION_LIST nv_fbc_function_list;
+    bool fbc_handle_created;          // nvFBCCreateHandle succeeded
+    bool capture_session_created;     // nvFBCCreateCaptureSession succeeded
+
+    gsr_cuda cuda;
+    CUgraphicsResource cuda_graphics_resources[2]; // GL textures registered with CUDA, one per plane
+    CUarray mapped_arrays[2];                      // CUDA arrays mapped from the resources above
+    CUstream cuda_stream; // TODO: asdasdsa
+    NVFBC_TOGL_SETUP_PARAMS setup_params;          // holds the GL textures NvFBC grabs into
+
+    bool direct_capture;              // NvFBC direct/push-model capture requested
+    bool supports_direct_cursor;
+    bool capture_region;              // true when a sub-region (x/y/width/height) was requested
+    uint32_t x, y, width, height;
+    NVFBC_TRACKING_TYPE tracking_type;
+    uint32_t output_id;               // XRandR output id when tracking a single output
+    uint32_t tracking_width, tracking_height;
+    bool nvfbc_needs_recreate;        // set when a grab fails; session is recreated with backoff
+    double nvfbc_dead_start;          // monotonic time of the last failed grab/recreate attempt
+} gsr_capture_nvfbc;
+
+/* Local CUdeviceptr definition so this file does not depend on cuda.h.
+ * NOTE(review): must stay layout-compatible with the driver API's type. */
+#if defined(_WIN64) || defined(__LP64__)
+typedef unsigned long long CUdeviceptr_v2;
+#else
+typedef unsigned int CUdeviceptr_v2;
+#endif
+typedef CUdeviceptr_v2 CUdeviceptr;
+
+/* Returns the larger of a and b. */
+static int max_int(int a, int b) {
+    return a > b ? a : b;
+}
+
+/* Looks up an XRandR output by name in the NvFBC status output list and,
+ * on a match, writes the output's tracked size to *width/*height and
+ * returns its NvFBC output id. Returns 0 on failure (no outputs or no
+ * name match); 0 is never a valid output id. */
+static uint32_t get_output_id_from_display_name(NVFBC_RANDR_OUTPUT_INFO *outputs, uint32_t num_outputs, const char *display_name, uint32_t *width, uint32_t *height) {
+    if(!outputs)
+        return 0;
+
+    for(uint32_t i = 0; i < num_outputs; ++i) {
+        if(strcmp(outputs[i].name, display_name) == 0) {
+            *width = outputs[i].trackedBox.w;
+            *height = outputs[i].trackedBox.h;
+            return outputs[i].dwId;
+        }
+    }
+
+    return 0;
+}
+
+/* TODO: Test with optimus and open kernel modules */
+/* Parses the NVIDIA driver version from /proc/driver/nvidia/version by
+ * scanning for the "Kernel Module" marker. Writes 0.0 and returns false
+ * if the file is unreadable or the marker/version is not found.
+ * Only the first 2047 bytes of the file are examined. */
+static bool get_driver_version(int *major, int *minor) {
+    *major = 0;
+    *minor = 0;
+
+    FILE *f = fopen("/proc/driver/nvidia/version", "rb");
+    if(!f) {
+        fprintf(stderr, "gsr warning: failed to get nvidia driver version (failed to read /proc/driver/nvidia/version)\n");
+        return false;
+    }
+
+    char buffer[2048];
+    size_t bytes_read = fread(buffer, 1, sizeof(buffer) - 1, f);
+    buffer[bytes_read] = '\0';
+
+    bool success = false;
+    const char *p = strstr(buffer, "Kernel Module");
+    if(p) {
+        p += 13; // skip strlen("Kernel Module"); version digits follow
+        int driver_major_version = 0, driver_minor_version = 0;
+        if(sscanf(p, "%d.%d", &driver_major_version, &driver_minor_version) == 2) {
+            *major = driver_major_version;
+            *minor = driver_minor_version;
+            success = true;
+        }
+    }
+
+    if(!success)
+        fprintf(stderr, "gsr warning: failed to get nvidia driver version\n");
+
+    fclose(f);
+    return success;
+}
+
+/* True when version major.minor >= expected_major.expected_minor. */
+static bool version_at_least(int major, int minor, int expected_major, int expected_minor) {
+    return major > expected_major || (major == expected_major && minor >= expected_minor);
+}
+
+/* True when version major.minor < expected_major.expected_minor. */
+static bool version_less_than(int major, int minor, int expected_major, int expected_minor) {
+    return major < expected_major || (major == expected_major && minor < expected_minor);
+}
+
+/* Assigns dlsym's void* result through a void** — presumably to avoid the
+ * direct object-pointer-to-function-pointer cast warning (ISO C); confirm. */
+static void set_func_ptr(void **dst, void *src) {
+    *dst = src;
+}
+
+/* Loads libnvidia-fbc.so.1, resolves NvFBCCreateInstance and fills the
+ * NvFBC function list. On success keeps the dlopen handle in
+ * cap_nvfbc->library (closed in destroy); on failure closes the handle
+ * and returns false. */
+static bool gsr_capture_nvfbc_load_library(gsr_capture *cap) {
+    gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+
+    dlerror(); /* clear */
+    void *lib = dlopen("libnvidia-fbc.so.1", RTLD_LAZY);
+    if(!lib) {
+        fprintf(stderr, "gsr error: failed to load libnvidia-fbc.so.1, error: %s\n", dlerror());
+        return false;
+    }
+
+    set_func_ptr((void**)&cap_nvfbc->nv_fbc_create_instance, dlsym(lib, "NvFBCCreateInstance"));
+    if(!cap_nvfbc->nv_fbc_create_instance) {
+        fprintf(stderr, "gsr error: unable to resolve symbol 'NvFBCCreateInstance'\n");
+        dlclose(lib);
+        return false;
+    }
+
+    memset(&cap_nvfbc->nv_fbc_function_list, 0, sizeof(cap_nvfbc->nv_fbc_function_list));
+    cap_nvfbc->nv_fbc_function_list.dwVersion = NVFBC_VERSION;
+    NVFBCSTATUS status = cap_nvfbc->nv_fbc_create_instance(&cap_nvfbc->nv_fbc_function_list);
+    if(status != NVFBC_SUCCESS) {
+        fprintf(stderr, "gsr error: failed to create NvFBC instance (status: %d)\n", status);
+        dlclose(lib);
+        return false;
+    }
+
+    cap_nvfbc->library = lib;
+    return true;
+}
+
+/* TODO: check for glx swap control extension string (GLX_EXT_swap_control, etc) */
+/* Enables/disables vsync using whichever GLX swap-interval extension is
+ * available, preferring EXT, then MESA, then SGI. Warns once when none
+ * is available. Note: glXSwapIntervalEXT returns void, so only the
+ * MESA/SGI paths can set a non-zero |result|. */
+static void set_vertical_sync_enabled(gsr_egl *egl, int enabled) {
+    int result = 0;
+
+    if(egl->glXSwapIntervalEXT) {
+        egl->glXSwapIntervalEXT(egl->x11.dpy, egl->x11.window, enabled ? 1 : 0);
+    } else if(egl->glXSwapIntervalMESA) {
+        result = egl->glXSwapIntervalMESA(enabled ? 1 : 0);
+    } else if(egl->glXSwapIntervalSGI) {
+        result = egl->glXSwapIntervalSGI(enabled ? 1 : 0);
+    } else {
+        static int warned = 0; // warn only once per process
+        if (!warned) {
+            warned = 1;
+            fprintf(stderr, "gsr warning: setting vertical sync not supported\n");
+        }
+    }
+
+    if(result != 0)
+        fprintf(stderr, "gsr warning: setting vertical sync failed\n");
+}
+
+/* Destroys the NvFBC capture session if one was created. Requires the
+ * handle to still exist; idempotent via the capture_session_created flag. */
+static void gsr_capture_nvfbc_destroy_session(gsr_capture_nvfbc *cap_nvfbc) {
+    if(cap_nvfbc->fbc_handle_created && cap_nvfbc->capture_session_created) {
+        NVFBC_DESTROY_CAPTURE_SESSION_PARAMS destroy_capture_params;
+        memset(&destroy_capture_params, 0, sizeof(destroy_capture_params));
+        destroy_capture_params.dwVersion = NVFBC_DESTROY_CAPTURE_SESSION_PARAMS_VER;
+        cap_nvfbc->nv_fbc_function_list.nvFBCDestroyCaptureSession(cap_nvfbc->nv_fbc_handle, &destroy_capture_params);
+        cap_nvfbc->capture_session_created = false;
+    }
+}
+
+/* Destroys the NvFBC handle if one was created; idempotent via the
+ * fbc_handle_created flag. The session must be destroyed first. */
+static void gsr_capture_nvfbc_destroy_handle(gsr_capture_nvfbc *cap_nvfbc) {
+    if(cap_nvfbc->fbc_handle_created) {
+        NVFBC_DESTROY_HANDLE_PARAMS destroy_params;
+        memset(&destroy_params, 0, sizeof(destroy_params));
+        destroy_params.dwVersion = NVFBC_DESTROY_HANDLE_PARAMS_VER;
+        cap_nvfbc->nv_fbc_function_list.nvFBCDestroyHandle(cap_nvfbc->nv_fbc_handle, &destroy_params);
+        cap_nvfbc->fbc_handle_created = false;
+        cap_nvfbc->nv_fbc_handle = 0;
+    }
+}
+
+/* Tears down session then handle, in the required order. Safe on a
+ * partially initialized state (both callees are idempotent). */
+static void gsr_capture_nvfbc_destroy_session_and_handle(gsr_capture_nvfbc *cap_nvfbc) {
+    gsr_capture_nvfbc_destroy_session(cap_nvfbc);
+    gsr_capture_nvfbc_destroy_handle(cap_nvfbc);
+}
+
+/* Creates the NvFBC handle (retrying once with NVIDIA's private enable key
+ * for consumer GPUs), queries capture status, and resolves what to track:
+ * the whole screen, or a single XRandR output looked up by
+ * params.display_to_capture ("screen" selects screen tracking). Also
+ * records the tracked area size in tracking_width/tracking_height.
+ * Returns 0 on success, -1 on failure (handle destroyed again). */
+static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
+    NVFBCSTATUS status;
+
+    NVFBC_CREATE_HANDLE_PARAMS create_params;
+    memset(&create_params, 0, sizeof(create_params));
+    create_params.dwVersion = NVFBC_CREATE_HANDLE_PARAMS_VER;
+    create_params.bExternallyManagedContext = NVFBC_TRUE;
+    create_params.glxCtx = cap_nvfbc->params.egl->glx_context;
+    create_params.glxFBConfig = cap_nvfbc->params.egl->glx_fb_config;
+
+    status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateHandle(&cap_nvfbc->nv_fbc_handle, &create_params);
+    if(status != NVFBC_SUCCESS) {
+        // Reverse engineering for interoperability
+        // (key unlocks NvFBC on GPUs where it is disabled by default)
+        const uint8_t enable_key[] = { 0xac, 0x10, 0xc9, 0x2e, 0xa5, 0xe6, 0x87, 0x4f, 0x8f, 0x4b, 0xf4, 0x61, 0xf8, 0x56, 0x27, 0xe9 };
+        create_params.privateData = enable_key;
+        create_params.privateDataSize = 16;
+
+        status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateHandle(&cap_nvfbc->nv_fbc_handle, &create_params);
+        if(status != NVFBC_SUCCESS) {
+            fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+            goto error_cleanup;
+        }
+    }
+    cap_nvfbc->fbc_handle_created = true;
+
+    NVFBC_GET_STATUS_PARAMS status_params;
+    memset(&status_params, 0, sizeof(status_params));
+    status_params.dwVersion = NVFBC_GET_STATUS_PARAMS_VER;
+
+    status = cap_nvfbc->nv_fbc_function_list.nvFBCGetStatus(cap_nvfbc->nv_fbc_handle, &status_params);
+    if(status != NVFBC_SUCCESS) {
+        fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+        goto error_cleanup;
+    }
+
+    if(status_params.bCanCreateNow == NVFBC_FALSE) {
+        fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: it's not possible to create a capture session on this system\n");
+        goto error_cleanup;
+    }
+
+    // Default to the whole X screen size; refined below when tracking one output
+    cap_nvfbc->tracking_width = XWidthOfScreen(DefaultScreenOfDisplay(cap_nvfbc->params.egl->x11.dpy));
+    cap_nvfbc->tracking_height = XHeightOfScreen(DefaultScreenOfDisplay(cap_nvfbc->params.egl->x11.dpy));
+    cap_nvfbc->tracking_type = strcmp(cap_nvfbc->params.display_to_capture, "screen") == 0 ? NVFBC_TRACKING_SCREEN : NVFBC_TRACKING_OUTPUT;
+    if(cap_nvfbc->tracking_type == NVFBC_TRACKING_OUTPUT) {
+        if(!status_params.bXRandRAvailable) {
+            fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: the xrandr extension is not available\n");
+            goto error_cleanup;
+        }
+
+        if(status_params.bInModeset) {
+            fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: the x server is in modeset, unable to record\n");
+            goto error_cleanup;
+        }
+
+        cap_nvfbc->output_id = get_output_id_from_display_name(status_params.outputs, status_params.dwOutputNum, cap_nvfbc->params.display_to_capture, &cap_nvfbc->tracking_width, &cap_nvfbc->tracking_height);
+        if(cap_nvfbc->output_id == 0) {
+            fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: display '%s' not found\n", cap_nvfbc->params.display_to_capture);
+            goto error_cleanup;
+        }
+    }
+
+    return 0;
+
+    error_cleanup:
+    gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+    return -1;
+}
+
+/* Creates the NvFBC capture-to-GL session using the tracking settings
+ * resolved in setup_handle, then runs the ToGL setup that allocates the
+ * GL textures NvFBC grabs into (kept in cap_nvfbc->setup_params).
+ * Cursor is captured unless direct capture without cursor support, or
+ * the user disabled cursor recording. Returns 0 on success, -1 on failure.
+ * NOTE(review): bWithCursor/bDisableAutoModesetRecovery are assigned C
+ * bool values where other fields use NVFBC_TRUE/NVFBC_FALSE — harmless
+ * if NVFBC_BOOL maps 0/1, but inconsistent; confirm. */
+static int gsr_capture_nvfbc_setup_session(gsr_capture_nvfbc *cap_nvfbc) {
+    NVFBC_CREATE_CAPTURE_SESSION_PARAMS create_capture_params;
+    memset(&create_capture_params, 0, sizeof(create_capture_params));
+    create_capture_params.dwVersion = NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER;
+    create_capture_params.eCaptureType = NVFBC_CAPTURE_TO_GL;
+    create_capture_params.bWithCursor = (!cap_nvfbc->direct_capture || cap_nvfbc->supports_direct_cursor) ? NVFBC_TRUE : NVFBC_FALSE;
+    if(!cap_nvfbc->params.record_cursor)
+        create_capture_params.bWithCursor = false;
+    if(cap_nvfbc->capture_region)
+        create_capture_params.captureBox = (NVFBC_BOX){ cap_nvfbc->x, cap_nvfbc->y, cap_nvfbc->width, cap_nvfbc->height };
+    create_capture_params.eTrackingType = cap_nvfbc->tracking_type;
+    create_capture_params.dwSamplingRateMs = (uint32_t)ceilf(1000.0f / (float)cap_nvfbc->params.fps);
+    create_capture_params.bAllowDirectCapture = cap_nvfbc->direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
+    create_capture_params.bPushModel = cap_nvfbc->direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
+    create_capture_params.bDisableAutoModesetRecovery = true; // recovery handled manually via nvfbc_needs_recreate
+    if(cap_nvfbc->tracking_type == NVFBC_TRACKING_OUTPUT)
+        create_capture_params.dwOutputId = cap_nvfbc->output_id;
+
+    NVFBCSTATUS status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateCaptureSession(cap_nvfbc->nv_fbc_handle, &create_capture_params);
+    if(status != NVFBC_SUCCESS) {
+        fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+        return -1;
+    }
+    cap_nvfbc->capture_session_created = true;
+
+    memset(&cap_nvfbc->setup_params, 0, sizeof(cap_nvfbc->setup_params));
+    cap_nvfbc->setup_params.dwVersion = NVFBC_TOGL_SETUP_PARAMS_VER;
+    cap_nvfbc->setup_params.eBufferFormat = NVFBC_BUFFER_FORMAT_BGRA;
+
+    status = cap_nvfbc->nv_fbc_function_list.nvFBCToGLSetUp(cap_nvfbc->nv_fbc_handle, &cap_nvfbc->setup_params);
+    if(status != NVFBC_SUCCESS) {
+        fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+        gsr_capture_nvfbc_destroy_session(cap_nvfbc);
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Starts NvFBC capture: loads CUDA and libnvidia-fbc, resolves the capture
+ * region, works around a driver-version-specific direct-capture bug,
+ * creates the NvFBC handle + session, sizes the codec context/frame
+ * (dimensions rounded down to even), creates the CUDA codec context and
+ * registers the GL textures with CUDA. Disables vsync so grabbing is not
+ * throttled by the display. Returns 0 on success, -1 on failure (full
+ * teardown on the error path). */
+static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+    gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+
+    cap_nvfbc->base.video_codec_context = video_codec_context;
+    cap_nvfbc->base.egl = cap_nvfbc->params.egl;
+
+    if(!gsr_cuda_load(&cap_nvfbc->cuda, cap_nvfbc->params.egl->x11.dpy, cap_nvfbc->params.overclock))
+        return -1;
+
+    if(!gsr_capture_nvfbc_load_library(cap)) {
+        gsr_cuda_unload(&cap_nvfbc->cuda);
+        return -1;
+    }
+
+    cap_nvfbc->x = max_int(cap_nvfbc->params.pos.x, 0);
+    cap_nvfbc->y = max_int(cap_nvfbc->params.pos.y, 0);
+    cap_nvfbc->width = max_int(cap_nvfbc->params.size.x, 0);
+    cap_nvfbc->height = max_int(cap_nvfbc->params.size.y, 0);
+
+    // All-zero position/size means "capture the whole tracked area"
+    cap_nvfbc->capture_region = (cap_nvfbc->x > 0 || cap_nvfbc->y > 0 || cap_nvfbc->width > 0 || cap_nvfbc->height > 0);
+
+    cap_nvfbc->supports_direct_cursor = false;
+    bool direct_capture = cap_nvfbc->params.direct_capture;
+    int driver_major_version = 0;
+    int driver_minor_version = 0;
+    if(direct_capture && get_driver_version(&driver_major_version, &driver_minor_version)) {
+        fprintf(stderr, "Info: detected nvidia version: %d.%d\n", driver_major_version, driver_minor_version);
+
+        // TODO:
+        if(version_at_least(driver_major_version, driver_minor_version, 515, 57) && version_less_than(driver_major_version, driver_minor_version, 520, 56)) {
+            direct_capture = false;
+            fprintf(stderr, "Warning: \"screen-direct\" has temporary been disabled as it causes stuttering with driver versions >= 515.57 and < 520.56. Please update your driver if possible. Capturing \"screen\" instead.\n");
+        }
+
+        // TODO:
+        // Cursor capture disabled because moving the cursor doesn't update capture rate to monitor hz and instead captures at 10-30 hz
+        /*
+        if(direct_capture) {
+            if(version_at_least(driver_major_version, driver_minor_version, 515, 57))
+                supports_direct_cursor = true;
+            else
+                fprintf(stderr, "Info: capturing \"screen-direct\" but driver version appears to be less than 515.57. Disabling capture of cursor. Please update your driver if you want to capture your cursor or record \"screen\" instead.\n");
+        }
+        */
+    }
+
+    if(gsr_capture_nvfbc_setup_handle(cap_nvfbc) != 0) {
+        goto error_cleanup;
+    }
+
+    if(gsr_capture_nvfbc_setup_session(cap_nvfbc) != 0) {
+        goto error_cleanup;
+    }
+
+    // Encoders require even dimensions; & ~1 rounds down to even
+    if(cap_nvfbc->capture_region) {
+        video_codec_context->width = cap_nvfbc->width & ~1;
+        video_codec_context->height = cap_nvfbc->height & ~1;
+    } else {
+        video_codec_context->width = cap_nvfbc->tracking_width & ~1;
+        video_codec_context->height = cap_nvfbc->tracking_height & ~1;
+    }
+
+    frame->width = video_codec_context->width;
+    frame->height = video_codec_context->height;
+
+    // NOTE(review): hdr is hard-coded false here but params.hdr is used for
+    // the textures below and in capture's copy size — matches the
+    // "TODO: HDR is broken" note in capture; confirm intended.
+    if(!cuda_create_codec_context(cap_nvfbc->cuda.cu_ctx, video_codec_context, video_codec_context->width, video_codec_context->height, false, &cap_nvfbc->cuda_stream))
+        goto error_cleanup;
+
+    gsr_cuda_context cuda_context = {
+        .cuda = &cap_nvfbc->cuda,
+        .cuda_graphics_resources = cap_nvfbc->cuda_graphics_resources,
+        .mapped_arrays = cap_nvfbc->mapped_arrays
+    };
+
+    // TODO: Remove this, it creates shit we dont need
+    if(!gsr_capture_base_setup_cuda_textures(&cap_nvfbc->base, frame, &cuda_context, cap_nvfbc->params.color_range, GSR_SOURCE_COLOR_BGR, cap_nvfbc->params.hdr)) {
+        goto error_cleanup;
+    }
+    /* Disable vsync */
+    set_vertical_sync_enabled(cap_nvfbc->params.egl, 0);
+
+    return 0;
+
+    error_cleanup:
+    gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+    gsr_capture_base_stop(&cap_nvfbc->base);
+    gsr_cuda_unload(&cap_nvfbc->cuda);
+    return -1;
+}
+
+/* Grabs one frame from NvFBC into GL, converts it into the NV12/P010
+ * textures via the color-conversion shader, then async-copies each plane
+ * into the encoder's CUDA frame. If a grab fails the session is flagged
+ * for recreation and recreation is retried at most once per
+ * nvfbc_recreate_retry_time_seconds (frames in between return 0, i.e. the
+ * previous frame content is re-encoded). Returns -1 only when recreation
+ * itself fails. */
+static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame) {
+    gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+
+    const double nvfbc_recreate_retry_time_seconds = 1.0;
+    if(cap_nvfbc->nvfbc_needs_recreate) {
+        const double now = clock_get_monotonic_seconds();
+        if(now - cap_nvfbc->nvfbc_dead_start >= nvfbc_recreate_retry_time_seconds) {
+            cap_nvfbc->nvfbc_dead_start = now; // restart the backoff window for this attempt
+            gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+
+            if(gsr_capture_nvfbc_setup_handle(cap_nvfbc) != 0) {
+                fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc handle, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
+                return -1;
+            }
+
+            if(gsr_capture_nvfbc_setup_session(cap_nvfbc) != 0) {
+                fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc session, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
+                return -1;
+            }
+
+            cap_nvfbc->nvfbc_needs_recreate = false;
+        } else {
+            return 0; // still inside the backoff window; skip this frame
+        }
+    }
+
+    NVFBC_FRAME_GRAB_INFO frame_info;
+    memset(&frame_info, 0, sizeof(frame_info));
+
+    NVFBC_TOGL_GRAB_FRAME_PARAMS grab_params;
+    memset(&grab_params, 0, sizeof(grab_params));
+    grab_params.dwVersion = NVFBC_TOGL_GRAB_FRAME_PARAMS_VER;
+    grab_params.dwFlags = NVFBC_TOGL_GRAB_FLAGS_NOWAIT | NVFBC_TOGL_GRAB_FLAGS_FORCE_REFRESH; // TODO: Remove NVFBC_TOGL_GRAB_FLAGS_FORCE_REFRESH
+    grab_params.pFrameGrabInfo = &frame_info;
+    grab_params.dwTimeoutMs = 0;
+
+    NVFBCSTATUS status = cap_nvfbc->nv_fbc_function_list.nvFBCToGLGrabFrame(cap_nvfbc->nv_fbc_handle, &grab_params);
+    if(status != NVFBC_SUCCESS) {
+        // Grab failure (e.g. modeset): schedule recreation, keep encoding
+        fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed: %s (%d), recreating session after %f second(s)\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle), status, nvfbc_recreate_retry_time_seconds);
+        cap_nvfbc->nvfbc_needs_recreate = true;
+        cap_nvfbc->nvfbc_dead_start = clock_get_monotonic_seconds();
+        return 0;
+    }
+
+    //cap_nvfbc->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
+    cap_nvfbc->params.egl->glClear(0);
+
+    // Convert the BGRA grab texture into the encoder's planar textures
+    gsr_color_conversion_draw(&cap_nvfbc->base.color_conversion, cap_nvfbc->setup_params.dwTextures[grab_params.dwTextureIndex],
+        (vec2i){0, 0}, (vec2i){frame->width, frame->height},
+        (vec2i){0, 0}, (vec2i){frame->width, frame->height},
+        0.0f, false);
+
+    cap_nvfbc->params.egl->glXSwapBuffers(cap_nvfbc->params.egl->x11.dpy, cap_nvfbc->params.egl->x11.window);
+
+    // TODO: HDR is broken
+    const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
+    for(int i = 0; i < 2; ++i) {
+        CUDA_MEMCPY2D memcpy_struct;
+        memcpy_struct.srcXInBytes = 0;
+        memcpy_struct.srcY = 0;
+        memcpy_struct.srcMemoryType = CU_MEMORYTYPE_ARRAY;
+
+        memcpy_struct.dstXInBytes = 0;
+        memcpy_struct.dstY = 0;
+        memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+
+        memcpy_struct.srcArray = cap_nvfbc->mapped_arrays[i];
+        memcpy_struct.srcPitch = frame->width / div[i];
+        memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
+        memcpy_struct.dstPitch = frame->linesize[i];
+        memcpy_struct.WidthInBytes = frame->width * (cap_nvfbc->params.hdr ? 2 : 1);
+        memcpy_struct.Height = frame->height / div[i];
+        // TODO: Remove this copy if possible
+        cap_nvfbc->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, cap_nvfbc->cuda_stream);
+    }
+
+    // TODO: needed?
+    cap_nvfbc->cuda.cuStreamSynchronize(cap_nvfbc->cuda_stream);
+
+    return 0;
+}
+
+/* Tears down the NvFBC session/handle, CUDA state and the dlopen'd
+ * library, frees the owned display_to_capture string, the private state
+ * and the capture object.
+ * Fixes: gsr_capture_nvfbc_destroy_session_and_handle() was called
+ * BEFORE the `if(cap_nvfbc)` NULL check, dereferencing cap_nvfbc inside
+ * — moved under the guard. dlclose() was called unconditionally even
+ * though cap_nvfbc->library is NULL when start() never ran or failed to
+ * load the library; dlclose(NULL) is undefined behavior. */
+static void gsr_capture_nvfbc_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    (void)video_codec_context;
+    gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+    if(cap_nvfbc) {
+        gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+        gsr_capture_base_stop(&cap_nvfbc->base);
+        gsr_cuda_unload(&cap_nvfbc->cuda);
+        if(cap_nvfbc->library)
+            dlclose(cap_nvfbc->library);
+        free((void*)cap_nvfbc->params.display_to_capture);
+        cap_nvfbc->params.display_to_capture = NULL;
+        free(cap->priv);
+        cap->priv = NULL;
+    }
+    free(cap);
+}
+
+/* Creates an NvFBC capture instance. Validates params (including a
+ * non-NULL display_to_capture), deep-copies them, and clamps fps to at
+ * least 1 (fps is used as a divisor in setup_session). Returns NULL on
+ * invalid params or allocation failure. */
+gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params) {
+    if(!params) {
+        fprintf(stderr, "gsr error: gsr_capture_nvfbc_create params is NULL\n");
+        return NULL;
+    }
+
+    if(!params->display_to_capture) {
+        fprintf(stderr, "gsr error: gsr_capture_nvfbc_create params.display_to_capture is NULL\n");
+        return NULL;
+    }
+
+    gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+    if(!cap)
+        return NULL;
+
+    gsr_capture_nvfbc *cap_nvfbc = calloc(1, sizeof(gsr_capture_nvfbc));
+    if(!cap_nvfbc) {
+        free(cap);
+        return NULL;
+    }
+
+    const char *display_to_capture = strdup(params->display_to_capture);
+    if(!display_to_capture) {
+        free(cap);
+        free(cap_nvfbc);
+        return NULL;
+    }
+
+    cap_nvfbc->params = *params;
+    cap_nvfbc->params.display_to_capture = display_to_capture; // owned; freed in destroy()
+    cap_nvfbc->params.fps = max_int(cap_nvfbc->params.fps, 1);
+
+    *cap = (gsr_capture) {
+        .start = gsr_capture_nvfbc_start,
+        .tick = NULL,
+        .should_stop = NULL,
+        .capture = gsr_capture_nvfbc_capture,
+        .capture_end = NULL,
+        .destroy = gsr_capture_nvfbc_destroy,
+        .priv = cap_nvfbc
+    };
+
+    return cap;
+}
diff --git a/src/capture/xcomposite.c b/src/capture/xcomposite.c
new file mode 100644
index 0000000..29b42d5
--- /dev/null
+++ b/src/capture/xcomposite.c
@@ -0,0 +1,299 @@
+#include "../../include/capture/xcomposite.h"
+#include "../../include/window_texture.h"
+#include "../../include/utils.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+#include <X11/Xlib.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/frame.h>
+#include <libavcodec/avcodec.h>
+#include <va/va.h>
+#include <va/va_drmcommon.h>
+
+/* Return the larger of two ints. */
+static int max_int(int a, int b) {
+    if(a > b)
+        return a;
+    return b;
+}
+
+/* Return the smaller of two ints. */
+static int min_int(int a, int b) {
+    if(a < b)
+        return a;
+    return b;
+}
+
+/* Zero the entire capture state (memset also clears struct padding) and
+   store a by-value copy of the creation parameters. */
+void gsr_capture_xcomposite_init(gsr_capture_xcomposite *self, const gsr_capture_xcomposite_params *params) {
+ memset(self, 0, sizeof(*self));
+ self->params = *params;
+}
+
+/*
+ * Read the _NET_ACTIVE_WINDOW property from the root window to find the
+ * currently focused window (EWMH). Returns None if the property is missing
+ * or empty.
+ */
+static Window get_focused_window(Display *display, Atom net_active_window_atom) {
+    Atom type;
+    int format = 0;
+    unsigned long num_items = 0;
+    unsigned long bytes_after = 0;
+    unsigned char *properties = NULL;
+    Window focused_window = None;
+    if(XGetWindowProperty(display, DefaultRootWindow(display), net_active_window_atom, 0, 1024, False, AnyPropertyType, &type, &format, &num_items, &bytes_after, &properties) == Success && properties) {
+        /* Only dereference the property data if the server actually returned an item;
+           the original read *properties unconditionally. */
+        if(num_items > 0)
+            focused_window = *(unsigned long*)properties;
+        XFree(properties);
+    }
+    return focused_window;
+}
+
+/*
+ * Start capturing self->params.window, or the currently focused window when
+ * params.follow_focused is set. Configures video_codec_context and frame
+ * dimensions from the window texture size (clamped to an even size >= 2),
+ * or from params.region_size when that is non-zero.
+ * Returns 0 on success, -1 on failure.
+ */
+int gsr_capture_xcomposite_start(gsr_capture_xcomposite *self, AVCodecContext *video_codec_context, AVFrame *frame) {
+    self->base.video_codec_context = video_codec_context;
+    self->base.egl = self->params.egl;
+
+    if(self->params.follow_focused) {
+        self->net_active_window_atom = XInternAtom(self->params.egl->x11.dpy, "_NET_ACTIVE_WINDOW", False);
+        if(!self->net_active_window_atom) {
+            fprintf(stderr, "gsr error: gsr_capture_xcomposite_start failed: failed to get _NET_ACTIVE_WINDOW atom\n");
+            return -1;
+        }
+        self->window = get_focused_window(self->params.egl->x11.dpy, self->net_active_window_atom);
+    } else {
+        self->window = self->params.window;
+    }
+
+    /* TODO: Do these in tick, and allow error if follow_focused */
+
+    /* Zero width/height so window_size is well-defined even when
+       XGetWindowAttributes fails and follow_focused tolerates the failure
+       (the original read attr.width/attr.height uninitialized in that case —
+       the tick path already zeroed them). */
+    XWindowAttributes attr;
+    attr.width = 0;
+    attr.height = 0;
+    if(!XGetWindowAttributes(self->params.egl->x11.dpy, self->window, &attr) && !self->params.follow_focused) {
+        fprintf(stderr, "gsr error: gsr_capture_xcomposite_start failed: invalid window id: %lu\n", self->window);
+        return -1;
+    }
+
+    self->window_size.x = max_int(attr.width, 0);
+    self->window_size.y = max_int(attr.height, 0);
+
+    if(self->params.follow_focused)
+        XSelectInput(self->params.egl->x11.dpy, DefaultRootWindow(self->params.egl->x11.dpy), PropertyChangeMask);
+
+    // TODO: Get select and add these on top of it and then restore at the end. Also do the same in other xcomposite
+    XSelectInput(self->params.egl->x11.dpy, self->window, StructureNotifyMask | ExposureMask);
+
+    if(!self->params.egl->eglExportDMABUFImageQueryMESA) {
+        fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: could not find eglExportDMABUFImageQueryMESA\n");
+        return -1;
+    }
+
+    if(!self->params.egl->eglExportDMABUFImageMESA) {
+        fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: could not find eglExportDMABUFImageMESA\n");
+        return -1;
+    }
+
+    /* Disable vsync */
+    self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
+    if(window_texture_init(&self->window_texture, self->params.egl->x11.dpy, self->window, self->params.egl) != 0 && !self->params.follow_focused) {
+        fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed to get window texture for window %ld\n", self->window);
+        return -1;
+    }
+
+    if(gsr_cursor_init(&self->cursor, self->params.egl, self->params.egl->x11.dpy) != 0) {
+        gsr_capture_xcomposite_stop(self);
+        return -1;
+    }
+
+    /* Query the actual size of the redirected window texture. */
+    self->texture_size.x = 0;
+    self->texture_size.y = 0;
+
+    self->params.egl->glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&self->window_texture));
+    self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &self->texture_size.x);
+    self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &self->texture_size.y);
+    self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+    /* Clamp to an even size of at least 2x2 for the encoder. */
+    self->texture_size.x = max_int(2, even_number_ceil(self->texture_size.x));
+    self->texture_size.y = max_int(2, even_number_ceil(self->texture_size.y));
+
+    video_codec_context->width = self->texture_size.x;
+    video_codec_context->height = self->texture_size.y;
+
+    if(self->params.region_size.x > 0 && self->params.region_size.y > 0) {
+        video_codec_context->width = max_int(2, even_number_ceil(self->params.region_size.x));
+        video_codec_context->height = max_int(2, even_number_ceil(self->params.region_size.y));
+    }
+
+    frame->width = video_codec_context->width;
+    frame->height = video_codec_context->height;
+
+    self->window_resize_timer = clock_get_monotonic_seconds();
+    self->clear_next_frame = true;
+    return 0;
+}
+
+/* Release the window texture, the cursor tracker and the base capture
+   resources, in that order. Safe to call from start's error path. */
+void gsr_capture_xcomposite_stop(gsr_capture_xcomposite *self) {
+ window_texture_deinit(&self->window_texture);
+ gsr_cursor_deinit(&self->cursor);
+ gsr_capture_base_stop(&self->base);
+}
+
+/*
+ * Per-frame bookkeeping. Drains pending X11 events to detect window
+ * destruction, exposure, resize and focus changes; re-targets the capture
+ * when the focused window changes (follow_focused mode); and recreates the
+ * window texture once a resize has settled for window_resize_timeout seconds.
+ */
+void gsr_capture_xcomposite_tick(gsr_capture_xcomposite *self, AVCodecContext *video_codec_context) {
+ //self->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
+ self->params.egl->glClear(0);
+
+ bool init_new_window = false;
+ /* Drain the X11 event queue without blocking. */
+ while(XPending(self->params.egl->x11.dpy)) {
+ XNextEvent(self->params.egl->x11.dpy, &self->xev);
+
+ switch(self->xev.type) {
+ case DestroyNotify: {
+ /* Window died (when not following focused window), so we stop recording */
+ if(!self->params.follow_focused && self->xev.xdestroywindow.window == self->window) {
+ self->should_stop = true;
+ self->stop_is_error = false;
+ }
+ break;
+ }
+ case Expose: {
+ /* Requires window texture recreate */
+ if(self->xev.xexpose.count == 0 && self->xev.xexpose.window == self->window) {
+ self->window_resize_timer = clock_get_monotonic_seconds();
+ self->window_resized = true;
+ }
+ break;
+ }
+ case ConfigureNotify: {
+ /* Window resized */
+ if(self->xev.xconfigure.window == self->window && (self->xev.xconfigure.width != self->window_size.x || self->xev.xconfigure.height != self->window_size.y)) {
+ self->window_size.x = max_int(self->xev.xconfigure.width, 0);
+ self->window_size.y = max_int(self->xev.xconfigure.height, 0);
+ /* Restart the settle timer; the texture is only recreated below once
+    no further resize arrives for window_resize_timeout seconds. */
+ self->window_resize_timer = clock_get_monotonic_seconds();
+ self->window_resized = true;
+ }
+ break;
+ }
+ case PropertyNotify: {
+ /* Focused window changed */
+ if(self->params.follow_focused && self->xev.xproperty.atom == self->net_active_window_atom) {
+ init_new_window = true;
+ }
+ break;
+ }
+ }
+
+ /* Forward every event to the cursor tracker as well. */
+ gsr_cursor_update(&self->cursor, &self->xev);
+ }
+
+ /* First tick in follow-focused mode: pick up the initial focused window. */
+ if(self->params.follow_focused && !self->follow_focused_initialized) {
+ init_new_window = true;
+ }
+
+ if(init_new_window) {
+ Window focused_window = get_focused_window(self->params.egl->x11.dpy, self->net_active_window_atom);
+ if(focused_window != self->window || !self->follow_focused_initialized) {
+ self->follow_focused_initialized = true;
+ /* Stop listening on the old window, start listening on the new one. */
+ XSelectInput(self->params.egl->x11.dpy, self->window, 0);
+ self->window = focused_window;
+ XSelectInput(self->params.egl->x11.dpy, self->window, StructureNotifyMask | ExposureMask);
+
+ XWindowAttributes attr;
+ attr.width = 0;
+ attr.height = 0;
+ /* Best-effort: an invalid window is logged but not fatal here. */
+ if(!XGetWindowAttributes(self->params.egl->x11.dpy, self->window, &attr))
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick failed: invalid window id: %lu\n", self->window);
+
+ self->window_size.x = max_int(attr.width, 0);
+ self->window_size.y = max_int(attr.height, 0);
+ self->window_resized = true;
+
+ window_texture_deinit(&self->window_texture);
+ window_texture_init(&self->window_texture, self->params.egl->x11.dpy, self->window, self->params.egl); // TODO: Do not do the below window_texture_on_resize after this
+ }
+ }
+
+ const double window_resize_timeout = 1.0; // 1 second
+ if(self->window_resized && clock_get_monotonic_seconds() - self->window_resize_timer >= window_resize_timeout) {
+ self->window_resized = false;
+
+ if(window_texture_on_resize(&self->window_texture) != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_tick: window_texture_on_resize failed\n");
+ //self->should_stop = true;
+ //self->stop_is_error = true;
+ return;
+ }
+
+ /* Re-query the texture size after the resize. */
+ self->texture_size.x = 0;
+ self->texture_size.y = 0;
+
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&self->window_texture));
+ self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &self->texture_size.x);
+ self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &self->texture_size.y);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ /* Clamp to an even size >= 2, never larger than the encoder frame. */
+ self->texture_size.x = min_int(video_codec_context->width, max_int(2, even_number_ceil(self->texture_size.x)));
+ self->texture_size.y = min_int(video_codec_context->height, max_int(2, even_number_ceil(self->texture_size.y)));
+
+ gsr_color_conversion_clear(&self->base.color_conversion);
+ }
+}
+
+/* Report whether capture should stop; when it should, *err (if provided)
+   tells the caller whether the stop is due to an error. */
+bool gsr_capture_xcomposite_should_stop(gsr_capture_xcomposite *self, bool *err) {
+    const bool stopping = self->should_stop;
+    if(err)
+        *err = stopping ? self->stop_is_error : false;
+    return stopping;
+}
+
+/*
+ * Draw the window texture (and, if enabled, the cursor) centered into the
+ * output frame via the color conversion pipeline, then swap. Always returns 0.
+ * Fix: removed the misleading "(void)frame;" unused-parameter suppressor —
+ * frame is read immediately below.
+ */
+int gsr_capture_xcomposite_capture(gsr_capture_xcomposite *self, AVFrame *frame) {
+    /* Center the captured window inside the output frame. */
+    const int target_x = max_int(0, frame->width / 2 - self->texture_size.x / 2);
+    const int target_y = max_int(0, frame->height / 2 - self->texture_size.y / 2);
+
+    // TODO: Can we do this a better way than to call it every capture?
+    if(self->params.record_cursor)
+        gsr_cursor_tick(&self->cursor, self->window);
+
+    /* Cursor position in output-frame coordinates, adjusted for the hotspot. */
+    const vec2i cursor_pos = {
+        target_x + self->cursor.position.x - self->cursor.hotspot.x,
+        target_y + self->cursor.position.y - self->cursor.hotspot.y
+    };
+
+    const bool cursor_completely_inside_window =
+        cursor_pos.x >= target_x &&
+        cursor_pos.x + self->cursor.size.x <= target_x + self->texture_size.x &&
+        cursor_pos.y >= target_y &&
+        cursor_pos.y + self->cursor.size.y <= target_y + self->texture_size.y;
+
+    const bool cursor_inside_window =
+        cursor_pos.x + self->cursor.size.x >= target_x &&
+        cursor_pos.x <= target_x + self->texture_size.x &&
+        cursor_pos.y + self->cursor.size.y >= target_y &&
+        cursor_pos.y <= target_y + self->texture_size.y;
+
+    if(self->clear_next_frame) {
+        self->clear_next_frame = false;
+        gsr_color_conversion_clear(&self->base.color_conversion);
+    }
+
+    /*
+        We dont draw the cursor if it's outside the window but if it's partially inside the window then the cursor area that is outside the window
+        will not get overdrawn the next frame causing a cursor trail to be visible since we dont clear the background.
+        To fix this we detect if the cursor is partially inside the window and clear the background only in that case.
+    */
+    if(!cursor_completely_inside_window && cursor_inside_window && self->params.record_cursor)
+        self->clear_next_frame = true;
+
+    gsr_color_conversion_draw(&self->base.color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
+        (vec2i){target_x, target_y}, self->texture_size,
+        (vec2i){0, 0}, self->texture_size,
+        0.0f, false);
+
+    if(cursor_inside_window && self->params.record_cursor) {
+        gsr_color_conversion_draw(&self->base.color_conversion, self->cursor.texture_id,
+            cursor_pos, self->cursor.size,
+            (vec2i){0, 0}, self->cursor.size,
+            0.0f, false);
+    }
+
+    self->params.egl->eglSwapBuffers(self->params.egl->egl_display, self->params.egl->egl_surface);
+    //self->params.egl->glFlush();
+    //self->params.egl->glFinish();
+
+    return 0;
+}
diff --git a/src/capture/xcomposite_cuda.c b/src/capture/xcomposite_cuda.c
new file mode 100644
index 0000000..6e13d2a
--- /dev/null
+++ b/src/capture/xcomposite_cuda.c
@@ -0,0 +1,155 @@
+#include "../../include/capture/xcomposite_cuda.h"
+#include "../../include/cuda.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <libavutil/frame.h>
+#include <libavcodec/avcodec.h>
+
+/* Private state for XComposite capture with CUDA (NVENC) output. */
+typedef struct {
+ gsr_capture_xcomposite xcomposite; /* Shared XComposite window-capture state. */
+ bool overclock; /* Passed to gsr_cuda_load; presumably enables an overclock workaround — see gsr_cuda_load. */
+
+ gsr_cuda cuda; /* Loaded CUDA function pointers/context (gsr_cuda_load). */
+ CUgraphicsResource cuda_graphics_resources[2]; /* One registration per plane; set up in gsr_capture_base_setup_cuda_textures. */
+ CUarray mapped_arrays[2]; /* CUDA arrays used as copy sources in capture (Y and half-size chroma plane). */
+ CUstream cuda_stream; /* Stream used for the async plane copies. */
+} gsr_capture_xcomposite_cuda;
+
+static void gsr_capture_xcomposite_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
+
+/*
+ * Start the XComposite capture, then load CUDA, create the CUDA codec
+ * context/stream and register the base textures with CUDA.
+ * Returns 0 on success, non-zero on failure (after cleaning up via stop).
+ */
+static int gsr_capture_xcomposite_cuda_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+    gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
+
+    const int res = gsr_capture_xcomposite_start(&cap_xcomp->xcomposite, video_codec_context, frame);
+    if(res != 0) {
+        gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
+        return res;
+    }
+
+    if(!gsr_cuda_load(&cap_xcomp->cuda, cap_xcomp->xcomposite.params.egl->x11.dpy, cap_xcomp->overclock)) {
+        /* Fixed copy-paste: this message previously named gsr_capture_kms_cuda_start. */
+        fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_start: failed to load cuda\n");
+        gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
+        return -1;
+    }
+
+    if(!cuda_create_codec_context(cap_xcomp->cuda.cu_ctx, video_codec_context, video_codec_context->width, video_codec_context->height, false, &cap_xcomp->cuda_stream)) {
+        gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
+        return -1;
+    }
+
+    gsr_cuda_context cuda_context = {
+        .cuda = &cap_xcomp->cuda,
+        .cuda_graphics_resources = cap_xcomp->cuda_graphics_resources,
+        .mapped_arrays = cap_xcomp->mapped_arrays
+    };
+
+    if(!gsr_capture_base_setup_cuda_textures(&cap_xcomp->xcomposite.base, frame, &cuda_context, cap_xcomp->xcomposite.params.color_range, GSR_SOURCE_COLOR_RGB, false)) {
+        gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
+        return -1;
+    }
+
+    return 0;
+}
+
+/* Unmap and unregister the per-plane CUDA graphics resources.
+   No-op when CUDA was never loaded (cu_ctx is NULL). */
+static void gsr_capture_xcomposite_unload_cuda_graphics(gsr_capture_xcomposite_cuda *self) {
+    if(!self->cuda.cu_ctx)
+        return;
+
+    for(int i = 0; i < 2; ++i) {
+        CUgraphicsResource *resource = &self->cuda_graphics_resources[i];
+        if(*resource) {
+            self->cuda.cuGraphicsUnmapResources(1, resource, 0);
+            self->cuda.cuGraphicsUnregisterResource(*resource);
+            *resource = 0;
+        }
+    }
+}
+
+/* Tear down in dependency order: the X capture first, then the CUDA
+   graphics registrations, then CUDA itself. */
+static void gsr_capture_xcomposite_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    (void)video_codec_context;
+    gsr_capture_xcomposite_cuda *self = cap->priv;
+    gsr_capture_xcomposite_stop(&self->xcomposite);
+    gsr_capture_xcomposite_unload_cuda_graphics(self);
+    gsr_cuda_unload(&self->cuda);
+}
+
+/* Thin vtable adapter: forward the tick to the shared XComposite logic. */
+static void gsr_capture_xcomposite_cuda_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    gsr_capture_xcomposite_cuda *self = cap->priv;
+    gsr_capture_xcomposite_tick(&self->xcomposite, video_codec_context);
+}
+
+/* Thin vtable adapter: delegate stop detection to the shared XComposite logic. */
+static bool gsr_capture_xcomposite_cuda_should_stop(gsr_capture *cap, bool *err) {
+    gsr_capture_xcomposite_cuda *self = cap->priv;
+    return gsr_capture_xcomposite_should_stop(&self->xcomposite, err);
+}
+
+/*
+ * Render the window via the shared XComposite path, then asynchronously copy
+ * both planes from the mapped CUDA arrays into the encoder frame, and
+ * synchronize the stream. Always returns 0.
+ * Fix: CUDA_MEMCPY2D was passed to the driver with several fields
+ * indeterminate; zero-initialize the whole struct first.
+ */
+static int gsr_capture_xcomposite_cuda_capture(gsr_capture *cap, AVFrame *frame) {
+    gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
+
+    gsr_capture_xcomposite_capture(&cap_xcomp->xcomposite, frame);
+
+    const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
+    for(int i = 0; i < 2; ++i) {
+        /* {0} clears the offsets and every field unused for this memory-type
+           combination (srcHost, srcDevice, dstArray, ...). */
+        CUDA_MEMCPY2D memcpy_struct = {0};
+        memcpy_struct.srcMemoryType = CU_MEMORYTYPE_ARRAY;
+        memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;
+
+        memcpy_struct.srcArray = cap_xcomp->mapped_arrays[i];
+        memcpy_struct.srcPitch = frame->width / div[i];
+        memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
+        memcpy_struct.dstPitch = frame->linesize[i];
+        // NOTE(review): assumes 1 byte per sample (NV12); a 16-bit format like
+        // P010 would need frame->width * 2 here — confirm the frame format.
+        memcpy_struct.WidthInBytes = frame->width;
+        memcpy_struct.Height = frame->height / div[i];
+        // TODO: Remove this copy if possible
+        cap_xcomp->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, cap_xcomp->cuda_stream);
+    }
+
+    // TODO: needed?
+    cap_xcomp->cuda.cuStreamSynchronize(cap_xcomp->cuda_stream);
+
+    return 0;
+}
+
+/* Stop the capture (if it still owns private state) and free everything. */
+static void gsr_capture_xcomposite_cuda_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    gsr_capture_xcomposite_cuda *self = cap->priv;
+    if(self) {
+        gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
+        free(self);
+        cap->priv = NULL;
+    }
+    free(cap);
+}
+
+/* Allocate an XComposite+CUDA capture object and wire up its vtable.
+   Returns NULL on invalid arguments or allocation failure. */
+gsr_capture* gsr_capture_xcomposite_cuda_create(const gsr_capture_xcomposite_cuda_params *params) {
+    if(!params) {
+        fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_create params is NULL\n");
+        return NULL;
+    }
+
+    gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+    gsr_capture_xcomposite_cuda *impl = cap ? calloc(1, sizeof(gsr_capture_xcomposite_cuda)) : NULL;
+    if(!impl) {
+        free(cap);   /* free(NULL) is fine when cap itself failed */
+        return NULL;
+    }
+
+    gsr_capture_xcomposite_init(&impl->xcomposite, &params->base);
+    impl->overclock = params->overclock;
+
+    *cap = (gsr_capture) {
+        .start = gsr_capture_xcomposite_cuda_start,
+        .tick = gsr_capture_xcomposite_cuda_tick,
+        .should_stop = gsr_capture_xcomposite_cuda_should_stop,
+        .capture = gsr_capture_xcomposite_cuda_capture,
+        .capture_end = NULL,
+        .destroy = gsr_capture_xcomposite_cuda_destroy,
+        .priv = impl
+    };
+
+    return cap;
+}
diff --git a/src/capture/xcomposite_vaapi.c b/src/capture/xcomposite_vaapi.c
new file mode 100644
index 0000000..8c9c56c
--- /dev/null
+++ b/src/capture/xcomposite_vaapi.c
@@ -0,0 +1,109 @@
+#include "../../include/capture/xcomposite_vaapi.h"
+#include "../../include/capture/xcomposite.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <va/va.h>
+#include <va/va_drmcommon.h>
+#include <libavcodec/avcodec.h>
+
+/* Private state for XComposite capture with VA-API output. */
+typedef struct {
+ gsr_capture_xcomposite xcomposite; /* Shared XComposite window-capture state. */
+
+ VADisplay va_dpy; /* VA-API display; filled in by drm_create_codec_context in start. */
+ VADRMPRIMESurfaceDescriptor prime; /* dma-buf description of the VA surface; its fds are closed in stop. */
+} gsr_capture_xcomposite_vaapi;
+
+static void gsr_capture_xcomposite_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
+
+/*
+ * Start the XComposite capture, then create the VA-API codec context and
+ * bind the base textures to the exported VA surface.
+ * Returns 0 on success, non-zero on failure (after cleaning up via stop).
+ */
+static int gsr_capture_xcomposite_vaapi_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+    gsr_capture_xcomposite_vaapi *self = cap->priv;
+
+    int res = gsr_capture_xcomposite_start(&self->xcomposite, video_codec_context, frame);
+    if(res != 0)
+        goto fail;
+
+    res = -1;
+    if(!drm_create_codec_context(self->xcomposite.params.egl->card_path, video_codec_context, video_codec_context->width, video_codec_context->height, false, &self->va_dpy))
+        goto fail;
+
+    if(!gsr_capture_base_setup_vaapi_textures(&self->xcomposite.base, frame, self->va_dpy, &self->prime, self->xcomposite.params.color_range))
+        goto fail;
+
+    return 0;
+
+fail:
+    /* Single cleanup path: stop tolerates partially-initialized state. */
+    gsr_capture_xcomposite_vaapi_stop(cap, video_codec_context);
+    return res;
+}
+
+/* Thin vtable adapter: forward the tick to the shared XComposite logic. */
+static void gsr_capture_xcomposite_vaapi_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    gsr_capture_xcomposite_vaapi *self = cap->priv;
+    gsr_capture_xcomposite_tick(&self->xcomposite, video_codec_context);
+}
+
+/* Thin vtable adapter: delegate stop detection to the shared XComposite logic. */
+static bool gsr_capture_xcomposite_vaapi_should_stop(gsr_capture *cap, bool *err) {
+    gsr_capture_xcomposite_vaapi *self = cap->priv;
+    return gsr_capture_xcomposite_should_stop(&self->xcomposite, err);
+}
+
+/* Thin vtable adapter: the shared XComposite draw path does all the work. */
+static int gsr_capture_xcomposite_vaapi_capture(gsr_capture *cap, AVFrame *frame) {
+    gsr_capture_xcomposite_vaapi *self = cap->priv;
+    return gsr_capture_xcomposite_capture(&self->xcomposite, frame);
+}
+
+/* Close every dma-buf fd exported for the prime surface, then stop the
+   underlying XComposite capture. An fd of 0 marks an unused slot (the
+   struct starts zeroed), so only fds > 0 are closed. */
+static void gsr_capture_xcomposite_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    (void)video_codec_context;
+    gsr_capture_xcomposite_vaapi *self = cap->priv;
+
+    for(uint32_t i = 0; i < self->prime.num_objects; ++i) {
+        const int fd = self->prime.objects[i].fd;
+        if(fd > 0) {
+            close(fd);
+            self->prime.objects[i].fd = 0;
+        }
+    }
+
+    gsr_capture_xcomposite_stop(&self->xcomposite);
+}
+
+/*
+ * Stop the capture (if private state still exists) and free everything.
+ * Fix: removed the contradictory "(void)video_codec_context;" — the
+ * parameter is forwarded to stop on the very next lines.
+ */
+static void gsr_capture_xcomposite_vaapi_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+    if(cap->priv) {
+        gsr_capture_xcomposite_vaapi_stop(cap, video_codec_context);
+        free(cap->priv);
+        cap->priv = NULL;
+    }
+    free(cap);
+}
+
+/* Allocate an XComposite+VA-API capture object and wire up its vtable.
+   Returns NULL on invalid arguments or allocation failure. */
+gsr_capture* gsr_capture_xcomposite_vaapi_create(const gsr_capture_xcomposite_vaapi_params *params) {
+    if(!params) {
+        fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_create params is NULL\n");
+        return NULL;
+    }
+
+    gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+    gsr_capture_xcomposite_vaapi *impl = cap ? calloc(1, sizeof(gsr_capture_xcomposite_vaapi)) : NULL;
+    if(!impl) {
+        free(cap);   /* free(NULL) is fine when cap itself failed */
+        return NULL;
+    }
+
+    gsr_capture_xcomposite_init(&impl->xcomposite, &params->base);
+
+    *cap = (gsr_capture) {
+        .start = gsr_capture_xcomposite_vaapi_start,
+        .tick = gsr_capture_xcomposite_vaapi_tick,
+        .should_stop = gsr_capture_xcomposite_vaapi_should_stop,
+        .capture = gsr_capture_xcomposite_vaapi_capture,
+        .capture_end = NULL,
+        .destroy = gsr_capture_xcomposite_vaapi_destroy,
+        .priv = impl
+    };
+
+    return cap;
+}