Diffstat (limited to 'src/capture')
-rw-r--r--  src/capture/capture.c           396
-rw-r--r--  src/capture/kms.c               817
-rw-r--r--  src/capture/kms_cuda.c          181
-rw-r--r--  src/capture/kms_vaapi.c         135
-rw-r--r--  src/capture/nvfbc.c             295
-rw-r--r--  src/capture/portal.c            458
-rw-r--r--  src/capture/xcomposite.c        376
-rw-r--r--  src/capture/xcomposite_cuda.c   167
-rw-r--r--  src/capture/xcomposite_vaapi.c  121
9 files changed, 1377 insertions, 1569 deletions
diff --git a/src/capture/capture.c b/src/capture/capture.c
index 5e1f546..ec10854 100644
--- a/src/capture/capture.c
+++ b/src/capture/capture.c
@@ -1,20 +1,8 @@
#include "../../include/capture/capture.h"
-#include "../../include/egl.h"
-#include "../../include/cuda.h"
-#include "../../include/utils.h"
-#include <stdio.h>
-#include <stdint.h>
-#include <va/va.h>
-#include <va/va_drmcommon.h>
-#include <libavutil/frame.h>
-#include <libavutil/hwcontext_vaapi.h>
-#include <libavutil/hwcontext_cuda.h>
-#include <libavcodec/avcodec.h>
+#include <assert.h>
int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
- if(cap->started)
- return -1;
-
+ assert(!cap->started);
int res = cap->start(cap, video_codec_context, frame);
if(res == 0)
cap->started = true;
@@ -22,378 +10,48 @@ int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVF
return res;
}
-void gsr_capture_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
- if(!cap->started) {
- fprintf(stderr, "gsr error: gsp_capture_tick failed: the gsr capture has not been started\n");
- return;
- }
-
+void gsr_capture_tick(gsr_capture *cap) {
+ assert(cap->started);
if(cap->tick)
- cap->tick(cap, video_codec_context);
+ cap->tick(cap);
}
-bool gsr_capture_should_stop(gsr_capture *cap, bool *err) {
- if(!cap->started) {
- fprintf(stderr, "gsr error: gsr_capture_should_stop failed: the gsr capture has not been started\n");
- return false;
- }
-
- if(!cap->should_stop)
- return false;
-
- return cap->should_stop(cap, err);
-}
-
-int gsr_capture_capture(gsr_capture *cap, AVFrame *frame) {
- if(!cap->started) {
- fprintf(stderr, "gsr error: gsr_capture_capture failed: the gsr capture has not been started\n");
- return -1;
- }
- return cap->capture(cap, frame);
+void gsr_capture_on_event(gsr_capture *cap, gsr_egl *egl) {
+ if(cap->on_event)
+ cap->on_event(cap, egl);
}
-void gsr_capture_end(gsr_capture *cap, AVFrame *frame) {
- if(!cap->started) {
- fprintf(stderr, "gsr error: gsr_capture_end failed: the gsr capture has not been started\n");
- return;
- }
-
- if(!cap->capture_end)
- return;
-
- cap->capture_end(cap, frame);
-}
-
-void gsr_capture_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
- cap->destroy(cap, video_codec_context);
-}
-
-static uint32_t fourcc(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
- return (d << 24) | (c << 16) | (b << 8) | a;
-}
-
-bool gsr_capture_base_setup_vaapi_textures(gsr_capture_base *self, AVFrame *frame, VADisplay va_dpy, VADRMPRIMESurfaceDescriptor *prime, gsr_color_range color_range) {
- const int res = av_hwframe_get_buffer(self->video_codec_context->hw_frames_ctx, frame, 0);
- if(res < 0) {
- fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: av_hwframe_get_buffer failed: %d\n", res);
- return false;
- }
-
- VASurfaceID target_surface_id = (uintptr_t)frame->data[3];
-
- VAStatus va_status = vaExportSurfaceHandle(va_dpy, target_surface_id, VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2, VA_EXPORT_SURFACE_WRITE_ONLY | VA_EXPORT_SURFACE_SEPARATE_LAYERS, prime);
- if(va_status != VA_STATUS_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: vaExportSurfaceHandle failed, error: %d\n", va_status);
- return false;
- }
- vaSyncSurface(va_dpy, target_surface_id);
-
- self->egl->glGenTextures(1, &self->input_texture);
- self->egl->glBindTexture(GL_TEXTURE_2D, self->input_texture);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- self->egl->glBindTexture(GL_TEXTURE_2D, 0);
-
- self->egl->glGenTextures(1, &self->cursor_texture);
- self->egl->glBindTexture(GL_TEXTURE_2D, self->cursor_texture);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- self->egl->glBindTexture(GL_TEXTURE_2D, 0);
-
- const uint32_t formats_nv12[2] = { fourcc('R', '8', ' ', ' '), fourcc('G', 'R', '8', '8') };
- const uint32_t formats_p010[2] = { fourcc('R', '1', '6', ' '), fourcc('G', 'R', '3', '2') };
-
- if(prime->fourcc == VA_FOURCC_NV12 || prime->fourcc == VA_FOURCC_P010) {
- const uint32_t *formats = prime->fourcc == VA_FOURCC_NV12 ? formats_nv12 : formats_p010;
- const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
-
- self->egl->glGenTextures(2, self->target_textures);
- for(int i = 0; i < 2; ++i) {
- const int layer = i;
- const int plane = 0;
-
- const uint64_t modifier = prime->objects[prime->layers[layer].object_index[plane]].drm_format_modifier;
- const intptr_t img_attr[] = {
- EGL_LINUX_DRM_FOURCC_EXT, formats[i],
- EGL_WIDTH, prime->width / div[i],
- EGL_HEIGHT, prime->height / div[i],
- EGL_DMA_BUF_PLANE0_FD_EXT, prime->objects[prime->layers[layer].object_index[plane]].fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, prime->layers[layer].offset[plane],
- EGL_DMA_BUF_PLANE0_PITCH_EXT, prime->layers[layer].pitch[plane],
- EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, modifier & 0xFFFFFFFFULL,
- EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, modifier >> 32ULL,
- EGL_NONE
- };
-
- while(self->egl->eglGetError() != EGL_SUCCESS){}
- EGLImage image = self->egl->eglCreateImage(self->egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
- if(!image) {
- fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: failed to create egl image from drm fd for output drm fd, error: %d\n", self->egl->eglGetError());
- return false;
- }
-
- self->egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[i]);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
-
- while(self->egl->glGetError()) {}
- while(self->egl->eglGetError() != EGL_SUCCESS){}
- self->egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
- if(self->egl->glGetError() != 0 || self->egl->eglGetError() != EGL_SUCCESS) {
- // TODO: Get the error properly
- fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: failed to bind egl image to gl texture, error: %d\n", self->egl->eglGetError());
- self->egl->eglDestroyImage(self->egl->egl_display, image);
- self->egl->glBindTexture(GL_TEXTURE_2D, 0);
- return false;
- }
-
- self->egl->eglDestroyImage(self->egl->egl_display, image);
- self->egl->glBindTexture(GL_TEXTURE_2D, 0);
- }
-
- gsr_color_conversion_params color_conversion_params = {0};
- color_conversion_params.color_range = color_range;
- color_conversion_params.egl = self->egl;
- color_conversion_params.source_color = GSR_SOURCE_COLOR_RGB;
- if(prime->fourcc == VA_FOURCC_NV12)
- color_conversion_params.destination_color = GSR_DESTINATION_COLOR_NV12;
- else
- color_conversion_params.destination_color = GSR_DESTINATION_COLOR_P010;
-
- color_conversion_params.destination_textures[0] = self->target_textures[0];
- color_conversion_params.destination_textures[1] = self->target_textures[1];
- color_conversion_params.num_destination_textures = 2;
-
- if(gsr_color_conversion_init(&self->color_conversion, &color_conversion_params) != 0) {
- fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: failed to create color conversion\n");
- return false;
- }
-
- gsr_color_conversion_clear(&self->color_conversion);
-
- return true;
- } else {
- fprintf(stderr, "gsr error: gsr_capture_kms_setup_vaapi_textures: unexpected fourcc %u for output drm fd, expected nv12 or p010\n", prime->fourcc);
+bool gsr_capture_should_stop(gsr_capture *cap, bool *err) {
+ assert(cap->started);
+ if(cap->should_stop)
+ return cap->should_stop(cap, err);
+ else
return false;
- }
}
-static unsigned int gl_create_texture(gsr_egl *egl, int width, int height, int internal_format, unsigned int format) {
- unsigned int texture_id = 0;
- egl->glGenTextures(1, &texture_id);
- egl->glBindTexture(GL_TEXTURE_2D, texture_id);
- egl->glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, GL_UNSIGNED_BYTE, NULL);
-
- egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
-
- egl->glBindTexture(GL_TEXTURE_2D, 0);
- return texture_id;
+int gsr_capture_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ assert(cap->started);
+ return cap->capture(cap, frame, color_conversion);
}
-static bool cuda_register_opengl_texture(gsr_cuda *cuda, CUgraphicsResource *cuda_graphics_resource, CUarray *mapped_array, unsigned int texture_id) {
- CUresult res;
- res = cuda->cuGraphicsGLRegisterImage(cuda_graphics_resource, texture_id, GL_TEXTURE_2D, CU_GRAPHICS_REGISTER_FLAGS_NONE);
- if (res != CUDA_SUCCESS) {
- const char *err_str = "unknown";
- cuda->cuGetErrorString(res, &err_str);
- fprintf(stderr, "gsr error: cuda_register_opengl_texture: cuGraphicsGLRegisterImage failed, error: %s, texture " "id: %u\n", err_str, texture_id);
- return false;
- }
-
- res = cuda->cuGraphicsResourceSetMapFlags(*cuda_graphics_resource, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
- res = cuda->cuGraphicsMapResources(1, cuda_graphics_resource, 0);
-
- res = cuda->cuGraphicsSubResourceGetMappedArray(mapped_array, *cuda_graphics_resource, 0, 0);
- return true;
+gsr_source_color gsr_capture_get_source_color(gsr_capture *cap) {
+ return cap->get_source_color(cap);
}
-bool gsr_capture_base_setup_cuda_textures(gsr_capture_base *self, AVFrame *frame, gsr_cuda_context *cuda_context, gsr_color_range color_range, gsr_source_color source_color, bool hdr) {
- // TODO:
- const int res = av_hwframe_get_buffer(self->video_codec_context->hw_frames_ctx, frame, 0);
- if(res < 0) {
- fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: av_hwframe_get_buffer failed: %d\n", res);
- return false;
- }
-
- self->egl->glGenTextures(1, &self->input_texture);
- self->egl->glBindTexture(GL_TEXTURE_2D, self->input_texture);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- self->egl->glBindTexture(GL_TEXTURE_2D, 0);
-
- self->egl->glGenTextures(1, &self->cursor_texture);
- self->egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->cursor_texture);
- self->egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- self->egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- self->egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- self->egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
-
- const unsigned int internal_formats_nv12[2] = { GL_R8, GL_RG8 };
- const unsigned int internal_formats_p010[2] = { GL_R16, GL_RG16 };
- const unsigned int formats[2] = { GL_RED, GL_RG };
- const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
-
- for(int i = 0; i < 2; ++i) {
- self->target_textures[i] = gl_create_texture(self->egl, self->video_codec_context->width / div[i], self->video_codec_context->height / div[i], !hdr ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
- if(self->target_textures[i] == 0) {
- fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: failed to create opengl texture\n");
- return false;
- }
-
- if(!cuda_register_opengl_texture(cuda_context->cuda, &cuda_context->cuda_graphics_resources[i], &cuda_context->mapped_arrays[i], self->target_textures[i])) {
- return false;
- }
- }
-
- gsr_color_conversion_params color_conversion_params = {0};
- color_conversion_params.color_range = color_range;
- color_conversion_params.egl = self->egl;
- color_conversion_params.source_color = source_color;
- if(!hdr)
- color_conversion_params.destination_color = GSR_DESTINATION_COLOR_NV12;
+bool gsr_capture_uses_external_image(gsr_capture *cap) {
+ if(cap->uses_external_image)
+ return cap->uses_external_image(cap);
else
- color_conversion_params.destination_color = GSR_DESTINATION_COLOR_P010;
-
- color_conversion_params.destination_textures[0] = self->target_textures[0];
- color_conversion_params.destination_textures[1] = self->target_textures[1];
- color_conversion_params.num_destination_textures = 2;
- color_conversion_params.load_external_image_shader = true;
-
- if(gsr_color_conversion_init(&self->color_conversion, &color_conversion_params) != 0) {
- fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: failed to create color conversion\n");
return false;
- }
-
- gsr_color_conversion_clear(&self->color_conversion);
-
- return true;
-}
-
-void gsr_capture_base_stop(gsr_capture_base *self) {
- gsr_color_conversion_deinit(&self->color_conversion);
-
- if(self->egl->egl_context) {
- if(self->input_texture) {
- self->egl->glDeleteTextures(1, &self->input_texture);
- self->input_texture = 0;
- }
-
- if(self->cursor_texture) {
- self->egl->glDeleteTextures(1, &self->cursor_texture);
- self->cursor_texture = 0;
- }
-
- self->egl->glDeleteTextures(2, self->target_textures);
- self->target_textures[0] = 0;
- self->target_textures[1] = 0;
- }
-
- if(self->video_codec_context->hw_device_ctx)
- av_buffer_unref(&self->video_codec_context->hw_device_ctx);
- if(self->video_codec_context->hw_frames_ctx)
- av_buffer_unref(&self->video_codec_context->hw_frames_ctx);
}
-bool drm_create_codec_context(const char *card_path, AVCodecContext *video_codec_context, int width, int height, bool hdr, VADisplay *va_dpy) {
- char render_path[128];
- if(!gsr_card_path_get_render_path(card_path, render_path)) {
- fprintf(stderr, "gsr error: failed to get /dev/dri/renderDXXX file from %s\n", card_path);
- return false;
- }
-
- AVBufferRef *device_ctx;
- if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
- fprintf(stderr, "Error: Failed to create hardware device context\n");
- return false;
- }
-
- AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
- if(!frame_context) {
- fprintf(stderr, "Error: Failed to create hwframe context\n");
- av_buffer_unref(&device_ctx);
- return false;
- }
-
- AVHWFramesContext *hw_frame_context =
- (AVHWFramesContext *)frame_context->data;
- hw_frame_context->width = width;
- hw_frame_context->height = height;
- hw_frame_context->sw_format = hdr ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
- hw_frame_context->format = video_codec_context->pix_fmt;
- hw_frame_context->device_ref = device_ctx;
- hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
-
- //hw_frame_context->initial_pool_size = 20;
-
- AVVAAPIDeviceContext *vactx =((AVHWDeviceContext*)device_ctx->data)->hwctx;
- *va_dpy = vactx->display;
-
- if (av_hwframe_ctx_init(frame_context) < 0) {
- fprintf(stderr, "Error: Failed to initialize hardware frame context "
- "(note: ffmpeg version needs to be > 4.0)\n");
- av_buffer_unref(&device_ctx);
- //av_buffer_unref(&frame_context);
+bool gsr_capture_set_hdr_metadata(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata) {
+ if(cap->set_hdr_metadata)
+ return cap->set_hdr_metadata(cap, mastering_display_metadata, light_metadata);
+ else
return false;
- }
-
- video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
- video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
- return true;
}
-bool cuda_create_codec_context(CUcontext cu_ctx, AVCodecContext *video_codec_context, int width, int height, bool hdr, CUstream *cuda_stream) {
- AVBufferRef *device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
- if(!device_ctx) {
- fprintf(stderr, "gsr error: cuda_create_codec_context failed: failed to create hardware device context\n");
- return false;
- }
-
- AVHWDeviceContext *hw_device_context = (AVHWDeviceContext*)device_ctx->data;
- AVCUDADeviceContext *cuda_device_context = (AVCUDADeviceContext*)hw_device_context->hwctx;
- cuda_device_context->cuda_ctx = cu_ctx;
- if(av_hwdevice_ctx_init(device_ctx) < 0) {
- fprintf(stderr, "gsr error: cuda_create_codec_context failed: failed to create hardware device context\n");
- av_buffer_unref(&device_ctx);
- return false;
- }
-
- AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
- if(!frame_context) {
- fprintf(stderr, "gsr error: cuda_create_codec_context failed: failed to create hwframe context\n");
- av_buffer_unref(&device_ctx);
- return false;
- }
-
- AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
- hw_frame_context->width = width;
- hw_frame_context->height = height;
- hw_frame_context->sw_format = hdr ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
- hw_frame_context->format = video_codec_context->pix_fmt;
- hw_frame_context->device_ref = device_ctx;
- hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
-
- if (av_hwframe_ctx_init(frame_context) < 0) {
- fprintf(stderr, "gsr error: cuda_create_codec_context failed: failed to initialize hardware frame context "
- "(note: ffmpeg version needs to be > 4.0)\n");
- av_buffer_unref(&device_ctx);
- //av_buffer_unref(&frame_context);
- return false;
- }
-
- *cuda_stream = cuda_device_context->stream;
- video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
- video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
- return true;
+void gsr_capture_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+ cap->destroy(cap, video_codec_context);
}
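The rewritten capture.c reduces gsr_capture to a thin dispatcher over per-backend function pointers. A minimal usage sketch of that interface, assuming the kms backend added below and an already initialized AVCodecContext, AVFrame and gsr_color_conversion (everything not shown in this diff, such as encoder setup, is an assumption and is elided):

    // Sketch only: error handling, encoder setup and color conversion init are omitted.
    gsr_capture *capture = gsr_capture_kms_create(&kms_params);
    if(!capture)
        return -1;

    if(gsr_capture_start(capture, video_codec_context, frame) != 0) {
        gsr_capture_destroy(capture, video_codec_context);
        return -1;
    }

    bool error = false;
    while(!gsr_capture_should_stop(capture, &error) && !error) {
        gsr_capture_tick(capture);
        gsr_capture_capture(capture, frame, &color_conversion); // the backend renders/copies the captured image for this frame
        // ... send frame to the encoder ...
    }

    gsr_capture_destroy(capture, video_codec_context);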
diff --git a/src/capture/kms.c b/src/capture/kms.c
index ec83cab..8b16ec9 100644
--- a/src/capture/kms.c
+++ b/src/capture/kms.c
@@ -1,19 +1,142 @@
#include "../../include/capture/kms.h"
-#include "../../include/capture/capture.h"
#include "../../include/utils.h"
+#include "../../include/color_conversion.h"
+#include "../../include/cursor.h"
+#include "../../kms/client/kms_client.h"
+
+#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
+#include <fcntl.h>
+
+#include <xf86drm.h>
+#include <libdrm/drm_fourcc.h>
+
#include <libavcodec/avcodec.h>
#include <libavutil/mastering_display_metadata.h>
+#include <libavformat/avformat.h>
#define HDMI_STATIC_METADATA_TYPE1 0
#define HDMI_EOTF_SMPTE_ST2084 2
+#define MAX_CONNECTOR_IDS 32
+
+typedef struct {
+ uint32_t connector_ids[MAX_CONNECTOR_IDS];
+ int num_connector_ids;
+} MonitorId;
+
+typedef struct {
+ gsr_capture_kms_params params;
+
+ gsr_kms_client kms_client;
+ gsr_kms_response kms_response;
+
+ vec2i capture_pos;
+ vec2i capture_size;
+ MonitorId monitor_id;
+
+ gsr_monitor_rotation monitor_rotation;
+
+ unsigned int input_texture_id;
+ unsigned int external_input_texture_id;
+ unsigned int cursor_texture_id;
+
+ bool no_modifiers_fallback;
+ bool external_texture_fallback;
+
+ struct hdr_output_metadata hdr_metadata;
+ bool hdr_metadata_set;
+
+ bool is_x11;
+ gsr_cursor x11_cursor;
+
+ AVCodecContext *video_codec_context;
+ bool performance_error_shown;
+ bool fast_path_failed;
+
+ //int drm_fd;
+ //uint64_t prev_sequence;
+ //bool damaged;
+
+ vec2i prev_target_pos;
+ vec2i prev_plane_size;
+} gsr_capture_kms;
+
+static void gsr_capture_kms_cleanup_kms_fds(gsr_capture_kms *self) {
+ for(int i = 0; i < self->kms_response.num_items; ++i) {
+ for(int j = 0; j < self->kms_response.items[i].num_dma_bufs; ++j) {
+ gsr_kms_response_dma_buf *dma_buf = &self->kms_response.items[i].dma_buf[j];
+ if(dma_buf->fd > 0) {
+ close(dma_buf->fd);
+ dma_buf->fd = -1;
+ }
+ }
+ self->kms_response.items[i].num_dma_bufs = 0;
+ }
+ self->kms_response.num_items = 0;
+}
+
+static void gsr_capture_kms_stop(gsr_capture_kms *self) {
+ if(self->input_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->input_texture_id);
+ self->input_texture_id = 0;
+ }
+
+ if(self->external_input_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->external_input_texture_id);
+ self->external_input_texture_id = 0;
+ }
+
+ if(self->cursor_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->cursor_texture_id);
+ self->cursor_texture_id = 0;
+ }
+
+ // if(self->drm_fd > 0) {
+ // close(self->drm_fd);
+ // self->drm_fd = -1;
+ // }
+
+ gsr_capture_kms_cleanup_kms_fds(self);
+ gsr_kms_client_deinit(&self->kms_client);
+ gsr_cursor_deinit(&self->x11_cursor);
+}
+
static int max_int(int a, int b) {
return a > b ? a : b;
}
+static void gsr_capture_kms_create_input_texture_ids(gsr_capture_kms *self) {
+ self->params.egl->glGenTextures(1, &self->input_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->input_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ self->params.egl->glGenTextures(1, &self->external_input_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->external_input_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
+
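+    // The cursor texture target depends on the GPU vendor: an external (OES) texture on nvidia, a regular 2D texture otherwise.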
+ const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+ const int cursor_texture_id_target = cursor_texture_id_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+
+ self->params.egl->glGenTextures(1, &self->cursor_texture_id);
+ self->params.egl->glBindTexture(cursor_texture_id_target, self->cursor_texture_id);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(cursor_texture_id_target, 0);
+}
+
/* TODO: On monitor reconfiguration, find monitor x, y, width and height again. Do the same for nvfbc. */
typedef struct {
@@ -39,75 +162,98 @@ static void monitor_callback(const gsr_monitor *monitor, void *userdata) {
fprintf(stderr, "gsr warning: reached max connector ids\n");
}
-int gsr_capture_kms_start(gsr_capture_kms *self, const char *display_to_capture, gsr_egl *egl, AVCodecContext *video_codec_context, AVFrame *frame) {
- memset(self, 0, sizeof(*self));
- self->base.video_codec_context = video_codec_context;
- self->base.egl = egl;
+static vec2i rotate_capture_size_if_rotated(gsr_capture_kms *self, vec2i capture_size) {
+ if(self->monitor_rotation == GSR_MONITOR_ROT_90 || self->monitor_rotation == GSR_MONITOR_ROT_270) {
+ int tmp_x = capture_size.x;
+ capture_size.x = capture_size.y;
+ capture_size.y = tmp_x;
+ }
+ return capture_size;
+}
+
+static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+ gsr_capture_kms *self = cap->priv;
+
+ gsr_capture_kms_create_input_texture_ids(self);
gsr_monitor monitor;
self->monitor_id.num_connector_ids = 0;
- int kms_init_res = gsr_kms_client_init(&self->kms_client, egl->card_path);
+ int kms_init_res = gsr_kms_client_init(&self->kms_client, self->params.egl->card_path);
if(kms_init_res != 0)
return kms_init_res;
+ self->is_x11 = gsr_egl_get_display_server(self->params.egl) == GSR_DISPLAY_SERVER_X11;
+ const gsr_connection_type connection_type = self->is_x11 ? GSR_CONNECTION_X11 : GSR_CONNECTION_DRM;
+ if(self->is_x11)
+ gsr_cursor_init(&self->x11_cursor, self->params.egl, self->params.egl->x11.dpy);
+
MonitorCallbackUserdata monitor_callback_userdata = {
&self->monitor_id,
- display_to_capture, strlen(display_to_capture),
+ self->params.display_to_capture, strlen(self->params.display_to_capture),
0,
};
- for_each_active_monitor_output(egl, GSR_CONNECTION_DRM, monitor_callback, &monitor_callback_userdata);
+ for_each_active_monitor_output(self->params.egl, connection_type, monitor_callback, &monitor_callback_userdata);
- if(!get_monitor_by_name(egl, GSR_CONNECTION_DRM, display_to_capture, &monitor)) {
- fprintf(stderr, "gsr error: gsr_capture_kms_start: failed to find monitor by name \"%s\"\n", display_to_capture);
+ if(!get_monitor_by_name(self->params.egl, connection_type, self->params.display_to_capture, &monitor)) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_start: failed to find monitor by name \"%s\"\n", self->params.display_to_capture);
+ gsr_capture_kms_stop(self);
return -1;
}
- monitor.name = display_to_capture;
- self->monitor_rotation = drm_monitor_get_display_server_rotation(egl, &monitor);
+ monitor.name = self->params.display_to_capture;
+ self->monitor_rotation = drm_monitor_get_display_server_rotation(self->params.egl, &monitor);
self->capture_pos = monitor.pos;
- if(self->monitor_rotation == GSR_MONITOR_ROT_90 || self->monitor_rotation == GSR_MONITOR_ROT_270) {
- self->capture_size.x = monitor.size.y;
- self->capture_size.y = monitor.size.x;
- } else {
+ /* Monitor size is already rotated on x11 when the monitor is rotated, no need to apply it ourselves */
+ if(self->is_x11)
self->capture_size = monitor.size;
- }
+ else
+ self->capture_size = rotate_capture_size_if_rotated(self, monitor.size);
/* Disable vsync */
- egl->eglSwapInterval(egl->egl_display, 0);
-
- // TODO: Move this and xcomposite equivalent to a common section unrelated to capture method
- if(egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_HEVC) {
- // TODO: dont do this if using ffmpeg reports that this is not needed (AMD driver bug that was fixed recently)
- self->base.video_codec_context->width = FFALIGN(self->capture_size.x, 64);
- self->base.video_codec_context->height = FFALIGN(self->capture_size.y, 16);
- } else if(egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_AV1) {
- // TODO: Dont do this for VCN 5 and forward which should fix this hardware bug
- self->base.video_codec_context->width = FFALIGN(self->capture_size.x, 64);
- // AMD driver has special case handling for 1080 height to set it to 1082 instead of 1088 (1080 aligned to 16).
- // TODO: Set height to 1082 in this case, but it wont work because it will be aligned to 1088.
- if(self->capture_size.y == 1080) {
- self->base.video_codec_context->height = 1080;
- } else {
- self->base.video_codec_context->height = FFALIGN(self->capture_size.y, 16);
- }
- } else {
- self->base.video_codec_context->width = FFALIGN(self->capture_size.x, 2);
- self->base.video_codec_context->height = FFALIGN(self->capture_size.y, 2);
- }
+ self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
+
+ video_codec_context->width = FFALIGN(self->capture_size.x, 2);
+ video_codec_context->height = FFALIGN(self->capture_size.y, 2);
+
+ frame->width = video_codec_context->width;
+ frame->height = video_codec_context->height;
- frame->width = self->base.video_codec_context->width;
- frame->height = self->base.video_codec_context->height;
+ self->video_codec_context = video_codec_context;
return 0;
}
-void gsr_capture_kms_stop(gsr_capture_kms *self) {
- gsr_capture_kms_cleanup_kms_fds(self);
- gsr_kms_client_deinit(&self->kms_client);
- gsr_capture_base_stop(&self->base);
+static void gsr_capture_kms_on_event(gsr_capture *cap, gsr_egl *egl) {
+ gsr_capture_kms *self = cap->priv;
+ if(!self->is_x11)
+ return;
+
+ XEvent *xev = gsr_egl_get_event_data(egl);
+ gsr_cursor_on_event(&self->x11_cursor, xev);
}
+// TODO: This is disabled for now because we want to be able to record at a framerate higher than the monitor framerate
+// static void gsr_capture_kms_tick(gsr_capture *cap) {
+// gsr_capture_kms *self = cap->priv;
+
+// if(self->drm_fd <= 0)
+// self->drm_fd = open(self->params.egl->card_path, O_RDONLY);
+
+// if(self->drm_fd <= 0)
+// return;
+
+// uint64_t sequence = 0;
+// uint64_t ns = 0;
+// if(drmCrtcGetSequence(self->drm_fd, 79, &sequence, &ns) != 0)
+// return;
+
+// if(sequence != self->prev_sequence) {
+// self->prev_sequence = sequence;
+// self->damaged = true;
+// }
+// }
+
static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
switch(rot) {
case GSR_MONITOR_ROT_0: return 0.0f;
@@ -118,54 +264,40 @@ static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
return 0.0f;
}
-/* Prefer non combined planes */
-static gsr_kms_response_fd* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
- int index_combined = -1;
- for(int i = 0; i < kms_response->num_fds; ++i) {
- if(kms_response->fds[i].connector_id == connector_id && !kms_response->fds[i].is_cursor) {
- if(kms_response->fds[i].is_combined_plane)
- index_combined = i;
- else
- return &kms_response->fds[i];
- }
- }
-
- if(index_combined != -1)
- return &kms_response->fds[index_combined];
- else
- return NULL;
-}
-
-static gsr_kms_response_fd* find_first_combined_drm(gsr_kms_response *kms_response) {
- for(int i = 0; i < kms_response->num_fds; ++i) {
- if(kms_response->fds[i].is_combined_plane && !kms_response->fds[i].is_cursor)
- return &kms_response->fds[i];
+static gsr_kms_response_item* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
+ for(int i = 0; i < kms_response->num_items; ++i) {
+ if(kms_response->items[i].connector_id == connector_id && !kms_response->items[i].is_cursor)
+ return &kms_response->items[i];
}
return NULL;
}
-static gsr_kms_response_fd* find_largest_drm(gsr_kms_response *kms_response) {
- if(kms_response->num_fds == 0)
+static gsr_kms_response_item* find_largest_drm(gsr_kms_response *kms_response) {
+ if(kms_response->num_items == 0)
return NULL;
int64_t largest_size = 0;
- gsr_kms_response_fd *largest_drm = &kms_response->fds[0];
- for(int i = 0; i < kms_response->num_fds; ++i) {
- const int64_t size = (int64_t)kms_response->fds[i].width * (int64_t)kms_response->fds[i].height;
- if(size > largest_size && !kms_response->fds[i].is_cursor) {
+ gsr_kms_response_item *largest_drm = &kms_response->items[0];
+ for(int i = 0; i < kms_response->num_items; ++i) {
+ const int64_t size = (int64_t)kms_response->items[i].width * (int64_t)kms_response->items[i].height;
+ if(size > largest_size && !kms_response->items[i].is_cursor) {
largest_size = size;
- largest_drm = &kms_response->fds[i];
+ largest_drm = &kms_response->items[i];
}
}
return largest_drm;
}
-static gsr_kms_response_fd* find_cursor_drm(gsr_kms_response *kms_response) {
- for(int i = 0; i < kms_response->num_fds; ++i) {
- if(kms_response->fds[i].is_cursor)
- return &kms_response->fds[i];
+static gsr_kms_response_item* find_cursor_drm(gsr_kms_response *kms_response, uint32_t connector_id) {
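+    // Prefer the cursor plane that belongs to the given connector_id, otherwise fall back to the last cursor plane found.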
+ gsr_kms_response_item *cursor_drm = NULL;
+ for(int i = 0; i < kms_response->num_items; ++i) {
+ if(kms_response->items[i].is_cursor) {
+ cursor_drm = &kms_response->items[i];
+ if(kms_response->items[i].connector_id == connector_id)
+ break;
+ }
}
- return NULL;
+ return cursor_drm;
}
static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *hdr_metadata) {
@@ -174,33 +306,13 @@ static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *h
hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
}
-static void gsr_kms_set_hdr_metadata(gsr_capture_kms *self, AVFrame *frame, gsr_kms_response_fd *drm_fd) {
- if(!self->mastering_display_metadata)
- self->mastering_display_metadata = av_mastering_display_metadata_create_side_data(frame);
-
- if(!self->light_metadata)
- self->light_metadata = av_content_light_metadata_create_side_data(frame);
-
- if(self->mastering_display_metadata) {
- for(int i = 0; i < 3; ++i) {
- self->mastering_display_metadata->display_primaries[i][0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
- self->mastering_display_metadata->display_primaries[i][1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
- }
-
- self->mastering_display_metadata->white_point[0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
- self->mastering_display_metadata->white_point[1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);
-
- self->mastering_display_metadata->min_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
- self->mastering_display_metadata->max_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);
-
- self->mastering_display_metadata->has_primaries = self->mastering_display_metadata->display_primaries[0][0].num > 0;
- self->mastering_display_metadata->has_luminance = self->mastering_display_metadata->max_luminance.num > 0;
- }
+// TODO: Check if this hdr data can be changed after the call to av_packet_side_data_add
+static void gsr_kms_set_hdr_metadata(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd) {
+ if(self->hdr_metadata_set)
+ return;
- if(self->light_metadata) {
- self->light_metadata->MaxCLL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_cll;
- self->light_metadata->MaxFALL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_fall;
- }
+ self->hdr_metadata_set = true;
+ self->hdr_metadata = drm_fd->hdr_metadata;
}
static vec2i swap_vec2i(vec2i value) {
@@ -210,29 +322,90 @@ static vec2i swap_vec2i(vec2i value) {
return value;
}
-bool gsr_capture_kms_capture(gsr_capture_kms *self, AVFrame *frame, bool hdr, bool screen_plane_use_modifiers, bool cursor_texture_is_external, bool record_cursor) {
- //egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
- self->base.egl->glClear(0);
+static EGLImage gsr_capture_kms_create_egl_image(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, bool use_modifiers) {
+ intptr_t img_attr[44];
+ setup_dma_buf_attrs(img_attr, drm_fd->pixel_format, drm_fd->width, drm_fd->height, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs, use_modifiers);
+ while(self->params.egl->eglGetError() != EGL_SUCCESS){}
+ EGLImage image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
+ if(!image || self->params.egl->eglGetError() != EGL_SUCCESS) {
+ if(image)
+ self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
+ return NULL;
+ }
+ return image;
+}
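+
+// For reference, for a single-plane buffer the attribute list built by setup_dma_buf_attrs follows
+// EGL_EXT_image_dma_buf_import(_modifiers) and looks roughly like this (values as in the removed
+// capture.c code; the helper itself is not part of this diff):
+//   EGL_LINUX_DRM_FOURCC_EXT, pixel_format,
+//   EGL_WIDTH, width,
+//   EGL_HEIGHT, height,
+//   EGL_DMA_BUF_PLANE0_FD_EXT, fd,
+//   EGL_DMA_BUF_PLANE0_OFFSET_EXT, offset,
+//   EGL_DMA_BUF_PLANE0_PITCH_EXT, pitch,
+//   EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, modifier & 0xFFFFFFFF,  (only when use_modifiers is true)
+//   EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, modifier >> 32,         (only when use_modifiers is true)
+//   EGL_NONE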
- gsr_capture_kms_cleanup_kms_fds(self);
+static EGLImage gsr_capture_kms_create_egl_image_with_fallback(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd) {
+    // TODO: This causes a crash sometimes on steam deck, why? is it a driver bug? a pure vaapi version doesn't cause a crash.
+ // Even ffmpeg kmsgrab causes this crash. The error is:
+ // amdgpu: Failed to allocate a buffer:
+ // amdgpu: size : 28508160 bytes
+ // amdgpu: alignment : 2097152 bytes
+ // amdgpu: domains : 4
+ // amdgpu: flags : 4
+ // amdgpu: Failed to allocate a buffer:
+ // amdgpu: size : 28508160 bytes
+ // amdgpu: alignment : 2097152 bytes
+ // amdgpu: domains : 4
+ // amdgpu: flags : 4
+ // EE ../jupiter-mesa/src/gallium/drivers/radeonsi/radeon_vcn_enc.c:516 radeon_create_encoder UVD - Can't create CPB buffer.
+ // [hevc_vaapi @ 0x55ea72b09840] Failed to upload encode parameters: 2 (resource allocation failed).
+ // [hevc_vaapi @ 0x55ea72b09840] Encode failed: -5.
+ // Error: avcodec_send_frame failed, error: Input/output error
+ // Assertion pic->display_order == pic->encode_order failed at libavcodec/vaapi_encode_h265.c:765
+ // kms server info: kms client shutdown, shutting down the server
- gsr_kms_response_fd *drm_fd = NULL;
- gsr_kms_response_fd *cursor_drm_fd = NULL;
- bool capture_is_combined_plane = false;
+ int fds[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t offsets[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t pitches[GSR_KMS_MAX_DMA_BUFS];
+ uint64_t modifiers[GSR_KMS_MAX_DMA_BUFS];
- if(gsr_kms_client_get_kms(&self->kms_client, &self->kms_response) != 0) {
- fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to get kms, error: %d (%s)\n", self->kms_response.result, self->kms_response.err_msg);
- return false;
+ for(int i = 0; i < drm_fd->num_dma_bufs; ++i) {
+ fds[i] = drm_fd->dma_buf[i].fd;
+ offsets[i] = drm_fd->dma_buf[i].offset;
+ pitches[i] = drm_fd->dma_buf[i].pitch;
+ modifiers[i] = drm_fd->modifier;
}
- if(self->kms_response.num_fds == 0) {
- static bool error_shown = false;
- if(!error_shown) {
- error_shown = true;
- fprintf(stderr, "gsr error: no drm found, capture will fail\n");
+ EGLImage image = NULL;
+ if(self->no_modifiers_fallback) {
+ image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, false);
+ } else {
+ image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, true);
+ if(!image) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_create_egl_image_with_fallback: failed to create egl image with modifiers, trying without modifiers\n");
+ self->no_modifiers_fallback = true;
+ image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, false);
}
- return false;
}
+ return image;
+}
+
+static bool gsr_capture_kms_bind_image_to_texture(gsr_capture_kms *self, EGLImage image, unsigned int texture_id, bool external_texture) {
+ const int texture_target = external_texture ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+ while(self->params.egl->glGetError() != 0){}
+ self->params.egl->glBindTexture(texture_target, texture_id);
+ self->params.egl->glEGLImageTargetTexture2DOES(texture_target, image);
+ const bool success = self->params.egl->glGetError() == 0;
+ self->params.egl->glBindTexture(texture_target, 0);
+ return success;
+}
+
+static void gsr_capture_kms_bind_image_to_input_texture_with_fallback(gsr_capture_kms *self, EGLImage image) {
+ if(self->external_texture_fallback) {
+ gsr_capture_kms_bind_image_to_texture(self, image, self->external_input_texture_id, true);
+ } else {
+ if(!gsr_capture_kms_bind_image_to_texture(self, image, self->input_texture_id, false)) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to bind image to texture, trying with external texture\n");
+ self->external_texture_fallback = true;
+ gsr_capture_kms_bind_image_to_texture(self, image, self->external_input_texture_id, true);
+ }
+ }
+}
+
+static gsr_kms_response_item* find_monitor_drm(gsr_capture_kms *self, bool *capture_is_combined_plane) {
+ *capture_is_combined_plane = false;
+ gsr_kms_response_item *drm_fd = NULL;
for(int i = 0; i < self->monitor_id.num_connector_ids; ++i) {
drm_fd = find_drm_by_connector_id(&self->kms_response, self->monitor_id.connector_ids[i]);
@@ -242,156 +415,324 @@ bool gsr_capture_kms_capture(gsr_capture_kms *self, AVFrame *frame, bool hdr, bo
// Will never happen on wayland unless the target monitor has been disconnected
if(!drm_fd) {
- drm_fd = find_first_combined_drm(&self->kms_response);
- if(!drm_fd)
- drm_fd = find_largest_drm(&self->kms_response);
- capture_is_combined_plane = true;
+ drm_fd = find_largest_drm(&self->kms_response);
+ *capture_is_combined_plane = true;
}
- cursor_drm_fd = find_cursor_drm(&self->kms_response);
-
- if(!drm_fd)
- return false;
+ return drm_fd;
+}
- if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != drm_fd->connector_id)
+static gsr_kms_response_item* find_cursor_drm_if_on_monitor(gsr_capture_kms *self, uint32_t monitor_connector_id, bool capture_is_combined_plane) {
+ gsr_kms_response_item *cursor_drm_fd = find_cursor_drm(&self->kms_response, monitor_connector_id);
+ if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != monitor_connector_id)
cursor_drm_fd = NULL;
+ return cursor_drm_fd;
+}
- if(drm_fd->has_hdr_metadata && hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
- gsr_kms_set_hdr_metadata(self, frame, drm_fd);
+static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, const gsr_kms_response_item *cursor_drm_fd, vec2i target_pos, float texture_rotation) {
+ const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+ const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
- // TODO: This causes a crash sometimes on steam deck, why? is it a driver bug? a vaapi pure version doesn't cause a crash.
- // Even ffmpeg kmsgrab causes this crash. The error is:
- // amdgpu: Failed to allocate a buffer:
- // amdgpu: size : 28508160 bytes
- // amdgpu: alignment : 2097152 bytes
- // amdgpu: domains : 4
- // amdgpu: flags : 4
- // amdgpu: Failed to allocate a buffer:
- // amdgpu: size : 28508160 bytes
- // amdgpu: alignment : 2097152 bytes
- // amdgpu: domains : 4
- // amdgpu: flags : 4
- // EE ../jupiter-mesa/src/gallium/drivers/radeonsi/radeon_vcn_enc.c:516 radeon_create_encoder UVD - Can't create CPB buffer.
- // [hevc_vaapi @ 0x55ea72b09840] Failed to upload encode parameters: 2 (resource allocation failed).
- // [hevc_vaapi @ 0x55ea72b09840] Encode failed: -5.
- // Error: avcodec_send_frame failed, error: Input/output error
- // Assertion pic->display_order == pic->encode_order failed at libavcodec/vaapi_encode_h265.c:765
- // kms server info: kms client shutdown, shutting down the server
- intptr_t img_attr[18] = {
- EGL_LINUX_DRM_FOURCC_EXT, drm_fd->pixel_format,
- EGL_WIDTH, drm_fd->width,
- EGL_HEIGHT, drm_fd->height,
- EGL_DMA_BUF_PLANE0_FD_EXT, drm_fd->fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, drm_fd->offset,
- EGL_DMA_BUF_PLANE0_PITCH_EXT, drm_fd->pitch,
+ vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
+ switch(self->monitor_rotation) {
+ case GSR_MONITOR_ROT_0:
+ break;
+ case GSR_MONITOR_ROT_90:
+ cursor_pos = swap_vec2i(cursor_pos);
+ cursor_pos.x = self->capture_size.x - cursor_pos.x;
+ // TODO: Remove this horrible hack
+ cursor_pos.x -= cursor_size.x;
+ break;
+ case GSR_MONITOR_ROT_180:
+ cursor_pos.x = self->capture_size.x - cursor_pos.x;
+ cursor_pos.y = self->capture_size.y - cursor_pos.y;
+ // TODO: Remove this horrible hack
+ cursor_pos.x -= cursor_size.x;
+ cursor_pos.y -= cursor_size.y;
+ break;
+ case GSR_MONITOR_ROT_270:
+ cursor_pos = swap_vec2i(cursor_pos);
+ cursor_pos.y = self->capture_size.y - cursor_pos.y;
+ // TODO: Remove this horrible hack
+ cursor_pos.y -= cursor_size.y;
+ break;
+ }
+
+ cursor_pos.x += target_pos.x;
+ cursor_pos.y += target_pos.y;
+
+ int fds[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t offsets[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t pitches[GSR_KMS_MAX_DMA_BUFS];
+ uint64_t modifiers[GSR_KMS_MAX_DMA_BUFS];
+
+ for(int i = 0; i < cursor_drm_fd->num_dma_bufs; ++i) {
+ fds[i] = cursor_drm_fd->dma_buf[i].fd;
+ offsets[i] = cursor_drm_fd->dma_buf[i].offset;
+ pitches[i] = cursor_drm_fd->dma_buf[i].pitch;
+ modifiers[i] = cursor_drm_fd->modifier;
+ }
+
+ intptr_t img_attr_cursor[44];
+ setup_dma_buf_attrs(img_attr_cursor, cursor_drm_fd->pixel_format, cursor_drm_fd->width, cursor_drm_fd->height,
+ fds, offsets, pitches, modifiers, cursor_drm_fd->num_dma_bufs, true);
+
+ EGLImage cursor_image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
+ const int target = cursor_texture_id_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+ self->params.egl->glBindTexture(target, self->cursor_texture_id);
+ self->params.egl->glEGLImageTargetTexture2DOES(target, cursor_image);
+ self->params.egl->glBindTexture(target, 0);
+
+ if(cursor_image)
+ self->params.egl->eglDestroyImage(self->params.egl->egl_display, cursor_image);
+
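+    // Clip drawing to the monitor's rectangle inside the output frame so the cursor is not drawn over the padding around it.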
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
+
+ gsr_color_conversion_draw(color_conversion, self->cursor_texture_id,
+ cursor_pos, cursor_size,
+ (vec2i){0, 0}, cursor_size,
+ texture_rotation, cursor_texture_id_is_external);
+
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
+}
+
+static void render_x11_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i capture_pos, vec2i target_pos) {
+ if(!self->x11_cursor.visible)
+ return;
+
+ gsr_cursor_tick(&self->x11_cursor, DefaultRootWindow(self->params.egl->x11.dpy));
+
+ const vec2i cursor_pos = {
+ target_pos.x + self->x11_cursor.position.x - self->x11_cursor.hotspot.x - capture_pos.x,
+ target_pos.y + self->x11_cursor.position.y - self->x11_cursor.hotspot.y - capture_pos.y
};
- if(screen_plane_use_modifiers) {
- img_attr[12] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
- img_attr[13] = drm_fd->modifier & 0xFFFFFFFFULL;
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
- img_attr[14] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
- img_attr[15] = drm_fd->modifier >> 32ULL;
+ gsr_color_conversion_draw(color_conversion, self->x11_cursor.texture_id,
+ cursor_pos, self->x11_cursor.size,
+ (vec2i){0, 0}, self->x11_cursor.size,
+ 0.0f, false);
- img_attr[16] = EGL_NONE;
- img_attr[17] = EGL_NONE;
- } else {
- img_attr[12] = EGL_NONE;
- img_attr[13] = EGL_NONE;
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
+}
+
+static void gsr_capture_kms_update_capture_size_change(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i target_pos, const gsr_kms_response_item *drm_fd) {
+ if(target_pos.x != self->prev_target_pos.x || target_pos.y != self->prev_target_pos.y || drm_fd->src_w != self->prev_plane_size.x || drm_fd->src_h != self->prev_plane_size.y) {
+ self->prev_target_pos = target_pos;
+ self->prev_plane_size = self->capture_size;
+ gsr_color_conversion_clear(color_conversion);
+ }
+}
+
+static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_capture_kms *self = cap->priv;
+
+ gsr_capture_kms_cleanup_kms_fds(self);
+
+ if(gsr_kms_client_get_kms(&self->kms_client, &self->kms_response) != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to get kms, error: %d (%s)\n", self->kms_response.result, self->kms_response.err_msg);
+ return -1;
+ }
+
+ if(self->kms_response.num_items == 0) {
+ static bool error_shown = false;
+ if(!error_shown) {
+ error_shown = true;
+ fprintf(stderr, "gsr error: no drm found, capture will fail\n");
+ }
+ return -1;
+ }
+
+ bool capture_is_combined_plane = false;
+ const gsr_kms_response_item *drm_fd = find_monitor_drm(self, &capture_is_combined_plane);
+ if(!drm_fd) {
+ gsr_capture_kms_cleanup_kms_fds(self);
+ return -1;
}
- EGLImage image = self->base.egl->eglCreateImage(self->base.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
- self->base.egl->glBindTexture(GL_TEXTURE_2D, self->base.input_texture);
- self->base.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
- self->base.egl->eglDestroyImage(self->base.egl->egl_display, image);
- self->base.egl->glBindTexture(GL_TEXTURE_2D, 0);
+ if(drm_fd->has_hdr_metadata && self->params.hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
+ gsr_kms_set_hdr_metadata(self, drm_fd);
+
+ if(!self->performance_error_shown && self->monitor_rotation != GSR_MONITOR_ROT_0 && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ self->performance_error_shown = true;
+        fprintf(stderr, "gsr warning: gsr_capture_kms_capture: the monitor you are recording is rotated, composition will have to be used."
+            " If you experience performance problems in the video then record a single window on X11 or use the portal capture option instead\n");
+ }
+
+ const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);
+ const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) };
+ self->capture_size = rotate_capture_size_if_rotated(self, (vec2i){ drm_fd->src_w, drm_fd->src_h });
+ gsr_capture_kms_update_capture_size_change(self, color_conversion, target_pos, drm_fd);
vec2i capture_pos = self->capture_pos;
if(!capture_is_combined_plane)
capture_pos = (vec2i){drm_fd->x, drm_fd->y};
- const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+    /* Fast OpenGL-free path */
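+    // On AMD with a vaapi encoder and an unrotated monitor the dma-buf planes can be copied directly to the vaapi video surface, skipping the opengl copy and color conversion below.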
+ if(!self->fast_path_failed && self->monitor_rotation == GSR_MONITOR_ROT_0 && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ int fds[4];
+ uint32_t offsets[4];
+ uint32_t pitches[4];
+ uint64_t modifiers[4];
+ for(int i = 0; i < drm_fd->num_dma_bufs; ++i) {
+ fds[i] = drm_fd->dma_buf[i].fd;
+ offsets[i] = drm_fd->dma_buf[i].offset;
+ pitches[i] = drm_fd->dma_buf[i].pitch;
+ modifiers[i] = drm_fd->modifier;
+ }
+ if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){capture_pos.x, capture_pos.y}, self->capture_size, target_pos, self->capture_size, drm_fd->pixel_format, (vec2i){drm_fd->width, drm_fd->height}, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs)) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
+ self->fast_path_failed = true;
+ }
+ } else {
+ self->fast_path_failed = true;
+ }
- const int target_x = max_int(0, frame->width / 2 - self->capture_size.x / 2);
- const int target_y = max_int(0, frame->height / 2 - self->capture_size.y / 2);
+ if(self->fast_path_failed) {
+ EGLImage image = gsr_capture_kms_create_egl_image_with_fallback(self, drm_fd);
+ if(image) {
+ gsr_capture_kms_bind_image_to_input_texture_with_fallback(self, image);
+ self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
+ }
- gsr_color_conversion_draw(&self->base.color_conversion, self->base.input_texture,
- (vec2i){target_x, target_y}, self->capture_size,
- capture_pos, self->capture_size,
- texture_rotation, false);
+ gsr_color_conversion_draw(color_conversion, self->external_texture_fallback ? self->external_input_texture_id : self->input_texture_id,
+ target_pos, self->capture_size,
+ capture_pos, self->capture_size,
+ texture_rotation, self->external_texture_fallback);
+ }
- if(record_cursor && cursor_drm_fd) {
- const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
- vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
- switch(self->monitor_rotation) {
- case GSR_MONITOR_ROT_0:
- break;
- case GSR_MONITOR_ROT_90:
- cursor_pos = swap_vec2i(cursor_pos);
- cursor_pos.x = self->capture_size.x - cursor_pos.x;
- // TODO: Remove this horrible hack
- cursor_pos.x -= cursor_size.x;
- break;
- case GSR_MONITOR_ROT_180:
- cursor_pos.x = self->capture_size.x - cursor_pos.x;
- cursor_pos.y = self->capture_size.y - cursor_pos.y;
- // TODO: Remove this horrible hack
- cursor_pos.x -= cursor_size.x;
- cursor_pos.y -= cursor_size.y;
- break;
- case GSR_MONITOR_ROT_270:
- cursor_pos = swap_vec2i(cursor_pos);
- cursor_pos.y = self->capture_size.y - cursor_pos.y;
- // TODO: Remove this horrible hack
- cursor_pos.y -= cursor_size.y;
- break;
+ if(self->params.record_cursor) {
+ gsr_kms_response_item *cursor_drm_fd = find_cursor_drm_if_on_monitor(self, drm_fd->connector_id, capture_is_combined_plane);
+        // On X11 the cursor is captured with X11 APIs instead of the cursor drm plane, because on prime systems with a dedicated nvidia gpu
+        // the cursor plane is not available when the cursor is on the monitor controlled by the nvidia device.
+ if(self->is_x11) {
+ const vec2i cursor_monitor_offset = self->capture_pos;
+ render_x11_cursor(self, color_conversion, cursor_monitor_offset, target_pos);
+ } else if(cursor_drm_fd) {
+ render_drm_cursor(self, color_conversion, cursor_drm_fd, target_pos, texture_rotation);
}
+ }
- cursor_pos.x += target_x;
- cursor_pos.y += target_y;
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
- const intptr_t img_attr_cursor[] = {
- EGL_LINUX_DRM_FOURCC_EXT, cursor_drm_fd->pixel_format,
- EGL_WIDTH, cursor_drm_fd->width,
- EGL_HEIGHT, cursor_drm_fd->height,
- EGL_DMA_BUF_PLANE0_FD_EXT, cursor_drm_fd->fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, cursor_drm_fd->offset,
- EGL_DMA_BUF_PLANE0_PITCH_EXT, cursor_drm_fd->pitch,
- EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, cursor_drm_fd->modifier & 0xFFFFFFFFULL,
- EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, cursor_drm_fd->modifier >> 32ULL,
- EGL_NONE
- };
+ gsr_capture_kms_cleanup_kms_fds(self);
- EGLImage cursor_image = self->base.egl->eglCreateImage(self->base.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
- const int target = cursor_texture_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
- self->base.egl->glBindTexture(target, self->base.cursor_texture);
- self->base.egl->glEGLImageTargetTexture2DOES(target, cursor_image);
- self->base.egl->eglDestroyImage(self->base.egl->egl_display, cursor_image);
- self->base.egl->glBindTexture(target, 0);
+ return 0;
+}
+
+static bool gsr_capture_kms_should_stop(gsr_capture *cap, bool *err) {
+ (void)cap;
+ if(err)
+ *err = false;
+ return false;
+}
+
+static gsr_source_color gsr_capture_kms_get_source_color(gsr_capture *cap) {
+ (void)cap;
+ return GSR_SOURCE_COLOR_RGB;
+}
+
+static bool gsr_capture_kms_uses_external_image(gsr_capture *cap) {
+ (void)cap;
+ return true;
+}
+
+static bool gsr_capture_kms_set_hdr_metadata(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata) {
+ gsr_capture_kms *self = cap->priv;
- self->base.egl->glEnable(GL_SCISSOR_TEST);
- self->base.egl->glScissor(target_x, target_y, self->capture_size.x, self->capture_size.y);
+ if(!self->hdr_metadata_set)
+ return false;
- gsr_color_conversion_draw(&self->base.color_conversion, self->base.cursor_texture,
- cursor_pos, cursor_size,
- (vec2i){0, 0}, cursor_size,
- texture_rotation, cursor_texture_is_external);
+ light_metadata->MaxCLL = self->hdr_metadata.hdmi_metadata_type1.max_cll;
+ light_metadata->MaxFALL = self->hdr_metadata.hdmi_metadata_type1.max_fall;
- self->base.egl->glDisable(GL_SCISSOR_TEST);
+ for(int i = 0; i < 3; ++i) {
+ mastering_display_metadata->display_primaries[i][0] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
+ mastering_display_metadata->display_primaries[i][1] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
}
- self->base.egl->eglSwapBuffers(self->base.egl->egl_display, self->base.egl->egl_surface);
- //self->base.egl->glFlush();
- //self->base.egl->glFinish();
+ mastering_display_metadata->white_point[0] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
+ mastering_display_metadata->white_point[1] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);
+
+ mastering_display_metadata->min_luminance = av_make_q(self->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
+ mastering_display_metadata->max_luminance = av_make_q(self->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);
+
+ mastering_display_metadata->has_primaries = mastering_display_metadata->display_primaries[0][0].num > 0;
+ mastering_display_metadata->has_luminance = mastering_display_metadata->max_luminance.num > 0;
return true;
}
-void gsr_capture_kms_cleanup_kms_fds(gsr_capture_kms *self) {
- for(int i = 0; i < self->kms_response.num_fds; ++i) {
- if(self->kms_response.fds[i].fd > 0)
- close(self->kms_response.fds[i].fd);
- self->kms_response.fds[i].fd = 0;
+// static bool gsr_capture_kms_is_damaged(gsr_capture *cap) {
+// gsr_capture_kms *self = cap->priv;
+// return self->damaged;
+// }
+
+// static void gsr_capture_kms_clear_damage(gsr_capture *cap) {
+// gsr_capture_kms *self = cap->priv;
+// self->damaged = false;
+// }
+
+static void gsr_capture_kms_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+ (void)video_codec_context;
+ gsr_capture_kms *self = cap->priv;
+ if(cap->priv) {
+ gsr_capture_kms_stop(self);
+ free((void*)self->params.display_to_capture);
+ self->params.display_to_capture = NULL;
+ free(cap->priv);
+ cap->priv = NULL;
}
- self->kms_response.num_fds = 0;
+ free(cap);
+}
+
+gsr_capture* gsr_capture_kms_create(const gsr_capture_kms_params *params) {
+ if(!params) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_create params is NULL\n");
+ return NULL;
+ }
+
+ gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+ if(!cap)
+ return NULL;
+
+ gsr_capture_kms *cap_kms = calloc(1, sizeof(gsr_capture_kms));
+ if(!cap_kms) {
+ free(cap);
+ return NULL;
+ }
+
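+ // Keep a private copy of the display name; it is freed again in gsr_capture_kms_destroy.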
+ const char *display_to_capture = strdup(params->display_to_capture);
+ if(!display_to_capture) {
+ free(cap);
+ free(cap_kms);
+ return NULL;
+ }
+
+ cap_kms->params = *params;
+ cap_kms->params.display_to_capture = display_to_capture;
+
+ *cap = (gsr_capture) {
+ .start = gsr_capture_kms_start,
+ .on_event = gsr_capture_kms_on_event,
+ //.tick = gsr_capture_kms_tick,
+ .should_stop = gsr_capture_kms_should_stop,
+ .capture = gsr_capture_kms_capture,
+ .get_source_color = gsr_capture_kms_get_source_color,
+ .uses_external_image = gsr_capture_kms_uses_external_image,
+ .set_hdr_metadata = gsr_capture_kms_set_hdr_metadata,
+ //.is_damaged = gsr_capture_kms_is_damaged,
+ //.clear_damage = gsr_capture_kms_clear_damage,
+ .destroy = gsr_capture_kms_destroy,
+ .priv = cap_kms
+ };
+
+ return cap;
}
diff --git a/src/capture/kms_cuda.c b/src/capture/kms_cuda.c
deleted file mode 100644
index a9f1f8e..0000000
--- a/src/capture/kms_cuda.c
+++ /dev/null
@@ -1,181 +0,0 @@
-#include "../../include/capture/kms_cuda.h"
-#include "../../include/capture/kms.h"
-#include "../../include/cuda.h"
-#include <stdlib.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <assert.h>
-#include <libavutil/hwcontext.h>
-#include <libavutil/hwcontext_cuda.h>
-#include <libavcodec/avcodec.h>
-
-typedef struct {
- gsr_capture_kms kms;
-
- gsr_capture_kms_cuda_params params;
-
- gsr_cuda cuda;
- CUgraphicsResource cuda_graphics_resources[2];
- CUarray mapped_arrays[2];
- CUstream cuda_stream;
-} gsr_capture_kms_cuda;
-
-static void gsr_capture_kms_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
-
-static int gsr_capture_kms_cuda_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_capture_kms_cuda *cap_kms = cap->priv;
-
- const int res = gsr_capture_kms_start(&cap_kms->kms, cap_kms->params.display_to_capture, cap_kms->params.egl, video_codec_context, frame);
- if(res != 0) {
- gsr_capture_kms_cuda_stop(cap, video_codec_context);
- return res;
- }
-
- // TODO: overclocking is not supported on wayland...
- if(!gsr_cuda_load(&cap_kms->cuda, NULL, false)) {
- fprintf(stderr, "gsr error: gsr_capture_kms_cuda_start: failed to load cuda\n");
- gsr_capture_kms_cuda_stop(cap, video_codec_context);
- return -1;
- }
-
- if(!cuda_create_codec_context(cap_kms->cuda.cu_ctx, video_codec_context, video_codec_context->width, video_codec_context->height, cap_kms->params.hdr, &cap_kms->cuda_stream)) {
- gsr_capture_kms_cuda_stop(cap, video_codec_context);
- return -1;
- }
-
- gsr_cuda_context cuda_context = {
- .cuda = &cap_kms->cuda,
- .cuda_graphics_resources = cap_kms->cuda_graphics_resources,
- .mapped_arrays = cap_kms->mapped_arrays
- };
-
- if(!gsr_capture_base_setup_cuda_textures(&cap_kms->kms.base, frame, &cuda_context, cap_kms->params.color_range, GSR_SOURCE_COLOR_RGB, cap_kms->params.hdr)) {
- gsr_capture_kms_cuda_stop(cap, video_codec_context);
- return -1;
- }
-
- return 0;
-}
-
-static bool gsr_capture_kms_cuda_should_stop(gsr_capture *cap, bool *err) {
- gsr_capture_kms_cuda *cap_kms = cap->priv;
- if(cap_kms->kms.should_stop) {
- if(err)
- *err = cap_kms->kms.stop_is_error;
- return true;
- }
-
- if(err)
- *err = false;
- return false;
-}
-
-static void gsr_capture_kms_unload_cuda_graphics(gsr_capture_kms_cuda *cap_kms) {
- if(cap_kms->cuda.cu_ctx) {
- for(int i = 0; i < 2; ++i) {
- if(cap_kms->cuda_graphics_resources[i]) {
- cap_kms->cuda.cuGraphicsUnmapResources(1, &cap_kms->cuda_graphics_resources[i], 0);
- cap_kms->cuda.cuGraphicsUnregisterResource(cap_kms->cuda_graphics_resources[i]);
- cap_kms->cuda_graphics_resources[i] = 0;
- }
- }
- }
-}
-
-static int gsr_capture_kms_cuda_capture(gsr_capture *cap, AVFrame *frame) {
- gsr_capture_kms_cuda *cap_kms = cap->priv;
-
- gsr_capture_kms_capture(&cap_kms->kms, frame, cap_kms->params.hdr, true, true, cap_kms->params.record_cursor);
-
- const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
- for(int i = 0; i < 2; ++i) {
- CUDA_MEMCPY2D memcpy_struct;
- memcpy_struct.srcXInBytes = 0;
- memcpy_struct.srcY = 0;
- memcpy_struct.srcMemoryType = CU_MEMORYTYPE_ARRAY;
-
- memcpy_struct.dstXInBytes = 0;
- memcpy_struct.dstY = 0;
- memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;
-
- memcpy_struct.srcArray = cap_kms->mapped_arrays[i];
- memcpy_struct.srcPitch = frame->width / div[i];
- memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
- memcpy_struct.dstPitch = frame->linesize[i];
- memcpy_struct.WidthInBytes = frame->width * (cap_kms->params.hdr ? 2 : 1);
- memcpy_struct.Height = frame->height / div[i];
- // TODO: Remove this copy if possible
- cap_kms->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, cap_kms->cuda_stream);
- }
-
- // TODO: needed?
- cap_kms->cuda.cuStreamSynchronize(cap_kms->cuda_stream);
-
- return 0;
-}
-
-static void gsr_capture_kms_cuda_capture_end(gsr_capture *cap, AVFrame *frame) {
- (void)frame;
- gsr_capture_kms_cuda *cap_kms = cap->priv;
- gsr_capture_kms_cleanup_kms_fds(&cap_kms->kms);
-}
-
-static void gsr_capture_kms_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
- gsr_capture_kms_cuda *cap_kms = cap->priv;
- gsr_capture_kms_unload_cuda_graphics(cap_kms);
- gsr_cuda_unload(&cap_kms->cuda);
- gsr_capture_kms_stop(&cap_kms->kms);
-}
-
-static void gsr_capture_kms_cuda_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
- gsr_capture_kms_cuda *cap_kms = cap->priv;
- if(cap->priv) {
- gsr_capture_kms_cuda_stop(cap, video_codec_context);
- free((void*)cap_kms->params.display_to_capture);
- cap_kms->params.display_to_capture = NULL;
- free(cap->priv);
- cap->priv = NULL;
- }
- free(cap);
-}
-
-gsr_capture* gsr_capture_kms_cuda_create(const gsr_capture_kms_cuda_params *params) {
- if(!params) {
- fprintf(stderr, "gsr error: gsr_capture_kms_cuda_create params is NULL\n");
- return NULL;
- }
-
- gsr_capture *cap = calloc(1, sizeof(gsr_capture));
- if(!cap)
- return NULL;
-
- gsr_capture_kms_cuda *cap_kms = calloc(1, sizeof(gsr_capture_kms_cuda));
- if(!cap_kms) {
- free(cap);
- return NULL;
- }
-
- const char *display_to_capture = strdup(params->display_to_capture);
- if(!display_to_capture) {
- free(cap);
- free(cap_kms);
- return NULL;
- }
-
- cap_kms->params = *params;
- cap_kms->params.display_to_capture = display_to_capture;
-
- *cap = (gsr_capture) {
- .start = gsr_capture_kms_cuda_start,
- .tick = NULL,
- .should_stop = gsr_capture_kms_cuda_should_stop,
- .capture = gsr_capture_kms_cuda_capture,
- .capture_end = gsr_capture_kms_cuda_capture_end,
- .destroy = gsr_capture_kms_cuda_destroy,
- .priv = cap_kms
- };
-
- return cap;
-}
diff --git a/src/capture/kms_vaapi.c b/src/capture/kms_vaapi.c
deleted file mode 100644
index b9c9ee5..0000000
--- a/src/capture/kms_vaapi.c
+++ /dev/null
@@ -1,135 +0,0 @@
-#include "../../include/capture/kms_vaapi.h"
-#include "../../include/capture/kms.h"
-#include <stdlib.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <assert.h>
-#include <libavutil/hwcontext.h>
-#include <libavutil/hwcontext_vaapi.h>
-#include <libavcodec/avcodec.h>
-#include <va/va_drmcommon.h>
-
-typedef struct {
- gsr_capture_kms kms;
-
- gsr_capture_kms_vaapi_params params;
-
- VADisplay va_dpy;
- VADRMPRIMESurfaceDescriptor prime;
-} gsr_capture_kms_vaapi;
-
-static void gsr_capture_kms_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
-
-static int gsr_capture_kms_vaapi_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_capture_kms_vaapi *cap_kms = cap->priv;
-
- int res = gsr_capture_kms_start(&cap_kms->kms, cap_kms->params.display_to_capture, cap_kms->params.egl, video_codec_context, frame);
- if(res != 0) {
- gsr_capture_kms_vaapi_stop(cap, video_codec_context);
- return res;
- }
-
- if(!drm_create_codec_context(cap_kms->params.egl->card_path, video_codec_context, video_codec_context->width, video_codec_context->height, cap_kms->params.hdr, &cap_kms->va_dpy)) {
- gsr_capture_kms_vaapi_stop(cap, video_codec_context);
- return -1;
- }
-
- if(!gsr_capture_base_setup_vaapi_textures(&cap_kms->kms.base, frame, cap_kms->va_dpy, &cap_kms->prime, cap_kms->params.color_range)) {
- gsr_capture_kms_vaapi_stop(cap, video_codec_context);
- return -1;
- }
-
- return 0;
-}
-
-static bool gsr_capture_kms_vaapi_should_stop(gsr_capture *cap, bool *err) {
- gsr_capture_kms_vaapi *cap_kms = cap->priv;
- if(cap_kms->kms.should_stop) {
- if(err)
- *err = cap_kms->kms.stop_is_error;
- return true;
- }
-
- if(err)
- *err = false;
- return false;
-}
-
-static int gsr_capture_kms_vaapi_capture(gsr_capture *cap, AVFrame *frame) {
- gsr_capture_kms_vaapi *cap_kms = cap->priv;
- gsr_capture_kms_capture(&cap_kms->kms, frame, cap_kms->params.hdr, cap_kms->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_INTEL, false, cap_kms->params.record_cursor);
- return 0;
-}
-
-static void gsr_capture_kms_vaapi_capture_end(gsr_capture *cap, AVFrame *frame) {
- (void)frame;
- gsr_capture_kms_vaapi *cap_kms = cap->priv;
- gsr_capture_kms_cleanup_kms_fds(&cap_kms->kms);
-}
-
-static void gsr_capture_kms_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
- gsr_capture_kms_vaapi *cap_kms = cap->priv;
-
- for(uint32_t i = 0; i < cap_kms->prime.num_objects; ++i) {
- if(cap_kms->prime.objects[i].fd > 0) {
- close(cap_kms->prime.objects[i].fd);
- cap_kms->prime.objects[i].fd = 0;
- }
- }
-
- gsr_capture_kms_stop(&cap_kms->kms);
-}
-
-static void gsr_capture_kms_vaapi_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
- gsr_capture_kms_vaapi *cap_kms = cap->priv;
- if(cap->priv) {
- gsr_capture_kms_vaapi_stop(cap, video_codec_context);
- free((void*)cap_kms->params.display_to_capture);
- cap_kms->params.display_to_capture = NULL;
- free(cap->priv);
- cap->priv = NULL;
- }
- free(cap);
-}
-
-gsr_capture* gsr_capture_kms_vaapi_create(const gsr_capture_kms_vaapi_params *params) {
- if(!params) {
- fprintf(stderr, "gsr error: gsr_capture_kms_vaapi_create params is NULL\n");
- return NULL;
- }
-
- gsr_capture *cap = calloc(1, sizeof(gsr_capture));
- if(!cap)
- return NULL;
-
- gsr_capture_kms_vaapi *cap_kms = calloc(1, sizeof(gsr_capture_kms_vaapi));
- if(!cap_kms) {
- free(cap);
- return NULL;
- }
-
- const char *display_to_capture = strdup(params->display_to_capture);
- if(!display_to_capture) {
- /* TODO XCloseDisplay */
- free(cap);
- free(cap_kms);
- return NULL;
- }
-
- cap_kms->params = *params;
- cap_kms->params.display_to_capture = display_to_capture;
-
- *cap = (gsr_capture) {
- .start = gsr_capture_kms_vaapi_start,
- .tick = NULL,
- .should_stop = gsr_capture_kms_vaapi_should_stop,
- .capture = gsr_capture_kms_vaapi_capture,
- .capture_end = gsr_capture_kms_vaapi_capture_end,
- .destroy = gsr_capture_kms_vaapi_destroy,
- .priv = cap_kms
- };
-
- return cap;
-}
diff --git a/src/capture/nvfbc.c b/src/capture/nvfbc.c
index 9eabb18..ee77a20 100644
--- a/src/capture/nvfbc.c
+++ b/src/capture/nvfbc.c
@@ -1,22 +1,19 @@
#include "../../include/capture/nvfbc.h"
#include "../../external/NvFBC.h"
-#include "../../include/cuda.h"
#include "../../include/egl.h"
#include "../../include/utils.h"
+#include "../../include/color_conversion.h"
+
#include <dlfcn.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
+
#include <X11/Xlib.h>
-#include <libavutil/hwcontext.h>
-#include <libavutil/hwcontext_cuda.h>
-#include <libavutil/frame.h>
-#include <libavutil/version.h>
#include <libavcodec/avcodec.h>
typedef struct {
- gsr_capture_base base;
gsr_capture_nvfbc_params params;
void *library;
@@ -26,13 +23,8 @@ typedef struct {
bool fbc_handle_created;
bool capture_session_created;
- gsr_cuda cuda;
- CUgraphicsResource cuda_graphics_resources[2];
- CUarray mapped_arrays[2];
- CUstream cuda_stream; // TODO: asdasdsa
NVFBC_TOGL_SETUP_PARAMS setup_params;
- bool direct_capture;
bool supports_direct_cursor;
bool capture_region;
uint32_t x, y, width, height;
@@ -43,13 +35,6 @@ typedef struct {
double nvfbc_dead_start;
} gsr_capture_nvfbc;
-#if defined(_WIN64) || defined(__LP64__)
-typedef unsigned long long CUdeviceptr_v2;
-#else
-typedef unsigned int CUdeviceptr_v2;
-#endif
-typedef CUdeviceptr_v2 CUdeviceptr;
-
static int max_int(int a, int b) {
return a > b ? a : b;
}
@@ -117,7 +102,7 @@ static void set_func_ptr(void **dst, void *src) {
}
static bool gsr_capture_nvfbc_load_library(gsr_capture *cap) {
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+ gsr_capture_nvfbc *self = cap->priv;
dlerror(); /* clear */
void *lib = dlopen("libnvidia-fbc.so.1", RTLD_LAZY);
@@ -126,23 +111,23 @@ static bool gsr_capture_nvfbc_load_library(gsr_capture *cap) {
return false;
}
- set_func_ptr((void**)&cap_nvfbc->nv_fbc_create_instance, dlsym(lib, "NvFBCCreateInstance"));
- if(!cap_nvfbc->nv_fbc_create_instance) {
+ set_func_ptr((void**)&self->nv_fbc_create_instance, dlsym(lib, "NvFBCCreateInstance"));
+ if(!self->nv_fbc_create_instance) {
fprintf(stderr, "gsr error: unable to resolve symbol 'NvFBCCreateInstance'\n");
dlclose(lib);
return false;
}
- memset(&cap_nvfbc->nv_fbc_function_list, 0, sizeof(cap_nvfbc->nv_fbc_function_list));
- cap_nvfbc->nv_fbc_function_list.dwVersion = NVFBC_VERSION;
- NVFBCSTATUS status = cap_nvfbc->nv_fbc_create_instance(&cap_nvfbc->nv_fbc_function_list);
+ memset(&self->nv_fbc_function_list, 0, sizeof(self->nv_fbc_function_list));
+ self->nv_fbc_function_list.dwVersion = NVFBC_VERSION;
+ NVFBCSTATUS status = self->nv_fbc_create_instance(&self->nv_fbc_function_list);
if(status != NVFBC_SUCCESS) {
fprintf(stderr, "gsr error: failed to create NvFBC instance (status: %d)\n", status);
dlclose(lib);
return false;
}
- cap_nvfbc->library = lib;
+ self->library = lib;
return true;
}
@@ -168,64 +153,64 @@ static void set_vertical_sync_enabled(gsr_egl *egl, int enabled) {
fprintf(stderr, "gsr warning: setting vertical sync failed\n");
}
-static void gsr_capture_nvfbc_destroy_session(gsr_capture_nvfbc *cap_nvfbc) {
- if(cap_nvfbc->fbc_handle_created && cap_nvfbc->capture_session_created) {
+static void gsr_capture_nvfbc_destroy_session(gsr_capture_nvfbc *self) {
+ if(self->fbc_handle_created && self->capture_session_created) {
NVFBC_DESTROY_CAPTURE_SESSION_PARAMS destroy_capture_params;
memset(&destroy_capture_params, 0, sizeof(destroy_capture_params));
destroy_capture_params.dwVersion = NVFBC_DESTROY_CAPTURE_SESSION_PARAMS_VER;
- cap_nvfbc->nv_fbc_function_list.nvFBCDestroyCaptureSession(cap_nvfbc->nv_fbc_handle, &destroy_capture_params);
- cap_nvfbc->capture_session_created = false;
+ self->nv_fbc_function_list.nvFBCDestroyCaptureSession(self->nv_fbc_handle, &destroy_capture_params);
+ self->capture_session_created = false;
}
}
-static void gsr_capture_nvfbc_destroy_handle(gsr_capture_nvfbc *cap_nvfbc) {
- if(cap_nvfbc->fbc_handle_created) {
+static void gsr_capture_nvfbc_destroy_handle(gsr_capture_nvfbc *self) {
+ if(self->fbc_handle_created) {
NVFBC_DESTROY_HANDLE_PARAMS destroy_params;
memset(&destroy_params, 0, sizeof(destroy_params));
destroy_params.dwVersion = NVFBC_DESTROY_HANDLE_PARAMS_VER;
- cap_nvfbc->nv_fbc_function_list.nvFBCDestroyHandle(cap_nvfbc->nv_fbc_handle, &destroy_params);
- cap_nvfbc->fbc_handle_created = false;
- cap_nvfbc->nv_fbc_handle = 0;
+ self->nv_fbc_function_list.nvFBCDestroyHandle(self->nv_fbc_handle, &destroy_params);
+ self->fbc_handle_created = false;
+ self->nv_fbc_handle = 0;
}
}
-static void gsr_capture_nvfbc_destroy_session_and_handle(gsr_capture_nvfbc *cap_nvfbc) {
- gsr_capture_nvfbc_destroy_session(cap_nvfbc);
- gsr_capture_nvfbc_destroy_handle(cap_nvfbc);
+static void gsr_capture_nvfbc_destroy_session_and_handle(gsr_capture_nvfbc *self) {
+ gsr_capture_nvfbc_destroy_session(self);
+ gsr_capture_nvfbc_destroy_handle(self);
}
-static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
+static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *self) {
NVFBCSTATUS status;
NVFBC_CREATE_HANDLE_PARAMS create_params;
memset(&create_params, 0, sizeof(create_params));
create_params.dwVersion = NVFBC_CREATE_HANDLE_PARAMS_VER;
create_params.bExternallyManagedContext = NVFBC_TRUE;
- create_params.glxCtx = cap_nvfbc->params.egl->glx_context;
- create_params.glxFBConfig = cap_nvfbc->params.egl->glx_fb_config;
+ create_params.glxCtx = self->params.egl->glx_context;
+ create_params.glxFBConfig = self->params.egl->glx_fb_config;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateHandle(&cap_nvfbc->nv_fbc_handle, &create_params);
+ status = self->nv_fbc_function_list.nvFBCCreateHandle(&self->nv_fbc_handle, &create_params);
if(status != NVFBC_SUCCESS) {
// Reverse engineering for interoperability
const uint8_t enable_key[] = { 0xac, 0x10, 0xc9, 0x2e, 0xa5, 0xe6, 0x87, 0x4f, 0x8f, 0x4b, 0xf4, 0x61, 0xf8, 0x56, 0x27, 0xe9 };
create_params.privateData = enable_key;
create_params.privateDataSize = 16;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateHandle(&cap_nvfbc->nv_fbc_handle, &create_params);
+ status = self->nv_fbc_function_list.nvFBCCreateHandle(&self->nv_fbc_handle, &create_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
goto error_cleanup;
}
}
- cap_nvfbc->fbc_handle_created = true;
+ self->fbc_handle_created = true;
NVFBC_GET_STATUS_PARAMS status_params;
memset(&status_params, 0, sizeof(status_params));
status_params.dwVersion = NVFBC_GET_STATUS_PARAMS_VER;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCGetStatus(cap_nvfbc->nv_fbc_handle, &status_params);
+ status = self->nv_fbc_function_list.nvFBCGetStatus(self->nv_fbc_handle, &status_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
goto error_cleanup;
}
@@ -234,10 +219,10 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
goto error_cleanup;
}
- cap_nvfbc->tracking_width = XWidthOfScreen(DefaultScreenOfDisplay(cap_nvfbc->params.egl->x11.dpy));
- cap_nvfbc->tracking_height = XHeightOfScreen(DefaultScreenOfDisplay(cap_nvfbc->params.egl->x11.dpy));
- cap_nvfbc->tracking_type = strcmp(cap_nvfbc->params.display_to_capture, "screen") == 0 ? NVFBC_TRACKING_SCREEN : NVFBC_TRACKING_OUTPUT;
- if(cap_nvfbc->tracking_type == NVFBC_TRACKING_OUTPUT) {
+ self->tracking_width = XWidthOfScreen(DefaultScreenOfDisplay(self->params.egl->x11.dpy));
+ self->tracking_height = XHeightOfScreen(DefaultScreenOfDisplay(self->params.egl->x11.dpy));
+ self->tracking_type = strcmp(self->params.display_to_capture, "screen") == 0 ? NVFBC_TRACKING_SCREEN : NVFBC_TRACKING_OUTPUT;
+ if(self->tracking_type == NVFBC_TRACKING_OUTPUT) {
if(!status_params.bXRandRAvailable) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: the xrandr extension is not available\n");
goto error_cleanup;
@@ -248,9 +233,9 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
goto error_cleanup;
}
- cap_nvfbc->output_id = get_output_id_from_display_name(status_params.outputs, status_params.dwOutputNum, cap_nvfbc->params.display_to_capture, &cap_nvfbc->tracking_width, &cap_nvfbc->tracking_height);
- if(cap_nvfbc->output_id == 0) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: display '%s' not found\n", cap_nvfbc->params.display_to_capture);
+ self->output_id = get_output_id_from_display_name(status_params.outputs, status_params.dwOutputNum, self->params.display_to_capture, &self->tracking_width, &self->tracking_height);
+ if(self->output_id == 0) {
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: display '%s' not found\n", self->params.display_to_capture);
goto error_cleanup;
}
}
@@ -258,80 +243,83 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
return 0;
error_cleanup:
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+ gsr_capture_nvfbc_destroy_session_and_handle(self);
return -1;
}
-static int gsr_capture_nvfbc_setup_session(gsr_capture_nvfbc *cap_nvfbc) {
+static int gsr_capture_nvfbc_setup_session(gsr_capture_nvfbc *self) {
NVFBC_CREATE_CAPTURE_SESSION_PARAMS create_capture_params;
memset(&create_capture_params, 0, sizeof(create_capture_params));
create_capture_params.dwVersion = NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER;
create_capture_params.eCaptureType = NVFBC_CAPTURE_TO_GL;
- create_capture_params.bWithCursor = (!cap_nvfbc->direct_capture || cap_nvfbc->supports_direct_cursor) ? NVFBC_TRUE : NVFBC_FALSE;
- if(!cap_nvfbc->params.record_cursor)
+ create_capture_params.bWithCursor = (!self->params.direct_capture || self->supports_direct_cursor) ? NVFBC_TRUE : NVFBC_FALSE;
+ if(!self->params.record_cursor)
create_capture_params.bWithCursor = false;
- if(cap_nvfbc->capture_region)
- create_capture_params.captureBox = (NVFBC_BOX){ cap_nvfbc->x, cap_nvfbc->y, cap_nvfbc->width, cap_nvfbc->height };
- create_capture_params.eTrackingType = cap_nvfbc->tracking_type;
- create_capture_params.dwSamplingRateMs = (uint32_t)ceilf(1000.0f / (float)cap_nvfbc->params.fps);
- create_capture_params.bAllowDirectCapture = cap_nvfbc->direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
- create_capture_params.bPushModel = cap_nvfbc->direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
+ if(self->capture_region)
+ create_capture_params.captureBox = (NVFBC_BOX){ self->x, self->y, self->width, self->height };
+ create_capture_params.eTrackingType = self->tracking_type;
+ create_capture_params.dwSamplingRateMs = (uint32_t)ceilf(1000.0f / (float)self->params.fps);
+ create_capture_params.bAllowDirectCapture = self->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
+ create_capture_params.bPushModel = self->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
create_capture_params.bDisableAutoModesetRecovery = true;
- if(cap_nvfbc->tracking_type == NVFBC_TRACKING_OUTPUT)
- create_capture_params.dwOutputId = cap_nvfbc->output_id;
+ if(self->tracking_type == NVFBC_TRACKING_OUTPUT)
+ create_capture_params.dwOutputId = self->output_id;
- NVFBCSTATUS status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateCaptureSession(cap_nvfbc->nv_fbc_handle, &create_capture_params);
+ NVFBCSTATUS status = self->nv_fbc_function_list.nvFBCCreateCaptureSession(self->nv_fbc_handle, &create_capture_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
return -1;
}
- cap_nvfbc->capture_session_created = true;
+ self->capture_session_created = true;
- memset(&cap_nvfbc->setup_params, 0, sizeof(cap_nvfbc->setup_params));
- cap_nvfbc->setup_params.dwVersion = NVFBC_TOGL_SETUP_PARAMS_VER;
- cap_nvfbc->setup_params.eBufferFormat = NVFBC_BUFFER_FORMAT_BGRA;
+ memset(&self->setup_params, 0, sizeof(self->setup_params));
+ self->setup_params.dwVersion = NVFBC_TOGL_SETUP_PARAMS_VER;
+ self->setup_params.eBufferFormat = NVFBC_BUFFER_FORMAT_BGRA;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCToGLSetUp(cap_nvfbc->nv_fbc_handle, &cap_nvfbc->setup_params);
+ status = self->nv_fbc_function_list.nvFBCToGLSetUp(self->nv_fbc_handle, &self->setup_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
- gsr_capture_nvfbc_destroy_session(cap_nvfbc);
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
+ gsr_capture_nvfbc_destroy_session(self);
return -1;
}
return 0;
}
-static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
-
- cap_nvfbc->base.video_codec_context = video_codec_context;
- cap_nvfbc->base.egl = cap_nvfbc->params.egl;
+static void gsr_capture_nvfbc_stop(gsr_capture_nvfbc *self) {
+ gsr_capture_nvfbc_destroy_session_and_handle(self);
+ if(self->library) {
+ dlclose(self->library);
+ self->library = NULL;
+ }
+ if(self->params.display_to_capture) {
+ free((void*)self->params.display_to_capture);
+ self->params.display_to_capture = NULL;
+ }
+}
- if(!gsr_cuda_load(&cap_nvfbc->cuda, cap_nvfbc->params.egl->x11.dpy, cap_nvfbc->params.overclock))
- return -1;
+static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+ gsr_capture_nvfbc *self = cap->priv;
- if(!gsr_capture_nvfbc_load_library(cap)) {
- gsr_cuda_unload(&cap_nvfbc->cuda);
+ if(!gsr_capture_nvfbc_load_library(cap))
return -1;
- }
- cap_nvfbc->x = max_int(cap_nvfbc->params.pos.x, 0);
- cap_nvfbc->y = max_int(cap_nvfbc->params.pos.y, 0);
- cap_nvfbc->width = max_int(cap_nvfbc->params.size.x, 0);
- cap_nvfbc->height = max_int(cap_nvfbc->params.size.y, 0);
+ self->x = max_int(self->params.pos.x, 0);
+ self->y = max_int(self->params.pos.y, 0);
+ self->width = max_int(self->params.size.x, 0);
+ self->height = max_int(self->params.size.y, 0);
- cap_nvfbc->capture_region = (cap_nvfbc->x > 0 || cap_nvfbc->y > 0 || cap_nvfbc->width > 0 || cap_nvfbc->height > 0);
+ self->capture_region = (self->x > 0 || self->y > 0 || self->width > 0 || self->height > 0);
- cap_nvfbc->supports_direct_cursor = false;
- bool direct_capture = cap_nvfbc->params.direct_capture;
+ self->supports_direct_cursor = false;
int driver_major_version = 0;
int driver_minor_version = 0;
- if(direct_capture && get_driver_version(&driver_major_version, &driver_minor_version)) {
+ if(self->params.direct_capture && get_driver_version(&driver_major_version, &driver_minor_version)) {
fprintf(stderr, "Info: detected nvidia version: %d.%d\n", driver_major_version, driver_minor_version);
// TODO:
if(version_at_least(driver_major_version, driver_minor_version, 515, 57) && version_less_than(driver_major_version, driver_minor_version, 520, 56)) {
- direct_capture = false;
+ self->params.direct_capture = false;
fprintf(stderr, "Warning: \"screen-direct\" has temporary been disabled as it causes stuttering with driver versions >= 515.57 and < 520.56. Please update your driver if possible. Capturing \"screen\" instead.\n");
}
@@ -340,78 +328,63 @@ static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec
/*
if(direct_capture) {
if(version_at_least(driver_major_version, driver_minor_version, 515, 57))
- supports_direct_cursor = true;
+ self->supports_direct_cursor = true;
else
fprintf(stderr, "Info: capturing \"screen-direct\" but driver version appears to be less than 515.57. Disabling capture of cursor. Please update your driver if you want to capture your cursor or record \"screen\" instead.\n");
}
*/
}
- if(gsr_capture_nvfbc_setup_handle(cap_nvfbc) != 0) {
+ if(gsr_capture_nvfbc_setup_handle(self) != 0) {
goto error_cleanup;
}
- if(gsr_capture_nvfbc_setup_session(cap_nvfbc) != 0) {
+ if(gsr_capture_nvfbc_setup_session(self) != 0) {
goto error_cleanup;
}
- if(cap_nvfbc->capture_region) {
- video_codec_context->width = cap_nvfbc->width & ~1;
- video_codec_context->height = cap_nvfbc->height & ~1;
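+ // FFALIGN rounds up to the next even value (4:2:0 encoding needs even dimensions), unlike the old "& ~1" which rounded down.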
+ if(self->capture_region) {
+ video_codec_context->width = FFALIGN(self->width, 2);
+ video_codec_context->height = FFALIGN(self->height, 2);
} else {
- video_codec_context->width = cap_nvfbc->tracking_width & ~1;
- video_codec_context->height = cap_nvfbc->tracking_height & ~1;
+ video_codec_context->width = FFALIGN(self->tracking_width, 2);
+ video_codec_context->height = FFALIGN(self->tracking_height, 2);
}
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
- if(!cuda_create_codec_context(cap_nvfbc->cuda.cu_ctx, video_codec_context, video_codec_context->width, video_codec_context->height, false, &cap_nvfbc->cuda_stream))
- goto error_cleanup;
-
- gsr_cuda_context cuda_context = {
- .cuda = &cap_nvfbc->cuda,
- .cuda_graphics_resources = cap_nvfbc->cuda_graphics_resources,
- .mapped_arrays = cap_nvfbc->mapped_arrays
- };
-
- // TODO: Remove this, it creates shit we dont need
- if(!gsr_capture_base_setup_cuda_textures(&cap_nvfbc->base, frame, &cuda_context, cap_nvfbc->params.color_range, GSR_SOURCE_COLOR_BGR, cap_nvfbc->params.hdr)) {
- goto error_cleanup;
- }
/* Disable vsync */
- set_vertical_sync_enabled(cap_nvfbc->params.egl, 0);
+ set_vertical_sync_enabled(self->params.egl, 0);
return 0;
error_cleanup:
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
- gsr_capture_base_stop(&cap_nvfbc->base);
- gsr_cuda_unload(&cap_nvfbc->cuda);
+ gsr_capture_nvfbc_stop(self);
return -1;
}
-static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame) {
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_capture_nvfbc *self = cap->priv;
const double nvfbc_recreate_retry_time_seconds = 1.0;
- if(cap_nvfbc->nvfbc_needs_recreate) {
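+ // A previous grab failed: retry creating the NvFBC handle and session, but at most once per retry interval.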
+ if(self->nvfbc_needs_recreate) {
const double now = clock_get_monotonic_seconds();
- if(now - cap_nvfbc->nvfbc_dead_start >= nvfbc_recreate_retry_time_seconds) {
- cap_nvfbc->nvfbc_dead_start = now;
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+ if(now - self->nvfbc_dead_start >= nvfbc_recreate_retry_time_seconds) {
+ self->nvfbc_dead_start = now;
+ gsr_capture_nvfbc_destroy_session_and_handle(self);
- if(gsr_capture_nvfbc_setup_handle(cap_nvfbc) != 0) {
+ if(gsr_capture_nvfbc_setup_handle(self) != 0) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc handle, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
return -1;
}
-
- if(gsr_capture_nvfbc_setup_session(cap_nvfbc) != 0) {
+
+ if(gsr_capture_nvfbc_setup_session(self) != 0) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc session, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
return -1;
}
- cap_nvfbc->nvfbc_needs_recreate = false;
+ self->nvfbc_needs_recreate = false;
} else {
return 0;
}
@@ -427,65 +400,38 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame) {
grab_params.pFrameGrabInfo = &frame_info;
grab_params.dwTimeoutMs = 0;
- NVFBCSTATUS status = cap_nvfbc->nv_fbc_function_list.nvFBCToGLGrabFrame(cap_nvfbc->nv_fbc_handle, &grab_params);
+ NVFBCSTATUS status = self->nv_fbc_function_list.nvFBCToGLGrabFrame(self->nv_fbc_handle, &grab_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed: %s (%d), recreating session after %f second(s)\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle), status, nvfbc_recreate_retry_time_seconds);
- cap_nvfbc->nvfbc_needs_recreate = true;
- cap_nvfbc->nvfbc_dead_start = clock_get_monotonic_seconds();
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed: %s (%d), recreating session after %f second(s)\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle), status, nvfbc_recreate_retry_time_seconds);
+ self->nvfbc_needs_recreate = true;
+ self->nvfbc_dead_start = clock_get_monotonic_seconds();
return 0;
}
- //cap_nvfbc->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
- cap_nvfbc->params.egl->glClear(0);
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
- gsr_color_conversion_draw(&cap_nvfbc->base.color_conversion, cap_nvfbc->setup_params.dwTextures[grab_params.dwTextureIndex],
+ gsr_color_conversion_draw(color_conversion, self->setup_params.dwTextures[grab_params.dwTextureIndex],
(vec2i){0, 0}, (vec2i){frame->width, frame->height},
(vec2i){0, 0}, (vec2i){frame->width, frame->height},
0.0f, false);
- cap_nvfbc->params.egl->glXSwapBuffers(cap_nvfbc->params.egl->x11.dpy, cap_nvfbc->params.egl->x11.window);
-
- // TODO: HDR is broken
- const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
- for(int i = 0; i < 2; ++i) {
- CUDA_MEMCPY2D memcpy_struct;
- memcpy_struct.srcXInBytes = 0;
- memcpy_struct.srcY = 0;
- memcpy_struct.srcMemoryType = CU_MEMORYTYPE_ARRAY;
-
- memcpy_struct.dstXInBytes = 0;
- memcpy_struct.dstY = 0;
- memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;
-
- memcpy_struct.srcArray = cap_nvfbc->mapped_arrays[i];
- memcpy_struct.srcPitch = frame->width / div[i];
- memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
- memcpy_struct.dstPitch = frame->linesize[i];
- memcpy_struct.WidthInBytes = frame->width * (cap_nvfbc->params.hdr ? 2 : 1);
- memcpy_struct.Height = frame->height / div[i];
- // TODO: Remove this copy if possible
- cap_nvfbc->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, cap_nvfbc->cuda_stream);
- }
-
- // TODO: needed?
- cap_nvfbc->cuda.cuStreamSynchronize(cap_nvfbc->cuda_stream);
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
return 0;
}
+static gsr_source_color gsr_capture_nvfbc_get_source_color(gsr_capture *cap) {
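+ // NvFBC is set up with NVFBC_BUFFER_FORMAT_BGRA, so the captured texture is BGR(A).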
+ (void)cap;
+ return GSR_SOURCE_COLOR_BGR;
+}
+
static void gsr_capture_nvfbc_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
(void)video_codec_context;
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
- if(cap_nvfbc) {
- gsr_capture_base_stop(&cap_nvfbc->base);
- gsr_cuda_unload(&cap_nvfbc->cuda);
- dlclose(cap_nvfbc->library);
- free((void*)cap_nvfbc->params.display_to_capture);
- cap_nvfbc->params.display_to_capture = NULL;
- free(cap->priv);
- cap->priv = NULL;
- }
+ gsr_capture_nvfbc *self = cap->priv;
+ gsr_capture_nvfbc_stop(self);
+ free(cap->priv);
free(cap);
}
@@ -520,13 +466,14 @@ gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params) {
cap_nvfbc->params = *params;
cap_nvfbc->params.display_to_capture = display_to_capture;
cap_nvfbc->params.fps = max_int(cap_nvfbc->params.fps, 1);
-
+
*cap = (gsr_capture) {
.start = gsr_capture_nvfbc_start,
.tick = NULL,
.should_stop = NULL,
.capture = gsr_capture_nvfbc_capture,
- .capture_end = NULL,
+ .get_source_color = gsr_capture_nvfbc_get_source_color,
+ .uses_external_image = NULL,
.destroy = gsr_capture_nvfbc_destroy,
.priv = cap_nvfbc
};
diff --git a/src/capture/portal.c b/src/capture/portal.c
new file mode 100644
index 0000000..9ab7e8b
--- /dev/null
+++ b/src/capture/portal.c
@@ -0,0 +1,458 @@
+#include "../../include/capture/portal.h"
+#include "../../include/color_conversion.h"
+#include "../../include/egl.h"
+#include "../../include/utils.h"
+#include "../../include/dbus.h"
+#include "../../include/pipewire.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+
+#include <libavcodec/avcodec.h>
+
+typedef struct {
+ gsr_capture_portal_params params;
+
+ gsr_texture_map texture_map;
+
+ gsr_dbus dbus;
+ char *session_handle;
+
+ gsr_pipewire pipewire;
+ vec2i capture_size;
+ gsr_pipewire_dmabuf_data dmabuf_data[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ int num_dmabuf_data;
+
+ AVCodecContext *video_codec_context;
+ bool fast_path_failed;
+} gsr_capture_portal;
+
+static void gsr_capture_portal_cleanup_plane_fds(gsr_capture_portal *self) {
+ for(int i = 0; i < self->num_dmabuf_data; ++i) {
+ if(self->dmabuf_data[i].fd > 0) {
+ close(self->dmabuf_data[i].fd);
+ self->dmabuf_data[i].fd = 0;
+ }
+ }
+ self->num_dmabuf_data = 0;
+}
+
+static void gsr_capture_portal_stop(gsr_capture_portal *self) {
+ if(self->texture_map.texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->texture_map.texture_id);
+ self->texture_map.texture_id = 0;
+ }
+
+ if(self->texture_map.external_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->texture_map.external_texture_id);
+ self->texture_map.external_texture_id = 0;
+ }
+
+ if(self->texture_map.cursor_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->texture_map.cursor_texture_id);
+ self->texture_map.cursor_texture_id = 0;
+ }
+
+ gsr_capture_portal_cleanup_plane_fds(self);
+
+ gsr_pipewire_deinit(&self->pipewire);
+
+ if(self->session_handle) {
+ free(self->session_handle);
+ self->session_handle = NULL;
+ }
+
+ gsr_dbus_deinit(&self->dbus);
+}
+
+static void gsr_capture_portal_create_input_textures(gsr_capture_portal *self) {
+ self->params.egl->glGenTextures(1, &self->texture_map.texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->texture_map.texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ self->params.egl->glGenTextures(1, &self->texture_map.external_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->texture_map.external_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
+
+ self->params.egl->glGenTextures(1, &self->texture_map.cursor_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->texture_map.cursor_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+}
+
+static void get_default_gpu_screen_recorder_restore_token_path(char *buffer, size_t buffer_size) {
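+ // $XDG_CONFIG_HOME if set, otherwise $HOME/.config (falling back to /tmp when HOME is missing).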
+ const char *xdg_config_home = getenv("XDG_CONFIG_HOME");
+ if(xdg_config_home) {
+ snprintf(buffer, buffer_size, "%s/gpu-screen-recorder/restore_token", xdg_config_home);
+ } else {
+ const char *home = getenv("HOME");
+ if(!home)
+ home = "/tmp";
+ snprintf(buffer, buffer_size, "%s/.config/gpu-screen-recorder/restore_token", home);
+ }
+}
+
+static bool create_directory_to_file(const char *filepath) {
+ char dir[PATH_MAX];
+ dir[0] = '\0';
+
+ const char *split = strrchr(filepath, '/');
+ if(!split) /* Assuming it's the current directory (for example if filepath is "restore_token"), which doesn't need to be created */
+ return true;
+
+ snprintf(dir, sizeof(dir), "%.*s", (int)(split - filepath), filepath);
+ if(create_directory_recursive(dir) != 0) {
+ fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to create directory (%s) for restore token\n", dir);
+ return false;
+ }
+ return true;
+}
+
+static void gsr_capture_portal_save_restore_token(const char *restore_token, const char *portal_session_token_filepath) {
+ char restore_token_path[PATH_MAX];
+ restore_token_path[0] = '\0';
+ if(portal_session_token_filepath)
+ snprintf(restore_token_path, sizeof(restore_token_path), "%s", portal_session_token_filepath);
+ else
+ get_default_gpu_screen_recorder_restore_token_path(restore_token_path, sizeof(restore_token_path));
+
+ if(!create_directory_to_file(restore_token_path))
+ return;
+
+ FILE *f = fopen(restore_token_path, "wb");
+ if(!f) {
+ fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to create restore token file (%s)\n", restore_token_path);
+ return;
+ }
+
+ const int restore_token_len = strlen(restore_token);
+ if((long)fwrite(restore_token, 1, restore_token_len, f) != restore_token_len) {
+ fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to write restore token to file (%s)\n", restore_token_path);
+ fclose(f);
+ return;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_save_restore_token: saved restore token to cache (%s)\n", restore_token);
+ fclose(f);
+}
+
+static void gsr_capture_portal_get_restore_token_from_cache(char *buffer, size_t buffer_size, const char *portal_session_token_filepath) {
+ assert(buffer_size > 0);
+ buffer[0] = '\0';
+
+ char restore_token_path[PATH_MAX];
+ restore_token_path[0] = '\0';
+ if(portal_session_token_filepath)
+ snprintf(restore_token_path, sizeof(restore_token_path), "%s", portal_session_token_filepath);
+ else
+ get_default_gpu_screen_recorder_restore_token_path(restore_token_path, sizeof(restore_token_path));
+
+ FILE *f = fopen(restore_token_path, "rb");
+ if(!f) {
+ fprintf(stderr, "gsr info: gsr_capture_portal_get_restore_token_from_cache: no restore token found in cache or failed to load (%s)\n", restore_token_path);
+ return;
+ }
+
+ fseek(f, 0, SEEK_END);
+ long file_size = ftell(f);
+ fseek(f, 0, SEEK_SET);
+
+ if(file_size > 0 && file_size < 1024 && file_size < (long)buffer_size && (long)fread(buffer, 1, file_size, f) != file_size) {
+ buffer[0] = '\0';
+ fprintf(stderr, "gsr warning: gsr_capture_portal_get_restore_token_from_cache: failed to read restore token (%s)\n", restore_token_path);
+ fclose(f);
+ return;
+ }
+
+ if(file_size > 0 && file_size < (long)buffer_size)
+ buffer[file_size] = '\0';
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_get_restore_token_from_cache: read cached restore token (%s)\n", buffer);
+ fclose(f);
+}
+
+static int gsr_capture_portal_setup_dbus(gsr_capture_portal *self, int *pipewire_fd, uint32_t *pipewire_node) {
+ *pipewire_fd = 0;
+ *pipewire_node = 0;
+ int response_status = 0;
+
+ char restore_token[1024];
+ restore_token[0] = '\0';
+ if(self->params.restore_portal_session)
+ gsr_capture_portal_get_restore_token_from_cache(restore_token, sizeof(restore_token), self->params.portal_session_token_filepath);
+
+ if(!gsr_dbus_init(&self->dbus, restore_token))
+ return -1;
+
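+ // Standard xdg-desktop-portal ScreenCast sequence: CreateSession -> SelectSources -> Start -> OpenPipeWireRemote.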
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: CreateSession\n");
+ response_status = gsr_dbus_screencast_create_session(&self->dbus, &self->session_handle);
+ if(response_status != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: CreateSession failed\n");
+ return response_status;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: SelectSources\n");
+ response_status = gsr_dbus_screencast_select_sources(&self->dbus, self->session_handle, GSR_PORTAL_CAPTURE_TYPE_ALL, self->params.record_cursor ? GSR_PORTAL_CURSOR_MODE_EMBEDDED : GSR_PORTAL_CURSOR_MODE_HIDDEN);
+ if(response_status != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: SelectSources failed\n");
+ return response_status;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: Start\n");
+ response_status = gsr_dbus_screencast_start(&self->dbus, self->session_handle, pipewire_node);
+ if(response_status != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: Start failed\n");
+ return response_status;
+ }
+
+ const char *screencast_restore_token = gsr_dbus_screencast_get_restore_token(&self->dbus);
+ if(screencast_restore_token)
+ gsr_capture_portal_save_restore_token(screencast_restore_token, self->params.portal_session_token_filepath);
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: OpenPipeWireRemote\n");
+ if(!gsr_dbus_screencast_open_pipewire_remote(&self->dbus, self->session_handle, pipewire_fd)) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: OpenPipeWireRemote failed\n");
+ return -1;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: desktop portal setup finished\n");
+ return 0;
+}
+
+static bool gsr_capture_portal_get_frame_dimensions(gsr_capture_portal *self) {
+ gsr_pipewire_region region = {0, 0, 0, 0};
+ gsr_pipewire_region cursor_region = {0, 0, 0, 0};
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: waiting for pipewire negotiation\n");
+
+ const double start_time = clock_get_monotonic_seconds();
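+ // Poll until pipewire has mapped the first buffer so the negotiated stream size is known (up to 5 seconds).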
+ while(clock_get_monotonic_seconds() - start_time < 5.0) {
+ bool uses_external_image = false;
+ uint32_t fourcc = 0;
+ uint64_t modifiers = 0;
+ if(gsr_pipewire_map_texture(&self->pipewire, self->texture_map, &region, &cursor_region, self->dmabuf_data, &self->num_dmabuf_data, &fourcc, &modifiers, &uses_external_image)) {
+ gsr_capture_portal_cleanup_plane_fds(self);
+ self->capture_size.x = region.width;
+ self->capture_size.y = region.height;
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: pipewire negotiation finished\n");
+ return true;
+ }
+ usleep(30 * 1000); /* 30 milliseconds */
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: timed out waiting for pipewire negotiation (5 seconds)\n");
+ return false;
+}
+
+static int gsr_capture_portal_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+ gsr_capture_portal *self = cap->priv;
+
+ gsr_capture_portal_create_input_textures(self);
+
+ int pipewire_fd = 0;
+ uint32_t pipewire_node = 0;
+ const int response_status = gsr_capture_portal_setup_dbus(self, &pipewire_fd, &pipewire_node);
+ if(response_status != 0) {
+ gsr_capture_portal_stop(self);
+ // Response status values:
+ // 0: Success, the request is carried out
+ // 1: The user cancelled the interaction
+ // 2: The user interaction was ended in some other way
+ // Response status value 2 usually happens if there was some kind of error in the desktop portal on the system
+ if(response_status == 2) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_start: desktop portal capture failed. Either you Wayland compositor doesn't support desktop portal capture or it's incorrectly setup on your system\n");
+ return 50;
+ } else if(response_status == 1) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_start: desktop portal capture failed. It seems like desktop portal capture was canceled by the user.\n");
+ return 60;
+ } else {
+ return -1;
+ }
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: setting up pipewire\n");
+ /* TODO: support hdr when pipewire supports it */
+ /* gsr_pipewire closes the pipewire fd, even on failure */
+ if(!gsr_pipewire_init(&self->pipewire, pipewire_fd, pipewire_node, video_codec_context->framerate.num, self->params.record_cursor, self->params.egl)) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_start: failed to setup pipewire with fd: %d, node: %" PRIu32 "\n", pipewire_fd, pipewire_node);
+ gsr_capture_portal_stop(self);
+ return -1;
+ }
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: pipewire setup finished\n");
+
+ if(!gsr_capture_portal_get_frame_dimensions(self)) {
+ gsr_capture_portal_stop(self);
+ return -1;
+ }
+
+ /* Disable vsync */
+ self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
+
+ video_codec_context->width = FFALIGN(self->capture_size.x, 2);
+ video_codec_context->height = FFALIGN(self->capture_size.y, 2);
+
+ frame->width = video_codec_context->width;
+ frame->height = video_codec_context->height;
+
+ self->video_codec_context = video_codec_context;
+ return 0;
+}
+
+static int max_int(int a, int b) {
+ return a > b ? a : b;
+}
+
+static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ (void)frame;
+ (void)color_conversion;
+ gsr_capture_portal *self = cap->priv;
+
+ /* TODO: Handle formats other than RGB(a) */
+ gsr_pipewire_region region = {0, 0, 0, 0};
+ gsr_pipewire_region cursor_region = {0, 0, 0, 0};
+ uint32_t pipewire_fourcc = 0;
+ uint64_t pipewire_modifiers = 0;
+ bool using_external_image = false;
+ if(gsr_pipewire_map_texture(&self->pipewire, self->texture_map, &region, &cursor_region, self->dmabuf_data, &self->num_dmabuf_data, &pipewire_fourcc, &pipewire_modifiers, &using_external_image)) {
+ if(region.width != self->capture_size.x || region.height != self->capture_size.y) {
+ self->capture_size.x = region.width;
+ self->capture_size.y = region.height;
+ gsr_color_conversion_clear(color_conversion);
+ }
+ } else {
+ return 0;
+ }
+
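+ // Center the captured image inside the video frame (the frame may be larger than the capture size).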
+ const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) };
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ // TODO: Handle region crop
+
+ /* Fast opengl free path */
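+ // Copy the dmabuf planes straight into the VAAPI surface when possible; if that ever fails, fall back to the OpenGL draw below for the rest of the session.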
+ if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ int fds[4];
+ uint32_t offsets[4];
+ uint32_t pitches[4];
+ uint64_t modifiers[4];
+ for(int i = 0; i < self->num_dmabuf_data; ++i) {
+ fds[i] = self->dmabuf_data[i].fd;
+ offsets[i] = self->dmabuf_data[i].offset;
+ pitches[i] = self->dmabuf_data[i].stride;
+ modifiers[i] = pipewire_modifiers;
+ }
+ if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){region.x, region.y}, self->capture_size, target_pos, self->capture_size, pipewire_fourcc, self->capture_size, fds, offsets, pitches, modifiers, self->num_dmabuf_data)) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
+ self->fast_path_failed = true;
+ }
+ } else {
+ self->fast_path_failed = true;
+ }
+
+ if(self->fast_path_failed) {
+ gsr_color_conversion_draw(color_conversion, using_external_image ? self->texture_map.external_texture_id : self->texture_map.texture_id,
+ target_pos, self->capture_size,
+ (vec2i){region.x, region.y}, self->capture_size,
+ 0.0f, using_external_image);
+ }
+
+ if(self->params.record_cursor) {
+ const vec2i cursor_pos = {
+ target_pos.x + cursor_region.x,
+ target_pos.y + cursor_region.y
+ };
+
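+ // Clip cursor drawing to the captured region so it never spills outside the frame area used for this capture.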
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
+ gsr_color_conversion_draw(color_conversion, self->texture_map.cursor_texture_id,
+ (vec2i){cursor_pos.x, cursor_pos.y}, (vec2i){cursor_region.width, cursor_region.height},
+ (vec2i){0, 0}, (vec2i){cursor_region.width, cursor_region.height},
+ 0.0f, false);
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
+ }
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ gsr_capture_portal_cleanup_plane_fds(self);
+
+ return 0;
+}
+
+static gsr_source_color gsr_capture_portal_get_source_color(gsr_capture *cap) {
+ (void)cap;
+ return GSR_SOURCE_COLOR_RGB;
+}
+
+static bool gsr_capture_portal_uses_external_image(gsr_capture *cap) {
+ (void)cap;
+ return true;
+}
+
+static bool gsr_capture_portal_is_damaged(gsr_capture *cap) {
+ gsr_capture_portal *self = cap->priv;
+ return gsr_pipewire_is_damaged(&self->pipewire);
+}
+
+static void gsr_capture_portal_clear_damage(gsr_capture *cap) {
+ gsr_capture_portal *self = cap->priv;
+ gsr_pipewire_clear_damage(&self->pipewire);
+}
+
+static void gsr_capture_portal_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+ (void)video_codec_context;
+ gsr_capture_portal *self = cap->priv;
+ if(cap->priv) {
+ gsr_capture_portal_stop(self);
+ free(cap->priv);
+ cap->priv = NULL;
+ }
+ free(cap);
+}
+
+gsr_capture* gsr_capture_portal_create(const gsr_capture_portal_params *params) {
+ if(!params) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_create params is NULL\n");
+ return NULL;
+ }
+
+ gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+ if(!cap)
+ return NULL;
+
+ gsr_capture_portal *cap_portal = calloc(1, sizeof(gsr_capture_portal));
+ if(!cap_portal) {
+ free(cap);
+ return NULL;
+ }
+
+ cap_portal->params = *params;
+
+ *cap = (gsr_capture) {
+ .start = gsr_capture_portal_start,
+ .tick = NULL,
+ .should_stop = NULL,
+ .capture = gsr_capture_portal_capture,
+ .get_source_color = gsr_capture_portal_get_source_color,
+ .uses_external_image = gsr_capture_portal_uses_external_image,
+ .is_damaged = gsr_capture_portal_is_damaged,
+ .clear_damage = gsr_capture_portal_clear_damage,
+ .destroy = gsr_capture_portal_destroy,
+ .priv = cap_portal
+ };
+
+ return cap;
+}
diff --git a/src/capture/xcomposite.c b/src/capture/xcomposite.c
index 3240ed8..9e208d6 100644
--- a/src/capture/xcomposite.c
+++ b/src/capture/xcomposite.c
@@ -1,26 +1,51 @@
#include "../../include/capture/xcomposite.h"
#include "../../include/window_texture.h"
#include "../../include/utils.h"
+#include "../../include/cursor.h"
+#include "../../include/color_conversion.h"
+
#include <stdlib.h>
#include <stdio.h>
-#include <unistd.h>
+#include <string.h>
#include <assert.h>
+
#include <X11/Xlib.h>
-#include <X11/extensions/Xdamage.h>
-#include <libavutil/hwcontext.h>
-#include <libavutil/hwcontext.h>
+
#include <libavutil/frame.h>
#include <libavcodec/avcodec.h>
-#include <va/va.h>
-#include <va/va_drmcommon.h>
-static int max_int(int a, int b) {
- return a > b ? a : b;
+typedef struct {
+ gsr_capture_xcomposite_params params;
+
+ bool should_stop;
+ bool stop_is_error;
+ bool window_resized;
+ bool follow_focused_initialized;
+ bool init_new_window;
+
+ Window window;
+ vec2i window_size;
+ vec2i texture_size;
+ double window_resize_timer;
+
+ WindowTexture window_texture;
+ AVCodecContext *video_codec_context;
+
+ Atom net_active_window_atom;
+
+ gsr_cursor cursor;
+
+ bool clear_background;
+ bool fast_path_failed;
+} gsr_capture_xcomposite;
+
+static void gsr_capture_xcomposite_stop(gsr_capture_xcomposite *self) {
+ window_texture_deinit(&self->window_texture);
+ gsr_cursor_deinit(&self->cursor);
}
-void gsr_capture_xcomposite_init(gsr_capture_xcomposite *self, const gsr_capture_xcomposite_params *params) {
- memset(self, 0, sizeof(*self));
- self->params = *params;
+static int max_int(int a, int b) {
+ return a > b ? a : b;
}
static Window get_focused_window(Display *display, Atom net_active_window_atom) {
@@ -37,26 +62,8 @@ static Window get_focused_window(Display *display, Atom net_active_window_atom)
return None;
}
-static void gsr_capture_xcomposite_setup_damage(gsr_capture_xcomposite *self, Window window) {
- if(self->damage_event == 0)
- return;
-
- if(self->damage) {
- XDamageDestroy(self->params.egl->x11.dpy, self->damage);
- self->damage = None;
- }
-
- self->damage = XDamageCreate(self->params.egl->x11.dpy, window, XDamageReportNonEmpty);
- if(self->damage) {
- XDamageSubtract(self->params.egl->x11.dpy, self->damage, None, None);
- } else {
- fprintf(stderr, "gsr warning: gsr_capture_xcomposite_setup_damage: XDamageCreate failed\n");
- }
-}
-
-int gsr_capture_xcomposite_start(gsr_capture_xcomposite *self, AVCodecContext *video_codec_context, AVFrame *frame) {
- self->base.video_codec_context = video_codec_context;
- self->base.egl = self->params.egl;
+static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+ gsr_capture_xcomposite *self = cap->priv;
if(self->params.follow_focused) {
self->net_active_window_atom = XInternAtom(self->params.egl->x11.dpy, "_NET_ACTIVE_WINDOW", False);
@@ -69,20 +76,6 @@ int gsr_capture_xcomposite_start(gsr_capture_xcomposite *self, AVCodecContext *v
self->window = self->params.window;
}
- if(self->params.track_damage) {
- if(!XDamageQueryExtension(self->params.egl->x11.dpy, &self->damage_event, &self->damage_error)) {
- fprintf(stderr, "gsr warning: gsr_capture_xcomposite_start: XDamage is not supported by your X11 server\n");
- self->damage_event = 0;
- self->damage_error = 0;
- }
- } else {
- self->damage_event = 0;
- self->damage_error = 0;
- }
-
- self->damaged = true;
- gsr_capture_xcomposite_setup_damage(self, self->window);
-
/* TODO: Do these in tick, and allow error if follow_focused */
XWindowAttributes attr;
@@ -100,16 +93,6 @@ int gsr_capture_xcomposite_start(gsr_capture_xcomposite *self, AVCodecContext *v
// TODO: Get select and add these on top of it and then restore at the end. Also do the same in other xcomposite
XSelectInput(self->params.egl->x11.dpy, self->window, StructureNotifyMask | ExposureMask);
- if(!self->params.egl->eglExportDMABUFImageQueryMESA) {
- fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: could not find eglExportDMABUFImageQueryMESA\n");
- return -1;
- }
-
- if(!self->params.egl->eglExportDMABUFImageMESA) {
- fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: could not find eglExportDMABUFImageMESA\n");
- return -1;
- }
-
/* Disable vsync */
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
if(window_texture_init(&self->window_texture, self->params.egl->x11.dpy, self->window, self->params.egl) != 0 && !self->params.follow_focused) {
@@ -135,109 +118,26 @@ int gsr_capture_xcomposite_start(gsr_capture_xcomposite *self, AVCodecContext *v
if(self->params.region_size.x > 0 && self->params.region_size.y > 0)
video_size = self->params.region_size;
- if(self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_HEVC) {
- // TODO: dont do this if using ffmpeg reports that this is not needed (AMD driver bug that was fixed recently)
- video_codec_context->width = FFALIGN(video_size.x, 64);
- video_codec_context->height = FFALIGN(video_size.y, 16);
- } else if(self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_AV1) {
- // TODO: Dont do this for VCN 5 and forward which should fix this hardware bug
- video_codec_context->width = FFALIGN(video_size.x, 64);
- // AMD driver has special case handling for 1080 height to set it to 1082 instead of 1088 (1080 aligned to 16).
- // TODO: Set height to 1082 in this case, but it wont work because it will be aligned to 1088.
- if(video_size.y == 1080) {
- video_codec_context->height = 1080;
- } else {
- video_codec_context->height = FFALIGN(video_size.y, 16);
- }
- } else {
- video_codec_context->width = FFALIGN(video_size.x, 2);
- video_codec_context->height = FFALIGN(video_size.y, 2);
- }
+ video_codec_context->width = FFALIGN(video_size.x, 2);
+ video_codec_context->height = FFALIGN(video_size.y, 2);
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
+ self->video_codec_context = video_codec_context;
self->window_resize_timer = clock_get_monotonic_seconds();
return 0;
}
-void gsr_capture_xcomposite_stop(gsr_capture_xcomposite *self) {
- if(self->damage) {
- XDamageDestroy(self->params.egl->x11.dpy, self->damage);
- self->damage = None;
- }
-
- window_texture_deinit(&self->window_texture);
- gsr_cursor_deinit(&self->cursor);
- gsr_capture_base_stop(&self->base);
-}
-
-void gsr_capture_xcomposite_tick(gsr_capture_xcomposite *self, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
- //self->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
- self->params.egl->glClear(0);
-
- bool init_new_window = false;
- while(XPending(self->params.egl->x11.dpy)) {
- XNextEvent(self->params.egl->x11.dpy, &self->xev);
-
- switch(self->xev.type) {
- case DestroyNotify: {
- /* Window died (when not following focused window), so we stop recording */
- if(!self->params.follow_focused && self->xev.xdestroywindow.window == self->window) {
- self->should_stop = true;
- self->stop_is_error = false;
- }
- break;
- }
- case Expose: {
- /* Requires window texture recreate */
- if(self->xev.xexpose.count == 0 && self->xev.xexpose.window == self->window) {
- self->window_resize_timer = clock_get_monotonic_seconds();
- self->window_resized = true;
- }
- break;
- }
- case ConfigureNotify: {
- /* Window resized */
- if(self->xev.xconfigure.window == self->window && (self->xev.xconfigure.width != self->window_size.x || self->xev.xconfigure.height != self->window_size.y)) {
- self->window_size.x = max_int(self->xev.xconfigure.width, 0);
- self->window_size.y = max_int(self->xev.xconfigure.height, 0);
- self->window_resize_timer = clock_get_monotonic_seconds();
- self->window_resized = true;
- }
- break;
- }
- case PropertyNotify: {
- /* Focused window changed */
- if(self->params.follow_focused && self->xev.xproperty.atom == self->net_active_window_atom) {
- init_new_window = true;
- }
- break;
- }
- }
-
- if(self->damage_event && self->xev.type == self->damage_event + XDamageNotify) {
- XDamageNotifyEvent *de = (XDamageNotifyEvent*)&self->xev;
- XserverRegion region = XFixesCreateRegion(self->params.egl->x11.dpy, NULL, 0);
- // Subtract all the damage, repairing the window
- XDamageSubtract(self->params.egl->x11.dpy, de->damage, None, region);
- XFixesDestroyRegion(self->params.egl->x11.dpy, region);
- self->damaged = true;
- }
-
- if(gsr_cursor_update(&self->cursor, &self->xev)) {
- if(self->params.record_cursor && self->cursor.visible) {
- self->damaged = true;
- }
- }
- }
+static void gsr_capture_xcomposite_tick(gsr_capture *cap) {
+ gsr_capture_xcomposite *self = cap->priv;
if(self->params.follow_focused && !self->follow_focused_initialized) {
- init_new_window = true;
+ self->init_new_window = true;
}
- if(init_new_window) {
+ if(self->init_new_window) {
+ self->init_new_window = false;
Window focused_window = get_focused_window(self->params.egl->x11.dpy, self->net_active_window_atom);
if(focused_window != self->window || !self->follow_focused_initialized) {
self->follow_focused_initialized = true;
@@ -253,11 +153,20 @@ void gsr_capture_xcomposite_tick(gsr_capture_xcomposite *self, AVCodecContext *v
self->window_size.x = max_int(attr.width, 0);
self->window_size.y = max_int(attr.height, 0);
- self->window_resized = true;
window_texture_deinit(&self->window_texture);
window_texture_init(&self->window_texture, self->params.egl->x11.dpy, self->window, self->params.egl); // TODO: Do not do the below window_texture_on_resize after this
- gsr_capture_xcomposite_setup_damage(self, self->window);
+
+ self->texture_size.x = 0;
+ self->texture_size.y = 0;
+
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, window_texture_get_opengl_texture_id(&self->window_texture));
+ self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &self->texture_size.x);
+ self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &self->texture_size.y);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ self->window_resized = false;
+ self->clear_background = true;
}
}
@@ -280,20 +189,54 @@ void gsr_capture_xcomposite_tick(gsr_capture_xcomposite *self, AVCodecContext *v
self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &self->texture_size.y);
self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
- gsr_color_conversion_clear(&self->base.color_conversion);
- gsr_capture_xcomposite_setup_damage(self, self->window);
+ self->clear_background = true;
}
}
-bool gsr_capture_xcomposite_is_damaged(gsr_capture_xcomposite *self) {
- return self->damage_event ? self->damaged : true;
-}
+static void gsr_capture_xcomposite_on_event(gsr_capture *cap, gsr_egl *egl) {
+ gsr_capture_xcomposite *self = cap->priv;
+ XEvent *xev = gsr_egl_get_event_data(egl);
+ switch(xev->type) {
+ case DestroyNotify: {
+ /* Window died (when not following focused window), so we stop recording */
+ if(!self->params.follow_focused && xev->xdestroywindow.window == self->window) {
+ self->should_stop = true;
+ self->stop_is_error = false;
+ }
+ break;
+ }
+ case Expose: {
+ /* Requires window texture recreate */
+ if(xev->xexpose.count == 0 && xev->xexpose.window == self->window) {
+ self->window_resize_timer = clock_get_monotonic_seconds();
+ self->window_resized = true;
+ }
+ break;
+ }
+ case ConfigureNotify: {
+ /* Window resized */
+ if(xev->xconfigure.window == self->window && (xev->xconfigure.width != self->window_size.x || xev->xconfigure.height != self->window_size.y)) {
+ self->window_size.x = max_int(xev->xconfigure.width, 0);
+ self->window_size.y = max_int(xev->xconfigure.height, 0);
+ self->window_resize_timer = clock_get_monotonic_seconds();
+ self->window_resized = true;
+ }
+ break;
+ }
+ case PropertyNotify: {
+ /* Focused window changed */
+ if(self->params.follow_focused && xev->xproperty.atom == self->net_active_window_atom) {
+ self->init_new_window = true;
+ }
+ break;
+ }
+ }
-void gsr_capture_xcomposite_clear_damage(gsr_capture_xcomposite *self) {
- self->damaged = false;
+ gsr_cursor_on_event(&self->cursor, xev);
}
-bool gsr_capture_xcomposite_should_stop(gsr_capture_xcomposite *self, bool *err) {
+static bool gsr_capture_xcomposite_should_stop(gsr_capture *cap, bool *err) {
+ gsr_capture_xcomposite *self = cap->priv;
if(self->should_stop) {
if(err)
*err = self->stop_is_error;
@@ -305,47 +248,112 @@ bool gsr_capture_xcomposite_should_stop(gsr_capture_xcomposite *self, bool *err)
return false;
}
-int gsr_capture_xcomposite_capture(gsr_capture_xcomposite *self, AVFrame *frame) {
+static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_capture_xcomposite *self = cap->priv;
(void)frame;
- const int target_x = max_int(0, frame->width / 2 - self->texture_size.x / 2);
- const int target_y = max_int(0, frame->height / 2 - self->texture_size.y / 2);
+ if(self->clear_background) {
+ self->clear_background = false;
+ gsr_color_conversion_clear(color_conversion);
+ }
- const vec2i cursor_pos = {
- target_x + self->cursor.position.x - self->cursor.hotspot.x,
- target_y + self->cursor.position.y - self->cursor.hotspot.y
- };
+ const vec2i target_pos = { max_int(0, frame->width / 2 - self->texture_size.x / 2), max_int(0, frame->height / 2 - self->texture_size.y / 2) };
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
- gsr_color_conversion_draw(&self->base.color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
- (vec2i){target_x, target_y}, self->texture_size,
- (vec2i){0, 0}, self->texture_size,
- 0.0f, false);
+ /* Fast opengl free path */
+ if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ if(!vaapi_copy_egl_image_to_video_surface(self->params.egl, self->window_texture.image, (vec2i){0, 0}, self->texture_size, target_pos, self->texture_size, self->video_codec_context, frame)) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_capture: vaapi_copy_egl_image_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
+ self->fast_path_failed = true;
+ }
+ } else {
+ self->fast_path_failed = true;
+ }
+
+ if(self->fast_path_failed) {
+ gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
+ target_pos, self->texture_size,
+ (vec2i){0, 0}, self->texture_size,
+ 0.0f, false);
+ }
if(self->params.record_cursor && self->cursor.visible) {
gsr_cursor_tick(&self->cursor, self->window);
- const bool cursor_inside_window =
- cursor_pos.x + self->cursor.size.x >= target_x &&
- cursor_pos.x <= target_x + self->texture_size.x &&
- cursor_pos.y + self->cursor.size.y >= target_y &&
- cursor_pos.y <= target_y + self->texture_size.y;
+ const vec2i cursor_pos = {
+ target_pos.x + self->cursor.position.x - self->cursor.hotspot.x,
+ target_pos.y + self->cursor.position.y - self->cursor.hotspot.y
+ };
- if(cursor_inside_window) {
- self->base.egl->glEnable(GL_SCISSOR_TEST);
- self->base.egl->glScissor(target_x, target_y, self->texture_size.x, self->texture_size.y);
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->texture_size.x, self->texture_size.y);
- gsr_color_conversion_draw(&self->base.color_conversion, self->cursor.texture_id,
- cursor_pos, self->cursor.size,
- (vec2i){0, 0}, self->cursor.size,
- 0.0f, false);
+ gsr_color_conversion_draw(color_conversion, self->cursor.texture_id,
+ cursor_pos, self->cursor.size,
+ (vec2i){0, 0}, self->cursor.size,
+ 0.0f, false);
- self->base.egl->glDisable(GL_SCISSOR_TEST);
- }
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
}
- self->params.egl->eglSwapBuffers(self->params.egl->egl_display, self->params.egl->egl_surface);
- //self->params.egl->glFlush();
- //self->params.egl->glFinish();
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
return 0;
}
+
+static gsr_source_color gsr_capture_xcomposite_get_source_color(gsr_capture *cap) {
+ (void)cap;
+ return GSR_SOURCE_COLOR_RGB;
+}
+
+static uint64_t gsr_capture_xcomposite_get_window_id(gsr_capture *cap) {
+ gsr_capture_xcomposite *self = cap->priv;
+ return self->window;
+}
+
+static void gsr_capture_xcomposite_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+ (void)video_codec_context;
+ if(cap->priv) {
+ gsr_capture_xcomposite_stop(cap->priv);
+ free(cap->priv);
+ cap->priv = NULL;
+ }
+ free(cap);
+}
+
+gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *params) {
+ if(!params) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_create params is NULL\n");
+ return NULL;
+ }
+
+ gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+ if(!cap)
+ return NULL;
+
+ gsr_capture_xcomposite *cap_xcomp = calloc(1, sizeof(gsr_capture_xcomposite));
+ if(!cap_xcomp) {
+ free(cap);
+ return NULL;
+ }
+
+ cap_xcomp->params = *params;
+
+ *cap = (gsr_capture) {
+ .start = gsr_capture_xcomposite_start,
+ .on_event = gsr_capture_xcomposite_on_event,
+ .tick = gsr_capture_xcomposite_tick,
+ .should_stop = gsr_capture_xcomposite_should_stop,
+ .capture = gsr_capture_xcomposite_capture,
+ .get_source_color = gsr_capture_xcomposite_get_source_color,
+ .uses_external_image = NULL,
+ .get_window_id = gsr_capture_xcomposite_get_window_id,
+ .destroy = gsr_capture_xcomposite_destroy,
+ .priv = cap_xcomp
+ };
+
+ return cap;
+}
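gsr_capture_xcomposite_create() copies the caller's params by value, so the params struct can live on the stack, and the returned capture owns its private state (freed together with the gsr_capture in .destroy). A usage sketch follows; the only field names used are the ones this file actually references, and the full gsr_capture_xcomposite_params definition in include/capture/xcomposite.h likely has more members (color range etc.).

/* Sketch only: construct an xcomposite window capture. */
#include "../../include/capture/xcomposite.h"
#include <X11/Xlib.h>
#include <stdbool.h>

static gsr_capture* create_window_capture(gsr_egl *egl, Window window) {
    gsr_capture_xcomposite_params params = {0};
    params.egl = egl;                   /* EGL/X11 state shared with the rest of gsr */
    params.window = window;             /* X11 window to record */
    params.follow_focused = false;      /* true: retarget when _NET_ACTIVE_WINDOW changes */
    params.record_cursor = true;        /* composite the X cursor on top of the window */
    params.region_size = (vec2i){0, 0}; /* non-zero: overrides the output video size */

    /* Returns NULL on allocation failure; release with cap->destroy(cap, video_codec_context). */
    return gsr_capture_xcomposite_create(&params);
}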
diff --git a/src/capture/xcomposite_cuda.c b/src/capture/xcomposite_cuda.c
deleted file mode 100644
index c436221..0000000
--- a/src/capture/xcomposite_cuda.c
+++ /dev/null
@@ -1,167 +0,0 @@
-#include "../../include/capture/xcomposite_cuda.h"
-#include "../../include/cuda.h"
-#include <stdio.h>
-#include <stdlib.h>
-#include <libavutil/frame.h>
-#include <libavcodec/avcodec.h>
-
-typedef struct {
- gsr_capture_xcomposite xcomposite;
- bool overclock;
-
- gsr_cuda cuda;
- CUgraphicsResource cuda_graphics_resources[2];
- CUarray mapped_arrays[2];
- CUstream cuda_stream;
-} gsr_capture_xcomposite_cuda;
-
-static void gsr_capture_xcomposite_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
-
-static int gsr_capture_xcomposite_cuda_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
-
- const int res = gsr_capture_xcomposite_start(&cap_xcomp->xcomposite, video_codec_context, frame);
- if(res != 0) {
- gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
- return res;
- }
-
- if(!gsr_cuda_load(&cap_xcomp->cuda, cap_xcomp->xcomposite.params.egl->x11.dpy, cap_xcomp->overclock)) {
- fprintf(stderr, "gsr error: gsr_capture_kms_cuda_start: failed to load cuda\n");
- gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
- return -1;
- }
-
- if(!cuda_create_codec_context(cap_xcomp->cuda.cu_ctx, video_codec_context, video_codec_context->width, video_codec_context->height, false, &cap_xcomp->cuda_stream)) {
- gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
- return -1;
- }
-
- gsr_cuda_context cuda_context = {
- .cuda = &cap_xcomp->cuda,
- .cuda_graphics_resources = cap_xcomp->cuda_graphics_resources,
- .mapped_arrays = cap_xcomp->mapped_arrays
- };
-
- if(!gsr_capture_base_setup_cuda_textures(&cap_xcomp->xcomposite.base, frame, &cuda_context, cap_xcomp->xcomposite.params.color_range, GSR_SOURCE_COLOR_RGB, false)) {
- gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
- return -1;
- }
-
- return 0;
-}
-
-static void gsr_capture_xcomposite_unload_cuda_graphics(gsr_capture_xcomposite_cuda *cap_xcomp) {
- if(cap_xcomp->cuda.cu_ctx) {
- for(int i = 0; i < 2; ++i) {
- if(cap_xcomp->cuda_graphics_resources[i]) {
- cap_xcomp->cuda.cuGraphicsUnmapResources(1, &cap_xcomp->cuda_graphics_resources[i], 0);
- cap_xcomp->cuda.cuGraphicsUnregisterResource(cap_xcomp->cuda_graphics_resources[i]);
- cap_xcomp->cuda_graphics_resources[i] = 0;
- }
- }
- }
-}
-
-static void gsr_capture_xcomposite_cuda_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
- gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
- gsr_capture_xcomposite_stop(&cap_xcomp->xcomposite);
- gsr_capture_xcomposite_unload_cuda_graphics(cap_xcomp);
- gsr_cuda_unload(&cap_xcomp->cuda);
-}
-
-static void gsr_capture_xcomposite_cuda_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
- gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
- gsr_capture_xcomposite_tick(&cap_xcomp->xcomposite, video_codec_context);
-}
-
-static bool gsr_capture_xcomposite_cuda_is_damaged(gsr_capture *cap) {
- gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
- return gsr_capture_xcomposite_is_damaged(&cap_xcomp->xcomposite);
-}
-
-static void gsr_capture_xcomposite_cuda_clear_damage(gsr_capture *cap) {
- gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
- gsr_capture_xcomposite_clear_damage(&cap_xcomp->xcomposite);
-}
-
-static bool gsr_capture_xcomposite_cuda_should_stop(gsr_capture *cap, bool *err) {
- gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
- return gsr_capture_xcomposite_should_stop(&cap_xcomp->xcomposite, err);
-}
-
-static int gsr_capture_xcomposite_cuda_capture(gsr_capture *cap, AVFrame *frame) {
- gsr_capture_xcomposite_cuda *cap_xcomp = cap->priv;
-
- gsr_capture_xcomposite_capture(&cap_xcomp->xcomposite, frame);
-
- const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
- for(int i = 0; i < 2; ++i) {
- CUDA_MEMCPY2D memcpy_struct;
- memcpy_struct.srcXInBytes = 0;
- memcpy_struct.srcY = 0;
- memcpy_struct.srcMemoryType = CU_MEMORYTYPE_ARRAY;
-
- memcpy_struct.dstXInBytes = 0;
- memcpy_struct.dstY = 0;
- memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;
-
- memcpy_struct.srcArray = cap_xcomp->mapped_arrays[i];
- memcpy_struct.srcPitch = frame->width / div[i];
- memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
- memcpy_struct.dstPitch = frame->linesize[i];
- memcpy_struct.WidthInBytes = frame->width;
- memcpy_struct.Height = frame->height / div[i];
- // TODO: Remove this copy if possible
- cap_xcomp->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, cap_xcomp->cuda_stream);
- }
-
- // TODO: needed?
- cap_xcomp->cuda.cuStreamSynchronize(cap_xcomp->cuda_stream);
-
- return 0;
-}
-
-static void gsr_capture_xcomposite_cuda_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
- if(cap->priv) {
- gsr_capture_xcomposite_cuda_stop(cap, video_codec_context);
- free(cap->priv);
- cap->priv = NULL;
- }
- free(cap);
-}
-
-gsr_capture* gsr_capture_xcomposite_cuda_create(const gsr_capture_xcomposite_cuda_params *params) {
- if(!params) {
- fprintf(stderr, "gsr error: gsr_capture_xcomposite_cuda_create params is NULL\n");
- return NULL;
- }
-
- gsr_capture *cap = calloc(1, sizeof(gsr_capture));
- if(!cap)
- return NULL;
-
- gsr_capture_xcomposite_cuda *cap_xcomp = calloc(1, sizeof(gsr_capture_xcomposite_cuda));
- if(!cap_xcomp) {
- free(cap);
- return NULL;
- }
-
- gsr_capture_xcomposite_init(&cap_xcomp->xcomposite, &params->base);
- cap_xcomp->overclock = params->overclock;
-
- *cap = (gsr_capture) {
- .start = gsr_capture_xcomposite_cuda_start,
- .tick = gsr_capture_xcomposite_cuda_tick,
- .is_damaged = gsr_capture_xcomposite_cuda_is_damaged,
- .clear_damage = gsr_capture_xcomposite_cuda_clear_damage,
- .should_stop = gsr_capture_xcomposite_cuda_should_stop,
- .capture = gsr_capture_xcomposite_cuda_capture,
- .capture_end = NULL,
- .destroy = gsr_capture_xcomposite_cuda_destroy,
- .priv = cap_xcomp
- };
-
- return cap;
-}
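The only CUDA-specific work in the deleted wrapper was copying the two NV12 planes from the mapped CUarrays into the encoder's device frame. For reference, that per-plane copy as a standalone CUDA driver API sketch; plane_arrays and stream are assumed to be supplied by the caller (they came from the removed gsr_cuda_context setup) and error handling is omitted.

/* Sketch only: copy NV12 planes from CUDA arrays into an AVFrame backed by device memory. */
#include <cuda.h>
#include <libavutil/frame.h>

static void copy_mapped_arrays_to_frame(const CUarray plane_arrays[2], AVFrame *frame, CUstream stream) {
    for(int i = 0; i < 2; ++i) {
        CUDA_MEMCPY2D copy = {0};                 /* zero the unused fields */
        copy.srcMemoryType = CU_MEMORYTYPE_ARRAY;
        copy.srcArray = plane_arrays[i];

        copy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
        copy.dstDevice = (CUdeviceptr)frame->data[i];
        copy.dstPitch = frame->linesize[i];

        copy.WidthInBytes = frame->width;                  /* NV12: both planes are |width| bytes wide */
        copy.Height = frame->height / (i == 0 ? 1 : 2);    /* interleaved chroma plane is half height */

        cuMemcpy2DAsync(&copy, stream);
    }
    cuStreamSynchronize(stream);                  /* ensure the copies finish before encoding */
}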
diff --git a/src/capture/xcomposite_vaapi.c b/src/capture/xcomposite_vaapi.c
deleted file mode 100644
index 3f27014..0000000
--- a/src/capture/xcomposite_vaapi.c
+++ /dev/null
@@ -1,121 +0,0 @@
-#include "../../include/capture/xcomposite_vaapi.h"
-#include "../../include/capture/xcomposite.h"
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <va/va.h>
-#include <va/va_drmcommon.h>
-#include <libavcodec/avcodec.h>
-
-typedef struct {
- gsr_capture_xcomposite xcomposite;
-
- VADisplay va_dpy;
- VADRMPRIMESurfaceDescriptor prime;
-} gsr_capture_xcomposite_vaapi;
-
-static void gsr_capture_xcomposite_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context);
-
-static int gsr_capture_xcomposite_vaapi_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
-
- const int res = gsr_capture_xcomposite_start(&cap_xcomp->xcomposite, video_codec_context, frame);
- if(res != 0) {
- gsr_capture_xcomposite_vaapi_stop(cap, video_codec_context);
- return res;
- }
-
- if(!drm_create_codec_context(cap_xcomp->xcomposite.params.egl->card_path, video_codec_context, video_codec_context->width, video_codec_context->height, false, &cap_xcomp->va_dpy)) {
- gsr_capture_xcomposite_vaapi_stop(cap, video_codec_context);
- return -1;
- }
-
- if(!gsr_capture_base_setup_vaapi_textures(&cap_xcomp->xcomposite.base, frame, cap_xcomp->va_dpy, &cap_xcomp->prime, cap_xcomp->xcomposite.params.color_range)) {
- gsr_capture_xcomposite_vaapi_stop(cap, video_codec_context);
- return -1;
- }
-
- return 0;
-}
-
-static void gsr_capture_xcomposite_vaapi_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
- gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
- gsr_capture_xcomposite_tick(&cap_xcomp->xcomposite, video_codec_context);
-}
-
-static bool gsr_capture_xcomposite_vaapi_is_damaged(gsr_capture *cap) {
- gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
- return gsr_capture_xcomposite_is_damaged(&cap_xcomp->xcomposite);
-}
-
-static void gsr_capture_xcomposite_vaapi_clear_damage(gsr_capture *cap) {
- gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
- gsr_capture_xcomposite_clear_damage(&cap_xcomp->xcomposite);
-}
-
-static bool gsr_capture_xcomposite_vaapi_should_stop(gsr_capture *cap, bool *err) {
- gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
- return gsr_capture_xcomposite_should_stop(&cap_xcomp->xcomposite, err);
-}
-
-static int gsr_capture_xcomposite_vaapi_capture(gsr_capture *cap, AVFrame *frame) {
- gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
- return gsr_capture_xcomposite_capture(&cap_xcomp->xcomposite, frame);
-}
-
-static void gsr_capture_xcomposite_vaapi_stop(gsr_capture *cap, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
- gsr_capture_xcomposite_vaapi *cap_xcomp = cap->priv;
-
- for(uint32_t i = 0; i < cap_xcomp->prime.num_objects; ++i) {
- if(cap_xcomp->prime.objects[i].fd > 0) {
- close(cap_xcomp->prime.objects[i].fd);
- cap_xcomp->prime.objects[i].fd = 0;
- }
- }
-
- gsr_capture_xcomposite_stop(&cap_xcomp->xcomposite);
-}
-
-static void gsr_capture_xcomposite_vaapi_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
- if(cap->priv) {
- gsr_capture_xcomposite_vaapi_stop(cap, video_codec_context);
- free(cap->priv);
- cap->priv = NULL;
- }
- free(cap);
-}
-
-gsr_capture* gsr_capture_xcomposite_vaapi_create(const gsr_capture_xcomposite_vaapi_params *params) {
- if(!params) {
- fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_create params is NULL\n");
- return NULL;
- }
-
- gsr_capture *cap = calloc(1, sizeof(gsr_capture));
- if(!cap)
- return NULL;
-
- gsr_capture_xcomposite_vaapi *cap_xcomp = calloc(1, sizeof(gsr_capture_xcomposite_vaapi));
- if(!cap_xcomp) {
- free(cap);
- return NULL;
- }
-
- gsr_capture_xcomposite_init(&cap_xcomp->xcomposite, &params->base);
-
- *cap = (gsr_capture) {
- .start = gsr_capture_xcomposite_vaapi_start,
- .tick = gsr_capture_xcomposite_vaapi_tick,
- .is_damaged = gsr_capture_xcomposite_vaapi_is_damaged,
- .clear_damage = gsr_capture_xcomposite_vaapi_clear_damage,
- .should_stop = gsr_capture_xcomposite_vaapi_should_stop,
- .capture = gsr_capture_xcomposite_vaapi_capture,
- .capture_end = NULL,
- .destroy = gsr_capture_xcomposite_vaapi_destroy,
- .priv = cap_xcomp
- };
-
- return cap;
-}