Diffstat (limited to 'src')
-rw-r--r--  src/capture/capture.c         |   22
-rw-r--r--  src/capture/kms.c             |  724
-rw-r--r--  src/capture/nvfbc.c           |  226
-rw-r--r--  src/capture/portal.c          |  458
-rw-r--r--  src/capture/xcomposite.c      |  242
-rw-r--r--  src/codec_query/nvenc.c       |  235
-rw-r--r--  src/codec_query/vaapi.c       |  203
-rw-r--r--  src/codec_query/vulkan.c      |  156
-rw-r--r--  src/cursor.c                  |   62
-rw-r--r--  src/damage.c                  |  324
-rw-r--r--  src/dbus.c                    |  876
-rw-r--r--  src/egl.c                     |  144
-rw-r--r--  src/encoder/video/cuda.c      |   67
-rw-r--r--  src/encoder/video/software.c  |   31
-rw-r--r--  src/encoder/video/vaapi.c     |  108
-rw-r--r--  src/encoder/video/video.c     |    4
-rw-r--r--  src/encoder/video/vulkan.c    |  313
-rw-r--r--  src/main.cpp                  | 2359
-rw-r--r--  src/overclock.c               |    4
-rw-r--r--  src/pipewire.c                |  788
-rw-r--r--  src/sound.cpp                 |   66
-rw-r--r--  src/utils.c                   |  452
-rw-r--r--  src/window_texture.c          |   12
23 files changed, 6319 insertions, 1557 deletions
diff --git a/src/capture/capture.c b/src/capture/capture.c
index 40507bf..ec10854 100644
--- a/src/capture/capture.c
+++ b/src/capture/capture.c
@@ -10,10 +10,15 @@ int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVF
return res;
}
-void gsr_capture_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
+void gsr_capture_tick(gsr_capture *cap) {
assert(cap->started);
if(cap->tick)
- cap->tick(cap, video_codec_context);
+ cap->tick(cap);
+}
+
+void gsr_capture_on_event(gsr_capture *cap, gsr_egl *egl) {
+ if(cap->on_event)
+ cap->on_event(cap, egl);
}
bool gsr_capture_should_stop(gsr_capture *cap, bool *err) {
@@ -29,12 +34,6 @@ int gsr_capture_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *
return cap->capture(cap, frame, color_conversion);
}
-void gsr_capture_capture_end(gsr_capture *cap, AVFrame *frame) {
- assert(cap->started);
- if(cap->capture_end)
- cap->capture_end(cap, frame);
-}
-
gsr_source_color gsr_capture_get_source_color(gsr_capture *cap) {
return cap->get_source_color(cap);
}
@@ -46,6 +45,13 @@ bool gsr_capture_uses_external_image(gsr_capture *cap) {
return false;
}
+bool gsr_capture_set_hdr_metadata(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata) {
+ if(cap->set_hdr_metadata)
+ return cap->set_hdr_metadata(cap, mastering_display_metadata, light_metadata);
+ else
+ return false;
+}
+
void gsr_capture_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
cap->destroy(cap, video_codec_context);
}
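
The new gsr_capture_set_hdr_metadata callback inverts the old flow: rather than the backend attaching FFmpeg side data to the frame itself (see the code removed from kms.c below), the caller now allocates the side data and asks the backend to fill it. A minimal caller-side sketch, assuming the recorder does this once HDR recording starts; the real call site is in src/main.cpp (whose diff is not shown here), so the control flow around it is an assumption:

    /* Hypothetical caller-side usage of the new optional callback.
       av_mastering_display_metadata_create_side_data() and
       av_content_light_metadata_create_side_data() are real FFmpeg APIs
       (libavutil/mastering_display_metadata.h); everything else is a sketch. */
    AVMasteringDisplayMetadata *mdm = av_mastering_display_metadata_create_side_data(frame);
    AVContentLightMetadata *clm = av_content_light_metadata_create_side_data(frame);
    if(!mdm || !clm || !gsr_capture_set_hdr_metadata(capture, mdm, clm)) {
        /* Backend has no HDR metadata yet, or does not implement the callback;
           the side data stays zeroed until a later attempt succeeds. */
    }

Because the callback is optional (a NULL function pointer simply returns false), backends that cannot provide HDR metadata need no changes.
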
diff --git a/src/capture/kms.c b/src/capture/kms.c
index a9ce73c..8b16ec9 100644
--- a/src/capture/kms.c
+++ b/src/capture/kms.c
@@ -1,14 +1,21 @@
#include "../../include/capture/kms.h"
#include "../../include/utils.h"
#include "../../include/color_conversion.h"
+#include "../../include/cursor.h"
#include "../../kms/client/kms_client.h"
+#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
+#include <fcntl.h>
+
+#include <xf86drm.h>
+#include <libdrm/drm_fourcc.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mastering_display_metadata.h>
+#include <libavformat/avformat.h>
#define HDMI_STATIC_METADATA_TYPE1 0
#define HDMI_EOTF_SMPTE_ST2084 2
@@ -22,9 +29,6 @@ typedef struct {
typedef struct {
gsr_capture_kms_params params;
-
- bool should_stop;
- bool stop_is_error;
gsr_kms_client kms_client;
gsr_kms_response kms_response;
@@ -33,62 +37,104 @@ typedef struct {
vec2i capture_size;
MonitorId monitor_id;
- AVMasteringDisplayMetadata *mastering_display_metadata;
- AVContentLightMetadata *light_metadata;
-
gsr_monitor_rotation monitor_rotation;
- unsigned int input_texture;
- unsigned int cursor_texture;
+ unsigned int input_texture_id;
+ unsigned int external_input_texture_id;
+ unsigned int cursor_texture_id;
+
+ bool no_modifiers_fallback;
+ bool external_texture_fallback;
+
+ struct hdr_output_metadata hdr_metadata;
+ bool hdr_metadata_set;
+
+ bool is_x11;
+ gsr_cursor x11_cursor;
+
+ AVCodecContext *video_codec_context;
+ bool performance_error_shown;
+ bool fast_path_failed;
+
+ //int drm_fd;
+ //uint64_t prev_sequence;
+ //bool damaged;
+
+ vec2i prev_target_pos;
+ vec2i prev_plane_size;
} gsr_capture_kms;
static void gsr_capture_kms_cleanup_kms_fds(gsr_capture_kms *self) {
- for(int i = 0; i < self->kms_response.num_fds; ++i) {
- if(self->kms_response.fds[i].fd > 0)
- close(self->kms_response.fds[i].fd);
- self->kms_response.fds[i].fd = 0;
+ for(int i = 0; i < self->kms_response.num_items; ++i) {
+ for(int j = 0; j < self->kms_response.items[i].num_dma_bufs; ++j) {
+ gsr_kms_response_dma_buf *dma_buf = &self->kms_response.items[i].dma_buf[j];
+ if(dma_buf->fd > 0) {
+ close(dma_buf->fd);
+ dma_buf->fd = -1;
+ }
+ }
+ self->kms_response.items[i].num_dma_bufs = 0;
}
- self->kms_response.num_fds = 0;
+ self->kms_response.num_items = 0;
}
static void gsr_capture_kms_stop(gsr_capture_kms *self) {
- if(self->input_texture) {
- self->params.egl->glDeleteTextures(1, &self->input_texture);
- self->input_texture = 0;
+ if(self->input_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->input_texture_id);
+ self->input_texture_id = 0;
}
- if(self->cursor_texture) {
- self->params.egl->glDeleteTextures(1, &self->cursor_texture);
- self->cursor_texture = 0;
+ if(self->external_input_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->external_input_texture_id);
+ self->external_input_texture_id = 0;
}
+ if(self->cursor_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->cursor_texture_id);
+ self->cursor_texture_id = 0;
+ }
+
+ // if(self->drm_fd > 0) {
+ // close(self->drm_fd);
+ // self->drm_fd = -1;
+ // }
+
gsr_capture_kms_cleanup_kms_fds(self);
gsr_kms_client_deinit(&self->kms_client);
+ gsr_cursor_deinit(&self->x11_cursor);
}
static int max_int(int a, int b) {
return a > b ? a : b;
}
-static void gsr_capture_kms_create_input_textures(gsr_capture_kms *self) {
- self->params.egl->glGenTextures(1, &self->input_texture);
- self->params.egl->glBindTexture(GL_TEXTURE_2D, self->input_texture);
+static void gsr_capture_kms_create_input_texture_ids(gsr_capture_kms *self) {
+ self->params.egl->glGenTextures(1, &self->input_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->input_texture_id);
self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
- const bool cursor_texture_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
- const int cursor_texture_target = cursor_texture_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
-
- self->params.egl->glGenTextures(1, &self->cursor_texture);
- self->params.egl->glBindTexture(cursor_texture_target, self->cursor_texture);
- self->params.egl->glTexParameteri(cursor_texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- self->params.egl->glTexParameteri(cursor_texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- self->params.egl->glTexParameteri(cursor_texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- self->params.egl->glTexParameteri(cursor_texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- self->params.egl->glBindTexture(cursor_texture_target, 0);
+ self->params.egl->glGenTextures(1, &self->external_input_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->external_input_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
+
+ const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+ const int cursor_texture_id_target = cursor_texture_id_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+
+ self->params.egl->glGenTextures(1, &self->cursor_texture_id);
+ self->params.egl->glBindTexture(cursor_texture_id_target, self->cursor_texture_id);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(cursor_texture_id_target, 0);
}
/* TODO: On monitor reconfiguration, find monitor x, y, width and height again. Do the same for nvfbc. */
@@ -116,10 +162,19 @@ static void monitor_callback(const gsr_monitor *monitor, void *userdata) {
fprintf(stderr, "gsr warning: reached max connector ids\n");
}
+static vec2i rotate_capture_size_if_rotated(gsr_capture_kms *self, vec2i capture_size) {
+ if(self->monitor_rotation == GSR_MONITOR_ROT_90 || self->monitor_rotation == GSR_MONITOR_ROT_270) {
+ int tmp_x = capture_size.x;
+ capture_size.x = capture_size.y;
+ capture_size.y = tmp_x;
+ }
+ return capture_size;
+}
+
static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
gsr_capture_kms *self = cap->priv;
- gsr_capture_kms_create_input_textures(self);
+ gsr_capture_kms_create_input_texture_ids(self);
gsr_monitor monitor;
self->monitor_id.num_connector_ids = 0;
@@ -128,14 +183,19 @@ static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_c
if(kms_init_res != 0)
return kms_init_res;
+ self->is_x11 = gsr_egl_get_display_server(self->params.egl) == GSR_DISPLAY_SERVER_X11;
+ const gsr_connection_type connection_type = self->is_x11 ? GSR_CONNECTION_X11 : GSR_CONNECTION_DRM;
+ if(self->is_x11)
+ gsr_cursor_init(&self->x11_cursor, self->params.egl, self->params.egl->x11.dpy);
+
MonitorCallbackUserdata monitor_callback_userdata = {
&self->monitor_id,
self->params.display_to_capture, strlen(self->params.display_to_capture),
0,
};
- for_each_active_monitor_output(self->params.egl, GSR_CONNECTION_DRM, monitor_callback, &monitor_callback_userdata);
+ for_each_active_monitor_output(self->params.egl, connection_type, monitor_callback, &monitor_callback_userdata);
- if(!get_monitor_by_name(self->params.egl, GSR_CONNECTION_DRM, self->params.display_to_capture, &monitor)) {
+ if(!get_monitor_by_name(self->params.egl, connection_type, self->params.display_to_capture, &monitor)) {
fprintf(stderr, "gsr error: gsr_capture_kms_start: failed to find monitor by name \"%s\"\n", self->params.display_to_capture);
gsr_capture_kms_stop(self);
return -1;
@@ -145,12 +205,11 @@ static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_c
self->monitor_rotation = drm_monitor_get_display_server_rotation(self->params.egl, &monitor);
self->capture_pos = monitor.pos;
- if(self->monitor_rotation == GSR_MONITOR_ROT_90 || self->monitor_rotation == GSR_MONITOR_ROT_270) {
- self->capture_size.x = monitor.size.y;
- self->capture_size.y = monitor.size.x;
- } else {
+ /* Monitor size is already rotated on x11 when the monitor is rotated, no need to apply it ourselves */
+ if(self->is_x11)
self->capture_size = monitor.size;
- }
+ else
+ self->capture_size = rotate_capture_size_if_rotated(self, monitor.size);
/* Disable vsync */
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
@@ -160,9 +219,41 @@ static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_c
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
+
+ self->video_codec_context = video_codec_context;
return 0;
}
+static void gsr_capture_kms_on_event(gsr_capture *cap, gsr_egl *egl) {
+ gsr_capture_kms *self = cap->priv;
+ if(!self->is_x11)
+ return;
+
+ XEvent *xev = gsr_egl_get_event_data(egl);
+ gsr_cursor_on_event(&self->x11_cursor, xev);
+}
+
+// TODO: This is disabled for now because we want to be able to record at a framerate higher than the monitor framerate
+// static void gsr_capture_kms_tick(gsr_capture *cap) {
+// gsr_capture_kms *self = cap->priv;
+
+// if(self->drm_fd <= 0)
+// self->drm_fd = open(self->params.egl->card_path, O_RDONLY);
+
+// if(self->drm_fd <= 0)
+// return;
+
+// uint64_t sequence = 0;
+// uint64_t ns = 0;
+// if(drmCrtcGetSequence(self->drm_fd, 79, &sequence, &ns) != 0)
+// return;
+
+// if(sequence != self->prev_sequence) {
+// self->prev_sequence = sequence;
+// self->damaged = true;
+// }
+// }
+
static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
switch(rot) {
case GSR_MONITOR_ROT_0: return 0.0f;
@@ -173,54 +264,40 @@ static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
return 0.0f;
}
-/* Prefer non combined planes */
-static gsr_kms_response_fd* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
- int index_combined = -1;
- for(int i = 0; i < kms_response->num_fds; ++i) {
- if(kms_response->fds[i].connector_id == connector_id && !kms_response->fds[i].is_cursor) {
- if(kms_response->fds[i].is_combined_plane)
- index_combined = i;
- else
- return &kms_response->fds[i];
- }
- }
-
- if(index_combined != -1)
- return &kms_response->fds[index_combined];
- else
- return NULL;
-}
-
-static gsr_kms_response_fd* find_first_combined_drm(gsr_kms_response *kms_response) {
- for(int i = 0; i < kms_response->num_fds; ++i) {
- if(kms_response->fds[i].is_combined_plane && !kms_response->fds[i].is_cursor)
- return &kms_response->fds[i];
+static gsr_kms_response_item* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
+ for(int i = 0; i < kms_response->num_items; ++i) {
+ if(kms_response->items[i].connector_id == connector_id && !kms_response->items[i].is_cursor)
+ return &kms_response->items[i];
}
return NULL;
}
-static gsr_kms_response_fd* find_largest_drm(gsr_kms_response *kms_response) {
- if(kms_response->num_fds == 0)
+static gsr_kms_response_item* find_largest_drm(gsr_kms_response *kms_response) {
+ if(kms_response->num_items == 0)
return NULL;
int64_t largest_size = 0;
- gsr_kms_response_fd *largest_drm = &kms_response->fds[0];
- for(int i = 0; i < kms_response->num_fds; ++i) {
- const int64_t size = (int64_t)kms_response->fds[i].width * (int64_t)kms_response->fds[i].height;
- if(size > largest_size && !kms_response->fds[i].is_cursor) {
+ gsr_kms_response_item *largest_drm = &kms_response->items[0];
+ for(int i = 0; i < kms_response->num_items; ++i) {
+ const int64_t size = (int64_t)kms_response->items[i].width * (int64_t)kms_response->items[i].height;
+ if(size > largest_size && !kms_response->items[i].is_cursor) {
largest_size = size;
- largest_drm = &kms_response->fds[i];
+ largest_drm = &kms_response->items[i];
}
}
return largest_drm;
}
-static gsr_kms_response_fd* find_cursor_drm(gsr_kms_response *kms_response) {
- for(int i = 0; i < kms_response->num_fds; ++i) {
- if(kms_response->fds[i].is_cursor)
- return &kms_response->fds[i];
+static gsr_kms_response_item* find_cursor_drm(gsr_kms_response *kms_response, uint32_t connector_id) {
+ gsr_kms_response_item *cursor_drm = NULL;
+ for(int i = 0; i < kms_response->num_items; ++i) {
+ if(kms_response->items[i].is_cursor) {
+ cursor_drm = &kms_response->items[i];
+ if(kms_response->items[i].connector_id == connector_id)
+ break;
+ }
}
- return NULL;
+ return cursor_drm;
}
static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *hdr_metadata) {
@@ -229,33 +306,13 @@ static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *h
hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
}
-static void gsr_kms_set_hdr_metadata(gsr_capture_kms *self, AVFrame *frame, gsr_kms_response_fd *drm_fd) {
- if(!self->mastering_display_metadata)
- self->mastering_display_metadata = av_mastering_display_metadata_create_side_data(frame);
-
- if(!self->light_metadata)
- self->light_metadata = av_content_light_metadata_create_side_data(frame);
-
- if(self->mastering_display_metadata) {
- for(int i = 0; i < 3; ++i) {
- self->mastering_display_metadata->display_primaries[i][0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
- self->mastering_display_metadata->display_primaries[i][1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
- }
-
- self->mastering_display_metadata->white_point[0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
- self->mastering_display_metadata->white_point[1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);
-
- self->mastering_display_metadata->min_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
- self->mastering_display_metadata->max_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);
-
- self->mastering_display_metadata->has_primaries = self->mastering_display_metadata->display_primaries[0][0].num > 0;
- self->mastering_display_metadata->has_luminance = self->mastering_display_metadata->max_luminance.num > 0;
- }
+// TODO: Check if this hdr data can be changed after the call to av_packet_side_data_add
+static void gsr_kms_set_hdr_metadata(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd) {
+ if(self->hdr_metadata_set)
+ return;
- if(self->light_metadata) {
- self->light_metadata->MaxCLL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_cll;
- self->light_metadata->MaxFALL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_fall;
- }
+ self->hdr_metadata_set = true;
+ self->hdr_metadata = drm_fd->hdr_metadata;
}
static vec2i swap_vec2i(vec2i value) {
@@ -265,33 +322,90 @@ static vec2i swap_vec2i(vec2i value) {
return value;
}
-static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
- gsr_capture_kms *self = cap->priv;
- const bool screen_plane_use_modifiers = self->params.egl->gpu_info.vendor != GSR_GPU_VENDOR_AMD;
- const bool cursor_texture_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+static EGLImage gsr_capture_kms_create_egl_image(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, bool use_modifiers) {
+ intptr_t img_attr[44];
+ setup_dma_buf_attrs(img_attr, drm_fd->pixel_format, drm_fd->width, drm_fd->height, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs, use_modifiers);
+ while(self->params.egl->eglGetError() != EGL_SUCCESS){}
+ EGLImage image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
+ if(!image || self->params.egl->eglGetError() != EGL_SUCCESS) {
+ if(image)
+ self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
+ return NULL;
+ }
+ return image;
+}
- //egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
- self->params.egl->glClear(0);
+static EGLImage gsr_capture_kms_create_egl_image_with_fallback(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd) {
+ // TODO: This causes a crash sometimes on steam deck, why? is it a driver bug? a vaapi pure version doesn't cause a crash.
+ // Even ffmpeg kmsgrab causes this crash. The error is:
+ // amdgpu: Failed to allocate a buffer:
+ // amdgpu: size : 28508160 bytes
+ // amdgpu: alignment : 2097152 bytes
+ // amdgpu: domains : 4
+ // amdgpu: flags : 4
+ // amdgpu: Failed to allocate a buffer:
+ // amdgpu: size : 28508160 bytes
+ // amdgpu: alignment : 2097152 bytes
+ // amdgpu: domains : 4
+ // amdgpu: flags : 4
+ // EE ../jupiter-mesa/src/gallium/drivers/radeonsi/radeon_vcn_enc.c:516 radeon_create_encoder UVD - Can't create CPB buffer.
+ // [hevc_vaapi @ 0x55ea72b09840] Failed to upload encode parameters: 2 (resource allocation failed).
+ // [hevc_vaapi @ 0x55ea72b09840] Encode failed: -5.
+ // Error: avcodec_send_frame failed, error: Input/output error
+ // Assertion pic->display_order == pic->encode_order failed at libavcodec/vaapi_encode_h265.c:765
+ // kms server info: kms client shutdown, shutting down the server
- gsr_capture_kms_cleanup_kms_fds(self);
+ int fds[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t offsets[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t pitches[GSR_KMS_MAX_DMA_BUFS];
+ uint64_t modifiers[GSR_KMS_MAX_DMA_BUFS];
- gsr_kms_response_fd *drm_fd = NULL;
- gsr_kms_response_fd *cursor_drm_fd = NULL;
- bool capture_is_combined_plane = false;
+ for(int i = 0; i < drm_fd->num_dma_bufs; ++i) {
+ fds[i] = drm_fd->dma_buf[i].fd;
+ offsets[i] = drm_fd->dma_buf[i].offset;
+ pitches[i] = drm_fd->dma_buf[i].pitch;
+ modifiers[i] = drm_fd->modifier;
+ }
- if(gsr_kms_client_get_kms(&self->kms_client, &self->kms_response) != 0) {
- fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to get kms, error: %d (%s)\n", self->kms_response.result, self->kms_response.err_msg);
- return -1;
+ EGLImage image = NULL;
+ if(self->no_modifiers_fallback) {
+ image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, false);
+ } else {
+ image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, true);
+ if(!image) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_create_egl_image_with_fallback: failed to create egl image with modifiers, trying without modifiers\n");
+ self->no_modifiers_fallback = true;
+ image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, false);
+ }
}
+ return image;
+}
- if(self->kms_response.num_fds == 0) {
- static bool error_shown = false;
- if(!error_shown) {
- error_shown = true;
- fprintf(stderr, "gsr error: no drm found, capture will fail\n");
+static bool gsr_capture_kms_bind_image_to_texture(gsr_capture_kms *self, EGLImage image, unsigned int texture_id, bool external_texture) {
+ const int texture_target = external_texture ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+ while(self->params.egl->glGetError() != 0){}
+ self->params.egl->glBindTexture(texture_target, texture_id);
+ self->params.egl->glEGLImageTargetTexture2DOES(texture_target, image);
+ const bool success = self->params.egl->glGetError() == 0;
+ self->params.egl->glBindTexture(texture_target, 0);
+ return success;
+}
+
+static void gsr_capture_kms_bind_image_to_input_texture_with_fallback(gsr_capture_kms *self, EGLImage image) {
+ if(self->external_texture_fallback) {
+ gsr_capture_kms_bind_image_to_texture(self, image, self->external_input_texture_id, true);
+ } else {
+ if(!gsr_capture_kms_bind_image_to_texture(self, image, self->input_texture_id, false)) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to bind image to texture, trying with external texture\n");
+ self->external_texture_fallback = true;
+ gsr_capture_kms_bind_image_to_texture(self, image, self->external_input_texture_id, true);
}
- return -1;
}
+}
+
+static gsr_kms_response_item* find_monitor_drm(gsr_capture_kms *self, bool *capture_is_combined_plane) {
+ *capture_is_combined_plane = false;
+ gsr_kms_response_item *drm_fd = NULL;
for(int i = 0; i < self->monitor_id.num_connector_ids; ++i) {
drm_fd = find_drm_by_connector_id(&self->kms_response, self->monitor_id.connector_ids[i]);
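
setup_dma_buf_attrs() comes from src/utils.c and is not part of this diff; it replaces the hand-written 18-entry, single-plane attribute array the old code built (visible in the removed lines in the next hunk). A sketch of what such a helper plausibly looks like, generalizing that removed code to multiple planes; the EGL attribute names are real (EGL_EXT_image_dma_buf_import and EGL_EXT_image_dma_buf_import_modifiers), but the exact packing in the real helper may differ:

    /* Sketch of a multi-plane DMA-BUF attribute builder (assumed shape of
       setup_dma_buf_attrs from src/utils.c; illustrative, not the actual code). */
    static const intptr_t plane_fd_attrs[4]     = { EGL_DMA_BUF_PLANE0_FD_EXT, EGL_DMA_BUF_PLANE1_FD_EXT, EGL_DMA_BUF_PLANE2_FD_EXT, EGL_DMA_BUF_PLANE3_FD_EXT };
    static const intptr_t plane_offset_attrs[4] = { EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGL_DMA_BUF_PLANE1_OFFSET_EXT, EGL_DMA_BUF_PLANE2_OFFSET_EXT, EGL_DMA_BUF_PLANE3_OFFSET_EXT };
    static const intptr_t plane_pitch_attrs[4]  = { EGL_DMA_BUF_PLANE0_PITCH_EXT, EGL_DMA_BUF_PLANE1_PITCH_EXT, EGL_DMA_BUF_PLANE2_PITCH_EXT, EGL_DMA_BUF_PLANE3_PITCH_EXT };
    static const intptr_t plane_mod_lo_attrs[4] = { EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT, EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT, EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT };
    static const intptr_t plane_mod_hi_attrs[4] = { EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT, EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT, EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT };

    static void setup_dma_buf_attrs_sketch(intptr_t *attrs, uint32_t format, uint32_t width, uint32_t height,
                                           const int *fds, const uint32_t *offsets, const uint32_t *pitches,
                                           const uint64_t *modifiers, int num_planes, bool use_modifier) {
        int i = 0;
        attrs[i++] = EGL_LINUX_DRM_FOURCC_EXT; attrs[i++] = format;
        attrs[i++] = EGL_WIDTH;                attrs[i++] = width;
        attrs[i++] = EGL_HEIGHT;               attrs[i++] = height;
        for(int p = 0; p < num_planes && p < 4; ++p) {
            attrs[i++] = plane_fd_attrs[p];     attrs[i++] = fds[p];
            attrs[i++] = plane_offset_attrs[p]; attrs[i++] = offsets[p];
            attrs[i++] = plane_pitch_attrs[p];  attrs[i++] = pitches[p];
            if(use_modifier) {
                /* The 64-bit modifier is split across two 32-bit attributes, as in the removed code below */
                attrs[i++] = plane_mod_lo_attrs[p]; attrs[i++] = modifiers[p] & 0xFFFFFFFFULL;
                attrs[i++] = plane_mod_hi_attrs[p]; attrs[i++] = modifiers[p] >> 32;
            }
        }
        attrs[i] = EGL_NONE;
    }

The use_modifier flag is what makes the no_modifiers_fallback above work: the same fds are retried with the modifier attributes omitted when eglCreateImage rejects the modifier-qualified import.
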
@@ -301,189 +415,278 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c
// Will never happen on wayland unless the target monitor has been disconnected
if(!drm_fd) {
- drm_fd = find_first_combined_drm(&self->kms_response);
- if(!drm_fd)
- drm_fd = find_largest_drm(&self->kms_response);
- capture_is_combined_plane = true;
+ drm_fd = find_largest_drm(&self->kms_response);
+ *capture_is_combined_plane = true;
}
- cursor_drm_fd = find_cursor_drm(&self->kms_response);
-
- if(!drm_fd)
- return -1;
+ return drm_fd;
+}
- if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != drm_fd->connector_id)
+static gsr_kms_response_item* find_cursor_drm_if_on_monitor(gsr_capture_kms *self, uint32_t monitor_connector_id, bool capture_is_combined_plane) {
+ gsr_kms_response_item *cursor_drm_fd = find_cursor_drm(&self->kms_response, monitor_connector_id);
+ if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != monitor_connector_id)
cursor_drm_fd = NULL;
+ return cursor_drm_fd;
+}
- if(drm_fd->has_hdr_metadata && self->params.hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
- gsr_kms_set_hdr_metadata(self, frame, drm_fd);
+static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, const gsr_kms_response_item *cursor_drm_fd, vec2i target_pos, float texture_rotation) {
+ const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+ const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
- // TODO: This causes a crash sometimes on steam deck, why? is it a driver bug? a vaapi pure version doesn't cause a crash.
- // Even ffmpeg kmsgrab causes this crash. The error is:
- // amdgpu: Failed to allocate a buffer:
- // amdgpu: size : 28508160 bytes
- // amdgpu: alignment : 2097152 bytes
- // amdgpu: domains : 4
- // amdgpu: flags : 4
- // amdgpu: Failed to allocate a buffer:
- // amdgpu: size : 28508160 bytes
- // amdgpu: alignment : 2097152 bytes
- // amdgpu: domains : 4
- // amdgpu: flags : 4
- // EE ../jupiter-mesa/src/gallium/drivers/radeonsi/radeon_vcn_enc.c:516 radeon_create_encoder UVD - Can't create CPB buffer.
- // [hevc_vaapi @ 0x55ea72b09840] Failed to upload encode parameters: 2 (resource allocation failed).
- // [hevc_vaapi @ 0x55ea72b09840] Encode failed: -5.
- // Error: avcodec_send_frame failed, error: Input/output error
- // Assertion pic->display_order == pic->encode_order failed at libavcodec/vaapi_encode_h265.c:765
- // kms server info: kms client shutdown, shutting down the server
- intptr_t img_attr[18] = {
- EGL_LINUX_DRM_FOURCC_EXT, drm_fd->pixel_format,
- EGL_WIDTH, drm_fd->width,
- EGL_HEIGHT, drm_fd->height,
- EGL_DMA_BUF_PLANE0_FD_EXT, drm_fd->fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, drm_fd->offset,
- EGL_DMA_BUF_PLANE0_PITCH_EXT, drm_fd->pitch,
- };
+ vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
+ switch(self->monitor_rotation) {
+ case GSR_MONITOR_ROT_0:
+ break;
+ case GSR_MONITOR_ROT_90:
+ cursor_pos = swap_vec2i(cursor_pos);
+ cursor_pos.x = self->capture_size.x - cursor_pos.x;
+ // TODO: Remove this horrible hack
+ cursor_pos.x -= cursor_size.x;
+ break;
+ case GSR_MONITOR_ROT_180:
+ cursor_pos.x = self->capture_size.x - cursor_pos.x;
+ cursor_pos.y = self->capture_size.y - cursor_pos.y;
+ // TODO: Remove this horrible hack
+ cursor_pos.x -= cursor_size.x;
+ cursor_pos.y -= cursor_size.y;
+ break;
+ case GSR_MONITOR_ROT_270:
+ cursor_pos = swap_vec2i(cursor_pos);
+ cursor_pos.y = self->capture_size.y - cursor_pos.y;
+ // TODO: Remove this horrible hack
+ cursor_pos.y -= cursor_size.y;
+ break;
+ }
- if(screen_plane_use_modifiers) {
- img_attr[12] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
- img_attr[13] = drm_fd->modifier & 0xFFFFFFFFULL;
+ cursor_pos.x += target_pos.x;
+ cursor_pos.y += target_pos.y;
- img_attr[14] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
- img_attr[15] = drm_fd->modifier >> 32ULL;
+ int fds[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t offsets[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t pitches[GSR_KMS_MAX_DMA_BUFS];
+ uint64_t modifiers[GSR_KMS_MAX_DMA_BUFS];
- img_attr[16] = EGL_NONE;
- img_attr[17] = EGL_NONE;
- } else {
- img_attr[12] = EGL_NONE;
- img_attr[13] = EGL_NONE;
+ for(int i = 0; i < cursor_drm_fd->num_dma_bufs; ++i) {
+ fds[i] = cursor_drm_fd->dma_buf[i].fd;
+ offsets[i] = cursor_drm_fd->dma_buf[i].offset;
+ pitches[i] = cursor_drm_fd->dma_buf[i].pitch;
+ modifiers[i] = cursor_drm_fd->modifier;
}
- EGLImage image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
- self->params.egl->glBindTexture(GL_TEXTURE_2D, self->input_texture);
- self->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
- self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
- self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+ intptr_t img_attr_cursor[44];
+ setup_dma_buf_attrs(img_attr_cursor, cursor_drm_fd->pixel_format, cursor_drm_fd->width, cursor_drm_fd->height,
+ fds, offsets, pitches, modifiers, cursor_drm_fd->num_dma_bufs, true);
- vec2i capture_pos = self->capture_pos;
- if(!capture_is_combined_plane)
- capture_pos = (vec2i){drm_fd->x, drm_fd->y};
+ EGLImage cursor_image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
+ const int target = cursor_texture_id_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+ self->params.egl->glBindTexture(target, self->cursor_texture_id);
+ self->params.egl->glEGLImageTargetTexture2DOES(target, cursor_image);
+ self->params.egl->glBindTexture(target, 0);
- const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);
+ if(cursor_image)
+ self->params.egl->eglDestroyImage(self->params.egl->egl_display, cursor_image);
- const int target_x = max_int(0, frame->width / 2 - self->capture_size.x / 2);
- const int target_y = max_int(0, frame->height / 2 - self->capture_size.y / 2);
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
- gsr_color_conversion_draw(color_conversion, self->input_texture,
- (vec2i){target_x, target_y}, self->capture_size,
- capture_pos, self->capture_size,
- texture_rotation, false);
+ gsr_color_conversion_draw(color_conversion, self->cursor_texture_id,
+ cursor_pos, cursor_size,
+ (vec2i){0, 0}, cursor_size,
+ texture_rotation, cursor_texture_id_is_external);
- if(self->params.record_cursor && cursor_drm_fd) {
- const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
- vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
- switch(self->monitor_rotation) {
- case GSR_MONITOR_ROT_0:
- break;
- case GSR_MONITOR_ROT_90:
- cursor_pos = swap_vec2i(cursor_pos);
- cursor_pos.x = self->capture_size.x - cursor_pos.x;
- // TODO: Remove this horrible hack
- cursor_pos.x -= cursor_size.x;
- break;
- case GSR_MONITOR_ROT_180:
- cursor_pos.x = self->capture_size.x - cursor_pos.x;
- cursor_pos.y = self->capture_size.y - cursor_pos.y;
- // TODO: Remove this horrible hack
- cursor_pos.x -= cursor_size.x;
- cursor_pos.y -= cursor_size.y;
- break;
- case GSR_MONITOR_ROT_270:
- cursor_pos = swap_vec2i(cursor_pos);
- cursor_pos.y = self->capture_size.y - cursor_pos.y;
- // TODO: Remove this horrible hack
- cursor_pos.y -= cursor_size.y;
- break;
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
+}
+
+static void render_x11_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i capture_pos, vec2i target_pos) {
+ if(!self->x11_cursor.visible)
+ return;
+
+ gsr_cursor_tick(&self->x11_cursor, DefaultRootWindow(self->params.egl->x11.dpy));
+
+ const vec2i cursor_pos = {
+ target_pos.x + self->x11_cursor.position.x - self->x11_cursor.hotspot.x - capture_pos.x,
+ target_pos.y + self->x11_cursor.position.y - self->x11_cursor.hotspot.y - capture_pos.y
+ };
+
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
+
+ gsr_color_conversion_draw(color_conversion, self->x11_cursor.texture_id,
+ cursor_pos, self->x11_cursor.size,
+ (vec2i){0, 0}, self->x11_cursor.size,
+ 0.0f, false);
+
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
+}
+
+static void gsr_capture_kms_update_capture_size_change(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i target_pos, const gsr_kms_response_item *drm_fd) {
+ if(target_pos.x != self->prev_target_pos.x || target_pos.y != self->prev_target_pos.y || drm_fd->src_w != self->prev_plane_size.x || drm_fd->src_h != self->prev_plane_size.y) {
+ self->prev_target_pos = target_pos;
+ self->prev_plane_size = self->capture_size;
+ gsr_color_conversion_clear(color_conversion);
+ }
+}
+
+static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_capture_kms *self = cap->priv;
+
+ gsr_capture_kms_cleanup_kms_fds(self);
+
+ if(gsr_kms_client_get_kms(&self->kms_client, &self->kms_response) != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to get kms, error: %d (%s)\n", self->kms_response.result, self->kms_response.err_msg);
+ return -1;
+ }
+
+ if(self->kms_response.num_items == 0) {
+ static bool error_shown = false;
+ if(!error_shown) {
+ error_shown = true;
+ fprintf(stderr, "gsr error: no drm found, capture will fail\n");
}
+ return -1;
+ }
- cursor_pos.x += target_x;
- cursor_pos.y += target_y;
-
- const intptr_t img_attr_cursor[] = {
- EGL_LINUX_DRM_FOURCC_EXT, cursor_drm_fd->pixel_format,
- EGL_WIDTH, cursor_drm_fd->width,
- EGL_HEIGHT, cursor_drm_fd->height,
- EGL_DMA_BUF_PLANE0_FD_EXT, cursor_drm_fd->fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, cursor_drm_fd->offset,
- EGL_DMA_BUF_PLANE0_PITCH_EXT, cursor_drm_fd->pitch,
- EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, cursor_drm_fd->modifier & 0xFFFFFFFFULL,
- EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, cursor_drm_fd->modifier >> 32ULL,
- EGL_NONE
- };
-
- EGLImage cursor_image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
- const int target = cursor_texture_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
- self->params.egl->glBindTexture(target, self->cursor_texture);
- self->params.egl->glEGLImageTargetTexture2DOES(target, cursor_image);
- self->params.egl->eglDestroyImage(self->params.egl->egl_display, cursor_image);
- self->params.egl->glBindTexture(target, 0);
+ bool capture_is_combined_plane = false;
+ const gsr_kms_response_item *drm_fd = find_monitor_drm(self, &capture_is_combined_plane);
+ if(!drm_fd) {
+ gsr_capture_kms_cleanup_kms_fds(self);
+ return -1;
+ }
- self->params.egl->glEnable(GL_SCISSOR_TEST);
- self->params.egl->glScissor(target_x, target_y, self->capture_size.x, self->capture_size.y);
+ if(drm_fd->has_hdr_metadata && self->params.hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
+ gsr_kms_set_hdr_metadata(self, drm_fd);
- gsr_color_conversion_draw(color_conversion, self->cursor_texture,
- cursor_pos, cursor_size,
- (vec2i){0, 0}, cursor_size,
- texture_rotation, cursor_texture_is_external);
+ if(!self->performance_error_shown && self->monitor_rotation != GSR_MONITOR_ROT_0 && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ self->performance_error_shown = true;
+        fprintf(stderr, "gsr warning: gsr_capture_kms_capture: the monitor you are recording is rotated, composition will have to be used."
+            " If you experience performance problems in the video then record a single window on X11 or use the portal capture option instead\n");
+ }
- self->params.egl->glDisable(GL_SCISSOR_TEST);
+ const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);
+ const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) };
+ self->capture_size = rotate_capture_size_if_rotated(self, (vec2i){ drm_fd->src_w, drm_fd->src_h });
+ gsr_capture_kms_update_capture_size_change(self, color_conversion, target_pos, drm_fd);
+
+ vec2i capture_pos = self->capture_pos;
+ if(!capture_is_combined_plane)
+ capture_pos = (vec2i){drm_fd->x, drm_fd->y};
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ /* Fast opengl free path */
+ if(!self->fast_path_failed && self->monitor_rotation == GSR_MONITOR_ROT_0 && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ int fds[4];
+ uint32_t offsets[4];
+ uint32_t pitches[4];
+ uint64_t modifiers[4];
+ for(int i = 0; i < drm_fd->num_dma_bufs; ++i) {
+ fds[i] = drm_fd->dma_buf[i].fd;
+ offsets[i] = drm_fd->dma_buf[i].offset;
+ pitches[i] = drm_fd->dma_buf[i].pitch;
+ modifiers[i] = drm_fd->modifier;
+ }
+ if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){capture_pos.x, capture_pos.y}, self->capture_size, target_pos, self->capture_size, drm_fd->pixel_format, (vec2i){drm_fd->width, drm_fd->height}, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs)) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
+ self->fast_path_failed = true;
+ }
+ } else {
+ self->fast_path_failed = true;
}
- self->params.egl->eglSwapBuffers(self->params.egl->egl_display, self->params.egl->egl_surface);
-
- // TODO: Do software specific video encoder conversion here
+ if(self->fast_path_failed) {
+ EGLImage image = gsr_capture_kms_create_egl_image_with_fallback(self, drm_fd);
+ if(image) {
+ gsr_capture_kms_bind_image_to_input_texture_with_fallback(self, image);
+ self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
+ }
- //self->params.egl->glFlush();
- //self->params.egl->glFinish();
+ gsr_color_conversion_draw(color_conversion, self->external_texture_fallback ? self->external_input_texture_id : self->input_texture_id,
+ target_pos, self->capture_size,
+ capture_pos, self->capture_size,
+ texture_rotation, self->external_texture_fallback);
+ }
+
+ if(self->params.record_cursor) {
+ gsr_kms_response_item *cursor_drm_fd = find_cursor_drm_if_on_monitor(self, drm_fd->connector_id, capture_is_combined_plane);
+        // On x11 the cursor is captured with the x11 cursor api instead of the cursor drm plane because on prime systems with a dedicated nvidia gpu
+        // the cursor plane is not available when the cursor is on the monitor controlled by the nvidia device.
+ if(self->is_x11) {
+ const vec2i cursor_monitor_offset = self->capture_pos;
+ render_x11_cursor(self, color_conversion, cursor_monitor_offset, target_pos);
+ } else if(cursor_drm_fd) {
+ render_drm_cursor(self, color_conversion, cursor_drm_fd, target_pos, texture_rotation);
+ }
+ }
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ gsr_capture_kms_cleanup_kms_fds(self);
return 0;
}
static bool gsr_capture_kms_should_stop(gsr_capture *cap, bool *err) {
- gsr_capture_kms *cap_kms = cap->priv;
- if(cap_kms->should_stop) {
- if(err)
- *err = cap_kms->stop_is_error;
- return true;
- }
-
+ (void)cap;
if(err)
*err = false;
return false;
}
-static void gsr_capture_kms_capture_end(gsr_capture *cap, AVFrame *frame) {
- (void)frame;
- gsr_capture_kms_cleanup_kms_fds(cap->priv);
-}
-
static gsr_source_color gsr_capture_kms_get_source_color(gsr_capture *cap) {
(void)cap;
return GSR_SOURCE_COLOR_RGB;
}
static bool gsr_capture_kms_uses_external_image(gsr_capture *cap) {
- gsr_capture_kms *cap_kms = cap->priv;
- return cap_kms->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+ (void)cap;
+ return true;
+}
+
+static bool gsr_capture_kms_set_hdr_metadata(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata) {
+ gsr_capture_kms *self = cap->priv;
+
+ if(!self->hdr_metadata_set)
+ return false;
+
+ light_metadata->MaxCLL = self->hdr_metadata.hdmi_metadata_type1.max_cll;
+ light_metadata->MaxFALL = self->hdr_metadata.hdmi_metadata_type1.max_fall;
+
+ for(int i = 0; i < 3; ++i) {
+ mastering_display_metadata->display_primaries[i][0] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
+ mastering_display_metadata->display_primaries[i][1] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
+ }
+
+ mastering_display_metadata->white_point[0] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
+ mastering_display_metadata->white_point[1] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);
+
+ mastering_display_metadata->min_luminance = av_make_q(self->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
+ mastering_display_metadata->max_luminance = av_make_q(self->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);
+
+ mastering_display_metadata->has_primaries = mastering_display_metadata->display_primaries[0][0].num > 0;
+ mastering_display_metadata->has_luminance = mastering_display_metadata->max_luminance.num > 0;
+
+ return true;
}
+// static bool gsr_capture_kms_is_damaged(gsr_capture *cap) {
+// gsr_capture_kms *self = cap->priv;
+// return self->damaged;
+// }
+
+// static void gsr_capture_kms_clear_damage(gsr_capture *cap) {
+// gsr_capture_kms *self = cap->priv;
+// self->damaged = false;
+// }
+
static void gsr_capture_kms_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
(void)video_codec_context;
- gsr_capture_kms *cap_kms = cap->priv;
+ gsr_capture_kms *self = cap->priv;
if(cap->priv) {
- gsr_capture_kms_stop(cap_kms);
- free((void*)cap_kms->params.display_to_capture);
- cap_kms->params.display_to_capture = NULL;
+ gsr_capture_kms_stop(self);
+ free((void*)self->params.display_to_capture);
+ self->params.display_to_capture = NULL;
free(cap->priv);
cap->priv = NULL;
}
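
The "fast opengl free path" earlier in this hunk hands the raw DMA-BUF planes straight to vaapi_copy_drm_planes_to_video_surface(), which lives in src/utils.c and is not part of this diff. Under the assumption that it imports the planes as a VAAPI surface and blits from it, the import half plausibly looks like this; VADRMPRIMESurfaceDescriptor and VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2 are real libva APIs (va/va_drmcommon.h), while the control flow and variable names here are illustrative:

    /* Hypothetical import step of vaapi_copy_drm_planes_to_video_surface(). */
    VADRMPRIMESurfaceDescriptor desc = {0};
    desc.fourcc = pixel_format;                /* DRM fourcc of the captured plane */
    desc.width = plane_size.x;
    desc.height = plane_size.y;
    desc.num_objects = num_dma_bufs;
    for(int i = 0; i < num_dma_bufs; ++i) {
        desc.objects[i].fd = fds[i];
        desc.objects[i].size = lseek(fds[i], 0, SEEK_END); /* drivers want a size; the fd length works */
        desc.objects[i].drm_format_modifier = modifiers[i];
    }
    desc.num_layers = 1;
    desc.layers[0].drm_format = pixel_format;
    desc.layers[0].num_planes = num_dma_bufs;
    for(int i = 0; i < num_dma_bufs; ++i) {
        desc.layers[0].object_index[i] = i;
        desc.layers[0].offset[i] = offsets[i];
        desc.layers[0].pitch[i] = pitches[i];
    }

    VASurfaceAttrib attribs[2] = {
        { .type = VASurfaceAttribMemoryType, .flags = VA_SURFACE_ATTRIB_SETTABLE,
          .value = { .type = VAGenericValueTypeInteger, .value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2 } },
        { .type = VASurfaceAttribExternalBufferDescriptor, .flags = VA_SURFACE_ATTRIB_SETTABLE,
          .value = { .type = VAGenericValueTypePointer, .value.p = &desc } },
    };
    VASurfaceID input_surface = VA_INVALID_ID;
    if(vaCreateSurfaces(va_dpy, VA_RT_FORMAT_RGB32, desc.width, desc.height, &input_surface, 1, attribs, 2) != VA_STATUS_SUCCESS)
        return false; /* flips fast_path_failed in the caller */
    /* ...followed by a VAProc (video processing) blit from input_surface into the
       encoder's surface, cropped to capture_pos/capture_size, which is why the
       code above can skip the GL draw entirely when this path succeeds. */

If any of this fails at runtime, fast_path_failed flips and every later frame takes the EGLImage plus gsr_color_conversion_draw() route instead.
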
@@ -518,12 +721,15 @@ gsr_capture* gsr_capture_kms_create(const gsr_capture_kms_params *params) {
*cap = (gsr_capture) {
.start = gsr_capture_kms_start,
- .tick = NULL,
+ .on_event = gsr_capture_kms_on_event,
+ //.tick = gsr_capture_kms_tick,
.should_stop = gsr_capture_kms_should_stop,
.capture = gsr_capture_kms_capture,
- .capture_end = gsr_capture_kms_capture_end,
.get_source_color = gsr_capture_kms_get_source_color,
.uses_external_image = gsr_capture_kms_uses_external_image,
+ .set_hdr_metadata = gsr_capture_kms_set_hdr_metadata,
+ //.is_damaged = gsr_capture_kms_is_damaged,
+ //.clear_damage = gsr_capture_kms_clear_damage,
.destroy = gsr_capture_kms_destroy,
.priv = cap_kms
};
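
A note on the new event path wired up above: gsr_capture_kms_on_event() pulls the XEvent out of gsr_egl_get_event_data(), which implies gsr_egl buffers the most recently read X11 event. A sketch of the main-loop side under that assumption; gsr_egl_process_event is a hypothetical name for whatever reads the next event into that buffer (the real pump is in src/main.cpp, whose diff is not shown here):

    /* Hypothetical main-loop event pump for the new on_event path. */
    while(gsr_egl_process_event(&egl))       /* assumed: XPending() + XNextEvent() into egl's event slot */
        gsr_capture_on_event(capture, &egl); /* the kms backend forwards this to gsr_cursor_on_event on x11 */
    gsr_capture_tick(capture);               /* tick no longer takes the codec context (see capture.c above) */

This is what lets the kms backend track the X11 cursor without the capture call itself having to poll X11.
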
diff --git a/src/capture/nvfbc.c b/src/capture/nvfbc.c
index 9c7a041..ee77a20 100644
--- a/src/capture/nvfbc.c
+++ b/src/capture/nvfbc.c
@@ -1,6 +1,5 @@
#include "../../include/capture/nvfbc.h"
#include "../../external/NvFBC.h"
-#include "../../include/cuda.h"
#include "../../include/egl.h"
#include "../../include/utils.h"
#include "../../include/color_conversion.h"
@@ -24,13 +23,8 @@ typedef struct {
bool fbc_handle_created;
bool capture_session_created;
- gsr_cuda cuda;
- CUgraphicsResource cuda_graphics_resources[2];
- CUarray mapped_arrays[2];
- CUstream cuda_stream; // TODO: asdasdsa
NVFBC_TOGL_SETUP_PARAMS setup_params;
- bool direct_capture;
bool supports_direct_cursor;
bool capture_region;
uint32_t x, y, width, height;
@@ -108,7 +102,7 @@ static void set_func_ptr(void **dst, void *src) {
}
static bool gsr_capture_nvfbc_load_library(gsr_capture *cap) {
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+ gsr_capture_nvfbc *self = cap->priv;
dlerror(); /* clear */
void *lib = dlopen("libnvidia-fbc.so.1", RTLD_LAZY);
@@ -117,23 +111,23 @@ static bool gsr_capture_nvfbc_load_library(gsr_capture *cap) {
return false;
}
- set_func_ptr((void**)&cap_nvfbc->nv_fbc_create_instance, dlsym(lib, "NvFBCCreateInstance"));
- if(!cap_nvfbc->nv_fbc_create_instance) {
+ set_func_ptr((void**)&self->nv_fbc_create_instance, dlsym(lib, "NvFBCCreateInstance"));
+ if(!self->nv_fbc_create_instance) {
fprintf(stderr, "gsr error: unable to resolve symbol 'NvFBCCreateInstance'\n");
dlclose(lib);
return false;
}
- memset(&cap_nvfbc->nv_fbc_function_list, 0, sizeof(cap_nvfbc->nv_fbc_function_list));
- cap_nvfbc->nv_fbc_function_list.dwVersion = NVFBC_VERSION;
- NVFBCSTATUS status = cap_nvfbc->nv_fbc_create_instance(&cap_nvfbc->nv_fbc_function_list);
+ memset(&self->nv_fbc_function_list, 0, sizeof(self->nv_fbc_function_list));
+ self->nv_fbc_function_list.dwVersion = NVFBC_VERSION;
+ NVFBCSTATUS status = self->nv_fbc_create_instance(&self->nv_fbc_function_list);
if(status != NVFBC_SUCCESS) {
fprintf(stderr, "gsr error: failed to create NvFBC instance (status: %d)\n", status);
dlclose(lib);
return false;
}
- cap_nvfbc->library = lib;
+ self->library = lib;
return true;
}
@@ -159,64 +153,64 @@ static void set_vertical_sync_enabled(gsr_egl *egl, int enabled) {
fprintf(stderr, "gsr warning: setting vertical sync failed\n");
}
-static void gsr_capture_nvfbc_destroy_session(gsr_capture_nvfbc *cap_nvfbc) {
- if(cap_nvfbc->fbc_handle_created && cap_nvfbc->capture_session_created) {
+static void gsr_capture_nvfbc_destroy_session(gsr_capture_nvfbc *self) {
+ if(self->fbc_handle_created && self->capture_session_created) {
NVFBC_DESTROY_CAPTURE_SESSION_PARAMS destroy_capture_params;
memset(&destroy_capture_params, 0, sizeof(destroy_capture_params));
destroy_capture_params.dwVersion = NVFBC_DESTROY_CAPTURE_SESSION_PARAMS_VER;
- cap_nvfbc->nv_fbc_function_list.nvFBCDestroyCaptureSession(cap_nvfbc->nv_fbc_handle, &destroy_capture_params);
- cap_nvfbc->capture_session_created = false;
+ self->nv_fbc_function_list.nvFBCDestroyCaptureSession(self->nv_fbc_handle, &destroy_capture_params);
+ self->capture_session_created = false;
}
}
-static void gsr_capture_nvfbc_destroy_handle(gsr_capture_nvfbc *cap_nvfbc) {
- if(cap_nvfbc->fbc_handle_created) {
+static void gsr_capture_nvfbc_destroy_handle(gsr_capture_nvfbc *self) {
+ if(self->fbc_handle_created) {
NVFBC_DESTROY_HANDLE_PARAMS destroy_params;
memset(&destroy_params, 0, sizeof(destroy_params));
destroy_params.dwVersion = NVFBC_DESTROY_HANDLE_PARAMS_VER;
- cap_nvfbc->nv_fbc_function_list.nvFBCDestroyHandle(cap_nvfbc->nv_fbc_handle, &destroy_params);
- cap_nvfbc->fbc_handle_created = false;
- cap_nvfbc->nv_fbc_handle = 0;
+ self->nv_fbc_function_list.nvFBCDestroyHandle(self->nv_fbc_handle, &destroy_params);
+ self->fbc_handle_created = false;
+ self->nv_fbc_handle = 0;
}
}
-static void gsr_capture_nvfbc_destroy_session_and_handle(gsr_capture_nvfbc *cap_nvfbc) {
- gsr_capture_nvfbc_destroy_session(cap_nvfbc);
- gsr_capture_nvfbc_destroy_handle(cap_nvfbc);
+static void gsr_capture_nvfbc_destroy_session_and_handle(gsr_capture_nvfbc *self) {
+ gsr_capture_nvfbc_destroy_session(self);
+ gsr_capture_nvfbc_destroy_handle(self);
}
-static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
+static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *self) {
NVFBCSTATUS status;
NVFBC_CREATE_HANDLE_PARAMS create_params;
memset(&create_params, 0, sizeof(create_params));
create_params.dwVersion = NVFBC_CREATE_HANDLE_PARAMS_VER;
create_params.bExternallyManagedContext = NVFBC_TRUE;
- create_params.glxCtx = cap_nvfbc->params.egl->glx_context;
- create_params.glxFBConfig = cap_nvfbc->params.egl->glx_fb_config;
+ create_params.glxCtx = self->params.egl->glx_context;
+ create_params.glxFBConfig = self->params.egl->glx_fb_config;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateHandle(&cap_nvfbc->nv_fbc_handle, &create_params);
+ status = self->nv_fbc_function_list.nvFBCCreateHandle(&self->nv_fbc_handle, &create_params);
if(status != NVFBC_SUCCESS) {
// Reverse engineering for interoperability
const uint8_t enable_key[] = { 0xac, 0x10, 0xc9, 0x2e, 0xa5, 0xe6, 0x87, 0x4f, 0x8f, 0x4b, 0xf4, 0x61, 0xf8, 0x56, 0x27, 0xe9 };
create_params.privateData = enable_key;
create_params.privateDataSize = 16;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateHandle(&cap_nvfbc->nv_fbc_handle, &create_params);
+ status = self->nv_fbc_function_list.nvFBCCreateHandle(&self->nv_fbc_handle, &create_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
goto error_cleanup;
}
}
- cap_nvfbc->fbc_handle_created = true;
+ self->fbc_handle_created = true;
NVFBC_GET_STATUS_PARAMS status_params;
memset(&status_params, 0, sizeof(status_params));
status_params.dwVersion = NVFBC_GET_STATUS_PARAMS_VER;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCGetStatus(cap_nvfbc->nv_fbc_handle, &status_params);
+ status = self->nv_fbc_function_list.nvFBCGetStatus(self->nv_fbc_handle, &status_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
goto error_cleanup;
}
@@ -225,10 +219,10 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
goto error_cleanup;
}
- cap_nvfbc->tracking_width = XWidthOfScreen(DefaultScreenOfDisplay(cap_nvfbc->params.egl->x11.dpy));
- cap_nvfbc->tracking_height = XHeightOfScreen(DefaultScreenOfDisplay(cap_nvfbc->params.egl->x11.dpy));
- cap_nvfbc->tracking_type = strcmp(cap_nvfbc->params.display_to_capture, "screen") == 0 ? NVFBC_TRACKING_SCREEN : NVFBC_TRACKING_OUTPUT;
- if(cap_nvfbc->tracking_type == NVFBC_TRACKING_OUTPUT) {
+ self->tracking_width = XWidthOfScreen(DefaultScreenOfDisplay(self->params.egl->x11.dpy));
+ self->tracking_height = XHeightOfScreen(DefaultScreenOfDisplay(self->params.egl->x11.dpy));
+ self->tracking_type = strcmp(self->params.display_to_capture, "screen") == 0 ? NVFBC_TRACKING_SCREEN : NVFBC_TRACKING_OUTPUT;
+ if(self->tracking_type == NVFBC_TRACKING_OUTPUT) {
if(!status_params.bXRandRAvailable) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: the xrandr extension is not available\n");
goto error_cleanup;
@@ -239,9 +233,9 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
goto error_cleanup;
}
- cap_nvfbc->output_id = get_output_id_from_display_name(status_params.outputs, status_params.dwOutputNum, cap_nvfbc->params.display_to_capture, &cap_nvfbc->tracking_width, &cap_nvfbc->tracking_height);
- if(cap_nvfbc->output_id == 0) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: display '%s' not found\n", cap_nvfbc->params.display_to_capture);
+ self->output_id = get_output_id_from_display_name(status_params.outputs, status_params.dwOutputNum, self->params.display_to_capture, &self->tracking_width, &self->tracking_height);
+ if(self->output_id == 0) {
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: display '%s' not found\n", self->params.display_to_capture);
goto error_cleanup;
}
}
@@ -249,92 +243,83 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
return 0;
error_cleanup:
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+ gsr_capture_nvfbc_destroy_session_and_handle(self);
return -1;
}
-static int gsr_capture_nvfbc_setup_session(gsr_capture_nvfbc *cap_nvfbc) {
+static int gsr_capture_nvfbc_setup_session(gsr_capture_nvfbc *self) {
NVFBC_CREATE_CAPTURE_SESSION_PARAMS create_capture_params;
memset(&create_capture_params, 0, sizeof(create_capture_params));
create_capture_params.dwVersion = NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER;
create_capture_params.eCaptureType = NVFBC_CAPTURE_TO_GL;
- create_capture_params.bWithCursor = (!cap_nvfbc->direct_capture || cap_nvfbc->supports_direct_cursor) ? NVFBC_TRUE : NVFBC_FALSE;
- if(!cap_nvfbc->params.record_cursor)
+ create_capture_params.bWithCursor = (!self->params.direct_capture || self->supports_direct_cursor) ? NVFBC_TRUE : NVFBC_FALSE;
+ if(!self->params.record_cursor)
create_capture_params.bWithCursor = false;
- if(cap_nvfbc->capture_region)
- create_capture_params.captureBox = (NVFBC_BOX){ cap_nvfbc->x, cap_nvfbc->y, cap_nvfbc->width, cap_nvfbc->height };
- create_capture_params.eTrackingType = cap_nvfbc->tracking_type;
- create_capture_params.dwSamplingRateMs = (uint32_t)ceilf(1000.0f / (float)cap_nvfbc->params.fps);
- create_capture_params.bAllowDirectCapture = cap_nvfbc->direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
- create_capture_params.bPushModel = cap_nvfbc->direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
+ if(self->capture_region)
+ create_capture_params.captureBox = (NVFBC_BOX){ self->x, self->y, self->width, self->height };
+ create_capture_params.eTrackingType = self->tracking_type;
+ create_capture_params.dwSamplingRateMs = (uint32_t)ceilf(1000.0f / (float)self->params.fps);
+ create_capture_params.bAllowDirectCapture = self->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
+ create_capture_params.bPushModel = self->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
create_capture_params.bDisableAutoModesetRecovery = true;
- if(cap_nvfbc->tracking_type == NVFBC_TRACKING_OUTPUT)
- create_capture_params.dwOutputId = cap_nvfbc->output_id;
+ if(self->tracking_type == NVFBC_TRACKING_OUTPUT)
+ create_capture_params.dwOutputId = self->output_id;
- NVFBCSTATUS status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateCaptureSession(cap_nvfbc->nv_fbc_handle, &create_capture_params);
+ NVFBCSTATUS status = self->nv_fbc_function_list.nvFBCCreateCaptureSession(self->nv_fbc_handle, &create_capture_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
return -1;
}
- cap_nvfbc->capture_session_created = true;
+ self->capture_session_created = true;
- memset(&cap_nvfbc->setup_params, 0, sizeof(cap_nvfbc->setup_params));
- cap_nvfbc->setup_params.dwVersion = NVFBC_TOGL_SETUP_PARAMS_VER;
- cap_nvfbc->setup_params.eBufferFormat = NVFBC_BUFFER_FORMAT_BGRA;
+ memset(&self->setup_params, 0, sizeof(self->setup_params));
+ self->setup_params.dwVersion = NVFBC_TOGL_SETUP_PARAMS_VER;
+ self->setup_params.eBufferFormat = NVFBC_BUFFER_FORMAT_BGRA;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCToGLSetUp(cap_nvfbc->nv_fbc_handle, &cap_nvfbc->setup_params);
+ status = self->nv_fbc_function_list.nvFBCToGLSetUp(self->nv_fbc_handle, &self->setup_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
- gsr_capture_nvfbc_destroy_session(cap_nvfbc);
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
+ gsr_capture_nvfbc_destroy_session(self);
return -1;
}
return 0;
}
-static void gsr_capture_nvfbc_stop(gsr_capture_nvfbc *cap_nvfbc) {
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
- gsr_cuda_unload(&cap_nvfbc->cuda);
- if(cap_nvfbc->library) {
- dlclose(cap_nvfbc->library);
- cap_nvfbc->library = NULL;
+static void gsr_capture_nvfbc_stop(gsr_capture_nvfbc *self) {
+ gsr_capture_nvfbc_destroy_session_and_handle(self);
+ if(self->library) {
+ dlclose(self->library);
+ self->library = NULL;
}
- if(cap_nvfbc->params.display_to_capture) {
- free((void*)cap_nvfbc->params.display_to_capture);
- cap_nvfbc->params.display_to_capture = NULL;
+ if(self->params.display_to_capture) {
+ free((void*)self->params.display_to_capture);
+ self->params.display_to_capture = NULL;
}
}
static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+ gsr_capture_nvfbc *self = cap->priv;
- if(!cap_nvfbc->params.use_software_video_encoder) {
- if(!gsr_cuda_load(&cap_nvfbc->cuda, cap_nvfbc->params.egl->x11.dpy, cap_nvfbc->params.overclock))
- return -1;
- }
-
- if(!gsr_capture_nvfbc_load_library(cap)) {
- gsr_cuda_unload(&cap_nvfbc->cuda);
+ if(!gsr_capture_nvfbc_load_library(cap))
return -1;
- }
- cap_nvfbc->x = max_int(cap_nvfbc->params.pos.x, 0);
- cap_nvfbc->y = max_int(cap_nvfbc->params.pos.y, 0);
- cap_nvfbc->width = max_int(cap_nvfbc->params.size.x, 0);
- cap_nvfbc->height = max_int(cap_nvfbc->params.size.y, 0);
+ self->x = max_int(self->params.pos.x, 0);
+ self->y = max_int(self->params.pos.y, 0);
+ self->width = max_int(self->params.size.x, 0);
+ self->height = max_int(self->params.size.y, 0);
- cap_nvfbc->capture_region = (cap_nvfbc->x > 0 || cap_nvfbc->y > 0 || cap_nvfbc->width > 0 || cap_nvfbc->height > 0);
+ self->capture_region = (self->x > 0 || self->y > 0 || self->width > 0 || self->height > 0);
- cap_nvfbc->supports_direct_cursor = false;
- bool direct_capture = cap_nvfbc->params.direct_capture;
+ self->supports_direct_cursor = false;
int driver_major_version = 0;
int driver_minor_version = 0;
- if(direct_capture && get_driver_version(&driver_major_version, &driver_minor_version)) {
+ if(self->params.direct_capture && get_driver_version(&driver_major_version, &driver_minor_version)) {
fprintf(stderr, "Info: detected nvidia version: %d.%d\n", driver_major_version, driver_minor_version);
// TODO:
if(version_at_least(driver_major_version, driver_minor_version, 515, 57) && version_less_than(driver_major_version, driver_minor_version, 520, 56)) {
- direct_capture = false;
+ self->params.direct_capture = false;
fprintf(stderr, "Warning: \"screen-direct\" has temporary been disabled as it causes stuttering with driver versions >= 515.57 and < 520.56. Please update your driver if possible. Capturing \"screen\" instead.\n");
}
@@ -343,63 +328,63 @@ static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec
/*
if(direct_capture) {
if(version_at_least(driver_major_version, driver_minor_version, 515, 57))
- supports_direct_cursor = true;
+ self->supports_direct_cursor = true;
else
fprintf(stderr, "Info: capturing \"screen-direct\" but driver version appears to be less than 515.57. Disabling capture of cursor. Please update your driver if you want to capture your cursor or record \"screen\" instead.\n");
}
*/
}
- if(gsr_capture_nvfbc_setup_handle(cap_nvfbc) != 0) {
+ if(gsr_capture_nvfbc_setup_handle(self) != 0) {
goto error_cleanup;
}
- if(gsr_capture_nvfbc_setup_session(cap_nvfbc) != 0) {
+ if(gsr_capture_nvfbc_setup_session(self) != 0) {
goto error_cleanup;
}
- if(cap_nvfbc->capture_region) {
- video_codec_context->width = FFALIGN(cap_nvfbc->width, 2);
- video_codec_context->height = FFALIGN(cap_nvfbc->height, 2);
+ if(self->capture_region) {
+ video_codec_context->width = FFALIGN(self->width, 2);
+ video_codec_context->height = FFALIGN(self->height, 2);
} else {
- video_codec_context->width = FFALIGN(cap_nvfbc->tracking_width, 2);
- video_codec_context->height = FFALIGN(cap_nvfbc->tracking_height, 2);
+ video_codec_context->width = FFALIGN(self->tracking_width, 2);
+ video_codec_context->height = FFALIGN(self->tracking_height, 2);
}
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
/* Disable vsync */
- set_vertical_sync_enabled(cap_nvfbc->params.egl, 0);
+ set_vertical_sync_enabled(self->params.egl, 0);
return 0;
error_cleanup:
- gsr_capture_nvfbc_stop(cap_nvfbc);
+ gsr_capture_nvfbc_stop(self);
return -1;
}
static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+ gsr_capture_nvfbc *self = cap->priv;
const double nvfbc_recreate_retry_time_seconds = 1.0;
- if(cap_nvfbc->nvfbc_needs_recreate) {
+ if(self->nvfbc_needs_recreate) {
const double now = clock_get_monotonic_seconds();
- if(now - cap_nvfbc->nvfbc_dead_start >= nvfbc_recreate_retry_time_seconds) {
- cap_nvfbc->nvfbc_dead_start = now;
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+ if(now - self->nvfbc_dead_start >= nvfbc_recreate_retry_time_seconds) {
+ self->nvfbc_dead_start = now;
+ gsr_capture_nvfbc_destroy_session_and_handle(self);
- if(gsr_capture_nvfbc_setup_handle(cap_nvfbc) != 0) {
+ if(gsr_capture_nvfbc_setup_handle(self) != 0) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc handle, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
return -1;
}
-
- if(gsr_capture_nvfbc_setup_session(cap_nvfbc) != 0) {
+
+ if(gsr_capture_nvfbc_setup_session(self) != 0) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc session, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
return -1;
}
- cap_nvfbc->nvfbc_needs_recreate = false;
+ self->nvfbc_needs_recreate = false;
} else {
return 0;
}
@@ -415,23 +400,24 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color
grab_params.pFrameGrabInfo = &frame_info;
grab_params.dwTimeoutMs = 0;
- NVFBCSTATUS status = cap_nvfbc->nv_fbc_function_list.nvFBCToGLGrabFrame(cap_nvfbc->nv_fbc_handle, &grab_params);
+ NVFBCSTATUS status = self->nv_fbc_function_list.nvFBCToGLGrabFrame(self->nv_fbc_handle, &grab_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed: %s (%d), recreating session after %f second(s)\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle), status, nvfbc_recreate_retry_time_seconds);
- cap_nvfbc->nvfbc_needs_recreate = true;
- cap_nvfbc->nvfbc_dead_start = clock_get_monotonic_seconds();
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed: %s (%d), recreating session after %f second(s)\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle), status, nvfbc_recreate_retry_time_seconds);
+ self->nvfbc_needs_recreate = true;
+ self->nvfbc_dead_start = clock_get_monotonic_seconds();
return 0;
}
- //cap_nvfbc->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
- cap_nvfbc->params.egl->glClear(0);
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
- gsr_color_conversion_draw(color_conversion, cap_nvfbc->setup_params.dwTextures[grab_params.dwTextureIndex],
+ gsr_color_conversion_draw(color_conversion, self->setup_params.dwTextures[grab_params.dwTextureIndex],
(vec2i){0, 0}, (vec2i){frame->width, frame->height},
(vec2i){0, 0}, (vec2i){frame->width, frame->height},
0.0f, false);
- cap_nvfbc->params.egl->glXSwapBuffers(cap_nvfbc->params.egl->x11.dpy, cap_nvfbc->params.egl->x11.window);
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
return 0;
}
@@ -443,8 +429,9 @@ static gsr_source_color gsr_capture_nvfbc_get_source_color(gsr_capture *cap) {
static void gsr_capture_nvfbc_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
(void)video_codec_context;
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
- gsr_capture_nvfbc_stop(cap_nvfbc);
+ gsr_capture_nvfbc *self = cap->priv;
+ gsr_capture_nvfbc_stop(self);
+ free(cap->priv);
free(cap);
}
@@ -479,13 +466,12 @@ gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params) {
cap_nvfbc->params = *params;
cap_nvfbc->params.display_to_capture = display_to_capture;
cap_nvfbc->params.fps = max_int(cap_nvfbc->params.fps, 1);
-
+
*cap = (gsr_capture) {
.start = gsr_capture_nvfbc_start,
.tick = NULL,
.should_stop = NULL,
.capture = gsr_capture_nvfbc_capture,
- .capture_end = NULL,
.get_source_color = gsr_capture_nvfbc_get_source_color,
.uses_external_image = NULL,
.destroy = gsr_capture_nvfbc_destroy,
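
A minimal sketch of how a frontend might drive the reworked vtable above: events are now pushed in through on_event instead of each capture polling X11 itself, and tick no longer takes the codec context. Only the function pointers and their signatures come from this diff; the helper name and its return convention are illustrative:

    static int record_one_frame(gsr_capture *cap, gsr_egl *egl, AVFrame *frame, gsr_color_conversion *conv) {
        if(cap->on_event) /* frontend forwards each display event (see the xcomposite changes below) */
            cap->on_event(cap, egl);

        if(cap->tick)
            cap->tick(cap);

        bool err = false;
        if(cap->should_stop && cap->should_stop(cap, &err))
            return err ? -1 : 1; /* 1 = clean stop requested by the capture */

        return cap->capture(cap, frame, conv); /* 0 on success */
    }
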
diff --git a/src/capture/portal.c b/src/capture/portal.c
new file mode 100644
index 0000000..9ab7e8b
--- /dev/null
+++ b/src/capture/portal.c
@@ -0,0 +1,458 @@
+#include "../../include/capture/portal.h"
+#include "../../include/color_conversion.h"
+#include "../../include/egl.h"
+#include "../../include/utils.h"
+#include "../../include/dbus.h"
+#include "../../include/pipewire.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+
+#include <libavcodec/avcodec.h>
+
+typedef struct {
+ gsr_capture_portal_params params;
+
+ gsr_texture_map texture_map;
+
+ gsr_dbus dbus;
+ char *session_handle;
+
+ gsr_pipewire pipewire;
+ vec2i capture_size;
+ gsr_pipewire_dmabuf_data dmabuf_data[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ int num_dmabuf_data;
+
+ AVCodecContext *video_codec_context;
+ bool fast_path_failed;
+} gsr_capture_portal;
+
+static void gsr_capture_portal_cleanup_plane_fds(gsr_capture_portal *self) {
+ for(int i = 0; i < self->num_dmabuf_data; ++i) {
+ if(self->dmabuf_data[i].fd > 0) {
+ close(self->dmabuf_data[i].fd);
+ self->dmabuf_data[i].fd = 0;
+ }
+ }
+ self->num_dmabuf_data = 0;
+}
+
+static void gsr_capture_portal_stop(gsr_capture_portal *self) {
+ if(self->texture_map.texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->texture_map.texture_id);
+ self->texture_map.texture_id = 0;
+ }
+
+ if(self->texture_map.external_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->texture_map.external_texture_id);
+ self->texture_map.external_texture_id = 0;
+ }
+
+ if(self->texture_map.cursor_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->texture_map.cursor_texture_id);
+ self->texture_map.cursor_texture_id = 0;
+ }
+
+ gsr_capture_portal_cleanup_plane_fds(self);
+
+ gsr_pipewire_deinit(&self->pipewire);
+
+ if(self->session_handle) {
+ free(self->session_handle);
+ self->session_handle = NULL;
+ }
+
+ gsr_dbus_deinit(&self->dbus);
+}
+
+static void gsr_capture_portal_create_input_textures(gsr_capture_portal *self) {
+ self->params.egl->glGenTextures(1, &self->texture_map.texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->texture_map.texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ self->params.egl->glGenTextures(1, &self->texture_map.external_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->texture_map.external_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
+
+ self->params.egl->glGenTextures(1, &self->texture_map.cursor_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->texture_map.cursor_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+}
+
+static void get_default_gpu_screen_recorder_restore_token_path(char *buffer, size_t buffer_size) {
+ const char *xdg_config_home = getenv("XDG_CONFIG_HOME");
+ if(xdg_config_home) {
+ snprintf(buffer, buffer_size, "%s/gpu-screen-recorder/restore_token", xdg_config_home);
+ } else {
+ const char *home = getenv("HOME");
+ if(!home)
+ home = "/tmp";
+ snprintf(buffer, buffer_size, "%s/.config/gpu-screen-recorder/restore_token", home);
+ }
+}
+
+static bool create_directory_to_file(const char *filepath) {
+ char dir[PATH_MAX];
+ dir[0] = '\0';
+
+ const char *split = strrchr(filepath, '/');
+ if(!split) /* Assuming it's the current directory (for example if filepath is "restore_token"), which doesn't need to be created */
+ return true;
+
+ snprintf(dir, sizeof(dir), "%.*s", (int)(split - filepath), filepath);
+ if(create_directory_recursive(dir) != 0) {
+ fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to create directory (%s) for restore token\n", dir);
+ return false;
+ }
+ return true;
+}
+
+static void gsr_capture_portal_save_restore_token(const char *restore_token, const char *portal_session_token_filepath) {
+ char restore_token_path[PATH_MAX];
+ restore_token_path[0] = '\0';
+ if(portal_session_token_filepath)
+ snprintf(restore_token_path, sizeof(restore_token_path), "%s", portal_session_token_filepath);
+ else
+ get_default_gpu_screen_recorder_restore_token_path(restore_token_path, sizeof(restore_token_path));
+
+ if(!create_directory_to_file(restore_token_path))
+ return;
+
+ FILE *f = fopen(restore_token_path, "wb");
+ if(!f) {
+ fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to create restore token file (%s)\n", restore_token_path);
+ return;
+ }
+
+ const int restore_token_len = strlen(restore_token);
+ if((long)fwrite(restore_token, 1, restore_token_len, f) != restore_token_len) {
+ fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to write restore token to file (%s)\n", restore_token_path);
+ fclose(f);
+ return;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_save_restore_token: saved restore token to cache (%s)\n", restore_token);
+ fclose(f);
+}
+
+static void gsr_capture_portal_get_restore_token_from_cache(char *buffer, size_t buffer_size, const char *portal_session_token_filepath) {
+ assert(buffer_size > 0);
+ buffer[0] = '\0';
+
+ char restore_token_path[PATH_MAX];
+ restore_token_path[0] = '\0';
+ if(portal_session_token_filepath)
+ snprintf(restore_token_path, sizeof(restore_token_path), "%s", portal_session_token_filepath);
+ else
+ get_default_gpu_screen_recorder_restore_token_path(restore_token_path, sizeof(restore_token_path));
+
+ FILE *f = fopen(restore_token_path, "rb");
+ if(!f) {
+ fprintf(stderr, "gsr info: gsr_capture_portal_get_restore_token_from_cache: no restore token found in cache or failed to load (%s)\n", restore_token_path);
+ return;
+ }
+
+ fseek(f, 0, SEEK_END);
+ long file_size = ftell(f);
+ fseek(f, 0, SEEK_SET);
+
+ if(file_size > 0 && file_size < 1024 && file_size < (long)buffer_size && (long)fread(buffer, 1, file_size, f) != file_size) {
+ buffer[0] = '\0';
+ fprintf(stderr, "gsr warning: gsr_capture_portal_get_restore_token_from_cache: failed to read restore token (%s)\n", restore_token_path);
+ fclose(f);
+ return;
+ }
+
+ if(file_size > 0 && file_size < (long)buffer_size)
+ buffer[file_size] = '\0';
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_get_restore_token_from_cache: read cached restore token (%s)\n", buffer);
+ fclose(f);
+}
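
The two helpers above form a simple save/load round trip for the portal restore token. A usage sketch, assuming a writable config directory (the token value here is made up):

    char token[1024];
    /* NULL filepath -> $XDG_CONFIG_HOME/gpu-screen-recorder/restore_token */
    gsr_capture_portal_save_restore_token("wlr-example-token", NULL);
    gsr_capture_portal_get_restore_token_from_cache(token, sizeof(token), NULL);
    /* token now contains "wlr-example-token", or "" if the cache was missing or unreadable */
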
+
+static int gsr_capture_portal_setup_dbus(gsr_capture_portal *self, int *pipewire_fd, uint32_t *pipewire_node) {
+ *pipewire_fd = 0;
+ *pipewire_node = 0;
+ int response_status = 0;
+
+ char restore_token[1024];
+ restore_token[0] = '\0';
+ if(self->params.restore_portal_session)
+ gsr_capture_portal_get_restore_token_from_cache(restore_token, sizeof(restore_token), self->params.portal_session_token_filepath);
+
+ if(!gsr_dbus_init(&self->dbus, restore_token))
+ return -1;
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: CreateSession\n");
+ response_status = gsr_dbus_screencast_create_session(&self->dbus, &self->session_handle);
+ if(response_status != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: CreateSession failed\n");
+ return response_status;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: SelectSources\n");
+ response_status = gsr_dbus_screencast_select_sources(&self->dbus, self->session_handle, GSR_PORTAL_CAPTURE_TYPE_ALL, self->params.record_cursor ? GSR_PORTAL_CURSOR_MODE_EMBEDDED : GSR_PORTAL_CURSOR_MODE_HIDDEN);
+ if(response_status != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: SelectSources failed\n");
+ return response_status;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: Start\n");
+ response_status = gsr_dbus_screencast_start(&self->dbus, self->session_handle, pipewire_node);
+ if(response_status != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: Start failed\n");
+ return response_status;
+ }
+
+ const char *screencast_restore_token = gsr_dbus_screencast_get_restore_token(&self->dbus);
+ if(screencast_restore_token)
+ gsr_capture_portal_save_restore_token(screencast_restore_token, self->params.portal_session_token_filepath);
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: OpenPipeWireRemote\n");
+ if(!gsr_dbus_screencast_open_pipewire_remote(&self->dbus, self->session_handle, pipewire_fd)) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: OpenPipeWireRemote failed\n");
+ return -1;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: desktop portal setup finished\n");
+ return 0;
+}
+
+static bool gsr_capture_portal_get_frame_dimensions(gsr_capture_portal *self) {
+ gsr_pipewire_region region = {0, 0, 0, 0};
+ gsr_pipewire_region cursor_region = {0, 0, 0, 0};
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: waiting for pipewire negotiation\n");
+
+ const double start_time = clock_get_monotonic_seconds();
+ while(clock_get_monotonic_seconds() - start_time < 5.0) {
+ bool uses_external_image = false;
+ uint32_t fourcc = 0;
+ uint64_t modifiers = 0;
+ if(gsr_pipewire_map_texture(&self->pipewire, self->texture_map, &region, &cursor_region, self->dmabuf_data, &self->num_dmabuf_data, &fourcc, &modifiers, &uses_external_image)) {
+ gsr_capture_portal_cleanup_plane_fds(self);
+ self->capture_size.x = region.width;
+ self->capture_size.y = region.height;
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: pipewire negotiation finished\n");
+ return true;
+ }
+ usleep(30 * 1000); /* 30 milliseconds */
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: timed out waiting for pipewire negotiation (5 seconds)\n");
+ return false;
+}
+
+static int gsr_capture_portal_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+ gsr_capture_portal *self = cap->priv;
+
+ gsr_capture_portal_create_input_textures(self);
+
+ int pipewire_fd = 0;
+ uint32_t pipewire_node = 0;
+ const int response_status = gsr_capture_portal_setup_dbus(self, &pipewire_fd, &pipewire_node);
+ if(response_status != 0) {
+ gsr_capture_portal_stop(self);
+ // Response status values:
+ // 0: Success, the request is carried out
+ // 1: The user cancelled the interaction
+ // 2: The user interaction was ended in some other way
+ // Response status value 2 happens usually if there was some kind of error in the desktop portal on the system
+ if(response_status == 2) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_start: desktop portal capture failed. Either you Wayland compositor doesn't support desktop portal capture or it's incorrectly setup on your system\n");
+ return 50;
+ } else if(response_status == 1) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_start: desktop portal capture failed. It seems like desktop portal capture was canceled by the user.\n");
+ return 60;
+ } else {
+ return -1;
+ }
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: setting up pipewire\n");
+ /* TODO: support hdr when pipewire supports it */
+ /* gsr_pipewire closes the pipewire fd, even on failure */
+ if(!gsr_pipewire_init(&self->pipewire, pipewire_fd, pipewire_node, video_codec_context->framerate.num, self->params.record_cursor, self->params.egl)) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_start: failed to setup pipewire with fd: %d, node: %" PRIu32 "\n", pipewire_fd, pipewire_node);
+ gsr_capture_portal_stop(self);
+ return -1;
+ }
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: pipewire setup finished\n");
+
+ if(!gsr_capture_portal_get_frame_dimensions(self)) {
+ gsr_capture_portal_stop(self);
+ return -1;
+ }
+
+ /* Disable vsync */
+ self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
+
+ video_codec_context->width = FFALIGN(self->capture_size.x, 2);
+ video_codec_context->height = FFALIGN(self->capture_size.y, 2);
+
+ frame->width = video_codec_context->width;
+ frame->height = video_codec_context->height;
+
+ self->video_codec_context = video_codec_context;
+ return 0;
+}
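
The 50 and 60 return values above are distinct exit codes so a caller can tell a broken portal apart from a user cancel. A hypothetical caller-side mapping — only the 0/50/60 values come from this diff:

    static const char* portal_start_error_message(int res) {
        switch(res) {
            case 0:  return NULL; /* capture started */
            case 50: return "desktop portal capture unsupported or misconfigured";
            case 60: return "desktop portal capture cancelled by the user";
            default: return "desktop portal capture failed";
        }
    }
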
+
+static int max_int(int a, int b) {
+ return a > b ? a : b;
+}
+
+static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ (void)frame;
+ (void)color_conversion;
+ gsr_capture_portal *self = cap->priv;
+
+ /* TODO: Handle formats other than RGB(a) */
+ gsr_pipewire_region region = {0, 0, 0, 0};
+ gsr_pipewire_region cursor_region = {0, 0, 0, 0};
+ uint32_t pipewire_fourcc = 0;
+ uint64_t pipewire_modifiers = 0;
+ bool using_external_image = false;
+ if(gsr_pipewire_map_texture(&self->pipewire, self->texture_map, &region, &cursor_region, self->dmabuf_data, &self->num_dmabuf_data, &pipewire_fourcc, &pipewire_modifiers, &using_external_image)) {
+ if(region.width != self->capture_size.x || region.height != self->capture_size.y) {
+ self->capture_size.x = region.width;
+ self->capture_size.y = region.height;
+ gsr_color_conversion_clear(color_conversion);
+ }
+ } else {
+ return 0;
+ }
+
+ const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) };
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ // TODO: Handle region crop
+
+ /* Fast opengl free path */
+ if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ int fds[4];
+ uint32_t offsets[4];
+ uint32_t pitches[4];
+ uint64_t modifiers[4];
+ for(int i = 0; i < self->num_dmabuf_data; ++i) {
+ fds[i] = self->dmabuf_data[i].fd;
+ offsets[i] = self->dmabuf_data[i].offset;
+ pitches[i] = self->dmabuf_data[i].stride;
+ modifiers[i] = pipewire_modifiers;
+ }
+ if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){region.x, region.y}, self->capture_size, target_pos, self->capture_size, pipewire_fourcc, self->capture_size, fds, offsets, pitches, modifiers, self->num_dmabuf_data)) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
+ self->fast_path_failed = true;
+ }
+ } else {
+ self->fast_path_failed = true;
+ }
+
+ if(self->fast_path_failed) {
+ gsr_color_conversion_draw(color_conversion, using_external_image ? self->texture_map.external_texture_id : self->texture_map.texture_id,
+ target_pos, self->capture_size,
+ (vec2i){region.x, region.y}, self->capture_size,
+ 0.0f, using_external_image);
+ }
+
+ if(self->params.record_cursor) {
+ const vec2i cursor_pos = {
+ target_pos.x + cursor_region.x,
+ target_pos.y + cursor_region.y
+ };
+
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
+ gsr_color_conversion_draw(color_conversion, self->texture_map.cursor_texture_id,
+ (vec2i){cursor_pos.x, cursor_pos.y}, (vec2i){cursor_region.width, cursor_region.height},
+ (vec2i){0, 0}, (vec2i){cursor_region.width, cursor_region.height},
+ 0.0f, false);
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
+ }
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ gsr_capture_portal_cleanup_plane_fds(self);
+
+ return 0;
+}
+
+static gsr_source_color gsr_capture_portal_get_source_color(gsr_capture *cap) {
+ (void)cap;
+ return GSR_SOURCE_COLOR_RGB;
+}
+
+static bool gsr_capture_portal_uses_external_image(gsr_capture *cap) {
+ (void)cap;
+ return true;
+}
+
+static bool gsr_capture_portal_is_damaged(gsr_capture *cap) {
+ gsr_capture_portal *self = cap->priv;
+ return gsr_pipewire_is_damaged(&self->pipewire);
+}
+
+static void gsr_capture_portal_clear_damage(gsr_capture *cap) {
+ gsr_capture_portal *self = cap->priv;
+ gsr_pipewire_clear_damage(&self->pipewire);
+}
+
+static void gsr_capture_portal_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+ (void)video_codec_context;
+ gsr_capture_portal *self = cap->priv;
+ if(cap->priv) {
+ gsr_capture_portal_stop(self);
+ free(cap->priv);
+ cap->priv = NULL;
+ }
+ free(cap);
+}
+
+gsr_capture* gsr_capture_portal_create(const gsr_capture_portal_params *params) {
+ if(!params) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_create params is NULL\n");
+ return NULL;
+ }
+
+ gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+ if(!cap)
+ return NULL;
+
+ gsr_capture_portal *cap_portal = calloc(1, sizeof(gsr_capture_portal));
+ if(!cap_portal) {
+ free(cap);
+ return NULL;
+ }
+
+ cap_portal->params = *params;
+
+ *cap = (gsr_capture) {
+ .start = gsr_capture_portal_start,
+ .tick = NULL,
+ .should_stop = NULL,
+ .capture = gsr_capture_portal_capture,
+ .get_source_color = gsr_capture_portal_get_source_color,
+ .uses_external_image = gsr_capture_portal_uses_external_image,
+ .is_damaged = gsr_capture_portal_is_damaged,
+ .clear_damage = gsr_capture_portal_clear_damage,
+ .destroy = gsr_capture_portal_destroy,
+ .priv = cap_portal
+ };
+
+ return cap;
+}
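
Putting the new backend together, construction could look like this — a sketch; the param fields are the ones this file reads, everything else (egl setup, error handling) is assumed to happen elsewhere:

    gsr_capture_portal_params params = {0};
    params.egl = egl;                            /* previously initialized gsr_egl */
    params.record_cursor = true;                 /* embedded cursor via the portal */
    params.restore_portal_session = true;        /* skip the picker if a token is cached */
    params.portal_session_token_filepath = NULL; /* NULL -> default XDG path */

    gsr_capture *capture = gsr_capture_portal_create(&params);
    if(!capture) {
        /* allocation failed */
    }
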
diff --git a/src/capture/xcomposite.c b/src/capture/xcomposite.c
index f5d2b2f..9e208d6 100644
--- a/src/capture/xcomposite.c
+++ b/src/capture/xcomposite.c
@@ -10,19 +10,18 @@
#include <assert.h>
#include <X11/Xlib.h>
-#include <X11/extensions/Xdamage.h>
#include <libavutil/frame.h>
#include <libavcodec/avcodec.h>
typedef struct {
gsr_capture_xcomposite_params params;
- XEvent xev;
bool should_stop;
bool stop_is_error;
bool window_resized;
bool follow_focused_initialized;
+ bool init_new_window;
Window window;
vec2i window_size;
@@ -30,25 +29,17 @@ typedef struct {
double window_resize_timer;
WindowTexture window_texture;
+ AVCodecContext *video_codec_context;
Atom net_active_window_atom;
gsr_cursor cursor;
- int damage_event;
- int damage_error;
- XID damage;
- bool damaged;
-
bool clear_background;
+ bool fast_path_failed;
} gsr_capture_xcomposite;
static void gsr_capture_xcomposite_stop(gsr_capture_xcomposite *self) {
- if(self->damage) {
- XDamageDestroy(self->params.egl->x11.dpy, self->damage);
- self->damage = None;
- }
-
window_texture_deinit(&self->window_texture);
gsr_cursor_deinit(&self->cursor);
}
@@ -71,23 +62,6 @@ static Window get_focused_window(Display *display, Atom net_active_window_atom)
return None;
}
-static void gsr_capture_xcomposite_setup_damage(gsr_capture_xcomposite *self, Window window) {
- if(self->damage_event == 0)
- return;
-
- if(self->damage) {
- XDamageDestroy(self->params.egl->x11.dpy, self->damage);
- self->damage = None;
- }
-
- self->damage = XDamageCreate(self->params.egl->x11.dpy, window, XDamageReportNonEmpty);
- if(self->damage) {
- XDamageSubtract(self->params.egl->x11.dpy, self->damage, None, None);
- } else {
- fprintf(stderr, "gsr warning: gsr_capture_xcomposite_setup_damage: XDamageCreate failed\n");
- }
-}
-
static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
gsr_capture_xcomposite *self = cap->priv;
@@ -102,20 +76,6 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
self->window = self->params.window;
}
- if(self->params.track_damage) {
- if(!XDamageQueryExtension(self->params.egl->x11.dpy, &self->damage_event, &self->damage_error)) {
- fprintf(stderr, "gsr warning: gsr_capture_xcomposite_start: XDamage is not supported by your X11 server\n");
- self->damage_event = 0;
- self->damage_error = 0;
- }
- } else {
- self->damage_event = 0;
- self->damage_error = 0;
- }
-
- self->damaged = true;
- gsr_capture_xcomposite_setup_damage(self, self->window);
-
/* TODO: Do these in tick, and allow error if follow_focused */
XWindowAttributes attr;
@@ -133,16 +93,6 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
// TODO: Get the current event selection, add these masks on top of it and then restore the selection at the end. Also do the same in the other xcomposite code
XSelectInput(self->params.egl->x11.dpy, self->window, StructureNotifyMask | ExposureMask);
- if(!self->params.egl->eglExportDMABUFImageQueryMESA) {
- fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: could not find eglExportDMABUFImageQueryMESA\n");
- return -1;
- }
-
- if(!self->params.egl->eglExportDMABUFImageMESA) {
- fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: could not find eglExportDMABUFImageMESA\n");
- return -1;
- }
-
/* Disable vsync */
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
if(window_texture_init(&self->window_texture, self->params.egl->x11.dpy, self->window, self->params.egl) != 0 && !self->params.follow_focused) {
@@ -174,75 +124,20 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
+ self->video_codec_context = video_codec_context;
self->window_resize_timer = clock_get_monotonic_seconds();
return 0;
}
-static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
+static void gsr_capture_xcomposite_tick(gsr_capture *cap) {
gsr_capture_xcomposite *self = cap->priv;
- bool init_new_window = false;
- while(XPending(self->params.egl->x11.dpy)) {
- XNextEvent(self->params.egl->x11.dpy, &self->xev);
-
- switch(self->xev.type) {
- case DestroyNotify: {
- /* Window died (when not following focused window), so we stop recording */
- if(!self->params.follow_focused && self->xev.xdestroywindow.window == self->window) {
- self->should_stop = true;
- self->stop_is_error = false;
- }
- break;
- }
- case Expose: {
- /* Requires window texture recreate */
- if(self->xev.xexpose.count == 0 && self->xev.xexpose.window == self->window) {
- self->window_resize_timer = clock_get_monotonic_seconds();
- self->window_resized = true;
- }
- break;
- }
- case ConfigureNotify: {
- /* Window resized */
- if(self->xev.xconfigure.window == self->window && (self->xev.xconfigure.width != self->window_size.x || self->xev.xconfigure.height != self->window_size.y)) {
- self->window_size.x = max_int(self->xev.xconfigure.width, 0);
- self->window_size.y = max_int(self->xev.xconfigure.height, 0);
- self->window_resize_timer = clock_get_monotonic_seconds();
- self->window_resized = true;
- }
- break;
- }
- case PropertyNotify: {
- /* Focused window changed */
- if(self->params.follow_focused && self->xev.xproperty.atom == self->net_active_window_atom) {
- init_new_window = true;
- }
- break;
- }
- }
-
- if(self->damage_event && self->xev.type == self->damage_event + XDamageNotify) {
- XDamageNotifyEvent *de = (XDamageNotifyEvent*)&self->xev;
- XserverRegion region = XFixesCreateRegion(self->params.egl->x11.dpy, NULL, 0);
- // Subtract all the damage, repairing the window
- XDamageSubtract(self->params.egl->x11.dpy, de->damage, None, region);
- XFixesDestroyRegion(self->params.egl->x11.dpy, region);
- self->damaged = true;
- }
-
- if(gsr_cursor_update(&self->cursor, &self->xev)) {
- if(self->params.record_cursor && self->cursor.visible) {
- self->damaged = true;
- }
- }
- }
-
if(self->params.follow_focused && !self->follow_focused_initialized) {
- init_new_window = true;
+ self->init_new_window = true;
}
- if(init_new_window) {
+ if(self->init_new_window) {
+ self->init_new_window = false;
Window focused_window = get_focused_window(self->params.egl->x11.dpy, self->net_active_window_atom);
if(focused_window != self->window || !self->follow_focused_initialized) {
self->follow_focused_initialized = true;
@@ -272,7 +167,6 @@ static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_
self->window_resized = false;
self->clear_background = true;
- gsr_capture_xcomposite_setup_damage(self, self->window);
}
}
@@ -296,18 +190,49 @@ static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_
self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
self->clear_background = true;
- gsr_capture_xcomposite_setup_damage(self, self->window);
}
}
-static bool gsr_capture_xcomposite_is_damaged(gsr_capture *cap) {
+static void gsr_capture_xcomposite_on_event(gsr_capture *cap, gsr_egl *egl) {
gsr_capture_xcomposite *self = cap->priv;
- return self->damage_event ? self->damaged : true;
-}
+ XEvent *xev = gsr_egl_get_event_data(egl);
+ switch(xev->type) {
+ case DestroyNotify: {
+ /* Window died (when not following focused window), so we stop recording */
+ if(!self->params.follow_focused && xev->xdestroywindow.window == self->window) {
+ self->should_stop = true;
+ self->stop_is_error = false;
+ }
+ break;
+ }
+ case Expose: {
+ /* Requires window texture recreate */
+ if(xev->xexpose.count == 0 && xev->xexpose.window == self->window) {
+ self->window_resize_timer = clock_get_monotonic_seconds();
+ self->window_resized = true;
+ }
+ break;
+ }
+ case ConfigureNotify: {
+ /* Window resized */
+ if(xev->xconfigure.window == self->window && (xev->xconfigure.width != self->window_size.x || xev->xconfigure.height != self->window_size.y)) {
+ self->window_size.x = max_int(xev->xconfigure.width, 0);
+ self->window_size.y = max_int(xev->xconfigure.height, 0);
+ self->window_resize_timer = clock_get_monotonic_seconds();
+ self->window_resized = true;
+ }
+ break;
+ }
+ case PropertyNotify: {
+ /* Focused window changed */
+ if(self->params.follow_focused && xev->xproperty.atom == self->net_active_window_atom) {
+ self->init_new_window = true;
+ }
+ break;
+ }
+ }
-static void gsr_capture_xcomposite_clear_damage(gsr_capture *cap) {
- gsr_capture_xcomposite *self = cap->priv;
- self->damaged = false;
+ gsr_cursor_on_event(&self->cursor, xev);
}
static bool gsr_capture_xcomposite_should_stop(gsr_capture *cap, bool *err) {
@@ -327,55 +252,54 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_
gsr_capture_xcomposite *self = cap->priv;
(void)frame;
- //self->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
- self->params.egl->glClear(0);
-
if(self->clear_background) {
self->clear_background = false;
gsr_color_conversion_clear(color_conversion);
}
- const int target_x = max_int(0, frame->width / 2 - self->texture_size.x / 2);
- const int target_y = max_int(0, frame->height / 2 - self->texture_size.y / 2);
+ const vec2i target_pos = { max_int(0, frame->width / 2 - self->texture_size.x / 2), max_int(0, frame->height / 2 - self->texture_size.y / 2) };
- const vec2i cursor_pos = {
- target_x + self->cursor.position.x - self->cursor.hotspot.x,
- target_y + self->cursor.position.y - self->cursor.hotspot.y
- };
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ /* Fast opengl free path */
+ if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ if(!vaapi_copy_egl_image_to_video_surface(self->params.egl, self->window_texture.image, (vec2i){0, 0}, self->texture_size, target_pos, self->texture_size, self->video_codec_context, frame)) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_capture: vaapi_copy_egl_image_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
+ self->fast_path_failed = true;
+ }
+ } else {
+ self->fast_path_failed = true;
+ }
- gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
- (vec2i){target_x, target_y}, self->texture_size,
- (vec2i){0, 0}, self->texture_size,
- 0.0f, false);
+ if(self->fast_path_failed) {
+ gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
+ target_pos, self->texture_size,
+ (vec2i){0, 0}, self->texture_size,
+ 0.0f, false);
+ }
if(self->params.record_cursor && self->cursor.visible) {
gsr_cursor_tick(&self->cursor, self->window);
- const bool cursor_inside_window =
- cursor_pos.x + self->cursor.size.x >= target_x &&
- cursor_pos.x <= target_x + self->texture_size.x &&
- cursor_pos.y + self->cursor.size.y >= target_y &&
- cursor_pos.y <= target_y + self->texture_size.y;
+ const vec2i cursor_pos = {
+ target_pos.x + self->cursor.position.x - self->cursor.hotspot.x,
+ target_pos.y + self->cursor.position.y - self->cursor.hotspot.y
+ };
- if(cursor_inside_window) {
- self->params.egl->glEnable(GL_SCISSOR_TEST);
- self->params.egl->glScissor(target_x, target_y, self->texture_size.x, self->texture_size.y);
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->texture_size.x, self->texture_size.y);
- gsr_color_conversion_draw(color_conversion, self->cursor.texture_id,
- cursor_pos, self->cursor.size,
- (vec2i){0, 0}, self->cursor.size,
- 0.0f, false);
+ gsr_color_conversion_draw(color_conversion, self->cursor.texture_id,
+ cursor_pos, self->cursor.size,
+ (vec2i){0, 0}, self->cursor.size,
+ 0.0f, false);
- self->params.egl->glDisable(GL_SCISSOR_TEST);
- }
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
}
- self->params.egl->eglSwapBuffers(self->params.egl->egl_display, self->params.egl->egl_surface);
-
- // TODO: Do video encoder specific conversion here
-
- //self->params.egl->glFlush();
- //self->params.egl->glFinish();
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
return 0;
}
@@ -385,6 +309,11 @@ static gsr_source_color gsr_capture_xcomposite_get_source_color(gsr_capture *cap
return GSR_SOURCE_COLOR_RGB;
}
+static uint64_t gsr_capture_xcomposite_get_window_id(gsr_capture *cap) {
+ gsr_capture_xcomposite *self = cap->priv;
+ return self->window;
+}
+
static void gsr_capture_xcomposite_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
(void)video_codec_context;
if(cap->priv) {
@@ -415,14 +344,13 @@ gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *
*cap = (gsr_capture) {
.start = gsr_capture_xcomposite_start,
+ .on_event = gsr_capture_xcomposite_on_event,
.tick = gsr_capture_xcomposite_tick,
- .is_damaged = gsr_capture_xcomposite_is_damaged,
- .clear_damage = gsr_capture_xcomposite_clear_damage,
.should_stop = gsr_capture_xcomposite_should_stop,
.capture = gsr_capture_xcomposite_capture,
- .capture_end = NULL,
.get_source_color = gsr_capture_xcomposite_get_source_color,
.uses_external_image = NULL,
+ .get_window_id = gsr_capture_xcomposite_get_window_id,
.destroy = gsr_capture_xcomposite_destroy,
.priv = cap_xcomp
};
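
Both the portal and xcomposite capture paths above now share the same shape: try the AMD VAAPI zero-copy import each frame and, if it ever fails, latch fast_path_failed and stay on the generic OpenGL draw for the rest of the session. Distilled into one function — try_vaapi_zero_copy() and draw_with_opengl() are hypothetical stand-ins for the vaapi_copy_*_to_video_surface helpers and the gsr_color_conversion_draw call:

    static void capture_frame_with_fallback(gsr_capture_xcomposite *self, AVFrame *frame, gsr_color_conversion *conv) {
        if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) &&
           self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
            if(!try_vaapi_zero_copy(self, frame))   /* direct DRM plane/EGLImage -> VASurface */
                self->fast_path_failed = true;      /* latched: not retried for this session */
        } else {
            self->fast_path_failed = true;          /* non-vaapi encoder or non-AMD gpu */
        }

        if(self->fast_path_failed)
            draw_with_opengl(self, frame, conv);    /* generic color-conversion path */
    }
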
diff --git a/src/codec_query/nvenc.c b/src/codec_query/nvenc.c
new file mode 100644
index 0000000..0501851
--- /dev/null
+++ b/src/codec_query/nvenc.c
@@ -0,0 +1,235 @@
+#include "../../include/codec_query/nvenc.h"
+#include "../../include/cuda.h"
+#include "../../external/nvEncodeAPI.h"
+
+#include <dlfcn.h>
+#include <stdio.h>
+#include <string.h>
+
+static void* open_nvenc_library(void) {
+ dlerror(); /* clear */
+ void *lib = dlopen("libnvidia-encode.so.1", RTLD_LAZY);
+ if(!lib) {
+ lib = dlopen("libnvidia-encode.so", RTLD_LAZY);
+ if(!lib) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc failed: failed to load libnvidia-encode.so/libnvidia-encode.so.1, error: %s\n", dlerror());
+ return NULL;
+ }
+ }
+ return lib;
+}
+
+static bool profile_is_h264(const GUID *profile_guid) {
+ const GUID *h264_guids[] = {
+ &NV_ENC_H264_PROFILE_BASELINE_GUID,
+ &NV_ENC_H264_PROFILE_MAIN_GUID,
+ &NV_ENC_H264_PROFILE_HIGH_GUID,
+ &NV_ENC_H264_PROFILE_PROGRESSIVE_HIGH_GUID,
+ &NV_ENC_H264_PROFILE_CONSTRAINED_HIGH_GUID
+ };
+
+ for(int i = 0; i < 5; ++i) {
+ if(memcmp(profile_guid, h264_guids[i], sizeof(GUID)) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static bool profile_is_hevc(const GUID *profile_guid) {
+ const GUID *hevc_guids[] = {
+ &NV_ENC_HEVC_PROFILE_MAIN_GUID,
+ };
+
+ for(int i = 0; i < 1; ++i) {
+ if(memcmp(profile_guid, hevc_guids[i], sizeof(GUID)) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static bool profile_is_hevc_10bit(const GUID *profile_guid) {
+ const GUID *hevc_10bit_guids[] = {
+ &NV_ENC_HEVC_PROFILE_MAIN10_GUID,
+ };
+
+ for(int i = 0; i < 1; ++i) {
+ if(memcmp(profile_guid, hevc_10bit_guids[i], sizeof(GUID)) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static bool profile_is_av1(const GUID *profile_guid) {
+ const GUID *av1_guids[] = {
+ &NV_ENC_AV1_PROFILE_MAIN_GUID,
+ };
+
+ for(int i = 0; i < 1; ++i) {
+ if(memcmp(profile_guid, av1_guids[i], sizeof(GUID)) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static bool encoder_get_supported_profiles(const NV_ENCODE_API_FUNCTION_LIST *function_list, void *nvenc_encoder, const GUID *encoder_guid, gsr_supported_video_codecs *supported_video_codecs) {
+ bool success = false;
+ GUID *profile_guids = NULL;
+
+ uint32_t profile_guid_count = 0;
+ if(function_list->nvEncGetEncodeProfileGUIDCount(nvenc_encoder, *encoder_guid, &profile_guid_count) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeProfileGUIDCount failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
+ goto fail;
+ }
+
+ if(profile_guid_count == 0)
+ goto fail;
+
+ profile_guids = calloc(profile_guid_count, sizeof(GUID));
+ if(!profile_guids) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to allocate %d guids\n", (int)profile_guid_count);
+ goto fail;
+ }
+
+ if(function_list->nvEncGetEncodeProfileGUIDs(nvenc_encoder, *encoder_guid, profile_guids, profile_guid_count, &profile_guid_count) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeProfileGUIDs failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
+ goto fail;
+ }
+
+ for(uint32_t i = 0; i < profile_guid_count; ++i) {
+ if(profile_is_h264(&profile_guids[i])) {
+ supported_video_codecs->h264 = (gsr_supported_video_codec){ true, false };
+ } else if(profile_is_hevc(&profile_guids[i])) {
+ supported_video_codecs->hevc = (gsr_supported_video_codec){ true, false };
+ } else if(profile_is_hevc_10bit(&profile_guids[i])) {
+ supported_video_codecs->hevc_hdr = (gsr_supported_video_codec){ true, false };
+ supported_video_codecs->hevc_10bit = (gsr_supported_video_codec){ true, false };
+ } else if(profile_is_av1(&profile_guids[i])) {
+ supported_video_codecs->av1 = (gsr_supported_video_codec){ true, false };
+ supported_video_codecs->av1_hdr = (gsr_supported_video_codec){ true, false };
+ supported_video_codecs->av1_10bit = (gsr_supported_video_codec){ true, false };
+ }
+ }
+
+ success = true;
+ fail:
+
+ if(profile_guids)
+ free(profile_guids);
+
+ return success;
+}
+
+static bool get_supported_video_codecs(const NV_ENCODE_API_FUNCTION_LIST *function_list, void *nvenc_encoder, gsr_supported_video_codecs *supported_video_codecs) {
+ bool success = false;
+ GUID *encoder_guids = NULL;
+ *supported_video_codecs = (gsr_supported_video_codecs){0};
+
+ uint32_t encode_guid_count = 0;
+ if(function_list->nvEncGetEncodeGUIDCount(nvenc_encoder, &encode_guid_count) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeGUIDCount failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
+ goto fail;
+ }
+
+ if(encode_guid_count == 0)
+ goto fail;
+
+ encoder_guids = calloc(encode_guid_count, sizeof(GUID));
+ if(!encoder_guids) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to allocate %d guids\n", (int)encode_guid_count);
+ goto fail;
+ }
+
+ if(function_list->nvEncGetEncodeGUIDs(nvenc_encoder, encoder_guids, encode_guid_count, &encode_guid_count) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeGUIDs failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
+ goto fail;
+ }
+
+ for(uint32_t i = 0; i < encode_guid_count; ++i) {
+ encoder_get_supported_profiles(function_list, nvenc_encoder, &encoder_guids[i], supported_video_codecs);
+ }
+
+ success = true;
+ fail:
+
+ if(encoder_guids)
+ free(encoder_guids);
+
+ return success;
+}
+
+#define NVENCAPI_VERSION_470 (11 | (1 << 24))
+#define NVENCAPI_STRUCT_VERSION_470(ver) ((uint32_t)NVENCAPI_VERSION_470 | ((ver)<<16) | (0x7 << 28))
+
+bool gsr_get_supported_video_codecs_nvenc(gsr_supported_video_codecs *video_codecs, bool cleanup) {
+ memset(video_codecs, 0, sizeof(*video_codecs));
+
+ bool success = false;
+ void *nvenc_lib = NULL;
+ void *nvenc_encoder = NULL;
+ gsr_cuda cuda;
+ memset(&cuda, 0, sizeof(cuda));
+
+ if(!gsr_cuda_load(&cuda, NULL, false)) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to load cuda\n");
+ goto done;
+ }
+
+ nvenc_lib = open_nvenc_library();
+ if(!nvenc_lib)
+ goto done;
+
+ typedef NVENCSTATUS NVENCAPI (*FUNC_NvEncodeAPICreateInstance)(NV_ENCODE_API_FUNCTION_LIST *functionList);
+ FUNC_NvEncodeAPICreateInstance nvEncodeAPICreateInstance = (FUNC_NvEncodeAPICreateInstance)dlsym(nvenc_lib, "NvEncodeAPICreateInstance");
+ if(!nvEncodeAPICreateInstance) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to find NvEncodeAPICreateInstance in libnvidia-encode.so\n");
+ goto done;
+ }
+
+ NV_ENCODE_API_FUNCTION_LIST function_list;
+ memset(&function_list, 0, sizeof(function_list));
+ function_list.version = NVENCAPI_STRUCT_VERSION(2);
+ if(nvEncodeAPICreateInstance(&function_list) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncodeAPICreateInstance failed\n");
+ goto done;
+ }
+
+ NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params;
+ memset(&params, 0, sizeof(params));
+ params.version = NVENCAPI_STRUCT_VERSION(1);
+ params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
+ params.device = cuda.cu_ctx;
+ params.apiVersion = NVENCAPI_VERSION;
+ if(function_list.nvEncOpenEncodeSessionEx(&params, &nvenc_encoder) != NV_ENC_SUCCESS) {
+ // Old nvidia gpus don't support the new nvenc api (which is required for av1).
+ // In such cases fallback to old api version if possible and try again.
+ function_list.version = NVENCAPI_STRUCT_VERSION_470(2);
+ if(nvEncodeAPICreateInstance(&function_list) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncodeAPICreateInstance (retry) failed\n");
+ goto done;
+ }
+
+ params.version = NVENCAPI_STRUCT_VERSION_470(1);
+ params.apiVersion = NVENCAPI_VERSION_470;
+ if(function_list.nvEncOpenEncodeSessionEx(&params, &nvenc_encoder) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncOpenEncodeSessionEx (retry) failed\n");
+ goto done;
+ }
+ }
+
+ success = get_supported_video_codecs(&function_list, nvenc_encoder, video_codecs);
+
+ done:
+ if(cleanup) {
+ if(nvenc_encoder)
+ function_list.nvEncDestroyEncoder(nvenc_encoder);
+ if(nvenc_lib)
+ dlclose(nvenc_lib);
+ gsr_cuda_unload(&cuda);
+ }
+
+ return success;
+}
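
The NVENCAPI_STRUCT_VERSION_470 fallback above mirrors the SDK's own version packing: major api version in the low bits, minor at bit 24, struct version in bits 16..23, and a 0x7 marker in the top nibble. Hand-expanded as a sanity check (values computed here, not taken from the SDK headers):

    #include <stdint.h>
    #include <assert.h>

    #define NVENCAPI_VERSION_470 (11 | (1 << 24))
    #define NVENCAPI_STRUCT_VERSION_470(ver) ((uint32_t)NVENCAPI_VERSION_470 | ((ver)<<16) | (0x7 << 28))

    static void check_470_version_packing(void) {
        assert(NVENCAPI_VERSION_470 == 0x0100000B);              /* api major 11, minor 1 */
        assert(NVENCAPI_STRUCT_VERSION_470(1) == 0x7101000B);
        assert(NVENCAPI_STRUCT_VERSION_470(2) == 0x7102000B);
    }
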
diff --git a/src/codec_query/vaapi.c b/src/codec_query/vaapi.c
new file mode 100644
index 0000000..2c74d96
--- /dev/null
+++ b/src/codec_query/vaapi.c
@@ -0,0 +1,203 @@
+#include "../../include/codec_query/vaapi.h"
+#include "../../include/utils.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <va/va.h>
+#include <va/va_drm.h>
+
+static bool profile_is_h264(VAProfile profile) {
+ switch(profile) {
+ case 5: // VAProfileH264Baseline (deprecated and removed from newer libva headers, hence the raw value)
+ case VAProfileH264Main:
+ case VAProfileH264High:
+ case VAProfileH264ConstrainedBaseline:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_hevc_8bit(VAProfile profile) {
+ switch(profile) {
+ case VAProfileHEVCMain:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_hevc_10bit(VAProfile profile) {
+ switch(profile) {
+ case VAProfileHEVCMain10:
+ //case VAProfileHEVCMain12:
+ //case VAProfileHEVCMain422_10:
+ //case VAProfileHEVCMain422_12:
+ //case VAProfileHEVCMain444:
+ //case VAProfileHEVCMain444_10:
+ //case VAProfileHEVCMain444_12:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_av1(VAProfile profile) {
+ switch(profile) {
+ case VAProfileAV1Profile0:
+ case VAProfileAV1Profile1:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_vp8(VAProfile profile) {
+ switch(profile) {
+ case VAProfileVP8Version0_3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_vp9(VAProfile profile) {
+ switch(profile) {
+ case VAProfileVP9Profile0:
+ case VAProfileVP9Profile1:
+ case VAProfileVP9Profile2:
+ case VAProfileVP9Profile3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_supports_video_encoding(VADisplay va_dpy, VAProfile profile, bool *low_power) {
+ *low_power = false;
+ int num_entrypoints = vaMaxNumEntrypoints(va_dpy);
+ if(num_entrypoints <= 0)
+ return false;
+
+ VAEntrypoint *entrypoint_list = calloc(num_entrypoints, sizeof(VAEntrypoint));
+ if(!entrypoint_list)
+ return false;
+
+ bool supports_encoding = false;
+ bool supports_low_power_encoding = false;
+ if(vaQueryConfigEntrypoints(va_dpy, profile, entrypoint_list, &num_entrypoints) == VA_STATUS_SUCCESS) {
+ for(int i = 0; i < num_entrypoints; ++i) {
+ if(entrypoint_list[i] == VAEntrypointEncSlice)
+ supports_encoding = true;
+ else if(entrypoint_list[i] == VAEntrypointEncSliceLP)
+ supports_low_power_encoding = true;
+ }
+ }
+
+ if(!supports_encoding && supports_low_power_encoding)
+ *low_power = true;
+
+ free(entrypoint_list);
+ return supports_encoding || supports_low_power_encoding;
+}
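
One subtlety in the helper above: *low_power is only set when the low-power entrypoint is the sole way to encode, so callers can prefer the full-feature path whenever both exist. The resulting behaviour, spelled out:

    entrypoints advertised        returns   *low_power
    EncSlice only                 true      false
    EncSliceLP only               true      true
    EncSlice + EncSliceLP         true      false
    neither                       false     false
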
+
+static bool get_supported_video_codecs(VADisplay va_dpy, gsr_supported_video_codecs *video_codecs, bool cleanup) {
+ *video_codecs = (gsr_supported_video_codecs){0};
+ bool success = false;
+ VAProfile *profile_list = NULL;
+
+ vaSetInfoCallback(va_dpy, NULL, NULL);
+
+ int va_major = 0;
+ int va_minor = 0;
+ if(vaInitialize(va_dpy, &va_major, &va_minor) != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: vaInitialize failed\n");
+ goto fail;
+ }
+
+ int num_profiles = vaMaxNumProfiles(va_dpy);
+ if(num_profiles <= 0)
+ goto fail;
+
+ profile_list = calloc(num_profiles, sizeof(VAProfile));
+ if(!profile_list || vaQueryConfigProfiles(va_dpy, profile_list, &num_profiles) != VA_STATUS_SUCCESS)
+ goto fail;
+
+ for(int i = 0; i < num_profiles; ++i) {
+ bool low_power = false;
+ if(profile_is_h264(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power)) {
+ video_codecs->h264 = (gsr_supported_video_codec){ true, low_power };
+ }
+ } else if(profile_is_hevc_8bit(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power))
+ video_codecs->hevc = (gsr_supported_video_codec){ true, low_power };
+ } else if(profile_is_hevc_10bit(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power)) {
+ video_codecs->hevc_hdr = (gsr_supported_video_codec){ true, low_power };
+ video_codecs->hevc_10bit = (gsr_supported_video_codec){ true, low_power };
+ }
+ } else if(profile_is_av1(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power)) {
+ video_codecs->av1 = (gsr_supported_video_codec){ true, low_power };
+ video_codecs->av1_hdr = (gsr_supported_video_codec){ true, low_power };
+ video_codecs->av1_10bit = (gsr_supported_video_codec){ true, low_power };
+ }
+ } else if(profile_is_vp8(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power))
+ video_codecs->vp8 = (gsr_supported_video_codec){ true, low_power };
+ } else if(profile_is_vp9(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power))
+ video_codecs->vp9 = (gsr_supported_video_codec){ true, low_power };
+ }
+ }
+
+ success = true;
+ fail:
+ if(profile_list)
+ free(profile_list);
+
+ if(cleanup)
+ vaTerminate(va_dpy);
+
+ return success;
+}
+
+bool gsr_get_supported_video_codecs_vaapi(gsr_supported_video_codecs *video_codecs, const char *card_path, bool cleanup) {
+ memset(video_codecs, 0, sizeof(*video_codecs));
+ bool success = false;
+ int drm_fd = -1;
+
+ char render_path[128];
+ if(!gsr_card_path_get_render_path(card_path, render_path)) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: failed to get /dev/dri/renderDXXX file from %s\n", card_path);
+ goto done;
+ }
+
+ drm_fd = open(render_path, O_RDWR);
+ if(drm_fd == -1) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: failed to open device %s\n", render_path);
+ goto done;
+ }
+
+ VADisplay va_dpy = vaGetDisplayDRM(drm_fd);
+ if(va_dpy) {
+ if(!get_supported_video_codecs(va_dpy, video_codecs, cleanup)) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: failed to query supported video codecs for device %s\n", render_path);
+ goto done;
+ }
+ success = true;
+ }
+
+ done:
+ if(cleanup) {
+ if(drm_fd != -1)
+ close(drm_fd);
+ }
+
+ return success;
+}
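
Usage sketch for the query entry point; the .supported/.low_power member names are assumptions read off the initializers above, and the card path is only an example:

    gsr_supported_video_codecs codecs;
    if(gsr_get_supported_video_codecs_vaapi(&codecs, "/dev/dri/card0", true)) {
        if(codecs.h264.supported)   /* assumed field names */
            fprintf(stderr, "h264 encode available%s\n",
                codecs.h264.low_power ? " (low power only)" : "");
    }
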
diff --git a/src/codec_query/vulkan.c b/src/codec_query/vulkan.c
new file mode 100644
index 0000000..15dd98b
--- /dev/null
+++ b/src/codec_query/vulkan.c
@@ -0,0 +1,156 @@
+#include "../../include/codec_query/vulkan.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <xf86drm.h>
+#define VK_NO_PROTOTYPES
+//#include <vulkan/vulkan.h>
+
+#define MAX_PHYSICAL_DEVICES 32
+
+static const char *required_device_extensions[] = {
+ "VK_KHR_external_memory_fd",
+ "VK_KHR_external_semaphore_fd",
+ "VK_KHR_video_encode_queue",
+ "VK_KHR_video_queue",
+ "VK_KHR_video_maintenance1",
+ "VK_EXT_external_memory_dma_buf",
+ "VK_EXT_external_memory_host",
+ "VK_EXT_image_drm_format_modifier"
+};
+static int num_required_device_extensions = 8;
+
+bool gsr_get_supported_video_codecs_vulkan(gsr_supported_video_codecs *video_codecs, const char *card_path, bool cleanup) {
+ memset(video_codecs, 0, sizeof(*video_codecs));
+#if 0
+ bool success = false;
+ VkInstance instance = NULL;
+ VkPhysicalDevice physical_devices[MAX_PHYSICAL_DEVICES];
+ VkDevice device = NULL;
+ VkExtensionProperties *device_extensions = NULL;
+
+ const VkApplicationInfo app_info = {
+ .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ .pApplicationName = "GPU Screen Recorder",
+ .applicationVersion = VK_MAKE_VERSION(1, 0, 0),
+ .pEngineName = "GPU Screen Recorder",
+ .engineVersion = VK_MAKE_VERSION(1, 0, 0),
+ .apiVersion = VK_API_VERSION_1_3,
+ };
+
+ const VkInstanceCreateInfo instance_create_info = {
+ .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ .pApplicationInfo = &app_info
+ };
+
+ if(vkCreateInstance(&instance_create_info, NULL, &instance) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkCreateInstance failed\n");
+ goto done;
+ }
+
+ uint32_t num_devices = 0;
+ if(vkEnumeratePhysicalDevices(instance, &num_devices, NULL) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumeratePhysicalDevices (query num devices) failed\n");
+ goto done;
+ }
+
+ if(num_devices == 0) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: no vulkan capable device found\n");
+ goto done;
+ }
+
+ if(num_devices > MAX_PHYSICAL_DEVICES)
+ num_devices = MAX_PHYSICAL_DEVICES;
+
+ if(vkEnumeratePhysicalDevices(instance, &num_devices, physical_devices) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumeratePhysicalDevices (get data) failed\n");
+ goto done;
+ }
+
+ VkPhysicalDevice physical_device = NULL;
+ char device_card_path[128];
+ for(uint32_t i = 0; i < num_devices; ++i) {
+ VkPhysicalDeviceDrmPropertiesEXT device_drm_properties = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT
+ };
+
+ VkPhysicalDeviceProperties2 device_properties = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
+ .pNext = &device_drm_properties
+ };
+ vkGetPhysicalDeviceProperties2(physical_devices[i], &device_properties);
+
+ if(!device_drm_properties.hasPrimary)
+ continue;
+
+ snprintf(device_card_path, sizeof(device_card_path), DRM_DEV_NAME, DRM_DIR_NAME, (int)device_drm_properties.primaryMinor);
+ if(strcmp(device_card_path, card_path) == 0) {
+ physical_device = physical_devices[i];
+ break;
+ }
+ }
+
+ if(!physical_device) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: failed to find a vulkan device that matches opengl device %s\n", card_path);
+ goto done;
+ }
+
+ const VkDeviceCreateInfo device_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ .enabledExtensionCount = num_required_device_extensions,
+ .ppEnabledExtensionNames = required_device_extensions
+ };
+
+ if(vkCreateDevice(physical_device, &device_create_info, NULL, &device) != VK_SUCCESS) {
+ //fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkCreateDevice failed. Device %s likely doesn't support vulkan video encoding\n", card_path);
+ goto done;
+ }
+
+ uint32_t num_device_extensions = 0;
+ if(vkEnumerateDeviceExtensionProperties(physical_device, NULL, &num_device_extensions, NULL) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumerateDeviceExtensionProperties (query num device extensions) failed\n");
+ goto done;
+ }
+
+ device_extensions = calloc(num_device_extensions, sizeof(VkExtensionProperties));
+ if(!device_extensions) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: failed to allocate %d device extensions\n", num_device_extensions);
+ goto done;
+ }
+
+ if(vkEnumerateDeviceExtensionProperties(physical_device, NULL, &num_device_extensions, device_extensions) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumerateDeviceExtensionProperties (get data) failed\n");
+ goto done;
+ }
+
+ for(uint32_t i = 0; i < num_device_extensions; ++i) {
+ if(strcmp(device_extensions[i].extensionName, "VK_KHR_video_encode_h264") == 0) {
+ video_codecs->h264 = (gsr_supported_video_codec){ true, false };
+ } else if(strcmp(device_extensions[i].extensionName, "VK_KHR_video_encode_h265") == 0) {
+ // TODO: Verify if 10bit and hdr are actually supported
+ video_codecs->hevc = (gsr_supported_video_codec){ true, false };
+ video_codecs->hevc_10bit = (gsr_supported_video_codec){ true, false };
+ video_codecs->hevc_hdr = (gsr_supported_video_codec){ true, false };
+ }
+ }
+
+ success = true;
+
+ done:
+ if(cleanup) {
+ if(device)
+ vkDestroyDevice(device, NULL);
+ if(instance)
+ vkDestroyInstance(instance, NULL);
+ }
+ if(device_extensions)
+ free(device_extensions);
+ return success;
+#else
+ // TODO: Low power query
+ video_codecs->h264 = (gsr_supported_video_codec){ true, false };
+ video_codecs->hevc = (gsr_supported_video_codec){ true, false };
+ return true;
+#endif
+}
diff --git a/src/cursor.c b/src/cursor.c
index 9825ad2..3dca0c6 100644
--- a/src/cursor.c
+++ b/src/cursor.c
@@ -6,8 +6,6 @@
#include <assert.h>
#include <X11/extensions/Xfixes.h>
-#include <X11/extensions/XI2.h>
-#include <X11/extensions/XInput2.h>
// TODO: Test cursor visibility with XFixesHideCursor
@@ -52,6 +50,7 @@ static bool gsr_cursor_set_from_x11_cursor_image(gsr_cursor *self, XFixesCursorI
}
}
+ // TODO: glTextureSubImage2D if same size
self->egl->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self->size.x, self->size.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, cursor_data);
free(cursor_data);
@@ -71,26 +70,6 @@ static bool gsr_cursor_set_from_x11_cursor_image(gsr_cursor *self, XFixesCursorI
return false;
}
-static bool xinput_is_supported(Display *dpy, int *xi_opcode) {
- *xi_opcode = 0;
- int query_event = 0;
- int query_error = 0;
- if(!XQueryExtension(dpy, "XInputExtension", xi_opcode, &query_event, &query_error)) {
- fprintf(stderr, "gsr error: gsr_cursor_init: X Input extension not available\n");
- return false;
- }
-
- int major = 2;
- int minor = 1;
- int retval = XIQueryVersion(dpy, &major, &minor);
- if (retval != Success) {
- fprintf(stderr, "gsr error: gsr_cursor_init: XInput 2.1 is not supported\n");
- return false;
- }
-
- return true;
-}
-
int gsr_cursor_init(gsr_cursor *self, gsr_egl *egl, Display *display) {
int x_fixes_error_base = 0;
@@ -107,31 +86,11 @@ int gsr_cursor_init(gsr_cursor *self, gsr_egl *egl, Display *display) {
return -1;
}
- if(!xinput_is_supported(self->display, &self->xi_opcode)) {
- gsr_cursor_deinit(self);
- return -1;
- }
-
- unsigned char mask[XIMaskLen(XI_LASTEVENT)];
- memset(mask, 0, sizeof(mask));
- XISetMask(mask, XI_RawMotion);
-
- XIEventMask xi_masks;
- xi_masks.deviceid = XIAllMasterDevices;
- xi_masks.mask_len = sizeof(mask);
- xi_masks.mask = mask;
- if(XISelectEvents(self->display, DefaultRootWindow(self->display), &xi_masks, 1) != Success) {
- fprintf(stderr, "gsr error: gsr_cursor_init: XISelectEvents failed\n");
- gsr_cursor_deinit(self);
- return -1;
- }
-
self->egl->glGenTextures(1, &self->texture_id);
XFixesSelectCursorInput(self->display, DefaultRootWindow(self->display), XFixesDisplayCursorNotifyMask);
gsr_cursor_set_from_x11_cursor_image(self, XFixesGetCursorImage(self->display), &self->visible);
self->cursor_image_set = true;
- self->cursor_moved = true;
return 0;
}
@@ -145,23 +104,15 @@ void gsr_cursor_deinit(gsr_cursor *self) {
self->texture_id = 0;
}
- XISelectEvents(self->display, DefaultRootWindow(self->display), NULL, 0);
- XFixesSelectCursorInput(self->display, DefaultRootWindow(self->display), 0);
+ if(self->display)
+ XFixesSelectCursorInput(self->display, DefaultRootWindow(self->display), 0);
self->display = NULL;
self->egl = NULL;
}
-bool gsr_cursor_update(gsr_cursor *self, XEvent *xev) {
+bool gsr_cursor_on_event(gsr_cursor *self, XEvent *xev) {
bool updated = false;
- XGenericEventCookie *cookie = (XGenericEventCookie*)&xev->xcookie;
- const Bool got_event_data = XGetEventData(self->display, cookie);
- if(got_event_data && cookie->type == GenericEvent && cookie->extension == self->xi_opcode && cookie->evtype == XI_RawMotion) {
- updated = true;
- self->cursor_moved = true;
- }
- if(got_event_data)
- XFreeEventData(self->display, cookie);
if(xev->type == self->x_fixes_event_base + XFixesCursorNotify) {
XFixesCursorNotifyEvent *cursor_notify_event = (XFixesCursorNotifyEvent*)xev;
@@ -180,11 +131,6 @@ bool gsr_cursor_update(gsr_cursor *self, XEvent *xev) {
}
void gsr_cursor_tick(gsr_cursor *self, Window relative_to) {
- if(!self->cursor_moved)
- return;
-
- self->cursor_moved = false;
-
Window dummy_window;
int dummy_i;
unsigned int dummy_u;
diff --git a/src/damage.c b/src/damage.c
new file mode 100644
index 0000000..8e62762
--- /dev/null
+++ b/src/damage.c
@@ -0,0 +1,324 @@
+#include "../include/damage.h"
+#include "../include/utils.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <X11/extensions/Xdamage.h>
+#include <X11/extensions/Xrandr.h>
+
+typedef struct {
+ vec2i pos;
+ vec2i size;
+} gsr_rectangle;
+
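+/* Axis-aligned rectangle overlap test: true when the rectangles share any area (rectangles that merely touch at an edge do not count). */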
+static bool rectangles_intersect(gsr_rectangle rect1, gsr_rectangle rect2) {
+ return rect1.pos.x < rect2.pos.x + rect2.size.x && rect1.pos.x + rect1.size.x > rect2.pos.x &&
+ rect1.pos.y < rect2.pos.y + rect2.size.y && rect1.pos.y + rect1.size.y > rect2.pos.y;
+}
+
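+/* XRRGetScreenResources and the per-CRTC queries used below require RandR 1.2 or newer. */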
+static bool xrandr_is_supported(Display *display) {
+ int major_version = 0;
+ int minor_version = 0;
+ if(!XRRQueryVersion(display, &major_version, &minor_version))
+ return false;
+
+ return major_version > 1 || (major_version == 1 && minor_version >= 2);
+}
+
+bool gsr_damage_init(gsr_damage *self, gsr_egl *egl, bool track_cursor) {
+ memset(self, 0, sizeof(*self));
+ self->egl = egl;
+ self->track_cursor = track_cursor;
+
+ if(gsr_egl_get_display_server(egl) != GSR_DISPLAY_SERVER_X11) {
+ fprintf(stderr, "gsr warning: gsr_damage_init: damage tracking is not supported on wayland\n");
+ return false;
+ }
+
+ if(!XDamageQueryExtension(self->egl->x11.dpy, &self->damage_event, &self->damage_error)) {
+ fprintf(stderr, "gsr warning: gsr_damage_init: XDamage is not supported by your X11 server\n");
+ gsr_damage_deinit(self);
+ return false;
+ }
+
+ if(!XRRQueryExtension(self->egl->x11.dpy, &self->randr_event, &self->randr_error)) {
+ fprintf(stderr, "gsr warning: gsr_damage_init: XRandr is not supported by your X11 server\n");
+ gsr_damage_deinit(self);
+ return false;
+ }
+
+ if(!xrandr_is_supported(self->egl->x11.dpy)) {
+ fprintf(stderr, "gsr warning: gsr_damage_init: your X11 randr version is too old\n");
+ gsr_damage_deinit(self);
+ return false;
+ }
+
+ if(self->track_cursor)
+ self->track_cursor = gsr_cursor_init(&self->cursor, self->egl, self->egl->x11.dpy) == 0;
+
+ XRRSelectInput(self->egl->x11.dpy, DefaultRootWindow(self->egl->x11.dpy), RRScreenChangeNotifyMask | RRCrtcChangeNotifyMask | RROutputChangeNotifyMask);
+
+ self->damaged = true;
+ return true;
+}
+
+void gsr_damage_deinit(gsr_damage *self) {
+ if(self->damage) {
+ XDamageDestroy(self->egl->x11.dpy, self->damage);
+ self->damage = None;
+ }
+
+ gsr_cursor_deinit(&self->cursor);
+
+ self->damage_event = 0;
+ self->damage_error = 0;
+
+ self->randr_event = 0;
+ self->randr_error = 0;
+}
+
+bool gsr_damage_set_target_window(gsr_damage *self, uint64_t window) {
+ if(self->damage_event == 0)
+ return false;
+
+ if(window == self->window)
+ return true;
+
+ if(self->damage) {
+ XDamageDestroy(self->egl->x11.dpy, self->damage);
+ self->damage = None;
+ }
+
+ if(self->window)
+ XSelectInput(self->egl->x11.dpy, self->window, 0);
+
+ self->window = window;
+ XSelectInput(self->egl->x11.dpy, self->window, StructureNotifyMask | ExposureMask);
+
+ XWindowAttributes win_attr;
+ win_attr.x = 0;
+ win_attr.y = 0;
+ win_attr.width = 0;
+ win_attr.height = 0;
+ if(!XGetWindowAttributes(self->egl->x11.dpy, self->window, &win_attr))
+ fprintf(stderr, "gsr warning: gsr_damage_set_target_window failed: failed to get window attributes: %ld\n", (long)self->window);
+
+ //self->window_pos.x = win_attr.x;
+ //self->window_pos.y = win_attr.y;
+
+ self->window_size.x = win_attr.width;
+ self->window_size.y = win_attr.height;
+
+ self->damage = XDamageCreate(self->egl->x11.dpy, window, XDamageReportNonEmpty);
+ if(self->damage) {
+ XDamageSubtract(self->egl->x11.dpy, self->damage, None, None);
+ self->damaged = true;
+ self->track_type = GSR_DAMAGE_TRACK_WINDOW;
+ return true;
+ } else {
+ fprintf(stderr, "gsr warning: gsr_damage_set_target_window: XDamageCreate failed\n");
+ self->track_type = GSR_DAMAGE_TRACK_NONE;
+ return false;
+ }
+}
+
+bool gsr_damage_set_target_monitor(gsr_damage *self, const char *monitor_name) {
+ if(self->damage_event == 0)
+ return false;
+
+ if(strcmp(self->monitor_name, monitor_name) == 0)
+ return true;
+
+ if(self->damage) {
+ XDamageDestroy(self->egl->x11.dpy, self->damage);
+ self->damage = None;
+ }
+
+ memset(&self->monitor, 0, sizeof(self->monitor));
+ if(strcmp(monitor_name, "screen") != 0 && strcmp(monitor_name, "screen-direct") != 0 && strcmp(monitor_name, "screen-direct-force") != 0) {
+ if(!get_monitor_by_name(self->egl, GSR_CONNECTION_X11, monitor_name, &self->monitor))
+ fprintf(stderr, "gsr warning: gsr_damage_set_target_monitor: failed to find monitor: %s\n", monitor_name);
+ }
+
+ if(self->window)
+ XSelectInput(self->egl->x11.dpy, self->window, 0);
+
+ self->window = DefaultRootWindow(self->egl->x11.dpy);
+ self->damage = XDamageCreate(self->egl->x11.dpy, self->window, XDamageReportNonEmpty);
+ if(self->damage) {
+ XDamageSubtract(self->egl->x11.dpy, self->damage, None, None);
+ self->damaged = true;
+ snprintf(self->monitor_name, sizeof(self->monitor_name), "%s", monitor_name);
+ self->track_type = GSR_DAMAGE_TRACK_MONITOR;
+ return true;
+ } else {
+ fprintf(stderr, "gsr warning: gsr_damage_set_target_monitor: XDamageCreate failed\n");
+ self->track_type = GSR_DAMAGE_TRACK_NONE;
+ return false;
+ }
+}
+
+static void gsr_damage_on_crtc_change(gsr_damage *self, XEvent *xev) {
+ const XRRCrtcChangeNotifyEvent *rr_crtc_change_event = (XRRCrtcChangeNotifyEvent*)xev;
+ if(rr_crtc_change_event->crtc == 0 || self->monitor.monitor_identifier == 0)
+ return;
+
+ if(rr_crtc_change_event->crtc != self->monitor.monitor_identifier)
+ return;
+
+ if(rr_crtc_change_event->width == 0 || rr_crtc_change_event->height == 0)
+ return;
+
+ if(rr_crtc_change_event->x != self->monitor.pos.x || rr_crtc_change_event->y != self->monitor.pos.y ||
+ (int)rr_crtc_change_event->width != self->monitor.size.x || (int)rr_crtc_change_event->height != self->monitor.size.y) {
+ self->monitor.pos.x = rr_crtc_change_event->x;
+ self->monitor.pos.y = rr_crtc_change_event->y;
+
+ self->monitor.size.x = rr_crtc_change_event->width;
+ self->monitor.size.y = rr_crtc_change_event->height;
+ }
+}
+
+static void gsr_damage_on_output_change(gsr_damage *self, XEvent *xev) {
+ const XRROutputChangeNotifyEvent *rr_output_change_event = (XRROutputChangeNotifyEvent*)xev;
+ if(!rr_output_change_event->output || self->monitor.monitor_identifier == 0)
+ return;
+
+ XRRScreenResources *screen_res = XRRGetScreenResources(self->egl->x11.dpy, DefaultRootWindow(self->egl->x11.dpy));
+ if(!screen_res)
+ return;
+
+ XRROutputInfo *out_info = XRRGetOutputInfo(self->egl->x11.dpy, screen_res, rr_output_change_event->output);
+ if(out_info && out_info->crtc && out_info->crtc == self->monitor.monitor_identifier) {
+ XRRCrtcInfo *crtc_info = XRRGetCrtcInfo(self->egl->x11.dpy, screen_res, out_info->crtc);
+ if(crtc_info && (crtc_info->x != self->monitor.pos.x || crtc_info->y != self->monitor.pos.y ||
+ (int)crtc_info->width != self->monitor.size.x || (int)crtc_info->height != self->monitor.size.y))
+ {
+ self->monitor.pos.x = crtc_info->x;
+ self->monitor.pos.y = crtc_info->y;
+
+ self->monitor.size.x = crtc_info->width;
+ self->monitor.size.y = crtc_info->height;
+ }
+
+ if(crtc_info)
+ XRRFreeCrtcInfo(crtc_info);
+ }
+
+ if(out_info)
+ XRRFreeOutputInfo(out_info);
+
+ XRRFreeScreenResources(screen_res);
+}
+
+static void gsr_damage_on_randr_event(gsr_damage *self, XEvent *xev) {
+ const XRRNotifyEvent *rr_event = (XRRNotifyEvent*)xev;
+ switch(rr_event->subtype) {
+ case RRNotify_CrtcChange:
+ gsr_damage_on_crtc_change(self, xev);
+ break;
+ case RRNotify_OutputChange:
+ gsr_damage_on_output_change(self, xev);
+ break;
+ }
+}
+
+static void gsr_damage_on_damage_event(gsr_damage *self, XEvent *xev) {
+ const XDamageNotifyEvent *de = (XDamageNotifyEvent*)xev;
+ XserverRegion region = XFixesCreateRegion(self->egl->x11.dpy, NULL, 0);
+ /* Subtract all the damage, repairing the window */
+ XDamageSubtract(self->egl->x11.dpy, de->damage, None, region);
+
+ if(self->track_type == GSR_DAMAGE_TRACK_WINDOW || (self->track_type == GSR_DAMAGE_TRACK_MONITOR && self->monitor.connector_id == 0)) {
+ self->damaged = true;
+ } else {
+ int num_rectangles = 0;
+ XRectangle *rectangles = XFixesFetchRegion(self->egl->x11.dpy, region, &num_rectangles);
+ if(rectangles) {
+ const gsr_rectangle monitor_region = { self->monitor.pos, self->monitor.size };
+ for(int i = 0; i < num_rectangles; ++i) {
+ const gsr_rectangle damage_region = { (vec2i){rectangles[i].x, rectangles[i].y}, (vec2i){rectangles[i].width, rectangles[i].height} };
+ self->damaged = rectangles_intersect(monitor_region, damage_region);
+ if(self->damaged)
+ break;
+ }
+ XFree(rectangles);
+ }
+ }
+
+ XFixesDestroyRegion(self->egl->x11.dpy, region);
+ XFlush(self->egl->x11.dpy);
+}
+
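+/* Polls the cursor position and marks the tracked region as damaged when the cursor has moved into it. Only called while the target is not already damaged (see gsr_damage_tick). */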
+static void gsr_damage_on_tick_cursor(gsr_damage *self) {
+ vec2i prev_cursor_pos = self->cursor.position;
+ gsr_cursor_tick(&self->cursor, self->window);
+ if(self->cursor.position.x != prev_cursor_pos.x || self->cursor.position.y != prev_cursor_pos.y) {
+ const gsr_rectangle cursor_region = { self->cursor.position, self->cursor.size };
+ switch(self->track_type) {
+ case GSR_DAMAGE_TRACK_NONE: {
+ self->damaged = true;
+ break;
+ }
+ case GSR_DAMAGE_TRACK_WINDOW: {
+ const gsr_rectangle window_region = { (vec2i){0, 0}, self->window_size };
+ self->damaged = self->window_size.x == 0 || rectangles_intersect(window_region, cursor_region);
+ break;
+ }
+ case GSR_DAMAGE_TRACK_MONITOR: {
+ const gsr_rectangle monitor_region = { self->monitor.pos, self->monitor.size };
+ self->damaged = self->monitor.monitor_identifier == 0 || rectangles_intersect(monitor_region, cursor_region);
+ break;
+ }
+ }
+ }
+}
+
+static void gsr_damage_on_window_configure_notify(gsr_damage *self, XEvent *xev) {
+ if(xev->xconfigure.window != self->window)
+ return;
+
+ //self->window_pos.x = xev->xconfigure.x;
+ //self->window_pos.y = xev->xconfigure.y;
+
+ self->window_size.x = xev->xconfigure.width;
+ self->window_size.y = xev->xconfigure.height;
+}
+
+void gsr_damage_on_event(gsr_damage *self, XEvent *xev) {
+ if(self->damage_event == 0 || self->track_type == GSR_DAMAGE_TRACK_NONE)
+ return;
+
+ if(self->track_type == GSR_DAMAGE_TRACK_WINDOW && xev->type == ConfigureNotify)
+ gsr_damage_on_window_configure_notify(self, xev);
+
+ if(self->randr_event) {
+ if(xev->type == self->randr_event + RRScreenChangeNotify)
+ XRRUpdateConfiguration(xev);
+
+ if(xev->type == self->randr_event + RRNotify)
+ gsr_damage_on_randr_event(self, xev);
+ }
+
+ if(self->damage_event && xev->type == self->damage_event + XDamageNotify)
+ gsr_damage_on_damage_event(self, xev);
+
+ if(self->track_cursor)
+ gsr_cursor_on_event(&self->cursor, xev);
+}
+
+void gsr_damage_tick(gsr_damage *self) {
+ if(self->damage_event == 0 || self->track_type == GSR_DAMAGE_TRACK_NONE)
+ return;
+
+ if(self->track_cursor && self->cursor.visible && !self->damaged)
+ gsr_damage_on_tick_cursor(self);
+}
+
+bool gsr_damage_is_damaged(gsr_damage *self) {
+ return self->damage_event == 0 || !self->damage || self->damaged || self->track_type == GSR_DAMAGE_TRACK_NONE;
+}
+
+void gsr_damage_clear(gsr_damage *self) {
+ self->damaged = false;
+}
diff --git a/src/dbus.c b/src/dbus.c
new file mode 100644
index 0000000..5757b8b
--- /dev/null
+++ b/src/dbus.c
@@ -0,0 +1,876 @@
+#include "../include/dbus.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/random.h>
+
+/* TODO: Make non-blocking when GPU Screen Recorder is turned into a library */
+/* TODO: Make sure responses match the requests */
+
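+/* Desktop portal requests deliver their results asynchronously as Response signals on org.freedesktop.portal.Request objects, so a bus match rule is needed to receive them. */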
+#define DESKTOP_PORTAL_SIGNAL_RULE "type='signal',interface='org.freedesktop.Portal.Request'"
+
+typedef enum {
+ DICT_TYPE_STRING,
+ DICT_TYPE_UINT32,
+ DICT_TYPE_BOOL,
+} dict_value_type;
+
+typedef struct {
+ const char *key;
+ dict_value_type value_type;
+ union {
+ char *str;
+ dbus_uint32_t u32;
+ dbus_bool_t boolean;
+ };
+} dict_entry;
+
+static const char* dict_value_type_to_string(dict_value_type type) {
+ switch(type) {
+ case DICT_TYPE_STRING: return "string";
+ case DICT_TYPE_UINT32: return "uint32";
+ case DICT_TYPE_BOOL: return "boolean";
+ }
+ return "(unknown)";
+}
+
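+/* Fills |buffer| with random characters from |alphabet|. The modulo below introduces a slight bias, which is fine for generating unique handle tokens but would not be acceptable for secrets. */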
+static bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
+ /* TODO: Use other functions on platforms other than linux */
+ if(getrandom(buffer, buffer_size, 0) < buffer_size) {
+ fprintf(stderr, "gsr error: generate_random_characters: failed to get random bytes, error: %s\n", strerror(errno));
+ return false;
+ }
+
+ for(int i = 0; i < buffer_size; ++i) {
+ unsigned char c = *(unsigned char*)&buffer[i];
+ buffer[i] = alphabet[c % alphabet_size];
+ }
+
+ return true;
+}
+
+bool gsr_dbus_init(gsr_dbus *self, const char *screencast_restore_token) {
+ memset(self, 0, sizeof(*self));
+ dbus_error_init(&self->err);
+
+ self->random_str[DBUS_RANDOM_STR_SIZE] = '\0';
+ if(!generate_random_characters(self->random_str, DBUS_RANDOM_STR_SIZE, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 62)) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: failed to generate random string\n");
+ return false;
+ }
+
+ self->con = dbus_bus_get(DBUS_BUS_SESSION, &self->err);
+ if(dbus_error_is_set(&self->err)) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: dbus_bus_get failed with error: %s\n", self->err.message);
+ return false;
+ }
+
+ if(!self->con) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: failed to get dbus session\n");
+ return false;
+ }
+
+ /* TODO: Check the name */
+ const int ret = dbus_bus_request_name(self->con, "com.dec05eba.gpu_screen_recorder", DBUS_NAME_FLAG_REPLACE_EXISTING, &self->err);
+ if(dbus_error_is_set(&self->err)) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: dbus_bus_request_name failed with error: %s\n", self->err.message);
+ gsr_dbus_deinit(self);
+ return false;
+ }
+
+ if(screencast_restore_token) {
+ self->screencast_restore_token = strdup(screencast_restore_token);
+ if(!self->screencast_restore_token) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: failed to clone restore token\n");
+ gsr_dbus_deinit(self);
+ return false;
+ }
+ }
+
+ (void)ret;
+ // if(ret != DBUS_REQUEST_NAME_REPLY_PRIMARY_OWNER) {
+ // fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: dbus_bus_request_name failed to get primary owner\n");
+ // return false;
+ // }
+
+ return true;
+}
+
+void gsr_dbus_deinit(gsr_dbus *self) {
+ if(self->screencast_restore_token) {
+ free(self->screencast_restore_token);
+ self->screencast_restore_token = NULL;
+ }
+
+ if(self->desktop_portal_rule_added) {
+ dbus_bus_remove_match(self->con, DESKTOP_PORTAL_SIGNAL_RULE, NULL);
+ // dbus_connection_flush(self->con);
+ self->desktop_portal_rule_added = false;
+ }
+
+ if(self->con) {
+ dbus_error_free(&self->err);
+
+ dbus_bus_release_name(self->con, "com.dec05eba.gpu_screen_recorder", NULL);
+
+ // Apparently shouldn't be used when the connection was set up with dbus_bus_get
+ //dbus_connection_close(self->con);
+ dbus_connection_unref(self->con);
+ self->con = NULL;
+ }
+}
+
+static bool gsr_dbus_desktop_portal_get_property(gsr_dbus *self, const char *interface, const char *property_name, uint32_t *result) {
+ *result = 0;
+
+ DBusMessage *msg = dbus_message_new_method_call(
+ "org.freedesktop.portal.Desktop", // target for the method call
+ "/org/freedesktop/portal/desktop", // object to call on
+ "org.freedesktop.DBus.Properties", // interface to call on
+ "Get"); // method name
+ if(!msg) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: dbus_message_new_method_call failed\n");
+ return false;
+ }
+
+ DBusMessageIter it;
+ dbus_message_iter_init_append(msg, &it);
+
+ if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_STRING, &interface)) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: failed to add interface\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+
+ if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_STRING, &property_name)) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: failed to add property_name\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+
+ DBusPendingCall *pending = NULL;
+ if(!dbus_connection_send_with_reply(self->con, msg, &pending, -1) || !pending) { // -1 is default timeout
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: dbus_connection_send_with_reply failed\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+ dbus_connection_flush(self->con);
+
+ //fprintf(stderr, "Request Sent\n");
+
+ dbus_message_unref(msg);
+ msg = NULL;
+
+ dbus_pending_call_block(pending);
+
+ msg = dbus_pending_call_steal_reply(pending);
+ if(!msg) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: dbus_pending_call_steal_reply failed\n");
+ dbus_pending_call_unref(pending);
+ return false;
+ }
+
+ dbus_pending_call_unref(pending);
+ pending = NULL;
+
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: response message is missing arguments\n");
+ dbus_message_unref(msg);
+ return false;
+ } else if(DBUS_TYPE_UINT32 == dbus_message_iter_get_arg_type(&resp_args)) {
+ dbus_message_iter_get_basic(&resp_args, result);
+ } else if(DBUS_TYPE_VARIANT == dbus_message_iter_get_arg_type(&resp_args)) {
+ DBusMessageIter variant_iter;
+ dbus_message_iter_recurse(&resp_args, &variant_iter);
+
+ if(dbus_message_iter_get_arg_type(&variant_iter) == DBUS_TYPE_UINT32) {
+ dbus_message_iter_get_basic(&variant_iter, result);
+ } else {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: response message is not a variant with an uint32, %c\n", dbus_message_iter_get_arg_type(&variant_iter));
+ dbus_message_unref(msg);
+ return false;
+ }
+ } else {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: response message is not an uint32, %c\n", dbus_message_iter_get_arg_type(&resp_args));
+ dbus_message_unref(msg);
+ return false;
+ // TODO: Check dbus_error_is_set?
+ }
+
+ dbus_message_unref(msg);
+ return true;
+}
+
+static uint32_t gsr_dbus_get_screencast_version_cached(gsr_dbus *self) {
+ if(self->screencast_version == 0)
+ gsr_dbus_desktop_portal_get_property(self, "org.freedesktop.portal.ScreenCast", "version", &self->screencast_version);
+ return self->screencast_version;
+}
+
+static bool gsr_dbus_ensure_desktop_portal_rule_added(gsr_dbus *self) {
+ if(self->desktop_portal_rule_added)
+ return true;
+
+ dbus_bus_add_match(self->con, DESKTOP_PORTAL_SIGNAL_RULE, &self->err);
+ dbus_connection_flush(self->con);
+ if(dbus_error_is_set(&self->err)) {
+ fprintf(stderr, "gsr error: gsr_dbus_ensure_desktop_portal_rule_added: failed to add dbus rule %s, error: %s\n", DESKTOP_PORTAL_SIGNAL_RULE, self->err.message);
+ return false;
+ }
+ self->desktop_portal_rule_added = true;
+ return true;
+}
+
+static void gsr_dbus_portal_get_unique_handle_token(gsr_dbus *self, char *buffer, int size) {
+ snprintf(buffer, size, "gpu_screen_recorder_handle_%s_%u", self->random_str, self->handle_counter++);
+}
+
+static void gsr_dbus_portal_get_unique_session_token(gsr_dbus *self, char *buffer, int size) {
+ snprintf(buffer, size, "gpu_screen_recorder_session_%s", self->random_str);
+}
+
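+/* Appends |entries| as a D-Bus a{sv} (an array of string-to-variant dict entries), the vardict type the desktop portal expects for its options argument. */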
+static bool dbus_add_dict(DBusMessageIter *it, const dict_entry *entries, int num_entries) {
+ DBusMessageIter array_it;
+ if(!dbus_message_iter_open_container(it, DBUS_TYPE_ARRAY, "{sv}", &array_it))
+ return false;
+
+ for (int i = 0; i < num_entries; ++i) {
+ DBusMessageIter entry_it = DBUS_MESSAGE_ITER_INIT_CLOSED;
+ DBusMessageIter variant_it = DBUS_MESSAGE_ITER_INIT_CLOSED;
+
+ if(!dbus_message_iter_open_container(&array_it, DBUS_TYPE_DICT_ENTRY, NULL, &entry_it))
+ goto entry_err;
+
+ if(!dbus_message_iter_append_basic(&entry_it, DBUS_TYPE_STRING, &entries[i].key))
+ goto entry_err;
+
+ switch (entries[i].value_type) {
+ case DICT_TYPE_STRING: {
+ if(!dbus_message_iter_open_container(&entry_it, DBUS_TYPE_VARIANT, DBUS_TYPE_STRING_AS_STRING, &variant_it))
+ goto entry_err;
+ if(!dbus_message_iter_append_basic(&variant_it, DBUS_TYPE_STRING, &entries[i].str))
+ goto entry_err;
+ break;
+ }
+ case DICT_TYPE_UINT32: {
+ if(!dbus_message_iter_open_container(&entry_it, DBUS_TYPE_VARIANT, DBUS_TYPE_UINT32_AS_STRING, &variant_it))
+ goto entry_err;
+ if(!dbus_message_iter_append_basic(&variant_it, DBUS_TYPE_UINT32, &entries[i].u32))
+ goto entry_err;
+ break;
+ }
+ case DICT_TYPE_BOOL: {
+ if(!dbus_message_iter_open_container(&entry_it, DBUS_TYPE_VARIANT, DBUS_TYPE_BOOLEAN_AS_STRING, &variant_it))
+ goto entry_err;
+ if(!dbus_message_iter_append_basic(&variant_it, DBUS_TYPE_BOOLEAN, &entries[i].boolean))
+ goto entry_err;
+ break;
+ }
+ }
+
+ dbus_message_iter_close_container(&entry_it, &variant_it);
+ dbus_message_iter_close_container(&array_it, &entry_it);
+ continue;
+
+ entry_err:
+ dbus_message_iter_abandon_container_if_open(&array_it, &variant_it);
+ dbus_message_iter_abandon_container_if_open(&array_it, &entry_it);
+ dbus_message_iter_abandon_container_if_open(it, &array_it);
+ return false;
+ }
+
+ return dbus_message_iter_close_container(it, &array_it);
+}
+
+/* If |response_msg| is NULL then we don't wait for a response signal */
+static bool gsr_dbus_call_screencast_method(gsr_dbus *self, const char *method_name, const char *session_handle, const char *parent_window, const dict_entry *entries, int num_entries, int *resp_fd, DBusMessage **response_msg) {
+ if(resp_fd)
+ *resp_fd = -1;
+
+ if(response_msg)
+ *response_msg = NULL;
+
+ if(!gsr_dbus_ensure_desktop_portal_rule_added(self))
+ return false;
+
+ DBusMessage *msg = dbus_message_new_method_call(
+ "org.freedesktop.portal.Desktop", // target for the method call
+ "/org/freedesktop/portal/desktop", // object to call on
+ "org.freedesktop.portal.ScreenCast", // interface to call on
+ method_name); // method name
+ if(!msg) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: dbus_message_new_method_call failed\n");
+ return false;
+ }
+
+ DBusMessageIter it;
+ dbus_message_iter_init_append(msg, &it);
+
+ if(session_handle) {
+ if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_OBJECT_PATH, &session_handle)) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed to add session_handle\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+ }
+
+ if(parent_window) {
+ if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_STRING, &parent_window)) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed to add parent_window\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+ }
+
+ if(!dbus_add_dict(&it, entries, num_entries)) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed to add dict\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+
+ DBusPendingCall *pending = NULL;
+ if(!dbus_connection_send_with_reply(self->con, msg, &pending, -1) || !pending) { // -1 is default timeout
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: dbus_connection_send_with_reply failed\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+ dbus_connection_flush(self->con);
+
+ //fprintf(stderr, "Request Sent\n");
+
+ dbus_message_unref(msg);
+ msg = NULL;
+
+ dbus_pending_call_block(pending);
+
+ msg = dbus_pending_call_steal_reply(pending);
+ if(!msg) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: dbus_pending_call_steal_reply failed\n");
+ dbus_pending_call_unref(pending);
+ return false;
+ }
+
+ dbus_pending_call_unref(pending);
+ pending = NULL;
+
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: response message is missing arguments\n");
+ dbus_message_unref(msg);
+ return false;
+ } else if (DBUS_TYPE_OBJECT_PATH == dbus_message_iter_get_arg_type(&resp_args)) {
+ const char *res = NULL;
+ dbus_message_iter_get_basic(&resp_args, &res);
+ } else if(DBUS_TYPE_UNIX_FD == dbus_message_iter_get_arg_type(&resp_args)) {
+ int fd = -1;
+ dbus_message_iter_get_basic(&resp_args, &fd);
+
+ if(resp_fd)
+ *resp_fd = fd;
+ } else if(DBUS_TYPE_STRING == dbus_message_iter_get_arg_type(&resp_args)) {
+ char *err = NULL;
+ dbus_message_iter_get_basic(&resp_args, &err);
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed with error: %s\n", err);
+
+ dbus_message_unref(msg);
+ return false;
+ // TODO: Check dbus_error_is_set?
+ } else {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: response message is not an object path or unix fd\n");
+ dbus_message_unref(msg);
+ return false;
+ // TODO: Check dbus_error_is_set?
+ }
+
+ dbus_message_unref(msg);
+ if(!response_msg)
+ return true;
+
+ /* TODO: Add timeout, but take into consideration user interactive signals (such as selecting a monitor to capture for ScreenCast) */
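+ /* Poll the bus in 10 millisecond slices, discarding unrelated messages, until the portal's Response signal arrives. */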
+ for (;;) {
+ const int timeout_milliseconds = 10;
+ dbus_connection_read_write(self->con, timeout_milliseconds);
+ *response_msg = dbus_connection_pop_message(self->con);
+
+ if(!*response_msg)
+ continue;
+
+ if(!dbus_message_is_signal(*response_msg, "org.freedesktop.portal.Request", "Response")) {
+ dbus_message_unref(*response_msg);
+ *response_msg = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ return true;
+}
+
+static int gsr_dbus_get_response_status(DBusMessageIter *resp_args) {
+ if(dbus_message_iter_get_arg_type(resp_args) != DBUS_TYPE_UINT32) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_response_status: missing uint32 in response\n");
+ return -1;
+ }
+
+ dbus_uint32_t response_status = 0;
+ dbus_message_iter_get_basic(resp_args, &response_status);
+
+ dbus_message_iter_next(resp_args);
+ return (int)response_status;
+}
+
+static dict_entry* find_dict_entry_by_key(dict_entry *entries, int num_entries, const char *key) {
+ for(int i = 0; i < num_entries; ++i) {
+ if(strcmp(entries[i].key, key) == 0)
+ return &entries[i];
+ }
+ return NULL;
+}
+
+static bool gsr_dbus_get_variant_value(DBusMessageIter *iter, dict_entry *entry) {
+ if(dbus_message_iter_get_arg_type(iter) != DBUS_TYPE_VARIANT) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: value is not a variant\n");
+ return false;
+ }
+
+ DBusMessageIter variant_iter;
+ dbus_message_iter_recurse(iter, &variant_iter);
+
+ switch(dbus_message_iter_get_arg_type(&variant_iter)) {
+ case DBUS_TYPE_STRING: {
+ if(entry->value_type != DICT_TYPE_STRING) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: expected entry value to be a(n) %s was a string\n", dict_value_type_to_string(entry->value_type));
+ return false;
+ }
+
+ const char *value = NULL;
+ dbus_message_iter_get_basic(&variant_iter, &value);
+
+ if(!value) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: failed to get entry value as value\n");
+ return false;
+ }
+
+ if(entry->str) {
+ free(entry->str);
+ entry->str = NULL;
+ }
+
+ entry->str = strdup(value);
+ if(!entry->str) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: failed to copy value\n");
+ return false;
+ }
+ return true;
+ }
+ case DBUS_TYPE_UINT32: {
+ if(entry->value_type != DICT_TYPE_UINT32) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: expected entry value to be a(n) %s was an uint32\n", dict_value_type_to_string(entry->value_type));
+ return false;
+ }
+
+ dbus_message_iter_get_basic(&variant_iter, &entry->u32);
+ return true;
+ }
+ case DBUS_TYPE_BOOLEAN: {
+ if(entry->value_type != DICT_TYPE_BOOL) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: expected entry value to be a(n) %s was a boolean\n", dict_value_type_to_string(entry->value_type));
+ return false;
+ }
+
+ dbus_message_iter_get_basic(&variant_iter, &entry->boolean);
+ return true;
+ }
+ }
+
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: got unexpected type, expected string, uint32 or boolean\n");
+ return false;
+}
+
+/*
+ Parses a{sv} into matching key entries in |entries|.
+ If an entry value is a string then it's allocated with malloc, is null-terminated
+ and has to be freed by the caller.
+ The entry values should be 0 before this function is called.
+ The entries are freed if this function fails.
+*/
+static bool gsr_dbus_get_map(DBusMessageIter *resp_args, dict_entry *entries, int num_entries) {
+ if(dbus_message_iter_get_arg_type(resp_args) != DBUS_TYPE_ARRAY) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: missing array in response\n");
+ return false;
+ }
+
+ DBusMessageIter subiter;
+ dbus_message_iter_recurse(resp_args, &subiter);
+
+ while(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_INVALID) {
+ DBusMessageIter dictiter = DBUS_MESSAGE_ITER_INIT_CLOSED;
+ const char *key = NULL;
+ dict_entry *entry = NULL;
+
+ // fprintf(stderr, " array element type: %c, %s\n",
+ // dbus_message_iter_get_arg_type(&subiter),
+ // dbus_message_iter_get_signature(&subiter));
+ if(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_DICT_ENTRY) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: array value is not an entry\n");
+ goto error;
+ }
+
+ dbus_message_iter_recurse(&subiter, &dictiter);
+
+ if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_STRING) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: entry key is not a string\n");
+ goto error;
+ }
+
+ dbus_message_iter_get_basic(&dictiter, &key);
+ if(!key) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: failed to get entry key as value\n");
+ goto error;
+ }
+
+ entry = find_dict_entry_by_key(entries, num_entries, key);
+ if(!entry) {
+ dbus_message_iter_next(&subiter);
+ continue;
+ }
+
+ if(!dbus_message_iter_next(&dictiter)) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: missing entry value\n");
+ goto error;
+ }
+
+ if(!gsr_dbus_get_variant_value(&dictiter, entry))
+ goto error;
+
+ dbus_message_iter_next(&subiter);
+ }
+
+ return true;
+
+ error:
+ for(int i = 0; i < num_entries; ++i) {
+ if(entries[i].value_type == DICT_TYPE_STRING) {
+ free(entries[i].str);
+ entries[i].str = NULL;
+ }
+ }
+ return false;
+}
+
+int gsr_dbus_screencast_create_session(gsr_dbus *self, char **session_handle) {
+ assert(session_handle);
+ *session_handle = NULL;
+
+ char handle_token[64];
+ gsr_dbus_portal_get_unique_handle_token(self, handle_token, sizeof(handle_token));
+
+ char session_handle_token[64];
+ gsr_dbus_portal_get_unique_session_token(self, session_handle_token, sizeof(session_handle_token));
+
+ dict_entry args[2];
+ args[0].key = "handle_token";
+ args[0].value_type = DICT_TYPE_STRING;
+ args[0].str = handle_token;
+
+ args[1].key = "session_handle_token";
+ args[1].value_type = DICT_TYPE_STRING;
+ args[1].str = session_handle_token;
+
+ DBusMessage *response_msg = NULL;
+ if(!gsr_dbus_call_screencast_method(self, "CreateSession", NULL, NULL, args, 2, NULL, &response_msg)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: failed to setup ScreenCast session. Make sure you have a desktop portal running with support for the ScreenCast interface and that the desktop portal matches the Wayland compositor you are running.\n");
+ return -1;
+ }
+
+ // TODO: Verify signal path matches |res|, maybe check the below
+ // DBUS_TYPE_ARRAY value?
+ //fprintf(stderr, "signature: %s, sender: %s\n", dbus_message_get_signature(msg), dbus_message_get_sender(msg));
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(response_msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: missing response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ const int response_status = gsr_dbus_get_response_status(&resp_args);
+ if(response_status != 0) {
+ dbus_message_unref(response_msg);
+ return response_status;
+ }
+
+ dict_entry entries[1];
+ entries[0].key = "session_handle";
+ entries[0].str = NULL;
+ entries[0].value_type = DICT_TYPE_STRING;
+ if(!gsr_dbus_get_map(&resp_args, entries, 1)) {
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ if(!entries[0].str) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: missing \"session_handle\" in response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ *session_handle = entries[0].str;
+ //fprintf(stderr, "session handle: |%s|\n", entries[0].str);
+ //free(entries[0].str);
+
+ dbus_message_unref(response_msg);
+ return 0;
+}
+
+int gsr_dbus_screencast_select_sources(gsr_dbus *self, const char *session_handle, gsr_portal_capture_type capture_type, gsr_portal_cursor_mode cursor_mode) {
+ assert(session_handle);
+
+ char handle_token[64];
+ gsr_dbus_portal_get_unique_handle_token(self, handle_token, sizeof(handle_token));
+
+ int num_arg_dict = 4;
+ dict_entry args[6];
+ args[0].key = "types";
+ args[0].value_type = DICT_TYPE_UINT32;
+ args[0].u32 = capture_type;
+
+ args[1].key = "multiple";
+ args[1].value_type = DICT_TYPE_BOOL;
+ args[1].boolean = false; /* TODO: Wayland ignores this and still gives the option to select multiple sources. Support that case. */
+
+ args[2].key = "handle_token";
+ args[2].value_type = DICT_TYPE_STRING;
+ args[2].str = handle_token;
+
+ args[3].key = "cursor_mode";
+ args[3].value_type = DICT_TYPE_UINT32;
+ args[3].u32 = cursor_mode;
+
+ const int screencast_server_version = gsr_dbus_get_screencast_version_cached(self);
+ if(screencast_server_version >= 4) {
+ num_arg_dict = 5;
+ args[4].key = "persist_mode";
+ args[4].value_type = DICT_TYPE_UINT32;
+ args[4].u32 = 2; /* persist until explicitly revoked */
+
+ if(self->screencast_restore_token && self->screencast_restore_token[0]) {
+ num_arg_dict = 6;
+
+ args[5].key = "restore_token";
+ args[5].value_type = DICT_TYPE_STRING;
+ args[5].str = self->screencast_restore_token;
+ }
+ } else if(self->screencast_restore_token && self->screencast_restore_token[0]) {
+ fprintf(stderr, "gsr warning: gsr_dbus_screencast_select_sources: tried to use restore token but this option is only available in screencast version >= 4, your wayland compositors screencast version is %d\n", screencast_server_version);
+ }
+
+ DBusMessage *response_msg = NULL;
+ if(!gsr_dbus_call_screencast_method(self, "SelectSources", session_handle, NULL, args, num_arg_dict, NULL, &response_msg)) {
+ if(num_arg_dict == 6) {
+ /* We don't know exactly what the error is, but it may be caused by an invalid restore token. In that case, retry without the restore token */
+ fprintf(stderr, "gsr warning: gsr_dbus_screencast_select_sources: SelectSources failed, retrying without restore_token\n");
+ num_arg_dict = 5;
+ if(!gsr_dbus_call_screencast_method(self, "SelectSources", session_handle, NULL, args, num_arg_dict, NULL, &response_msg))
+ return -1;
+ } else {
+ return -1;
+ }
+ }
+
+ // TODO: Verify signal path matches |res|, maybe check the below
+ //fprintf(stderr, "signature: %s, sender: %s\n", dbus_message_get_signature(msg), dbus_message_get_sender(msg));
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(response_msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: missing response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ const int response_status = gsr_dbus_get_response_status(&resp_args);
+ if(response_status != 0) {
+ dbus_message_unref(response_msg);
+ return response_status;
+ }
+
+ dbus_message_unref(response_msg);
+ return 0;
+}
+
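+/* Each stream in the portal response is a (u, a{sv}) struct whose first member is the PipeWire node id; |stream_iter| must be positioned at such a struct. */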
+static dbus_uint32_t screencast_stream_get_pipewire_node(DBusMessageIter *stream_iter) {
+ if(dbus_message_iter_get_arg_type(stream_iter) == DBUS_TYPE_STRUCT) {
+ DBusMessageIter structiter;
+ dbus_message_iter_recurse(stream_iter, &structiter);
+
+ if(dbus_message_iter_get_arg_type(&structiter) == DBUS_TYPE_UINT32) {
+ dbus_uint32_t data = 0;
+ dbus_message_iter_get_basic(&structiter, &data);
+ return data;
+ }
+ }
+
+ return 0;
+}
+
+int gsr_dbus_screencast_start(gsr_dbus *self, const char *session_handle, uint32_t *pipewire_node) {
+ assert(session_handle);
+ *pipewire_node = 0;
+
+ char handle_token[64];
+ gsr_dbus_portal_get_unique_handle_token(self, handle_token, sizeof(handle_token));
+
+ dict_entry args[1];
+ args[0].key = "handle_token";
+ args[0].value_type = DICT_TYPE_STRING;
+ args[0].str = handle_token;
+
+ DBusMessage *response_msg = NULL;
+ if(!gsr_dbus_call_screencast_method(self, "Start", session_handle, "", args, 1, NULL, &response_msg))
+ return -1;
+
+ // TODO: Verify signal path matches |res|, maybe check the below
+ //fprintf(stderr, "signature: %s, sender: %s\n", dbus_message_get_signature(msg), dbus_message_get_sender(msg));
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(response_msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ const int response_status = gsr_dbus_get_response_status(&resp_args);
+ if(response_status != 0) {
+ dbus_message_unref(response_msg);
+ return response_status;
+ }
+
+ if(dbus_message_iter_get_arg_type(&resp_args) != DBUS_TYPE_ARRAY) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing array in response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ DBusMessageIter subiter;
+ dbus_message_iter_recurse(&resp_args, &subiter);
+
+ while(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_INVALID) {
+ DBusMessageIter dictiter = DBUS_MESSAGE_ITER_INIT_CLOSED;
+ const char *key = NULL;
+
+ // fprintf(stderr, " array element type: %c, %s\n",
+ // dbus_message_iter_get_arg_type(&subiter),
+ // dbus_message_iter_get_signature(&subiter));
+ if(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_DICT_ENTRY) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: array value is not an entry\n");
+ goto error;
+ }
+
+ dbus_message_iter_recurse(&subiter, &dictiter);
+
+ if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_STRING) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: entry key is not a string\n");
+ goto error;
+ }
+
+ dbus_message_iter_get_basic(&dictiter, &key);
+ if(!key) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: failed to get entry key as value\n");
+ goto error;
+ }
+
+ if(strcmp(key, "restore_token") == 0) {
+ if(!dbus_message_iter_next(&dictiter)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing restore_token value\n");
+ goto error;
+ }
+
+ if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_VARIANT) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: restore_token is not a variant\n");
+ goto error;
+ }
+
+ DBusMessageIter variant_iter;
+ dbus_message_iter_recurse(&dictiter, &variant_iter);
+
+ if(dbus_message_iter_get_arg_type(&variant_iter) != DBUS_TYPE_STRING) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: restore_token is not a string\n");
+ goto error;
+ }
+
+ char *restore_token_str = NULL;
+ dbus_message_iter_get_basic(&variant_iter, &restore_token_str);
+
+ if(restore_token_str) {
+ if(self->screencast_restore_token) {
+ free(self->screencast_restore_token);
+ self->screencast_restore_token = NULL;
+ }
+ self->screencast_restore_token = strdup(restore_token_str);
+ //fprintf(stderr, "got restore token: %s\n", self->screencast_restore_token);
+ }
+ } else if(strcmp(key, "streams") == 0) {
+ if(!dbus_message_iter_next(&dictiter)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing streams value\n");
+ goto error;
+ }
+
+ if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_VARIANT) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: streams value is not a variant\n");
+ goto error;
+ }
+
+ DBusMessageIter variant_iter;
+ dbus_message_iter_recurse(&dictiter, &variant_iter);
+
+ if(dbus_message_iter_get_arg_type(&variant_iter) != DBUS_TYPE_ARRAY) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: streams value is not an array\n");
+ goto error;
+ }
+
+ int num_streams = dbus_message_iter_get_element_count(&variant_iter);
+ //fprintf(stderr, "num streams: %d\n", num_streams);
+ /* Skip over all streams except the last one, since kde can return multiple streams even if only one is requested; the last one is the valid one */
+ DBusMessageIter streams_iter;
+ dbus_message_iter_recurse(&variant_iter, &streams_iter);
+ for(int i = 0; i < num_streams - 1; ++i) {
+ dbus_message_iter_next(&streams_iter);
+ }
+
+ if(num_streams > 0) {
+ *pipewire_node = screencast_stream_get_pipewire_node(&streams_iter);
+ //fprintf(stderr, "pipewire node: %u\n", *pipewire_node);
+ }
+ }
+
+ dbus_message_iter_next(&subiter);
+ }
+
+ if(*pipewire_node == 0) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: no pipewire node returned\n");
+ goto error;
+ }
+
+ dbus_message_unref(response_msg);
+ return 0;
+
+ error:
+ dbus_message_unref(response_msg);
+ return -1;
+}
+
+bool gsr_dbus_screencast_open_pipewire_remote(gsr_dbus *self, const char *session_handle, int *pipewire_fd) {
+ assert(session_handle);
+ *pipewire_fd = -1;
+ return gsr_dbus_call_screencast_method(self, "OpenPipeWireRemote", session_handle, NULL, NULL, 0, pipewire_fd, NULL);
+}
+
+const char* gsr_dbus_screencast_get_restore_token(gsr_dbus *self) {
+ return self->screencast_restore_token;
+}
diff --git a/src/egl.c b/src/egl.c
index ec9ad07..87c2b84 100644
--- a/src/egl.c
+++ b/src/egl.c
@@ -1,18 +1,19 @@
#include "../include/egl.h"
#include "../include/library_loader.h"
#include "../include/utils.h"
+
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <assert.h>
+#include <unistd.h>
+#include <sys/capability.h>
#include <wayland-client.h>
#include <wayland-egl.h>
-#include <unistd.h>
-#include <sys/capability.h>
-// TODO: rename gsr_egl to something else since this includes both egl and eglx and in the future maybe vulkan too
+// TODO: rename gsr_egl to something else since this includes both egl and glx and in the future maybe vulkan too
// TODO: Move this shit to a separate wayland file, and have a separate file for x11.
@@ -93,7 +94,7 @@ static void registry_add_object(void *data, struct wl_registry *registry, uint32
}
if(egl->wayland.num_outputs == GSR_MAX_OUTPUTS) {
- fprintf(stderr, "gsr warning: reached maximum outputs (32), ignoring output %u\n", name);
+ fprintf(stderr, "gsr warning: reached maximum outputs (%d), ignoring output %u\n", GSR_MAX_OUTPUTS, name);
return;
}
@@ -134,21 +135,42 @@ static void reset_cap_nice(void) {
cap_free(caps);
}
-#define GLX_DRAWABLE_TYPE 0x8010
-#define GLX_RENDER_TYPE 0x8011
-#define GLX_RGBA_BIT 0x00000001
-#define GLX_WINDOW_BIT 0x00000001
-#define GLX_PIXMAP_BIT 0x00000002
+static void store_x11_monitor(const gsr_monitor *monitor, void *userdata) {
+ gsr_egl *egl = userdata;
+ if(egl->x11.num_outputs == GSR_MAX_OUTPUTS) {
+ fprintf(stderr, "gsr warning: reached maximum outputs (%d), ignoring output %s\n", GSR_MAX_OUTPUTS, monitor->name);
+ return;
+ }
+
+ char *monitor_name = strdup(monitor->name);
+ if(!monitor_name)
+ return;
+
+ const int index = egl->x11.num_outputs;
+ egl->x11.outputs[index].name = monitor_name;
+ egl->x11.outputs[index].pos = monitor->pos;
+ egl->x11.outputs[index].size = monitor->size;
+ egl->x11.outputs[index].connector_id = monitor->connector_id;
+ egl->x11.outputs[index].rotation = monitor->rotation;
+ egl->x11.outputs[index].monitor_identifier = monitor->monitor_identifier;
+ ++egl->x11.num_outputs;
+}
+
+#define GLX_DRAWABLE_TYPE 0x8010
+#define GLX_RENDER_TYPE 0x8011
+#define GLX_RGBA_BIT 0x00000001
+#define GLX_WINDOW_BIT 0x00000001
+#define GLX_PIXMAP_BIT 0x00000002
#define GLX_BIND_TO_TEXTURE_RGBA_EXT 0x20D1
#define GLX_BIND_TO_TEXTURE_TARGETS_EXT 0x20D3
#define GLX_TEXTURE_2D_BIT_EXT 0x00000002
-#define GLX_DOUBLEBUFFER 5
-#define GLX_RED_SIZE 8
-#define GLX_GREEN_SIZE 9
-#define GLX_BLUE_SIZE 10
-#define GLX_ALPHA_SIZE 11
-#define GLX_DEPTH_SIZE 12
-#define GLX_RGBA_TYPE 0x8014
+#define GLX_DOUBLEBUFFER 5
+#define GLX_RED_SIZE 8
+#define GLX_GREEN_SIZE 9
+#define GLX_BLUE_SIZE 10
+#define GLX_ALPHA_SIZE 11
+#define GLX_DEPTH_SIZE 12
+#define GLX_RGBA_TYPE 0x8014
#define GLX_CONTEXT_PRIORITY_LEVEL_EXT 0x3100
#define GLX_CONTEXT_PRIORITY_HIGH_EXT 0x3101
@@ -185,6 +207,7 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
EGLConfig ecfg;
int32_t num_config = 0;
+ // TODO: Use EGL_OPENGL_ES_BIT as amd requires that for external texture, but that breaks software encoding
const int32_t attr[] = {
EGL_BUFFER_SIZE, 24,
EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
@@ -226,6 +249,7 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
}
}
+ // TODO: Use EGL_OPENGL_ES_API as amd requires that for external texture, but that breaks software encoding
self->eglBindAPI(EGL_OPENGL_API);
self->egl_display = self->eglGetDisplay(self->wayland.dpy ? (EGLNativeDisplayType)self->wayland.dpy : (EGLNativeDisplayType)self->x11.dpy);
@@ -238,12 +262,12 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
fprintf(stderr, "gsr error: gsr_egl_create_window failed: eglInitialize failed\n");
goto fail;
}
-
+
if(!self->eglChooseConfig(self->egl_display, attr, &ecfg, 1, &num_config) || num_config != 1) {
fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to find a matching config\n");
goto fail;
}
-
+
self->egl_context = self->eglCreateContext(self->egl_display, ecfg, NULL, ctxattr);
if(!self->egl_context) {
fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to create egl context\n");
@@ -251,6 +275,7 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
}
if(wayland) {
+ // TODO: Error check?
self->wayland.surface = wl_compositor_create_surface(self->wayland.compositor);
self->wayland.window = wl_egl_window_create(self->wayland.surface, 16, 16);
self->egl_surface = self->eglCreateWindowSurface(self->egl_display, ecfg, (EGLNativeWindowType)self->wayland.window, NULL);
@@ -268,6 +293,11 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
goto fail;
}
+ if(!wayland) {
+ self->x11.num_outputs = 0;
+ for_each_active_monitor_output_x11_not_cached(self->x11.dpy, store_x11_monitor, self);
+ }
+
reset_cap_nice();
return true;
@@ -363,6 +393,17 @@ static bool gsr_egl_proc_load_egl(gsr_egl *self) {
self->glEGLImageTargetTexture2DOES = (FUNC_glEGLImageTargetTexture2DOES)self->eglGetProcAddress("glEGLImageTargetTexture2DOES");
self->eglQueryDisplayAttribEXT = (FUNC_eglQueryDisplayAttribEXT)self->eglGetProcAddress("eglQueryDisplayAttribEXT");
self->eglQueryDeviceStringEXT = (FUNC_eglQueryDeviceStringEXT)self->eglGetProcAddress("eglQueryDeviceStringEXT");
+ self->eglQueryDmaBufModifiersEXT = (FUNC_eglQueryDmaBufModifiersEXT)self->eglGetProcAddress("eglQueryDmaBufModifiersEXT");
+
+ if(!self->eglExportDMABUFImageQueryMESA) {
+ fprintf(stderr, "gsr error: gsr_egl_load failed: could not find eglExportDMABUFImageQueryMESA\n");
+ return false;
+ }
+
+ if(!self->eglExportDMABUFImageMESA) {
+ fprintf(stderr, "gsr error: gsr_egl_load failed: could not find eglExportDMABUFImageMESA\n");
+ return false;
+ }
if(!self->glEGLImageTargetTexture2DOES) {
fprintf(stderr, "gsr error: gsr_egl_load failed: could not find glEGLImageTargetTexture2DOES\n");
@@ -417,9 +458,7 @@ static bool gsr_egl_load_gl(gsr_egl *self, void *library) {
{ (void**)&self->glTexParameteriv, "glTexParameteriv" },
{ (void**)&self->glGetTexLevelParameteriv, "glGetTexLevelParameteriv" },
{ (void**)&self->glTexImage2D, "glTexImage2D" },
- { (void**)&self->glCopyImageSubData, "glCopyImageSubData" },
{ (void**)&self->glGetTexImage, "glGetTexImage" },
- { (void**)&self->glClearTexImage, "glClearTexImage" },
{ (void**)&self->glGenFramebuffers, "glGenFramebuffers" },
{ (void**)&self->glBindFramebuffer, "glBindFramebuffer" },
{ (void**)&self->glDeleteFramebuffers, "glDeleteFramebuffers" },
@@ -460,6 +499,9 @@ static bool gsr_egl_load_gl(gsr_egl *self, void *library) {
{ (void**)&self->glUniform2f, "glUniform2f" },
{ (void**)&self->glDebugMessageCallback, "glDebugMessageCallback" },
{ (void**)&self->glScissor, "glScissor" },
+ { (void**)&self->glReadPixels, "glReadPixels" },
+ { (void**)&self->glMapBuffer, "glMapBuffer" },
+ { (void**)&self->glUnmapBuffer, "glUnmapBuffer" },
{ NULL, NULL }
};
@@ -503,10 +545,6 @@ bool gsr_egl_load(gsr_egl *self, Display *dpy, bool wayland, bool is_monitor_cap
}
self->glx_library = dlopen("libGLX.so.0", RTLD_LAZY);
- if(!self->glx_library) {
- fprintf(stderr, "gsr error: gsr_egl_load: failed to load libGLX.so.0, error: %s\n", dlerror());
- goto fail;
- }
self->gl_library = dlopen("libGL.so.1", RTLD_LAZY);
if(!self->egl_library) {
@@ -517,7 +555,8 @@ bool gsr_egl_load(gsr_egl *self, Display *dpy, bool wayland, bool is_monitor_cap
if(!gsr_egl_load_egl(self, self->egl_library))
goto fail;
- if(!gsr_egl_load_glx(self, self->glx_library))
+ /* In some distros (Alpine, for example) libGLX doesn't exist, but libGL can be used instead */
+ if(!gsr_egl_load_glx(self, self->glx_library ? self->glx_library : self->gl_library))
goto fail;
if(!gsr_egl_load_gl(self, self->gl_library))
@@ -588,6 +627,14 @@ void gsr_egl_unload(gsr_egl *self) {
self->x11.window = None;
}
+ for(int i = 0; i < self->x11.num_outputs; ++i) {
+ if(self->x11.outputs[i].name) {
+ free(self->x11.outputs[i].name);
+ self->x11.outputs[i].name = NULL;
+ }
+ }
+ self->x11.num_outputs = 0;
+
if(self->wayland.window) {
wl_egl_window_destroy(self->wayland.window);
self->wayland.window = NULL;
@@ -644,10 +691,47 @@ void gsr_egl_unload(gsr_egl *self) {
memset(self, 0, sizeof(gsr_egl));
}
-void gsr_egl_update(gsr_egl *self) {
- if(!self->wayland.dpy)
- return;
+bool gsr_egl_process_event(gsr_egl *self) {
+ switch(gsr_egl_get_display_server(self)) {
+ case GSR_DISPLAY_SERVER_X11: {
+ if(XPending(self->x11.dpy)) {
+ XNextEvent(self->x11.dpy, &self->x11.xev);
+ return true;
+ }
+ return false;
+ }
+ case GSR_DISPLAY_SERVER_WAYLAND: {
+ // TODO: pselect on wl_display_get_fd before doing dispatch
+ const bool events_available = wl_display_dispatch_pending(self->wayland.dpy) > 0;
+ wl_display_flush(self->wayland.dpy);
+ return events_available;
+ }
+ }
+ return false;
+}
+
+void gsr_egl_swap_buffers(gsr_egl *self) {
+ /* This uses less cpu than swap buffer on nvidia */
+ // TODO: Do these and remove swap
+ //self->glFlush();
+ //self->glFinish();
+ if(self->egl_display) {
+ self->eglSwapBuffers(self->egl_display, self->egl_surface);
+ } else if(self->x11.window) {
+ self->glXSwapBuffers(self->x11.dpy, self->x11.window);
+ }
+}
- // TODO: pselect on wl_display_get_fd before doing dispatch
- wl_display_dispatch(self->wayland.dpy);
+gsr_display_server gsr_egl_get_display_server(const gsr_egl *self) {
+ if(self->wayland.dpy)
+ return GSR_DISPLAY_SERVER_WAYLAND;
+ else
+ return GSR_DISPLAY_SERVER_X11;
+}
+
+XEvent* gsr_egl_get_event_data(gsr_egl *self) {
+ if(gsr_egl_get_display_server(self) == GSR_DISPLAY_SERVER_X11)
+ return &self->x11.xev;
+ else
+ return NULL;
}
diff --git a/src/encoder/video/cuda.c b/src/encoder/video/cuda.c
index 2568bc7..6d26cdd 100644
--- a/src/encoder/video/cuda.c
+++ b/src/encoder/video/cuda.c
@@ -12,6 +12,8 @@ typedef struct {
unsigned int target_textures[2];
+ AVBufferRef *device_ctx;
+
gsr_cuda cuda;
CUgraphicsResource cuda_graphics_resources[2];
CUarray mapped_arrays[2];
@@ -19,47 +21,46 @@ typedef struct {
} gsr_video_encoder_cuda;
static bool gsr_video_encoder_cuda_setup_context(gsr_video_encoder_cuda *self, AVCodecContext *video_codec_context) {
- AVBufferRef *device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
- if(!device_ctx) {
+ self->device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
+ if(!self->device_ctx) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_context failed: failed to create hardware device context\n");
return false;
}
- AVHWDeviceContext *hw_device_context = (AVHWDeviceContext*)device_ctx->data;
+ AVHWDeviceContext *hw_device_context = (AVHWDeviceContext*)self->device_ctx->data;
AVCUDADeviceContext *cuda_device_context = (AVCUDADeviceContext*)hw_device_context->hwctx;
cuda_device_context->cuda_ctx = self->cuda.cu_ctx;
- if(av_hwdevice_ctx_init(device_ctx) < 0) {
+ if(av_hwdevice_ctx_init(self->device_ctx) < 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_context failed: failed to create hardware device context\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
return false;
}
- AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
+ AVBufferRef *frame_context = av_hwframe_ctx_alloc(self->device_ctx);
if(!frame_context) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_context failed: failed to create hwframe context\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
return false;
}
AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
hw_frame_context->width = video_codec_context->width;
hw_frame_context->height = video_codec_context->height;
- hw_frame_context->sw_format = self->params.hdr ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
+ hw_frame_context->sw_format = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
hw_frame_context->format = video_codec_context->pix_fmt;
- hw_frame_context->device_ref = device_ctx;
- hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
+ hw_frame_context->device_ctx = (AVHWDeviceContext*)self->device_ctx->data;
if (av_hwframe_ctx_init(frame_context) < 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_context failed: failed to initialize hardware frame context "
"(note: ffmpeg version needs to be > 4.0)\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
//av_buffer_unref(&frame_context);
return false;
}
self->cuda_stream = cuda_device_context->stream;
- video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
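+ // hw_frames_ctx now holds its own reference and self->device_ctx keeps the device alive
+ // until _stop, so the local frame_context reference can be dropped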
+ av_buffer_unref(&frame_context);
return true;
}
@@ -108,7 +109,7 @@ static bool gsr_video_encoder_cuda_setup_textures(gsr_video_encoder_cuda *self,
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
for(int i = 0; i < 2; ++i) {
- self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], !self->params.hdr ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
+ self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], self->params.color_depth == GSR_COLOR_DEPTH_8_BITS ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
if(self->target_textures[i] == 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_textures: failed to create opengl texture\n");
return false;
@@ -125,22 +126,22 @@ static bool gsr_video_encoder_cuda_setup_textures(gsr_video_encoder_cuda *self,
static void gsr_video_encoder_cuda_stop(gsr_video_encoder_cuda *self, AVCodecContext *video_codec_context);
static bool gsr_video_encoder_cuda_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_video_encoder_cuda *encoder_cuda = encoder->priv;
+ gsr_video_encoder_cuda *self = encoder->priv;
- // TODO: Force set overclock to false if wayland
- if(!gsr_cuda_load(&encoder_cuda->cuda, encoder_cuda->params.egl->x11.dpy, encoder_cuda->params.overclock)) {
+ const bool overclock = gsr_egl_get_display_server(self->params.egl) == GSR_DISPLAY_SERVER_X11 ? self->params.overclock : false;
+ if(!gsr_cuda_load(&self->cuda, self->params.egl->x11.dpy, overclock)) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_start: failed to load cuda\n");
- gsr_video_encoder_cuda_stop(encoder_cuda, video_codec_context);
+ gsr_video_encoder_cuda_stop(self, video_codec_context);
return false;
}
- if(!gsr_video_encoder_cuda_setup_context(encoder_cuda, video_codec_context)) {
- gsr_video_encoder_cuda_stop(encoder_cuda, video_codec_context);
+ if(!gsr_video_encoder_cuda_setup_context(self, video_codec_context)) {
+ gsr_video_encoder_cuda_stop(self, video_codec_context);
return false;
}
- if(!gsr_video_encoder_cuda_setup_textures(encoder_cuda, video_codec_context, frame)) {
- gsr_video_encoder_cuda_stop(encoder_cuda, video_codec_context);
+ if(!gsr_video_encoder_cuda_setup_textures(self, video_codec_context, frame)) {
+ gsr_video_encoder_cuda_stop(self, video_codec_context);
return false;
}
@@ -152,10 +153,10 @@ void gsr_video_encoder_cuda_stop(gsr_video_encoder_cuda *self, AVCodecContext *v
self->target_textures[0] = 0;
self->target_textures[1] = 0;
- if(video_codec_context->hw_device_ctx)
- av_buffer_unref(&video_codec_context->hw_device_ctx);
if(video_codec_context->hw_frames_ctx)
av_buffer_unref(&video_codec_context->hw_frames_ctx);
+ if(self->device_ctx)
+ av_buffer_unref(&self->device_ctx);
if(self->cuda.cu_ctx) {
for(int i = 0; i < 2; ++i) {
@@ -170,8 +171,8 @@ void gsr_video_encoder_cuda_stop(gsr_video_encoder_cuda *self, AVCodecContext *v
gsr_cuda_unload(&self->cuda);
}
-static void gsr_video_encoder_cuda_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame) {
- gsr_video_encoder_cuda *encoder_cuda = encoder->priv;
+static void gsr_video_encoder_cuda_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_video_encoder_cuda *self = encoder->priv;
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
for(int i = 0; i < 2; ++i) {
CUDA_MEMCPY2D memcpy_struct;
@@ -183,26 +184,26 @@ static void gsr_video_encoder_cuda_copy_textures_to_frame(gsr_video_encoder *enc
memcpy_struct.dstY = 0;
memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;
- memcpy_struct.srcArray = encoder_cuda->mapped_arrays[i];
+ memcpy_struct.srcArray = self->mapped_arrays[i];
memcpy_struct.srcPitch = frame->width / div[i];
memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
memcpy_struct.dstPitch = frame->linesize[i];
- memcpy_struct.WidthInBytes = frame->width * (encoder_cuda->params.hdr ? 2 : 1);
+ memcpy_struct.WidthInBytes = frame->width * (self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? 2 : 1);
memcpy_struct.Height = frame->height / div[i];
// TODO: Remove this copy if possible
- encoder_cuda->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, encoder_cuda->cuda_stream);
+ self->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, self->cuda_stream);
}
// TODO: needed?
- encoder_cuda->cuda.cuStreamSynchronize(encoder_cuda->cuda_stream);
+ self->cuda.cuStreamSynchronize(self->cuda_stream);
}
static void gsr_video_encoder_cuda_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
- gsr_video_encoder_cuda *encoder_cuda = encoder->priv;
- textures[0] = encoder_cuda->target_textures[0];
- textures[1] = encoder_cuda->target_textures[1];
+ gsr_video_encoder_cuda *self = encoder->priv;
+ textures[0] = self->target_textures[0];
+ textures[1] = self->target_textures[1];
*num_textures = 2;
- *destination_color = encoder_cuda->params.hdr ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
+ *destination_color = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
}
static void gsr_video_encoder_cuda_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
diff --git a/src/encoder/video/software.c b/src/encoder/video/software.c
index 4666ffd..be227f2 100644
--- a/src/encoder/video/software.c
+++ b/src/encoder/video/software.c
@@ -48,7 +48,7 @@ static bool gsr_video_encoder_software_setup_textures(gsr_video_encoder_software
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
for(int i = 0; i < 2; ++i) {
- self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], !self->params.hdr ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
+ self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], self->params.color_depth == GSR_COLOR_DEPTH_8_BITS ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
if(self->target_textures[i] == 0) {
fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: failed to create opengl texture\n");
return false;
@@ -61,7 +61,7 @@ static bool gsr_video_encoder_software_setup_textures(gsr_video_encoder_software
static void gsr_video_encoder_software_stop(gsr_video_encoder_software *self, AVCodecContext *video_codec_context);
static bool gsr_video_encoder_software_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_video_encoder_software *encoder_software = encoder->priv;
+ gsr_video_encoder_software *self = encoder->priv;
video_codec_context->width = FFALIGN(video_codec_context->width, LINESIZE_ALIGNMENT);
video_codec_context->height = FFALIGN(video_codec_context->height, 2);
@@ -69,8 +69,8 @@ static bool gsr_video_encoder_software_start(gsr_video_encoder *encoder, AVCodec
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
- if(!gsr_video_encoder_software_setup_textures(encoder_software, video_codec_context, frame)) {
- gsr_video_encoder_software_stop(encoder_software, video_codec_context);
+ if(!gsr_video_encoder_software_setup_textures(self, video_codec_context, frame)) {
+ gsr_video_encoder_software_stop(self, video_codec_context);
return false;
}
@@ -84,24 +84,29 @@ void gsr_video_encoder_software_stop(gsr_video_encoder_software *self, AVCodecCo
self->target_textures[1] = 0;
}
-static void gsr_video_encoder_software_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame) {
- gsr_video_encoder_software *encoder_software = encoder->priv;
+static void gsr_video_encoder_software_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_video_encoder_software *self = encoder->priv;
// TODO: hdr support
const unsigned int formats[2] = { GL_RED, GL_RG };
for(int i = 0; i < 2; ++i) {
- encoder_software->params.egl->glBindTexture(GL_TEXTURE_2D, encoder_software->target_textures[i]);
- encoder_software->params.egl->glGetTexImage(GL_TEXTURE_2D, 0, formats[i], GL_UNSIGNED_BYTE, frame->data[i]);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[i]);
+ // We could use glGetTexSubImage and then we wouldn't have to use a specific linesize (LINESIZE_ALIGNMENT) that adds padding,
+ // but glGetTexSubImage is only available starting from opengl 4.5.
+ self->params.egl->glGetTexImage(GL_TEXTURE_2D, 0, formats[i], GL_UNSIGNED_BYTE, frame->data[i]);
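+ // A rough sketch of the glGetTexSubImage alternative mentioned above. It assumes a
+ // GL 4.5 context and that glPixelStorei/glGetTexSubImage (and GL_PACK_ROW_LENGTH) were
+ // loaded into gsr_egl, which they currently are not; with GL_PACK_ROW_LENGTH set, the
+ // readback could honor an arbitrary frame->linesize instead of requiring padding:
+#if 0
+ const int d = i == 0 ? 1 : 2; // chroma plane is half size
+ self->params.egl->glPixelStorei(GL_PACK_ROW_LENGTH, frame->linesize[i] / (i == 0 ? 1 : 2));
+ self->params.egl->glGetTexSubImage(self->target_textures[i], 0,
+ 0, 0, 0, // xoffset, yoffset, zoffset
+ frame->width / d, frame->height / d, 1, // width, height, depth
+ formats[i], GL_UNSIGNED_BYTE,
+ frame->linesize[i] * (frame->height / d), // bufSize
+ frame->data[i]);
+ self->params.egl->glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+#endif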
}
- encoder_software->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
// cap_kms->kms.base.egl->eglSwapBuffers(cap_kms->kms.base.egl->egl_display, cap_kms->kms.base.egl->egl_surface);
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
}
static void gsr_video_encoder_software_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
- gsr_video_encoder_software *encoder_software = encoder->priv;
- textures[0] = encoder_software->target_textures[0];
- textures[1] = encoder_software->target_textures[1];
+ gsr_video_encoder_software *self = encoder->priv;
+ textures[0] = self->target_textures[0];
+ textures[1] = self->target_textures[1];
*num_textures = 2;
- *destination_color = encoder_software->params.hdr ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
+ *destination_color = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
}
static void gsr_video_encoder_software_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
diff --git a/src/encoder/video/vaapi.c b/src/encoder/video/vaapi.c
index 2df140d..d558785 100644
--- a/src/encoder/video/vaapi.c
+++ b/src/encoder/video/vaapi.c
@@ -4,17 +4,20 @@
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext_vaapi.h>
+#include <libavutil/intreadwrite.h>
#include <va/va_drmcommon.h>
#include <stdlib.h>
#include <unistd.h>
+#include <fcntl.h>
typedef struct {
gsr_video_encoder_vaapi_params params;
unsigned int target_textures[2];
+ AVBufferRef *device_ctx;
VADisplay va_dpy;
VADRMPRIMESurfaceDescriptor prime;
} gsr_video_encoder_vaapi;
@@ -26,43 +29,40 @@ static bool gsr_video_encoder_vaapi_setup_context(gsr_video_encoder_vaapi *self,
return false;
}
- AVBufferRef *device_ctx;
- if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
+ if(av_hwdevice_ctx_create(&self->device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to create hardware device context\n");
return false;
}
- AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
+ AVBufferRef *frame_context = av_hwframe_ctx_alloc(self->device_ctx);
if(!frame_context) {
fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to create hwframe context\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
return false;
}
- AVHWFramesContext *hw_frame_context =
- (AVHWFramesContext *)frame_context->data;
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
hw_frame_context->width = video_codec_context->width;
hw_frame_context->height = video_codec_context->height;
- hw_frame_context->sw_format = self->params.hdr ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
+ hw_frame_context->sw_format = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
hw_frame_context->format = video_codec_context->pix_fmt;
- hw_frame_context->device_ref = device_ctx;
- hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
+ hw_frame_context->device_ctx = (AVHWDeviceContext*)self->device_ctx->data;
//hw_frame_context->initial_pool_size = 20;
- AVVAAPIDeviceContext *vactx =((AVHWDeviceContext*)device_ctx->data)->hwctx;
+ AVVAAPIDeviceContext *vactx = ((AVHWDeviceContext*)self->device_ctx->data)->hwctx;
self->va_dpy = vactx->display;
if (av_hwframe_ctx_init(frame_context) < 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to initialize hardware frame context "
"(note: ffmpeg version needs to be > 4.0)\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
//av_buffer_unref(&frame_context);
return false;
}
- video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
+ av_buffer_unref(&frame_context);
return true;
}
@@ -96,20 +96,22 @@ static bool gsr_video_encoder_vaapi_setup_textures(gsr_video_encoder_vaapi *self
self->params.egl->glGenTextures(2, self->target_textures);
for(int i = 0; i < 2; ++i) {
const int layer = i;
- const int plane = 0;
-
- const uint64_t modifier = self->prime.objects[self->prime.layers[layer].object_index[plane]].drm_format_modifier;
- const intptr_t img_attr[] = {
- EGL_LINUX_DRM_FOURCC_EXT, formats[i],
- EGL_WIDTH, self->prime.width / div[i],
- EGL_HEIGHT, self->prime.height / div[i],
- EGL_DMA_BUF_PLANE0_FD_EXT, self->prime.objects[self->prime.layers[layer].object_index[plane]].fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, self->prime.layers[layer].offset[plane],
- EGL_DMA_BUF_PLANE0_PITCH_EXT, self->prime.layers[layer].pitch[plane],
- EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, modifier & 0xFFFFFFFFULL,
- EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, modifier >> 32ULL,
- EGL_NONE
- };
+
+ int fds[4];
+ uint32_t offsets[4];
+ uint32_t pitches[4];
+ uint64_t modifiers[4];
+ for(uint32_t j = 0; j < self->prime.layers[layer].num_planes; ++j) {
+ // TODO: Close these? in _stop, using self->prime
+ fds[j] = self->prime.objects[self->prime.layers[layer].object_index[j]].fd;
+ offsets[j] = self->prime.layers[layer].offset[j];
+ pitches[j] = self->prime.layers[layer].pitch[j];
+ modifiers[j] = self->prime.objects[self->prime.layers[layer].object_index[j]].drm_format_modifier;
+ }
+
+ intptr_t img_attr[44];
+ setup_dma_buf_attrs(img_attr, formats[i], self->prime.width / div[i], self->prime.height / div[i],
+ fds, offsets, pitches, modifiers, self->prime.layers[layer].num_planes, true);
while(self->params.egl->eglGetError() != EGL_SUCCESS){}
EGLImage image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
@@ -149,13 +151,13 @@ static bool gsr_video_encoder_vaapi_setup_textures(gsr_video_encoder_vaapi *self
static void gsr_video_encoder_vaapi_stop(gsr_video_encoder_vaapi *self, AVCodecContext *video_codec_context);
static bool gsr_video_encoder_vaapi_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_video_encoder_vaapi *encoder_vaapi = encoder->priv;
+ gsr_video_encoder_vaapi *self = encoder->priv;
- if(encoder_vaapi->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ if(self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_HEVC) {
// TODO: dont do this if using ffmpeg reports that this is not needed (AMD driver bug that was fixed recently)
video_codec_context->width = FFALIGN(video_codec_context->width, 64);
video_codec_context->height = FFALIGN(video_codec_context->height, 16);
- } else if(encoder_vaapi->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_AV1) {
+ } else if(self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_AV1) {
// TODO: Dont do this for VCN 5 and forward which should fix this hardware bug
video_codec_context->width = FFALIGN(video_codec_context->width, 64);
// AMD driver has special case handling for 1080 height to set it to 1082 instead of 1088 (1080 aligned to 16).
@@ -167,13 +169,40 @@ static bool gsr_video_encoder_vaapi_start(gsr_video_encoder *encoder, AVCodecCon
}
}
- if(!gsr_video_encoder_vaapi_setup_context(encoder_vaapi, video_codec_context)) {
- gsr_video_encoder_vaapi_stop(encoder_vaapi, video_codec_context);
+ const int crop_top = (video_codec_context->height - frame->height) / 2;
+ const int crop_left = (video_codec_context->width - frame->width) / 2;
+ if(crop_top != 0 || crop_left != 0) {
+ fprintf(stderr, "gsr warning: gsr_video_encoder_vaapi_start: black bars have been added to the video because of a bug in AMD drivers/hardware. Record with h264 codec instead (-k h264) to get around this issue\n");
+#if 0
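+ // Note: this disabled block references video_stream, which is not in scope in this function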
+ #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61, 10, 100)
+ const int crop_bottom = crop_top;
+ const int crop_right = crop_left;
+ fprintf(stderr, "gsr info: cropping metadata has been added to the file to try and workaround this issue. Video players that support this will remove the black bars when the video is playing\n");
+ const int frame_cropping_data_size = 4 * sizeof(uint32_t);
+ uint8_t *frame_cropping = av_malloc(frame_cropping_data_size);
+ if(frame_cropping) {
+ AV_WL32(frame_cropping + 0, crop_top);
+ AV_WL32(frame_cropping + 4, crop_bottom);
+ AV_WL32(frame_cropping + 8, crop_left);
+ AV_WL32(frame_cropping + 12, crop_right);
+ const bool sidedata_added = av_packet_side_data_add(&video_stream->codecpar->coded_side_data, &video_stream->codecpar->nb_coded_side_data, AV_PKT_DATA_FRAME_CROPPING, frame_cropping, frame_cropping_data_size, 0) != NULL;
+ if(!sidedata_added)
+ av_free(frame_cropping);
+ }
+ #endif
+#endif
+ }
+
+ frame->width = video_codec_context->width;
+ frame->height = video_codec_context->height;
+
+ if(!gsr_video_encoder_vaapi_setup_context(self, video_codec_context)) {
+ gsr_video_encoder_vaapi_stop(self, video_codec_context);
return false;
}
- if(!gsr_video_encoder_vaapi_setup_textures(encoder_vaapi, video_codec_context, frame)) {
- gsr_video_encoder_vaapi_stop(encoder_vaapi, video_codec_context);
+ if(!gsr_video_encoder_vaapi_setup_textures(self, video_codec_context, frame)) {
+ gsr_video_encoder_vaapi_stop(self, video_codec_context);
return false;
}
@@ -185,10 +214,10 @@ void gsr_video_encoder_vaapi_stop(gsr_video_encoder_vaapi *self, AVCodecContext
self->target_textures[0] = 0;
self->target_textures[1] = 0;
- if(video_codec_context->hw_device_ctx)
- av_buffer_unref(&video_codec_context->hw_device_ctx);
if(video_codec_context->hw_frames_ctx)
av_buffer_unref(&video_codec_context->hw_frames_ctx);
+ if(self->device_ctx)
+ av_buffer_unref(&self->device_ctx);
for(uint32_t i = 0; i < self->prime.num_objects; ++i) {
if(self->prime.objects[i].fd > 0) {
@@ -199,11 +228,11 @@ void gsr_video_encoder_vaapi_stop(gsr_video_encoder_vaapi *self, AVCodecContext
}
static void gsr_video_encoder_vaapi_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
- gsr_video_encoder_vaapi *encoder_vaapi = encoder->priv;
- textures[0] = encoder_vaapi->target_textures[0];
- textures[1] = encoder_vaapi->target_textures[1];
+ gsr_video_encoder_vaapi *self = encoder->priv;
+ textures[0] = self->target_textures[0];
+ textures[1] = self->target_textures[1];
*num_textures = 2;
- *destination_color = encoder_vaapi->params.hdr ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
+ *destination_color = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
}
static void gsr_video_encoder_vaapi_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
@@ -227,7 +256,6 @@ gsr_video_encoder* gsr_video_encoder_vaapi_create(const gsr_video_encoder_vaapi_
*encoder = (gsr_video_encoder) {
.start = gsr_video_encoder_vaapi_start,
- .copy_textures_to_frame = NULL,
.get_textures = gsr_video_encoder_vaapi_get_textures,
.destroy = gsr_video_encoder_vaapi_destroy,
.priv = encoder_vaapi
diff --git a/src/encoder/video/video.c b/src/encoder/video/video.c
index 9b0def0..76d53b0 100644
--- a/src/encoder/video/video.c
+++ b/src/encoder/video/video.c
@@ -9,10 +9,10 @@ bool gsr_video_encoder_start(gsr_video_encoder *encoder, AVCodecContext *video_c
return res;
}
-void gsr_video_encoder_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame) {
+void gsr_video_encoder_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
assert(encoder->started);
if(encoder->copy_textures_to_frame)
- encoder->copy_textures_to_frame(encoder, frame);
+ encoder->copy_textures_to_frame(encoder, frame, color_conversion);
}
void gsr_video_encoder_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
diff --git a/src/encoder/video/vulkan.c b/src/encoder/video/vulkan.c
new file mode 100644
index 0000000..0b6c380
--- /dev/null
+++ b/src/encoder/video/vulkan.c
@@ -0,0 +1,313 @@
+#include "../../../include/encoder/video/vulkan.h"
+#include "../../../include/utils.h"
+#include "../../../include/egl.h"
+
+#include <libavcodec/avcodec.h>
+#define VK_NO_PROTOTYPES
+#include <libavutil/hwcontext_vulkan.h>
+
+//#include <vulkan/vulkan_core.h>
+
+#define GL_TEXTURE_TILING_EXT 0x9580
+#define GL_OPTIMAL_TILING_EXT 0x9584
+#define GL_LINEAR_TILING_EXT 0x9585
+
+#define GL_PIXEL_PACK_BUFFER 0x88EB
+#define GL_PIXEL_UNPACK_BUFFER 0x88EC
+#define GL_STREAM_READ 0x88E1
+#define GL_STREAM_DRAW 0x88E0
+#define GL_READ_ONLY 0x88B8
+#define GL_WRITE_ONLY 0x88B9
+#define GL_READ_FRAMEBUFFER 0x8CA8
+
+typedef struct {
+ gsr_video_encoder_vulkan_params params;
+ unsigned int target_textures[2];
+ AVBufferRef *device_ctx;
+ AVVulkanDeviceContext* vv;
+ unsigned int pbo_y[2];
+ unsigned int pbo_uv[2];
+ AVFrame *sw_frame;
+} gsr_video_encoder_vulkan;
+
+static bool gsr_video_encoder_vulkan_setup_context(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context) {
+ AVDictionary *options = NULL;
+ //av_dict_set(&options, "linear_images", "1", 0);
+ //av_dict_set(&options, "disable_multiplane", "1", 0);
+
+ // TODO: Use correct device
+ if(av_hwdevice_ctx_create(&self->device_ctx, AV_HWDEVICE_TYPE_VULKAN, NULL, options, 0) < 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to create hardware device context\n");
+ return false;
+ }
+
+ AVBufferRef *frame_context = av_hwframe_ctx_alloc(self->device_ctx);
+ if(!frame_context) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to create hwframe context\n");
+ av_buffer_unref(&self->device_ctx);
+ return false;
+ }
+
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
+ hw_frame_context->width = video_codec_context->width;
+ hw_frame_context->height = video_codec_context->height;
+ hw_frame_context->sw_format = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
+ hw_frame_context->format = video_codec_context->pix_fmt;
+ hw_frame_context->device_ctx = (AVHWDeviceContext*)self->device_ctx->data;
+
+ //AVVulkanFramesContext *vk_frame_ctx = (AVVulkanFramesContext*)hw_frame_context->hwctx;
+ //hw_frame_context->initial_pool_size = 20;
+
+ if (av_hwframe_ctx_init(frame_context) < 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to initialize hardware frame context "
+ "(note: ffmpeg version needs to be > 4.0)\n");
+ av_buffer_unref(&self->device_ctx);
+ //av_buffer_unref(&frame_context);
+ return false;
+ }
+
+ video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
+ av_buffer_unref(&frame_context);
+ return true;
+}
+
+static unsigned int gl_create_texture(gsr_egl *egl, int width, int height, int internal_format, unsigned int format) {
+ unsigned int texture_id = 0;
+ egl->glGenTextures(1, &texture_id);
+ egl->glBindTexture(GL_TEXTURE_2D, texture_id);
+ //egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_TILING_EXT, GL_OPTIMAL_TILING_EXT);
+ egl->glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, GL_UNSIGNED_BYTE, NULL);
+
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ egl->glBindTexture(GL_TEXTURE_2D, 0);
+ return texture_id;
+}
+
+static AVVulkanDeviceContext* video_codec_context_get_vulkan_data(AVCodecContext *video_codec_context) {
+ AVBufferRef *hw_frames_ctx = video_codec_context->hw_frames_ctx;
+ if(!hw_frames_ctx)
+ return NULL;
+
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)hw_frames_ctx->data;
+ AVHWDeviceContext *device_context = (AVHWDeviceContext*)hw_frame_context->device_ctx;
+ if(device_context->type != AV_HWDEVICE_TYPE_VULKAN)
+ return NULL;
+
+ return (AVVulkanDeviceContext*)device_context->hwctx;
+}
+
+static bool gsr_video_encoder_vulkan_setup_textures(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context, AVFrame *frame) {
+ const int res = av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, frame, 0);
+ if(res < 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_textures: av_hwframe_get_buffer failed: %d\n", res);
+ return false;
+ }
+
+ //AVVkFrame *target_surface_id = (AVVkFrame*)frame->data[0];
+ self->vv = video_codec_context_get_vulkan_data(video_codec_context);
+
+ const unsigned int internal_formats_nv12[2] = { GL_RGBA8, GL_RGBA8 };
+ const unsigned int internal_formats_p010[2] = { GL_R16, GL_RG16 };
+ const unsigned int formats[2] = { GL_RED, GL_RG };
+ const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
+
+ for(int i = 0; i < 2; ++i) {
+ self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], self->params.color_depth == GSR_COLOR_DEPTH_8_BITS ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
+ if(self->target_textures[i] == 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_textures: failed to create opengl texture\n");
+ return false;
+ }
+ }
+
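+ // Two PBOs per plane so the readback can be double-buffered; see gsr_video_encoder_vulkan_copy_textures_to_frame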
+ self->params.egl->glGenBuffers(2, self->pbo_y);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[0]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, frame->width * frame->height, 0, GL_STREAM_READ);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[1]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, frame->width * frame->height, 0, GL_STREAM_READ);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+ self->params.egl->glGenBuffers(2, self->pbo_uv);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[0]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, (frame->width/2 * frame->height/2) * 2, 0, GL_STREAM_READ);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[1]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, (frame->width/2 * frame->height/2) * 2, 0, GL_STREAM_READ);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+ self->sw_frame = av_frame_alloc();
+ if(!self->sw_frame) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_textures: failed to allocate sw frame\n");
+ return false;
+ }
+ self->sw_frame->format = AV_PIX_FMT_NV12;
+ self->sw_frame->width = frame->width;
+ self->sw_frame->height = frame->height;
+
+ // TODO: Remove
+ if(av_frame_get_buffer(self->sw_frame, 0) < 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_textures: failed to allocate sw frame buffers\n");
+ }
+
+ // TODO: Remove
+ if(av_frame_make_writable(self->sw_frame) < 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_textures: failed to make sw frame writable\n");
+ }
+ return true;
+}
+
+static void gsr_video_encoder_vulkan_stop(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context);
+
+static bool gsr_video_encoder_vulkan_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
+ gsr_video_encoder_vulkan *self = encoder->priv;
+
+ if(!gsr_video_encoder_vulkan_setup_context(self, video_codec_context)) {
+ gsr_video_encoder_vulkan_stop(self, video_codec_context);
+ return false;
+ }
+
+ if(!gsr_video_encoder_vulkan_setup_textures(self, video_codec_context, frame)) {
+ gsr_video_encoder_vulkan_stop(self, video_codec_context);
+ return false;
+ }
+
+ return true;
+}
+
+void gsr_video_encoder_vulkan_stop(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context) {
+ self->params.egl->glDeleteTextures(2, self->target_textures);
+ self->target_textures[0] = 0;
+ self->target_textures[1] = 0;
+
+ if(video_codec_context->hw_frames_ctx)
+ av_buffer_unref(&video_codec_context->hw_frames_ctx);
+ if(self->device_ctx)
+ av_buffer_unref(&self->device_ctx);
+ av_frame_free(&self->sw_frame); // allocated in setup_textures; av_frame_free handles NULL
+}
+
+// Intentional no-op free callback: lets av_buffer_create wrap memory owned elsewhere (the mapped PBOs)
+static void nop_free(void *opaque, uint8_t *data) {
+ (void)opaque;
+ (void)data;
+}
+
+static void gsr_video_encoder_vulkan_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_video_encoder_vulkan *self = encoder->priv;
+
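+ // Double-buffered PBO readback: start an asynchronous glReadPixels into one PBO of each
+ // pair while mapping the other PBO, which the GPU filled during the previous call, so the
+ // GPU->CPU copy overlaps with the next frame's work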
+ static int counter = 0;
+ ++counter;
+
+ // AVBufferRef *av_buffer_create(uint8_t *data, size_t size,
+ // void (*free)(void *opaque, uint8_t *data),
+ // void *opaque, int flags);
+
+ while(self->params.egl->glGetError()){}
+ self->params.egl->glBindFramebuffer(GL_READ_FRAMEBUFFER, color_conversion->framebuffers[0]);
+ //fprintf(stderr, "1 gl err: %d\n", self->params.egl->glGetError());
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[counter % 2]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, frame->width * frame->height, 0, GL_STREAM_READ);
+ self->params.egl->glReadPixels(0, 0, frame->width, frame->height, GL_RED, GL_UNSIGNED_BYTE, 0);
+ //fprintf(stderr, "2 gl err: %d\n", self->params.egl->glGetError());
+
+ const int next_pbo_y = (counter + 1) % 2;
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[next_pbo_y]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, frame->width * frame->height, 0, GL_STREAM_READ);
+ //fprintf(stderr, "3 gl err: %d\n", self->params.egl->glGetError());
+ uint8_t *ptr_y = (uint8_t*)self->params.egl->glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
+ //fprintf(stderr, "4 gl err: %d\n", self->params.egl->glGetError());
+ if(!ptr_y) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_copy_textures_to_frame: failed to map y pixel buffer\n");
+ }
+
+ while(self->params.egl->glGetError()){}
+ self->params.egl->glBindFramebuffer(GL_READ_FRAMEBUFFER, color_conversion->framebuffers[1]);
+ //fprintf(stderr, "5 gl err: %d\n", self->params.egl->glGetError());
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[counter % 2]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, (frame->width/2 * frame->height/2) * 2, 0, GL_STREAM_READ);
+ //fprintf(stderr, "5.5 gl err: %d\n", self->params.egl->glGetError());
+ self->params.egl->glReadPixels(0, 0, frame->width/2, frame->height/2, GL_RG, GL_UNSIGNED_BYTE, 0);
+ //fprintf(stderr, "6 gl err: %d\n", self->params.egl->glGetError());
+
+ const int next_pbo_uv = (counter + 1) % 2;
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[next_pbo_uv]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, (frame->width/2 * frame->height/2) * 2, 0, GL_STREAM_READ);
+ //fprintf(stderr, "7 gl err: %d\n", self->params.egl->glGetError());
+ uint8_t *ptr_uv = (uint8_t*)self->params.egl->glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
+ //fprintf(stderr, "8 gl err: %d\n", self->params.egl->glGetError());
+ if(!ptr_uv) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_copy_textures_to_frame: failed to map uv pixel buffer\n");
+ }
+
+ //self->sw_frame->buf[0] = av_buffer_create(ptr_y, 3840 * 2160, nop_free, NULL, 0);
+ //self->sw_frame->buf[1] = av_buffer_create(ptr_uv, 1920 * 1080 * 2, nop_free, NULL, 0);
+ //self->sw_frame->data[0] = self->sw_frame->buf[0]->data;
+ //self->sw_frame->data[1] = self->sw_frame->buf[1]->data;
+ //self->sw_frame->extended_data[0] = self->sw_frame->data[0];
+ //self->sw_frame->extended_data[1] = self->sw_frame->data[1];
+
+ self->sw_frame->data[0] = ptr_y;
+ self->sw_frame->data[1] = ptr_uv;
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+ self->params.egl->glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+
+ //self->params.egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[1]);
+ //self->params.egl->glGetTexImage(GL_TEXTURE_2D, 0, GL_RG, GL_UNSIGNED_BYTE, sw_frame->data[1]);
+
+ //self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ int ret = av_hwframe_transfer_data(frame, self->sw_frame, 0);
+ if(ret < 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_copy_textures_to_frame: av_hwframe_transfer_data failed, error: %s\n", av_err2str(ret));
+ }
+
+ //av_buffer_unref(&self->sw_frame->buf[0]);
+ //av_buffer_unref(&self->sw_frame->buf[1]);
+
+ //av_frame_free(&sw_frame);
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[next_pbo_y]);
+ self->params.egl->glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[next_pbo_uv]);
+ self->params.egl->glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+}
+
+static void gsr_video_encoder_vulkan_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
+ gsr_video_encoder_vulkan *self = encoder->priv;
+ textures[0] = self->target_textures[0];
+ textures[1] = self->target_textures[1];
+ *num_textures = 2;
+ *destination_color = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
+}
+
+static void gsr_video_encoder_vulkan_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
+ gsr_video_encoder_vulkan_stop(encoder->priv, video_codec_context);
+ free(encoder->priv);
+ free(encoder);
+}
+
+gsr_video_encoder* gsr_video_encoder_vulkan_create(const gsr_video_encoder_vulkan_params *params) {
+ gsr_video_encoder *encoder = calloc(1, sizeof(gsr_video_encoder));
+ if(!encoder)
+ return NULL;
+
+ gsr_video_encoder_vulkan *encoder_vulkan = calloc(1, sizeof(gsr_video_encoder_vulkan));
+ if(!encoder_vulkan) {
+ free(encoder);
+ return NULL;
+ }
+
+ encoder_vulkan->params = *params;
+
+ *encoder = (gsr_video_encoder) {
+ .start = gsr_video_encoder_vulkan_start,
+ .copy_textures_to_frame = gsr_video_encoder_vulkan_copy_textures_to_frame,
+ .get_textures = gsr_video_encoder_vulkan_get_textures,
+ .destroy = gsr_video_encoder_vulkan_destroy,
+ .priv = encoder_vulkan
+ };
+
+ return encoder;
+}
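+
+// Usage sketch for this new encoder backend. A sketch only: the params field names are
+// taken from how params is used above, and error/cleanup handling is elided:
+#if 0
+gsr_video_encoder_vulkan_params params = {0};
+params.egl = egl;
+params.color_depth = GSR_COLOR_DEPTH_8_BITS;
+gsr_video_encoder *encoder = gsr_video_encoder_vulkan_create(&params);
+if(encoder && gsr_video_encoder_start(encoder, video_codec_context, frame)) {
+ // ... render the captured frame into the encoder's target textures ...
+ gsr_video_encoder_copy_textures_to_frame(encoder, frame, &color_conversion);
+}
+#endif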
diff --git a/src/main.cpp b/src/main.cpp
index 18a810a..de8c352 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -2,11 +2,20 @@ extern "C" {
#include "../include/capture/nvfbc.h"
#include "../include/capture/xcomposite.h"
#include "../include/capture/kms.h"
+#ifdef GSR_PORTAL
+#include "../include/capture/portal.h"
+#include "../include/dbus.h"
+#endif
#include "../include/encoder/video/cuda.h"
#include "../include/encoder/video/vaapi.h"
+#include "../include/encoder/video/vulkan.h"
#include "../include/encoder/video/software.h"
+#include "../include/codec_query/nvenc.h"
+#include "../include/codec_query/vaapi.h"
+#include "../include/codec_query/vulkan.h"
#include "../include/egl.h"
#include "../include/utils.h"
+#include "../include/damage.h"
#include "../include/color_conversion.h"
}
@@ -35,6 +44,7 @@ extern "C" {
#include <libswresample/swresample.h>
#include <libavutil/avutil.h>
#include <libavutil/time.h>
+#include <libavutil/mastering_display_metadata.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
@@ -43,6 +53,10 @@ extern "C" {
#include <deque>
#include <future>
+#ifndef GSR_VERSION
+#define GSR_VERSION "unknown"
+#endif
+
// TODO: If options are not supported then they are returned (allocated) in the options. This should be free'd.
// TODO: Remove LIBAVUTIL_VERSION_MAJOR checks in the future when ubuntu, pop os LTS etc update ffmpeg to >= 5.0
@@ -85,8 +99,14 @@ enum class VideoCodec {
H264,
HEVC,
HEVC_HDR,
+ HEVC_10BIT,
AV1,
- AV1_HDR
+ AV1_HDR,
+ AV1_10BIT,
+ VP8,
+ VP9,
+ H264_VULKAN,
+ HEVC_VULKAN
};
enum class AudioCodec {
@@ -106,6 +126,11 @@ enum class FramerateMode {
CONTENT
};
+enum class BitrateMode {
+ QP,
+ VBR
+};
+
static int x11_error_handler(Display*, XErrorEvent*) {
return 0;
}
@@ -115,6 +140,7 @@ static int x11_io_error_handler(Display*) {
}
static bool video_codec_is_hdr(VideoCodec video_codec) {
+ // TODO: Vulkan
switch(video_codec) {
case VideoCodec::HEVC_HDR:
case VideoCodec::AV1_HDR:
@@ -124,6 +150,65 @@ static bool video_codec_is_hdr(VideoCodec video_codec) {
}
}
+static VideoCodec hdr_video_codec_to_sdr_video_codec(VideoCodec video_codec) {
+ // TODO: Vulkan
+ switch(video_codec) {
+ case VideoCodec::HEVC_HDR:
+ return VideoCodec::HEVC;
+ case VideoCodec::AV1_HDR:
+ return VideoCodec::AV1;
+ default:
+ return video_codec;
+ }
+}
+
+static gsr_color_depth video_codec_to_bit_depth(VideoCodec video_codec) {
+ // TODO: Vulkan
+ switch(video_codec) {
+ case VideoCodec::HEVC_HDR:
+ case VideoCodec::HEVC_10BIT:
+ case VideoCodec::AV1_HDR:
+ case VideoCodec::AV1_10BIT:
+ return GSR_COLOR_DEPTH_10_BITS;
+ default:
+ return GSR_COLOR_DEPTH_8_BITS;
+ }
+}
+
+// static bool video_codec_is_hevc(VideoCodec video_codec) {
+// TODO: Vulkan
+// switch(video_codec) {
+// case VideoCodec::HEVC:
+// case VideoCodec::HEVC_HDR:
+// case VideoCodec::HEVC_10BIT:
+// return true;
+// default:
+// return false;
+// }
+// }
+
+static bool video_codec_is_av1(VideoCodec video_codec) {
+ // TODO: Vulkan
+ switch(video_codec) {
+ case VideoCodec::AV1:
+ case VideoCodec::AV1_HDR:
+ case VideoCodec::AV1_10BIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool video_codec_is_vulkan(VideoCodec video_codec) {
+ switch(video_codec) {
+ case VideoCodec::H264_VULKAN:
+ case VideoCodec::HEVC_VULKAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
struct PacketData {
PacketData() {}
PacketData(const PacketData&) = delete;
@@ -223,7 +308,8 @@ static AVCodecID audio_codec_get_id(AudioCodec audio_codec) {
return AV_CODEC_ID_AAC;
}
-static AVSampleFormat audio_codec_get_sample_format(AudioCodec audio_codec, const AVCodec *codec, bool mix_audio) {
+static AVSampleFormat audio_codec_get_sample_format(AVCodecContext *audio_codec_context, AudioCodec audio_codec, const AVCodec *codec, bool mix_audio) {
+ (void)audio_codec_context;
switch(audio_codec) {
case AudioCodec::AAC: {
return AV_SAMPLE_FMT_FLTP;
@@ -232,13 +318,32 @@ static AVSampleFormat audio_codec_get_sample_format(AudioCodec audio_codec, cons
bool supports_s16 = false;
bool supports_flt = false;
- for(size_t i = 0; codec->sample_fmts && codec->sample_fmts[i] != -1; ++i) {
+ #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(61, 15, 0)
+ for(size_t i = 0; codec->sample_fmts && codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; ++i) {
if(codec->sample_fmts[i] == AV_SAMPLE_FMT_S16) {
supports_s16 = true;
} else if(codec->sample_fmts[i] == AV_SAMPLE_FMT_FLT) {
supports_flt = true;
}
}
+ #else
+ const enum AVSampleFormat *sample_fmts = NULL;
+ if(avcodec_get_supported_config(audio_codec_context, codec, AV_CODEC_CONFIG_SAMPLE_FORMAT, 0, (const void**)&sample_fmts, NULL) >= 0) {
+ if(sample_fmts) {
+ for(size_t i = 0; sample_fmts[i] != AV_SAMPLE_FMT_NONE; ++i) {
+ if(sample_fmts[i] == AV_SAMPLE_FMT_S16) {
+ supports_s16 = true;
+ } else if(sample_fmts[i] == AV_SAMPLE_FMT_FLT) {
+ supports_flt = true;
+ }
+ }
+ } else {
+ // What a dumb API. It returns NULL if all formats are supported
+ supports_s16 = true;
+ supports_flt = true;
+ }
+ }
+ #endif
// Amix only works with float audio
if(mix_audio)
@@ -307,7 +412,7 @@ static AVCodecContext* create_audio_codec_context(int fps, AudioCodec audio_code
assert(codec->type == AVMEDIA_TYPE_AUDIO);
codec_context->codec_id = codec->id;
- codec_context->sample_fmt = audio_codec_get_sample_format(audio_codec, codec, mix_audio);
+ codec_context->sample_fmt = audio_codec_get_sample_format(codec_context, audio_codec, codec, mix_audio);
codec_context->bit_rate = audio_bitrate == 0 ? audio_codec_get_get_bitrate(audio_codec) : audio_bitrate;
codec_context->sample_rate = AUDIO_SAMPLE_RATE;
if(audio_codec == AudioCodec::AAC)
@@ -327,10 +432,62 @@ static AVCodecContext* create_audio_codec_context(int fps, AudioCodec audio_code
return codec_context;
}
+static int vbr_get_quality_parameter(AVCodecContext *codec_context, VideoQuality video_quality, bool hdr) {
+ // For HDR (10-bit) encoding, ~80% of the 8-bit quality value (8 bit / 10 bit) gives comparable quality
+ const float qp_multiply = hdr ? 8.0f/10.0f : 1.0f;
+ if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ return 160 * qp_multiply;
+ case VideoQuality::HIGH:
+ return 130 * qp_multiply;
+ case VideoQuality::VERY_HIGH:
+ return 110 * qp_multiply;
+ case VideoQuality::ULTRA:
+ return 90 * qp_multiply;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_H264) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ return 35 * qp_multiply;
+ case VideoQuality::HIGH:
+ return 30 * qp_multiply;
+ case VideoQuality::VERY_HIGH:
+ return 25 * qp_multiply;
+ case VideoQuality::ULTRA:
+ return 22 * qp_multiply;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ return 35 * qp_multiply;
+ case VideoQuality::HIGH:
+ return 30 * qp_multiply;
+ case VideoQuality::VERY_HIGH:
+ return 25 * qp_multiply;
+ case VideoQuality::ULTRA:
+ return 22 * qp_multiply;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP8 || codec_context->codec_id == AV_CODEC_ID_VP9) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ return 35 * qp_multiply;
+ case VideoQuality::HIGH:
+ return 30 * qp_multiply;
+ case VideoQuality::VERY_HIGH:
+ return 25 * qp_multiply;
+ case VideoQuality::ULTRA:
+ return 22 * qp_multiply;
+ }
+ }
+ assert(false);
+ return 22 * qp_multiply;
+}
+
static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
VideoQuality video_quality,
int fps, const AVCodec *codec, bool low_latency, gsr_gpu_vendor vendor, FramerateMode framerate_mode,
- bool hdr, gsr_color_range color_range, float keyint) {
+ bool hdr, gsr_color_range color_range, float keyint, bool use_software_video_encoder, BitrateMode bitrate_mode, VideoCodec video_codec) {
AVCodecContext *codec_context = avcodec_alloc_context3(codec);
@@ -372,69 +529,91 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
}
//codec_context->chroma_sample_location = AVCHROMA_LOC_CENTER;
if(codec->id == AV_CODEC_ID_HEVC)
- codec_context->codec_tag = MKTAG('h', 'v', 'c', '1');
- switch(video_quality) {
- case VideoQuality::MEDIUM:
- //codec_context->qmin = 35;
- //codec_context->qmax = 35;
- codec_context->bit_rate = 100000;//4500000 + (codec_context->width * codec_context->height)*0.75;
- break;
- case VideoQuality::HIGH:
- //codec_context->qmin = 34;
- //codec_context->qmax = 34;
- codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
- break;
- case VideoQuality::VERY_HIGH:
- //codec_context->qmin = 28;
- //codec_context->qmax = 28;
- codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
- break;
- case VideoQuality::ULTRA:
- //codec_context->qmin = 22;
- //codec_context->qmax = 22;
- codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
- break;
- }
- //codec_context->profile = FF_PROFILE_H264_MAIN;
- if (codec_context->codec_id == AV_CODEC_ID_MPEG1VIDEO)
- codec_context->mb_decision = 2;
-
- // stream->time_base = codec_context->time_base;
- // codec_context->ticks_per_frame = 30;
- //av_opt_set(codec_context->priv_data, "tune", "hq", 0);
- // TODO: Do this for better file size? also allows setting qmin, qmax per frame? which can then be used to dynamically set bitrate to reduce quality
- // if live streaming is slow or if the users harddrive is cant handle writing megabytes of data per second.
- #if 0
- char qmin_str[32];
- snprintf(qmin_str, sizeof(qmin_str), "%d", codec_context->qmin);
-
- char qmax_str[32];
- snprintf(qmax_str, sizeof(qmax_str), "%d", codec_context->qmax);
-
- av_opt_set(codec_context->priv_data, "cq", qmax_str, 0);
- av_opt_set(codec_context->priv_data, "rc", "vbr", 0);
- av_opt_set(codec_context->priv_data, "qmin", qmin_str, 0);
- av_opt_set(codec_context->priv_data, "qmax", qmax_str, 0);
- codec_context->bit_rate = 0;
- #endif
+ codec_context->codec_tag = MKTAG('h', 'v', 'c', '1'); // QuickTime on macOS requires this or the video won't be playable
- // 8 bit / 10 bit = 80%, and increase it even more
- const float quality_multiply = hdr ? (8.0f/10.0f * 0.7f) : 1.0f;
- if(vendor != GSR_GPU_VENDOR_NVIDIA) {
+ if(bitrate_mode == BitrateMode::VBR) {
+ const int quality = vbr_get_quality_parameter(codec_context, video_quality, hdr);
switch(video_quality) {
case VideoQuality::MEDIUM:
- codec_context->global_quality = 180 * quality_multiply;
+ codec_context->qmin = quality;
+ codec_context->qmax = quality;
+ codec_context->bit_rate = 100000;//4500000 + (codec_context->width * codec_context->height)*0.75;
break;
case VideoQuality::HIGH:
- codec_context->global_quality = 140 * quality_multiply;
+ codec_context->qmin = quality;
+ codec_context->qmax = quality;
+ codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
break;
case VideoQuality::VERY_HIGH:
- codec_context->global_quality = 120 * quality_multiply;
+ codec_context->qmin = quality;
+ codec_context->qmax = quality;
+ codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
break;
case VideoQuality::ULTRA:
- codec_context->global_quality = 100 * quality_multiply;
+ codec_context->qmin = quality;
+ codec_context->qmax = quality;
+ codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
break;
}
+
+ codec_context->rc_max_rate = codec_context->bit_rate;
+ codec_context->rc_min_rate = codec_context->bit_rate;
+ codec_context->rc_buffer_size = codec_context->bit_rate;//codec_context->bit_rate / 10;
+ codec_context->rc_initial_buffer_occupancy = 100000;//codec_context->bit_rate * 1000;
+ }
+ //codec_context->profile = FF_PROFILE_H264_MAIN;
+ if (codec_context->codec_id == AV_CODEC_ID_MPEG1VIDEO)
+ codec_context->mb_decision = 2;
+
+ if(!use_software_video_encoder && vendor != GSR_GPU_VENDOR_NVIDIA) {
+ // For HDR (10-bit), scale the quality value to ~80% (8 bit / 10 bit) and then by another 0.7 to increase quality further
+ const float quality_multiply = hdr ? (8.0f/10.0f * 0.7f) : 1.0f;
+ if(codec_context->codec_id == AV_CODEC_ID_AV1 || codec_context->codec_id == AV_CODEC_ID_H264 || codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ codec_context->global_quality = 150 * quality_multiply;
+ break;
+ case VideoQuality::HIGH:
+ codec_context->global_quality = 120 * quality_multiply;
+ break;
+ case VideoQuality::VERY_HIGH:
+ codec_context->global_quality = 100 * quality_multiply;
+ break;
+ case VideoQuality::ULTRA:
+ codec_context->global_quality = 90 * quality_multiply;
+ break;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP8) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ codec_context->global_quality = 35 * quality_multiply;
+ break;
+ case VideoQuality::HIGH:
+ codec_context->global_quality = 30 * quality_multiply;
+ break;
+ case VideoQuality::VERY_HIGH:
+ codec_context->global_quality = 20 * quality_multiply;
+ break;
+ case VideoQuality::ULTRA:
+ codec_context->global_quality = 10 * quality_multiply;
+ break;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP9) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ codec_context->global_quality = 35 * quality_multiply;
+ break;
+ case VideoQuality::HIGH:
+ codec_context->global_quality = 30 * quality_multiply;
+ break;
+ case VideoQuality::VERY_HIGH:
+ codec_context->global_quality = 20 * quality_multiply;
+ break;
+ case VideoQuality::ULTRA:
+ codec_context->global_quality = 10 * quality_multiply;
+ break;
+ }
+ }
}
av_opt_set_int(codec_context->priv_data, "b_ref_mode", 0, 0);
@@ -443,158 +622,37 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
if(vendor != GSR_GPU_VENDOR_NVIDIA) {
// TODO: More options, better options
//codec_context->bit_rate = codec_context->width * codec_context->height;
- av_opt_set(codec_context->priv_data, "rc_mode", "CQP", 0);
+ switch(bitrate_mode) {
+ case BitrateMode::QP: {
+ if(video_codec_is_vulkan(video_codec))
+ av_opt_set(codec_context->priv_data, "rc_mode", "cqp", 0);
+ else if(vendor == GSR_GPU_VENDOR_NVIDIA)
+ av_opt_set(codec_context->priv_data, "rc", "constqp", 0);
+ else
+ av_opt_set(codec_context->priv_data, "rc_mode", "CQP", 0);
+ break;
+ }
+ case BitrateMode::VBR: {
+ if(video_codec_is_vulkan(video_codec))
+ av_opt_set(codec_context->priv_data, "rc_mode", "vbr", 0);
+ else if(vendor == GSR_GPU_VENDOR_NVIDIA)
+ av_opt_set(codec_context->priv_data, "rc", "vbr", 0);
+ else
+ av_opt_set(codec_context->priv_data, "rc_mode", "VBR", 0);
+ break;
+ }
+ }
//codec_context->global_quality = 4;
//codec_context->compression_level = 2;
}
//av_opt_set(codec_context->priv_data, "bsf", "hevc_metadata=colour_primaries=9:transfer_characteristics=16:matrix_coefficients=9", 0);
- //codec_context->rc_max_rate = codec_context->bit_rate;
- //codec_context->rc_min_rate = codec_context->bit_rate;
- //codec_context->rc_buffer_size = codec_context->bit_rate / 10;
- // TODO: Do this when not using cqp
- //codec_context->rc_initial_buffer_occupancy = codec_context->bit_rate * 1000;
-
codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
return codec_context;
}
-static bool vaapi_create_codec_context(AVCodecContext *video_codec_context, const char *card_path) {
- char render_path[128];
- if(!gsr_card_path_get_render_path(card_path, render_path)) {
- fprintf(stderr, "gsr error: failed to get /dev/dri/renderDXXX file from %s\n", card_path);
- return false;
- }
-
- AVBufferRef *device_ctx;
- if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
- fprintf(stderr, "Error: Failed to create hardware device context\n");
- return false;
- }
-
- AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
- if(!frame_context) {
- fprintf(stderr, "Error: Failed to create hwframe context\n");
- av_buffer_unref(&device_ctx);
- return false;
- }
-
- AVHWFramesContext *hw_frame_context =
- (AVHWFramesContext *)frame_context->data;
- hw_frame_context->width = video_codec_context->width;
- hw_frame_context->height = video_codec_context->height;
- hw_frame_context->sw_format = AV_PIX_FMT_NV12;
- hw_frame_context->format = video_codec_context->pix_fmt;
- hw_frame_context->device_ref = device_ctx;
- hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
-
- //hw_frame_context->initial_pool_size = 1;
-
- if (av_hwframe_ctx_init(frame_context) < 0) {
- fprintf(stderr, "Error: Failed to initialize hardware frame context "
- "(note: ffmpeg version needs to be > 4.0)\n");
- av_buffer_unref(&device_ctx);
- //av_buffer_unref(&frame_context);
- return false;
- }
-
- video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
- video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
- return true;
-}
-
-static bool check_if_codec_valid_for_hardware(const AVCodec *codec, gsr_gpu_vendor vendor, const char *card_path) {
- // Do not use AV_PIX_FMT_CUDA because we dont want to do full check with hardware context
- AVCodecContext *codec_context = create_video_codec_context(vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_VAAPI, VideoQuality::VERY_HIGH, 60, codec, false, vendor, FramerateMode::CONSTANT, false, GSR_COLOR_RANGE_LIMITED, 2);
- if(!codec_context)
- return false;
-
- codec_context->width = 512;
- codec_context->height = 512;
-
- if(vendor != GSR_GPU_VENDOR_NVIDIA) {
- if(!vaapi_create_codec_context(codec_context, card_path)) {
- avcodec_free_context(&codec_context);
- return false;
- }
- }
-
- bool success = false;
- success = avcodec_open2(codec_context, codec_context->codec, NULL) == 0;
- if(codec_context->hw_device_ctx)
- av_buffer_unref(&codec_context->hw_device_ctx);
- if(codec_context->hw_frames_ctx)
- av_buffer_unref(&codec_context->hw_frames_ctx);
- avcodec_free_context(&codec_context);
- return success;
-}
-
-static const AVCodec* find_h264_software_encoder() {
- return avcodec_find_encoder_by_name("libx264");
-}
-
-static const AVCodec* find_h264_encoder(gsr_gpu_vendor vendor, const char *card_path) {
- const AVCodec *codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "h264_nvenc" : "h264_vaapi");
- if(!codec)
- codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "nvenc_h264" : "vaapi_h264");
-
- if(!codec)
- return nullptr;
-
- static bool checked = false;
- static bool checked_success = true;
- if(!checked) {
- checked = true;
- if(!check_if_codec_valid_for_hardware(codec, vendor, card_path))
- checked_success = false;
- }
- return checked_success ? codec : nullptr;
-}
-
-static const AVCodec* find_hevc_encoder(gsr_gpu_vendor vendor, const char *card_path) {
- const AVCodec *codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "hevc_nvenc" : "hevc_vaapi");
- if(!codec)
- codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "nvenc_hevc" : "vaapi_hevc");
-
- if(!codec)
- return nullptr;
-
- static bool checked = false;
- static bool checked_success = true;
- if(!checked) {
- checked = true;
- if(!check_if_codec_valid_for_hardware(codec, vendor, card_path))
- checked_success = false;
- }
- return checked_success ? codec : nullptr;
-}
-
-static const AVCodec* find_av1_encoder(gsr_gpu_vendor vendor, const char *card_path) {
- // Workaround bug with av1 nvidia in older ffmpeg versions that causes the whole application to crash
- // when avcodec_open2 is opened with av1_nvenc
- if(vendor == GSR_GPU_VENDOR_NVIDIA && LIBAVCODEC_BUILD < AV_VERSION_INT(60, 30, 100)) {
- return nullptr;
- }
-
- const AVCodec *codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "av1_nvenc" : "av1_vaapi");
- if(!codec)
- codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "nvenc_av1" : "vaapi_av1");
-
- if(!codec)
- return nullptr;
-
- static bool checked = false;
- static bool checked_success = true;
- if(!checked) {
- checked = true;
- if(!check_if_codec_valid_for_hardware(codec, vendor, card_path))
- checked_success = false;
- }
- return checked_success ? codec : nullptr;
-}
-
static void open_audio(AVCodecContext *audio_codec_context) {
AVDictionary *options = nullptr;
av_dict_set(&options, "strict", "experimental", 0);
@@ -633,64 +691,110 @@ static AVFrame* create_audio_frame(AVCodecContext *audio_codec_context) {
return frame;
}
-static void open_video_software(AVCodecContext *codec_context, VideoQuality video_quality, PixelFormat pixel_format, bool hdr) {
- (void)pixel_format; // TODO:
- AVDictionary *options = nullptr;
+static void dict_set_profile(AVCodecContext *codec_context, gsr_gpu_vendor vendor, gsr_color_depth color_depth, AVDictionary **options) {
+ #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(61, 17, 100)
+ if(codec_context->codec_id == AV_CODEC_ID_H264) {
+ // TODO: Only for vaapi
+ //if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ // av_dict_set(options, "profile", "high10", 0);
+ //else
+ av_dict_set(options, "profile", "high", 0);
+ } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+ if(vendor == GSR_GPU_VENDOR_NVIDIA) {
+ if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ av_dict_set_int(options, "highbitdepth", 1, 0);
+ } else {
+ av_dict_set(options, "profile", "main", 0); // TODO: use professional instead?
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ av_dict_set(options, "profile", "main10", 0);
+ else
+ av_dict_set(options, "profile", "main", 0);
+ }
+ #else
+ if(codec_context->codec_id == AV_CODEC_ID_H264) {
+ // TODO: Only for vaapi
+ //if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ // av_dict_set_int(options, "profile", AV_PROFILE_H264_HIGH_10, 0);
+ //else
+ av_dict_set_int(options, "profile", AV_PROFILE_H264_HIGH, 0);
+ } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+ if(vendor == GSR_GPU_VENDOR_NVIDIA) {
+ if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ av_dict_set_int(options, "highbitdepth", 1, 0);
+ } else {
+ av_dict_set_int(options, "profile", AV_PROFILE_AV1_MAIN, 0); // TODO: use professional instead?
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ av_dict_set_int(options, "profile", AV_PROFILE_HEVC_MAIN_10, 0);
+ else
+ av_dict_set_int(options, "profile", AV_PROFILE_HEVC_MAIN, 0);
+ }
+ #endif
+}
+static void video_software_set_qp(AVCodecContext *codec_context, VideoQuality video_quality, bool hdr, AVDictionary **options) {
+ // 8 bit / 10 bit = 80%
const float qp_multiply = hdr ? 8.0f/10.0f : 1.0f;
if(codec_context->codec_id == AV_CODEC_ID_AV1) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
} else if(codec_context->codec_id == AV_CODEC_ID_H264) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 34 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 34 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 30 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 26 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 23 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 22 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 20 * qp_multiply, 0);
break;
}
} else {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
}
+}
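
A worked example may help with the qp_multiply scaling above. The following is an illustrative sketch, not part of the patch; the base QP values are copied from the AV1 branch above, and it shows what av_dict_set_int actually receives once the float product is truncated to an integer:

    #include <cstdio>

    int main(void) {
        const float qp_multiply = 8.0f / 10.0f; // hdr ? 0.8f : 1.0f, as in the functions above
        const int base_qp[] = {35, 30, 25, 22}; // AV1 medium..ultra values from above
        for(const int qp : base_qp)
            printf("8-bit qp %d -> 10-bit/hdr qp %d\n", qp, (int)(qp * qp_multiply));
        return 0; // prints 28, 24, 20 and 17
    }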
+
+static void open_video_software(AVCodecContext *codec_context, VideoQuality video_quality, PixelFormat pixel_format, bool hdr, gsr_color_depth color_depth, BitrateMode bitrate_mode) {
+ (void)pixel_format; // TODO:
+ AVDictionary *options = nullptr;
+
+ if(bitrate_mode == BitrateMode::QP)
+ video_software_set_qp(codec_context, video_quality, hdr, &options);
av_dict_set(&options, "preset", "medium", 0);
- if(hdr) {
- av_dict_set(&options, "profile", "high10", 0);
- } else {
- av_dict_set(&options, "profile", "high", 0);
- }
+ dict_set_profile(codec_context, GSR_GPU_VENDOR_INTEL, color_depth, &options);
// TODO: If streaming or piping output set this to zerolatency
av_dict_set(&options, "tune", "fastdecode", 0);
@@ -707,131 +811,94 @@ static void open_video_software(AVCodecContext *codec_context, VideoQuality vide
}
}
-static void open_video_hardware(AVCodecContext *codec_context, VideoQuality video_quality, bool very_old_gpu, gsr_gpu_vendor vendor, PixelFormat pixel_format, bool hdr) {
- (void)very_old_gpu;
- AVDictionary *options = nullptr;
+static void video_set_rc(VideoCodec video_codec, gsr_gpu_vendor vendor, BitrateMode bitrate_mode, AVDictionary **options) {
+ switch(bitrate_mode) {
+ case BitrateMode::QP: {
+ if(video_codec_is_vulkan(video_codec))
+ av_dict_set(options, "rc_mode", "cqp", 0);
+ else if(vendor == GSR_GPU_VENDOR_NVIDIA)
+ av_dict_set(options, "rc", "constqp", 0);
+ else
+ av_dict_set(options, "rc_mode", "CQP", 0);
+ break;
+ }
+ case BitrateMode::VBR: {
+ if(video_codec_is_vulkan(video_codec))
+ av_dict_set(options, "rc_mode", "vbr", 0);
+ else if(vendor == GSR_GPU_VENDOR_NVIDIA)
+ av_dict_set(options, "rc", "vbr", 0);
+ else
+ av_dict_set(options, "rc_mode", "VBR", 0);
+ break;
+ }
+ }
+}
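
Note that video_set_rc only fills the options dictionary; the rate-control mode takes effect when that dictionary is handed to avcodec_open2. A minimal sketch of the call pattern (not part of the patch; assumes codec_context, video_codec, vendor and bitrate_mode are in scope as they are in open_video_hardware below):

    AVDictionary *options = nullptr;
    video_set_rc(video_codec, vendor, bitrate_mode, &options);
    // AMD/Intel + BitrateMode::VBR -> "rc_mode" = "VBR" (vaapi); nvidia -> "rc" = "vbr" (nvenc)
    if(avcodec_open2(codec_context, codec_context->codec, &options) < 0)
        fprintf(stderr, "gsr error: failed to open video encoder\n");
    av_dict_free(&options); // entries the encoder did not consume remain in the dictionary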
+
+static void video_hardware_set_qp(AVCodecContext *codec_context, VideoQuality video_quality, gsr_gpu_vendor vendor, bool hdr, AVDictionary **options) {
// 8 bit / 10 bit = 80%
const float qp_multiply = hdr ? 8.0f/10.0f : 1.0f;
if(vendor == GSR_GPU_VENDOR_NVIDIA) {
- // Disable setting preset since some nvidia gpus cant handle it nicely and greatly reduce encoding performance (from more than 60 fps to less than 45 fps) (such as Nvidia RTX A2000)
- #if 0
- bool supports_p4 = false;
- bool supports_p5 = false;
-
- const AVOption *opt = nullptr;
- while((opt = av_opt_next(codec_context->priv_data, opt))) {
- if(opt->type == AV_OPT_TYPE_CONST) {
- if(strcmp(opt->name, "p4") == 0)
- supports_p4 = true;
- else if(strcmp(opt->name, "p5") == 0)
- supports_p5 = true;
- }
- }
- #endif
-
+ // TODO: Test if these should be in the same range as vaapi
if(codec_context->codec_id == AV_CODEC_ID_AV1) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
} else if(codec_context->codec_id == AV_CODEC_ID_H264) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 34 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 34 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 30 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 26 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 23 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 22 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 20 * qp_multiply, 0);
break;
}
- } else {
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
- }
-
- #if 0
- if(!supports_p4 && !supports_p5)
- fprintf(stderr, "Info: your ffmpeg version is outdated. It's recommended that you use the flatpak version of gpu-screen-recorder version instead, which you can find at https://flathub.org/apps/details/com.dec05eba.gpu_screen_recorder\n");
-
- //if(is_livestream) {
- // av_dict_set_int(&options, "zerolatency", 1, 0);
- // //av_dict_set(&options, "preset", "llhq", 0);
- //}
-
- // I want to use a good preset for the gpu but all gpus prefer different
- // presets. Nvidia and ffmpeg used to support "hq" preset that chose the best preset for the gpu
- // with pretty good performance but you now have to choose p1-p7, which are gpu agnostic and on
- // older gpus p5-p7 slow the gpu down to a crawl...
- // "hq" is now just an alias for p7 in ffmpeg :(
- // TODO: Temporary disable because of stuttering?
-
- // TODO: Preset is set to p5 for now but it should ideally be p6 or p7.
- // This change is needed because for certain sizes of a window (or monitor?) such as 971x780 causes encoding to freeze
- // when using h264 codec. This is a new(?) nvidia driver bug.
- if(very_old_gpu)
- av_dict_set(&options, "preset", supports_p4 ? "p4" : "medium", 0);
- else
- av_dict_set(&options, "preset", supports_p5 ? "p5" : "slow", 0);
- #endif
-
- av_dict_set(&options, "tune", "hq", 0);
- av_dict_set(&options, "rc", "constqp", 0);
-
- // TODO: Enable multipass
-
- if(codec_context->codec_id == AV_CODEC_ID_H264) {
- switch(pixel_format) {
- case PixelFormat::YUV420:
- av_dict_set(&options, "profile", "high", 0);
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP8 || codec_context->codec_id == AV_CODEC_ID_VP9) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
- case PixelFormat::YUV444:
- av_dict_set(&options, "profile", "high444p", 0);
+ case VideoQuality::HIGH:
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
- }
- } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
- switch(pixel_format) {
- case PixelFormat::YUV420:
- av_dict_set(&options, "rgb_mode", "yuv420", 0);
+ case VideoQuality::VERY_HIGH:
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
- case PixelFormat::YUV444:
- av_dict_set(&options, "rgb_mode", "yuv444", 0);
+ case VideoQuality::ULTRA:
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
- } else {
- //av_dict_set(&options, "profile", "main10", 0);
- //av_dict_set(&options, "pix_fmt", "yuv420p16le", 0);
- if(hdr) {
- av_dict_set(&options, "profile", "main10", 0);
- } else {
- av_dict_set(&options, "profile", "main", 0);
- }
}
} else {
if(codec_context->codec_id == AV_CODEC_ID_AV1) {
@@ -839,54 +906,109 @@ static void open_video_hardware(AVCodecContext *codec_context, VideoQuality vide
} else if(codec_context->codec_id == AV_CODEC_ID_H264) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 34 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 34 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 30 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 26 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 23 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 22 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 20 * qp_multiply, 0);
break;
}
- } else {
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
+ break;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP8 || codec_context->codec_id == AV_CODEC_ID_VP9) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
+ break;
+ case VideoQuality::HIGH:
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
+ break;
+ case VideoQuality::VERY_HIGH:
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
+ break;
+ case VideoQuality::ULTRA:
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
}
+ }
+}
+
+static void open_video_hardware(AVCodecContext *codec_context, VideoQuality video_quality, bool very_old_gpu, gsr_gpu_vendor vendor, PixelFormat pixel_format, bool hdr, gsr_color_depth color_depth, BitrateMode bitrate_mode, VideoCodec video_codec, bool low_power) {
+ (void)very_old_gpu;
+ AVDictionary *options = nullptr;
+
+ if(bitrate_mode == BitrateMode::QP)
+ video_hardware_set_qp(codec_context, video_quality, vendor, hdr, &options);
+
+ video_set_rc(video_codec, vendor, bitrate_mode, &options);
+
+ // TODO: Enable multipass
+
+ if(vendor == GSR_GPU_VENDOR_NVIDIA) {
+ av_dict_set(&options, "tune", "hq", 0);
+
+ dict_set_profile(codec_context, vendor, color_depth, &options);
+ if(codec_context->codec_id == AV_CODEC_ID_H264) {
+ // TODO: h264 10bit?
+ // TODO:
+ // switch(pixel_format) {
+ // case PixelFormat::YUV420:
+ // av_dict_set_int(&options, "profile", AV_PROFILE_H264_HIGH, 0);
+ // break;
+ // case PixelFormat::YUV444:
+ // av_dict_set_int(&options, "profile", AV_PROFILE_H264_HIGH_444, 0);
+ // break;
+ // }
+ } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+ switch(pixel_format) {
+ case PixelFormat::YUV420:
+ av_dict_set(&options, "rgb_mode", "yuv420", 0);
+ break;
+ case PixelFormat::YUV444:
+ av_dict_set(&options, "rgb_mode", "yuv444", 0);
+ break;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ //av_dict_set(&options, "pix_fmt", "yuv420p16le", 0);
+ }
+ } else {
// TODO: More quality options
- av_dict_set(&options, "rc_mode", "CQP", 0);
- //av_dict_set_int(&options, "low_power", 1, 0);
+ if(low_power)
+ av_dict_set_int(&options, "low_power", 1, 0);
+ // Improves performance but increases VRAM usage
+ //av_dict_set_int(&options, "async_depth", 8, 0);
if(codec_context->codec_id == AV_CODEC_ID_H264) {
- av_dict_set(&options, "profile", "high", 0);
// Removed because it causes stutter in games for some people
//av_dict_set_int(&options, "quality", 5, 0); // quality preset
} else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
- av_dict_set(&options, "profile", "main", 0); // TODO: use professional instead?
av_dict_set(&options, "tier", "main", 0);
- } else {
- if(hdr) {
- av_dict_set(&options, "profile", "main10", 0);
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ if(hdr)
av_dict_set(&options, "sei", "hdr", 0);
- } else {
- av_dict_set(&options, "profile", "main", 0);
- }
}
+
+ // TODO: vp8/vp9 10bit
}
if(codec_context->codec_id == AV_CODEC_ID_H264) {
@@ -905,24 +1027,27 @@ static void open_video_hardware(AVCodecContext *codec_context, VideoQuality vide
static void usage_header() {
const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
- fprintf(stderr, "usage: %s -w <window_id|monitor|focused> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>] [-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|hevc_hdr|av1|av1_hdr] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-cr limited|full] [-mf yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>] [-encoder gpu|cpu] [-o <output_file>] [-v yes|no] [-h|--help]\n", program_name);
+ fprintf(stderr, "usage: %s -w <window_id|monitor|focused|portal> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>] [-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|av1|vp8|vp9|hevc_hdr|av1_hdr|hevc_10bit|av1_10bit] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-bm auto|qp|vbr] [-cr limited|full] [-df yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>] [-restore-portal-session yes|no] [-portal-session-token-filepath filepath] [-encoder gpu|cpu] [-o <output_file>] [-v yes|no] [--version] [-h|--help]\n", program_name);
}
+// TODO: Update with portal info
static void usage_full() {
const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
usage_header();
fprintf(stderr, "\n");
fprintf(stderr, "OPTIONS:\n");
- fprintf(stderr, " -w Window id to record, a display (monitor name), \"screen\", \"screen-direct-force\" or \"focused\".\n");
- fprintf(stderr, " If this is \"screen\" or \"screen-direct-force\" then all monitors are recorded.\n");
+ fprintf(stderr, " -w Window id to record, a display (monitor name), \"screen\", \"screen-direct-force\", \"focused\" or \"portal\".\n");
+ fprintf(stderr, " If this is \"portal\" then xdg desktop screencast portal with pipewire will be used. Portal option is only available on Wayland.\n");
+ fprintf(stderr, " If you select to save the session (token) in the desktop portal capture popup then the session will be saved for the next time you use \"portal\",\n");
+ fprintf(stderr, " but the session will be ignored unless you run GPU Screen Recorder with the '-restore-portal-session yes' option.\n");
+ fprintf(stderr, " If this is \"screen\" or \"screen-direct-force\" then all monitors are recorded on Nvidia X11. On AMD/Intel or wayland \"screen\" will record the first monitor found.\n");
fprintf(stderr, " \"screen-direct-force\" is not recommended unless you use a VRR (G-SYNC) monitor on Nvidia X11 and you are aware that using this option can cause games to freeze/crash or other issues because of Nvidia driver issues.\n");
fprintf(stderr, " \"screen-direct-force\" option is only available on Nvidia X11. VRR works without this option on other systems.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -c Container format for output file, for example mp4, or flv. Only required if no output file is specified or if recording in replay buffer mode.\n");
fprintf(stderr, " If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n");
- fprintf(stderr, " Only containers that support h264, hevc or av1 are supported, which means that only mp4, mkv, flv (and some others) are supported.\n");
- fprintf(stderr, " WebM is not supported yet (most hardware doesn't support WebM video encoding).\n");
+ fprintf(stderr, " Only containers that support h264, hevc, av1, vp8 or vp9 are supported, which means that only mp4, mkv, flv, webm (and some others) are supported.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -s The size (area) to record at in the format WxH, for example 1920x1080. This option is only supported (and required) when -w is \"focused\".\n");
fprintf(stderr, "\n");
@@ -934,6 +1059,7 @@ static void usage_full() {
fprintf(stderr, " -a Audio device to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device.\n");
fprintf(stderr, " A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\".\n");
fprintf(stderr, " Multiple audio devices can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"alsa_output1|alsa_output2\".\n");
+ fprintf(stderr, " The audio device can also be \"default_output\" in which case the default output device is used, or \"default_input\" in which case the default input device is used.\n");
fprintf(stderr, " If the audio device is an empty string then the audio device is ignored.\n");
fprintf(stderr, " Optional, no audio track is added by default.\n");
fprintf(stderr, "\n");
@@ -944,12 +1070,14 @@ static void usage_full() {
fprintf(stderr, " and the video will only be saved when the gpu-screen-recorder is closed. This feature is similar to Nvidia's instant replay feature.\n");
fprintf(stderr, " This option has be between 5 and 1200. Note that the replay buffer size will not always be precise, because of keyframes. Optional, disabled by default.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -k Video codec to use. Should be either 'auto', 'h264', 'hevc', 'av1', 'hevc_hdr' or 'av1_hdr'. Optional, defaults to 'auto' which defaults to 'h264'.\n");
- fprintf(stderr, " Forcefully set to 'h264' if the file container type is 'flv'.\n");
- fprintf(stderr, " 'hevc_hdr' and 'av1_hdr' option is not available on X11.\n");
- fprintf(stderr, " Note: hdr metadata is not included in the video when recording with 'hevc_hdr'/'av1_hdr' because of bugs in AMD, Intel and NVIDIA drivers (amazin', they are all bugged).\n");
+ fprintf(stderr, " -k Video codec to use. Should be either 'auto', 'h264', 'hevc', 'av1', 'vp8', 'vp9', 'hevc_hdr', 'av1_hdr', 'hevc_10bit' or 'av1_10bit'.\n");
+ fprintf(stderr, " Optional, set to 'auto' by default which defaults to 'h264'. Forcefully set to 'h264' if the file container type is 'flv'.\n");
+ fprintf(stderr, " 'hevc_hdr' and 'av1_hdr' option is not available on X11 nor when using the portal capture option.\n");
+ fprintf(stderr, " 'hevc_10bit' and 'av1_10bit' options allow you to select 10 bit color depth which can reduce banding and improve quality in darker areas, but not all video players support 10 bit color depth\n");
+ fprintf(stderr, " and if you upload the video to a website the website might reduce 10 bit to 8 bit.\n");
+ fprintf(stderr, " Note that when using 'hevc_hdr' or 'av1_hdr' the color depth is also 10 bits.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -ac Audio codec to use. Should be either 'aac', 'opus' or 'flac'. Defaults to 'opus' for .mp4/.mkv files, otherwise defaults to 'aac'.\n");
+ fprintf(stderr, " -ac Audio codec to use. Should be either 'aac', 'opus' or 'flac'. Optional, set to 'opus' for .mp4/.mkv files, otherwise set to 'aac'.\n");
fprintf(stderr, " 'opus' and 'flac' is only supported by .mp4/.mkv files. 'opus' is recommended for best performance and smallest audio size.\n");
fprintf(stderr, " Flac audio codec is option is disable at the moment because of a temporary issue.\n");
fprintf(stderr, "\n");
@@ -960,37 +1088,66 @@ static void usage_full() {
fprintf(stderr, " is dropped when you record a game. Only needed if you are recording a game that is bottlenecked by GPU. The same issue exists on Wayland but overclocking is not possible on Wayland.\n");
fprintf(stderr, " Works only if your have \"Coolbits\" set to \"12\" in NVIDIA X settings, see README for more information. Note! use at your own risk! Optional, disabled by default.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -fm Framerate mode. Should be either 'cfr' (constant frame rate), 'vfr' (variable frame rate) or 'content'. Defaults to 'vfr'.\n");
+ fprintf(stderr, " -fm Framerate mode. Should be either 'cfr' (constant frame rate), 'vfr' (variable frame rate) or 'content'. Optional, set to 'vfr' by default.\n");
fprintf(stderr, " 'vfr' is recommended for recording for less issue with very high system load but some applications such as video editors may not support it properly.\n");
- fprintf(stderr, " 'content' is currently only supported when recording a single window, on X11. The 'content' option matches the recording frame rate to the captured content.\n");
+ fprintf(stderr, " 'content' is currently only supported on X11 or when using portal capture option. The 'content' option matches the recording frame rate to the captured content.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " -bm Bitrate mode. Should be either 'auto', 'qp' (constant quality) or 'vbr' (variable bitrate). Optional, set to 'auto' by default which defaults to 'qp' on all devices\n");
+ fprintf(stderr, " except steam deck that has broken drivers and doesn't support qp.\n");
+ fprintf(stderr, " 'vbr' option is not supported when using '-encoder cpu' option.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -cr Color range. Should be either 'limited' (aka mpeg) or 'full' (aka jpeg). Defaults to 'limited'.\n");
+ fprintf(stderr, " -cr Color range. Should be either 'limited' (aka mpeg) or 'full' (aka jpeg). Optional, set to 'limited' by default.\n");
fprintf(stderr, " Limited color range means that colors are in range 16-235 (4112-60395 for hdr) while full color range means that colors are in range 0-255 (0-65535 for hdr).\n");
- fprintf(stderr, " Note that some buggy video players (such as vlc) are unable to correctly display videos in full color range.\n");
+ fprintf(stderr, " Note that some buggy video players (such as vlc) are unable to correctly display videos in full color range and when upload the video to websites the website\n");
+ fprintf(stderr, " might re-encoder the video to make the video limited color range.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -mf Organise replays in folders based on the current date.\n");
+ fprintf(stderr, " -df Organise replays in folders based on the current date.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -sc Run a script on the saved video file (non-blocking). The first argument to the script is the filepath to the saved video file and the second argument is the recording type (either \"regular\" or \"replay\").\n");
+ fprintf(stderr, " -sc Run a script on the saved video file (asynchronously). The first argument to the script is the filepath to the saved video file and the second argument is the recording type (either \"regular\" or \"replay\").\n");
fprintf(stderr, " Not applicable for live streams.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -cursor\n");
- fprintf(stderr, " Record cursor. Defaults to 'yes'.\n");
+ fprintf(stderr, " Record cursor. Optional, set to 'yes' by default.\n");
+ fprintf(stderr, "\n");
fprintf(stderr, " -keyint\n");
fprintf(stderr, " Specifies the keyframe interval in seconds, the max amount of time to wait to generate a keyframe. Keyframes can be generated more often than this.\n");
fprintf(stderr, " This also affects seeking in the video and may affect how the replay video is cut. If this is set to 10 for example then you can only seek in 10-second chunks in the video.\n");
fprintf(stderr, " Setting this to a higher value reduces the video file size if you are ok with the previously described downside. This option is expected to be a floating point number.\n");
fprintf(stderr, " By default this value is set to 2.0.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " -restore-portal-session\n");
+ fprintf(stderr, " If GPU Screen Recorder should use the same capture option as the last time. Using this option removes the popup asking what you want to record the next time you record with '-w portal' if you selected the option to save session (token) in the desktop portal screencast popup.\n");
+ fprintf(stderr, " This option may not have any effect on your Wayland compositor and your systems desktop portal needs to support ScreenCast version 5 or later. Optional, set to 'no' by default.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " -portal-session-token-filepath\n");
+ fprintf(stderr, " This option is used together with -restore-portal-session option to specify the file path to save/restore the portal session token to/from.\n");
+ fprintf(stderr, " This can be used to remember different portal capture options depending on different recording option (such as recording/replay).\n");
+ fprintf(stderr, " Optional, set to \"$XDG_CONFIG_HOME/gpu-screen-recorder/restore_token\" by default ($XDG_CONFIG_HOME defaults to \"$HOME/.config\").\n");
+ fprintf(stderr, " Note: the directory to the portal session token file is created automatically if it doesn't exist.\n");
+ fprintf(stderr, "\n");
fprintf(stderr, " -encoder\n");
fprintf(stderr, " Which device should be used for video encoding. Should either be 'gpu' or 'cpu'. Does currently only work with h264 codec option (-k).\n");
fprintf(stderr, " Optional, set to 'gpu' by default.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " --list-supported-video-codecs\n");
- fprintf(stderr, " List supported video codecs and exits. Prints h264, hevc, hevc_hdr, av1 and av1_hdr (if supported).\n");
+ fprintf(stderr, " --info\n");
+ fprintf(stderr, " List info about the system (for use by GPU Screen Recorder UI). Lists the following information (prints them to stdout and exits):\n");
+ fprintf(stderr, " Supported video codecs (h264, h264_software, hevc, hevc_hdr, hevc_10bit, av1, av1_hdr, av1_10bit, vp8, vp9 (if supported)).\n");
+ fprintf(stderr, " Supported capture options (window, focused, screen, monitors and portal, if supported by the system).\n");
+ fprintf(stderr, " If opengl initialization fails then the program exits with 22, if no usable drm device is found then it exits with 23. On success it exits with 0.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " --list-audio-devices\n");
+ fprintf(stderr, " List audio devices (for use by GPU Screen Recorder UI). Lists audio devices in the following format (prints them to stdout and exits):\n");
+ fprintf(stderr, " <audio_device_name>|<audio_device_name_in_human_readable_format>\n");
+ fprintf(stderr, " For example:\n");
+ fprintf(stderr, " bluez_input.88:C9:E8:66:A2:27|WH-1000XM4\n");
+ fprintf(stderr, " The <audio_device_name> is the name to pass to GPU Screen Recorder in a -a option.\n");
+ fprintf(stderr, " --version\n");
+ fprintf(stderr, " Print version (%s) and exit\n", GSR_VERSION);
fprintf(stderr, "\n");
- //fprintf(stderr, " -pixfmt The pixel format to use for the output video. yuv420 is the most common format and is best supported, but the color is compressed, so colors can look washed out and certain colors of text can look bad. Use yuv444 for no color compression, but the video may not work everywhere and it may not work with hardware video decoding. Optional, defaults to yuv420\n");
+ //fprintf(stderr, " -pixfmt The pixel format to use for the output video. yuv420 is the most common format and is best supported, but the color is compressed, so colors can look washed out and certain colors of text can look bad. Use yuv444 for no color compression, but the video may not work everywhere and it may not work with hardware video decoding. Optional, set to 'yuv420' by default\n");
fprintf(stderr, " -o The output file path. If omitted then the encoded data is sent to stdout. Required in replay mode (when using -r).\n");
fprintf(stderr, " In replay mode this has to be a directory instead of a file.\n");
- fprintf(stderr, " The directory to the file is created (recursively) if it doesn't already exist.\n");
+ fprintf(stderr, " Note: the directory to the file is created automatically if it doesn't already exist.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -v Prints per second, fps updates. Optional, set to 'yes' by default.\n");
fprintf(stderr, "\n");
@@ -1003,9 +1160,11 @@ static void usage_full() {
fprintf(stderr, " Send signal SIGUSR2 to gpu-screen-recorder (killall -SIGUSR2 gpu-screen-recorder) to pause/unpause recording. Only applicable and useful when recording (not streaming nor replay).\n");
fprintf(stderr, "\n");
fprintf(stderr, "EXAMPLES:\n");
- fprintf(stderr, " %s -w screen -f 60 -a \"$(pactl get-default-sink).monitor\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
- fprintf(stderr, " %s -w screen -f 60 -a \"$(pactl get-default-sink).monitor|$(pactl get-default-source)\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
- fprintf(stderr, " %s -w screen -f 60 -a \"$(pactl get-default-sink).monitor\" -c mkv -r 60 -o \"$HOME/Videos\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a default_output -o \"$HOME/Videos/video.mp4\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a \"default_output|default_input\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -r 60 -o \"$HOME/Videos\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -sc script.sh -r 60 -o \"$HOME/Videos\"\n", program_name);
+ fprintf(stderr, " %s -w portal -f 60 -a default_output -restore-portal-session yes -o \"$HOME/Videos/video.mp4\"\n", program_name);
//fprintf(stderr, " gpu-screen-recorder -w screen -f 60 -q ultra -pixfmt yuv444 -o video.mp4\n");
_exit(1);
}
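
Since the --info and --list-audio-devices output documented above is meant to be consumed by the GPU Screen Recorder UI, a small consumer sketch may help. Everything below is illustrative and not part of the patch; it relies only on the "section=<name>" and "key|value" line format described in usage_full:

    #include <cstdio>
    #include <cstring>
    #include <string>
    #include <sys/wait.h>

    int main(void) {
        FILE *proc = popen("gpu-screen-recorder --info", "r");
        if(!proc)
            return 1;

        char line[512];
        std::string section;
        while(fgets(line, sizeof(line), proc)) {
            line[strcspn(line, "\n")] = '\0'; // strip the trailing newline
            if(strncmp(line, "section=", 8) == 0)
                section = line + 8; // system_info, gpu_info, video_codecs or capture_options
            else if(line[0] != '\0')
                printf("[%s] %s\n", section.c_str(), line); // "key|value" pair or a bare codec name
        }

        const int status = pclose(proc);
        // Exit codes documented above: 22 = opengl init failure, 23 = no usable drm device, 0 = ok
        return status == -1 ? 1 : WEXITSTATUS(status);
    }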
@@ -1176,43 +1335,62 @@ struct AudioTrack {
int64_t pts = 0;
};
-static std::future<void> save_replay_thread;
-static std::vector<std::shared_ptr<PacketData>> save_replay_packets;
-static std::string save_replay_output_filepath;
+static bool add_hdr_metadata_to_video_stream(gsr_capture *cap, AVStream *video_stream) {
+ size_t light_metadata_size = 0;
+ size_t mastering_display_metadata_size = 0;
+ AVContentLightMetadata *light_metadata = av_content_light_metadata_alloc(&light_metadata_size);
+ #if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(59, 37, 100)
+ AVMasteringDisplayMetadata *mastering_display_metadata = av_mastering_display_metadata_alloc();
+ mastering_display_metadata_size = sizeof(*mastering_display_metadata);
+ #else
+ AVMasteringDisplayMetadata *mastering_display_metadata = av_mastering_display_metadata_alloc_size(&mastering_display_metadata_size);
+ #endif
-static int create_directory_recursive(char *path) {
- int path_len = strlen(path);
- char *p = path;
- char *end = path + path_len;
- for(;;) {
- char *slash_p = strchr(p, '/');
+ if(!light_metadata || !mastering_display_metadata) {
+ if(light_metadata)
+ av_freep(&light_metadata);
- // Skips first '/', we don't want to try and create the root directory
- if(slash_p == path) {
- ++p;
- continue;
- }
+ if(mastering_display_metadata)
+ av_freep(&mastering_display_metadata);
- if(!slash_p)
- slash_p = end;
+ return false;
+ }
- char prev_char = *slash_p;
- *slash_p = '\0';
- int err = mkdir(path, S_IRWXU);
- *slash_p = prev_char;
+ if(!gsr_capture_set_hdr_metadata(cap, mastering_display_metadata, light_metadata)) {
+ av_freep(&light_metadata);
+ av_freep(&mastering_display_metadata);
+ return false;
+ }
- if(err == -1 && errno != EEXIST)
- return err;
+ // TODO: More error checking
- if(slash_p == end)
- break;
- else
- p = slash_p + 1;
- }
- return 0;
+ #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(60, 31, 102)
+ const bool content_light_level_added = av_stream_add_side_data(video_stream, AV_PKT_DATA_CONTENT_LIGHT_LEVEL, (uint8_t*)light_metadata, light_metadata_size) == 0;
+ #else
+ const bool content_light_level_added = av_packet_side_data_add(&video_stream->codecpar->coded_side_data, &video_stream->codecpar->nb_coded_side_data, AV_PKT_DATA_CONTENT_LIGHT_LEVEL, light_metadata, light_metadata_size, 0) != NULL;
+ #endif
+
+ #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(60, 31, 102)
+ const bool mastering_display_metadata_added = av_stream_add_side_data(video_stream, AV_PKT_DATA_MASTERING_DISPLAY_METADATA, (uint8_t*)mastering_display_metadata, mastering_display_metadata_size) == 0;
+ #else
+ const bool mastering_display_metadata_added = av_packet_side_data_add(&video_stream->codecpar->coded_side_data, &video_stream->codecpar->nb_coded_side_data, AV_PKT_DATA_MASTERING_DISPLAY_METADATA, mastering_display_metadata, mastering_display_metadata_size, 0) != NULL;
+ #endif
+
+ if(!content_light_level_added)
+ av_freep(&light_metadata);
+
+ if(!mastering_display_metadata_added)
+ av_freep(&mastering_display_metadata);
+
+ // Return true even on failure because we don't want to retry adding hdr metadata on failure
+ return true;
}
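
One subtlety in add_hdr_metadata_to_video_stream: both side-data APIs take ownership of the buffer on success, which is why the buffers are only freed when attaching fails. A condensed sketch of that pattern (illustrative only; it mirrors the version-gated calls above, with the helper name being hypothetical):

    static bool attach_side_data(AVStream *stream, enum AVPacketSideDataType type, uint8_t *data, size_t size) {
    #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(60, 31, 102)
        const bool added = av_stream_add_side_data(stream, type, data, size) == 0;
    #else
        const bool added = av_packet_side_data_add(&stream->codecpar->coded_side_data,
                                                   &stream->codecpar->nb_coded_side_data,
                                                   type, data, size, 0) != NULL;
    #endif
        if(!added)
            av_freep(&data); // on failure the buffer is still owned by the caller
        return added;
    }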
-static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, std::deque<std::shared_ptr<PacketData>> &frame_data_queue, bool frames_erased, std::string output_dir, const char *container_format, const std::string &file_extension, std::mutex &write_output_mutex, bool make_folders) {
+static std::future<void> save_replay_thread;
+static std::vector<std::shared_ptr<PacketData>> save_replay_packets;
+static std::string save_replay_output_filepath;
+
+static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, std::deque<std::shared_ptr<PacketData>> &frame_data_queue, bool frames_erased, std::string output_dir, const char *container_format, const std::string &file_extension, std::mutex &write_output_mutex, bool date_folders, bool hdr, gsr_capture *capture) {
if(save_replay_thread.valid())
return;
@@ -1255,7 +1433,7 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
}
}
- if (make_folders) {
+ if (date_folders) {
std::string output_folder = output_dir + '/' + get_date_only_str();
create_directory_recursive(&output_folder[0]);
save_replay_output_filepath = output_folder + "/Replay_" + get_time_only_str() + "." + file_extension;
@@ -1264,36 +1442,42 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
save_replay_output_filepath = output_dir + "/Replay_" + get_date_str() + "." + file_extension;
}
- save_replay_thread = std::async(std::launch::async, [video_stream_index, container_format, start_index, video_pts_offset, audio_pts_offset, video_codec_context, &audio_tracks]() mutable {
- AVFormatContext *av_format_context;
- avformat_alloc_output_context2(&av_format_context, nullptr, container_format, nullptr);
+ AVFormatContext *av_format_context;
+ avformat_alloc_output_context2(&av_format_context, nullptr, container_format, nullptr);
- AVStream *video_stream = create_stream(av_format_context, video_codec_context);
- avcodec_parameters_from_context(video_stream->codecpar, video_codec_context);
+ AVStream *video_stream = create_stream(av_format_context, video_codec_context);
+ avcodec_parameters_from_context(video_stream->codecpar, video_codec_context);
- std::unordered_map<int, AudioTrack*> stream_index_to_audio_track_map;
- for(AudioTrack &audio_track : audio_tracks) {
- stream_index_to_audio_track_map[audio_track.stream_index] = &audio_track;
- AVStream *audio_stream = create_stream(av_format_context, audio_track.codec_context);
- avcodec_parameters_from_context(audio_stream->codecpar, audio_track.codec_context);
- audio_track.stream = audio_stream;
- }
+ std::unordered_map<int, AudioTrack*> stream_index_to_audio_track_map;
+ for(AudioTrack &audio_track : audio_tracks) {
+ stream_index_to_audio_track_map[audio_track.stream_index] = &audio_track;
+ AVStream *audio_stream = create_stream(av_format_context, audio_track.codec_context);
+ avcodec_parameters_from_context(audio_stream->codecpar, audio_track.codec_context);
+ audio_track.stream = audio_stream;
+ }
- int ret = avio_open(&av_format_context->pb, save_replay_output_filepath.c_str(), AVIO_FLAG_WRITE);
- if (ret < 0) {
- fprintf(stderr, "Error: Could not open '%s': %s. Make sure %s is an existing directory with write access\n", save_replay_output_filepath.c_str(), av_error_to_string(ret), save_replay_output_filepath.c_str());
- return;
- }
+ const int open_ret = avio_open(&av_format_context->pb, save_replay_output_filepath.c_str(), AVIO_FLAG_WRITE);
+ if (open_ret < 0) {
+ fprintf(stderr, "Error: Could not open '%s': %s. Make sure %s is an existing directory with write access\n", save_replay_output_filepath.c_str(), av_error_to_string(open_ret), save_replay_output_filepath.c_str());
+ avformat_free_context(av_format_context);
+ return;
+ }
- AVDictionary *options = nullptr;
- av_dict_set(&options, "strict", "experimental", 0);
+ AVDictionary *options = nullptr;
+ av_dict_set(&options, "strict", "experimental", 0);
- ret = avformat_write_header(av_format_context, &options);
- if (ret < 0) {
- fprintf(stderr, "Error occurred when writing header to output file: %s\n", av_error_to_string(ret));
- return;
- }
+ const int header_write_ret = avformat_write_header(av_format_context, &options);
+ if (header_write_ret < 0) {
+ fprintf(stderr, "Error occurred when writing header to output file: %s\n", av_error_to_string(header_write_ret));
+ avio_close(av_format_context->pb);
+ avformat_free_context(av_format_context);
+ av_dict_free(&options);
+ return;
+ }
+
+ if(hdr)
+ add_hdr_metadata_to_video_stream(capture, video_stream);
+ save_replay_thread = std::async(std::launch::async, [video_stream_index, video_stream, start_index, video_pts_offset, audio_pts_offset, video_codec_context, &audio_tracks, stream_index_to_audio_track_map, av_format_context, options]() mutable {
for(size_t i = start_index; i < save_replay_packets.size(); ++i) {
// TODO: Check if successful
AVPacket av_packet;
@@ -1325,7 +1509,7 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
av_packet.stream_index = stream->index;
av_packet_rescale_ts(&av_packet, codec_context->time_base, stream->time_base);
- ret = av_write_frame(av_format_context, &av_packet);
+ const int ret = av_write_frame(av_format_context, &av_packet);
if(ret < 0)
fprintf(stderr, "Error: Failed to write frame index %d to muxer, reason: %s (%d)\n", stream->index, av_error_to_string(ret), ret);
@@ -1498,6 +1682,69 @@ static int init_filter_graph(AVCodecContext *audio_codec_context, AVFilterGraph
return 0;
}
+static gsr_video_encoder* create_video_encoder(gsr_egl *egl, bool overclock, gsr_color_depth color_depth, bool use_software_video_encoder, VideoCodec video_codec) {
+ gsr_video_encoder *video_encoder = nullptr;
+
+ if(use_software_video_encoder) {
+ gsr_video_encoder_software_params params;
+ params.egl = egl;
+ params.color_depth = color_depth;
+ video_encoder = gsr_video_encoder_software_create(&params);
+ return video_encoder;
+ }
+
+ if(video_codec_is_vulkan(video_codec)) {
+ gsr_video_encoder_vulkan_params params;
+ params.egl = egl;
+ params.color_depth = color_depth;
+ video_encoder = gsr_video_encoder_vulkan_create(&params);
+ return video_encoder;
+ }
+
+ switch(egl->gpu_info.vendor) {
+ case GSR_GPU_VENDOR_AMD:
+ case GSR_GPU_VENDOR_INTEL: {
+ gsr_video_encoder_vaapi_params params;
+ params.egl = egl;
+ params.color_depth = color_depth;
+ video_encoder = gsr_video_encoder_vaapi_create(&params);
+ break;
+ }
+ case GSR_GPU_VENDOR_NVIDIA: {
+ gsr_video_encoder_cuda_params params;
+ params.egl = egl;
+ params.overclock = overclock;
+ params.color_depth = color_depth;
+ video_encoder = gsr_video_encoder_cuda_create(&params);
+ break;
+ }
+ }
+
+ return video_encoder;
+}
+
+static bool get_supported_video_codecs(gsr_egl *egl, VideoCodec video_codec, bool use_software_video_encoder, bool cleanup, gsr_supported_video_codecs *video_codecs) {
+ memset(video_codecs, 0, sizeof(*video_codecs));
+
+ if(use_software_video_encoder) {
+ video_codecs->h264.supported = true;
+ return true;
+ }
+
+ if(video_codec_is_vulkan(video_codec))
+ return gsr_get_supported_video_codecs_vulkan(video_codecs, egl->card_path, cleanup);
+
+ switch(egl->gpu_info.vendor) {
+ case GSR_GPU_VENDOR_AMD:
+ case GSR_GPU_VENDOR_INTEL:
+ return gsr_get_supported_video_codecs_vaapi(video_codecs, egl->card_path, cleanup);
+ case GSR_GPU_VENDOR_NVIDIA:
+ return gsr_get_supported_video_codecs_nvenc(video_codecs, cleanup);
+ }
+
+ return false;
+}
+
static void xwayland_check_callback(const gsr_monitor *monitor, void *userdata) {
bool *xwayland_found = (bool*)userdata;
if(monitor->name_len >= 8 && strncmp(monitor->name, "XWAYLAND", 8) == 0)
@@ -1512,11 +1759,195 @@ static bool is_xwayland(Display *display) {
return true;
bool xwayland_found = false;
- for_each_active_monitor_output_x11(display, xwayland_check_callback, &xwayland_found);
+ for_each_active_monitor_output_x11_not_cached(display, xwayland_check_callback, &xwayland_found);
return xwayland_found;
}
-static void list_supported_video_codecs() {
+static bool is_using_prime_run() {
+ const char *prime_render_offload = getenv("__NV_PRIME_RENDER_OFFLOAD");
+ return prime_render_offload && strcmp(prime_render_offload, "1") == 0;
+}
+
+static void disable_prime_run() {
+ unsetenv("__NV_PRIME_RENDER_OFFLOAD");
+ unsetenv("__NV_PRIME_RENDER_OFFLOAD_PROVIDER");
+ unsetenv("__GLX_VENDOR_LIBRARY_NAME");
+ unsetenv("__VK_LAYER_NV_optimus");
+}
+
+static void list_system_info(bool wayland) {
+ printf("display_server|%s\n", wayland ? "wayland" : "x11");
+}
+
+static void list_gpu_info(gsr_egl *egl) {
+ switch(egl->gpu_info.vendor) {
+ case GSR_GPU_VENDOR_AMD:
+ printf("vendor|amd\n");
+ break;
+ case GSR_GPU_VENDOR_INTEL:
+ printf("vendor|intel\n");
+ break;
+ case GSR_GPU_VENDOR_NVIDIA:
+ printf("vendor|nvidia\n");
+ break;
+ }
+}
+
+static const AVCodec* get_ffmpeg_video_codec(VideoCodec video_codec, gsr_gpu_vendor vendor) {
+ switch(video_codec) {
+ case VideoCodec::H264:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "h264_nvenc" : "h264_vaapi");
+ case VideoCodec::HEVC:
+ case VideoCodec::HEVC_HDR:
+ case VideoCodec::HEVC_10BIT:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "hevc_nvenc" : "hevc_vaapi");
+ case VideoCodec::AV1:
+ case VideoCodec::AV1_HDR:
+ case VideoCodec::AV1_10BIT:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "av1_nvenc" : "av1_vaapi");
+ case VideoCodec::VP8:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "vp8_nvenc" : "vp8_vaapi");
+ case VideoCodec::VP9:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "vp9_nvenc" : "vp9_vaapi");
+ case VideoCodec::H264_VULKAN:
+ return avcodec_find_encoder_by_name("h264_vulkan");
+ case VideoCodec::HEVC_VULKAN:
+ return avcodec_find_encoder_by_name("hevc_vulkan");
+ }
+ return nullptr;
+}
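
Note that get_ffmpeg_video_codec only checks that the encoder exists in the linked FFmpeg build; whether the driver can actually use it is verified separately by get_supported_video_codecs. A small illustrative use (not in the patch):

    // e.g. on an AMD system, assuming the enums above are in scope
    const AVCodec *codec = get_ffmpeg_video_codec(VideoCodec::HEVC, GSR_GPU_VENDOR_AMD);
    if(codec)
        fprintf(stderr, "gsr info: ffmpeg provides %s (%s)\n", codec->name, codec->long_name);
    else
        fprintf(stderr, "gsr info: hevc_vaapi is missing from this ffmpeg build\n");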
+
+static void set_supported_video_codecs_ffmpeg(gsr_supported_video_codecs *supported_video_codecs, gsr_supported_video_codecs *supported_video_codecs_vulkan, gsr_gpu_vendor vendor) {
+ if(!get_ffmpeg_video_codec(VideoCodec::H264, vendor)) {
+ supported_video_codecs->h264.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::HEVC, vendor)) {
+ supported_video_codecs->hevc.supported = false;
+ supported_video_codecs->hevc_hdr.supported = false;
+ supported_video_codecs->hevc_10bit.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::AV1, vendor)) {
+ supported_video_codecs->av1.supported = false;
+ supported_video_codecs->av1_hdr.supported = false;
+ supported_video_codecs->av1_10bit.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::VP8, vendor)) {
+ supported_video_codecs->vp8.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::VP9, vendor)) {
+ supported_video_codecs->vp9.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::H264_VULKAN, vendor)) {
+ supported_video_codecs_vulkan->h264.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::HEVC_VULKAN, vendor)) {
+ supported_video_codecs_vulkan->hevc.supported = false;
+ supported_video_codecs_vulkan->hevc_hdr.supported = false;
+ supported_video_codecs_vulkan->hevc_10bit.supported = false;
+ }
+}
+
+static void list_supported_video_codecs(gsr_egl *egl, bool wayland) {
+ // Don't clean it up on purpose, to increase shutdown speed
+ gsr_supported_video_codecs supported_video_codecs;
+ get_supported_video_codecs(egl, VideoCodec::H264, false, false, &supported_video_codecs);
+
+ gsr_supported_video_codecs supported_video_codecs_vulkan;
+ get_supported_video_codecs(egl, VideoCodec::H264_VULKAN, false, false, &supported_video_codecs_vulkan);
+
+ set_supported_video_codecs_ffmpeg(&supported_video_codecs, &supported_video_codecs_vulkan, egl->gpu_info.vendor);
+
+ if(supported_video_codecs.h264.supported)
+ puts("h264");
+ if(avcodec_find_encoder_by_name("libx264"))
+ puts("h264_software");
+ if(supported_video_codecs.hevc.supported)
+ puts("hevc");
+ if(supported_video_codecs.hevc_hdr.supported && wayland)
+ puts("hevc_hdr");
+ if(supported_video_codecs.hevc_10bit.supported)
+ puts("hevc_10bit");
+ if(supported_video_codecs.av1.supported)
+ puts("av1");
+ if(supported_video_codecs.av1_hdr.supported && wayland)
+ puts("av1_hdr");
+ if(supported_video_codecs.av1_10bit.supported)
+ puts("av1_10bit");
+ if(supported_video_codecs.vp8.supported)
+ puts("vp8");
+ if(supported_video_codecs.vp9.supported)
+ puts("vp9");
+ //if(supported_video_codecs_vulkan.h264.supported)
+ // puts("h264_vulkan");
+ //if(supported_video_codecs_vulkan.hevc.supported)
+ // puts("hevc_vulkan"); // TODO: hdr, 10 bit
+}
+
+static bool monitor_capture_use_drm(gsr_egl *egl, bool wayland) {
+ return wayland || egl->gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA;
+}
+
+typedef struct {
+ bool wayland;
+ gsr_egl *egl;
+} capture_options_callback;
+
+static void output_monitor_info(const gsr_monitor *monitor, void *userdata) {
+ const capture_options_callback *options = (capture_options_callback*)userdata;
+ if(options->wayland && monitor_capture_use_drm(options->egl, options->wayland)) {
+ vec2i monitor_size = monitor->size;
+ const gsr_monitor_rotation rot = drm_monitor_get_display_server_rotation(options->egl, monitor);
+ if(rot == GSR_MONITOR_ROT_90 || rot == GSR_MONITOR_ROT_270)
+ std::swap(monitor_size.x, monitor_size.y);
+ printf("%.*s|%dx%d\n", monitor->name_len, monitor->name, monitor_size.x, monitor_size.y);
+ } else {
+ printf("%.*s|%dx%d\n", monitor->name_len, monitor->name, monitor->size.x, monitor->size.y);
+ }
+}
+
+static void list_supported_capture_options(gsr_egl *egl, bool wayland) {
+ if(!wayland) {
+ puts("window");
+ puts("focused");
+ }
+
+ capture_options_callback options;
+ options.wayland = wayland;
+ options.egl = egl;
+ if(monitor_capture_use_drm(egl, wayland)) {
+ const bool is_x11 = gsr_egl_get_display_server(egl) == GSR_DISPLAY_SERVER_X11;
+ const gsr_connection_type connection_type = is_x11 ? GSR_CONNECTION_X11 : GSR_CONNECTION_DRM;
+ for_each_active_monitor_output(egl, connection_type, output_monitor_info, &options);
+ } else {
+ puts("screen"); // All monitors in one, only available on Nvidia X11
+ for_each_active_monitor_output(egl, GSR_CONNECTION_X11, output_monitor_info, &options);
+ }
+
+#ifdef GSR_PORTAL
+ // Desktop portal capture on x11 doesn't seem to be hardware accelerated
+ if(!wayland)
+ return;
+
+ gsr_dbus dbus;
+ if(!gsr_dbus_init(&dbus, NULL))
+ return;
+
+ char *session_handle = NULL;
+ if(gsr_dbus_screencast_create_session(&dbus, &session_handle) == 0) {
+ free(session_handle);
+ puts("portal");
+ }
+ gsr_dbus_deinit(&dbus);
+#endif
+}
+
+static void info_command() {
bool wayland = false;
Display *dpy = XOpenDisplay(nullptr);
if (!dpy) {
@@ -1530,46 +1961,81 @@ static void list_supported_video_codecs() {
if(!wayland)
wayland = is_xwayland(dpy);
+ if(!wayland && is_using_prime_run()) {
+ // Disable prime-run and similar options as they don't work; the monitor to capture has to be run on the same device.
+ // This is fine on wayland since nvidia uses the drm interface there and the monitor query checks the monitors connected
+ // to the drm device.
+ fprintf(stderr, "Warning: use of prime-run on X11 is not supported. Disabling prime-run\n");
+ disable_prime_run();
+ }
+
gsr_egl egl;
if(!gsr_egl_load(&egl, dpy, wayland, false)) {
fprintf(stderr, "gsr error: failed to load opengl\n");
- _exit(1);
+ _exit(22);
}
- char card_path[128];
- card_path[0] = '\0';
- if(wayland || egl.gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA) {
+ egl.card_path[0] = '\0';
+ if(monitor_capture_use_drm(&egl, wayland)) {
// TODO: Allow specifying another card, and in other places
- if(!gsr_get_valid_card_path(&egl, card_path, false)) {
- fprintf(stderr, "Error: no /dev/dri/cardX device found. If you are running GPU Screen Recorder with prime-run then try running without it. Also make sure that you have at least one connected monitor or record a single window instead on X11\n");
- _exit(2);
+ if(!gsr_get_valid_card_path(&egl, egl.card_path, false)) {
+ fprintf(stderr, "Error: no /dev/dri/cardX device found. Make sure that you have at least one monitor connected\n");
+ _exit(23);
}
}
av_log_set_level(AV_LOG_FATAL);
- // TODO: Output hdr
- if(find_h264_encoder(egl.gpu_info.vendor, card_path))
- puts("h264");
- if(find_hevc_encoder(egl.gpu_info.vendor, card_path))
- puts("hevc");
- if(find_av1_encoder(egl.gpu_info.vendor, card_path))
- puts("av1");
+ puts("section=system_info");
+ list_system_info(wayland);
+ if(egl.gpu_info.is_steam_deck)
+ puts("is_steam_deck|yes");
+ else
+ puts("is_steam_deck|no");
+ puts("section=gpu_info");
+ list_gpu_info(&egl);
+ puts("section=video_codecs");
+ list_supported_video_codecs(&egl, wayland);
+ puts("section=capture_options");
+ list_supported_capture_options(&egl, wayland);
fflush(stdout);
- gsr_egl_unload(&egl);
- if(dpy)
- XCloseDisplay(dpy);
+ // Not needed as this will just slow down shutdown
+ //gsr_egl_unload(&egl);
+ //if(dpy)
+ // XCloseDisplay(dpy);
+
+ _exit(0);
}
-static gsr_capture* create_capture_impl(const char *window_str, const char *screen_region, bool wayland, gsr_egl *egl, int fps, bool overclock, VideoCodec video_codec, gsr_color_range color_range, bool record_cursor, bool track_damage, bool use_software_video_encoder) {
+static void list_audio_devices_command() {
+ const AudioDevices audio_devices = get_pulseaudio_inputs();
+
+ if(!audio_devices.default_output.empty())
+ puts("default_output|Default output");
+
+ if(!audio_devices.default_input.empty())
+ puts("default_input|Default input");
+
+ for(const auto &audio_input : audio_devices.audio_inputs) {
+ printf("%s|%s\n", audio_input.name.c_str(), audio_input.description.c_str());
+ }
+
+ fflush(stdout);
+ _exit(0);
+}
+
+static gsr_capture* create_capture_impl(std::string &window_str, const char *screen_region, bool wayland, gsr_egl *egl, int fps, VideoCodec video_codec, gsr_color_range color_range,
+ bool record_cursor, bool use_software_video_encoder, bool restore_portal_session, const char *portal_session_token_filepath,
+ gsr_color_depth color_depth)
+{
vec2i region_size = { 0, 0 };
Window src_window_id = None;
bool follow_focused = false;
gsr_capture *capture = nullptr;
- if(strcmp(window_str, "focused") == 0) {
+ if(strcmp(window_str.c_str(), "focused") == 0) {
if(wayland) {
fprintf(stderr, "Error: GPU Screen Recorder window capture only works in a pure X11 session. Xwayland is not supported. You can record a monitor instead on wayland\n");
_exit(2);
@@ -1591,35 +2057,60 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
}
follow_focused = true;
- } else if(contains_non_hex_number(window_str)) {
- if(wayland || egl->gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA) {
- if(strcmp(window_str, "screen") == 0) {
+ } else if(strcmp(window_str.c_str(), "portal") == 0) {
+#ifdef GSR_PORTAL
+ // Desktop portal capture on x11 doesn't seem to be hardware accelerated
+ if(!wayland) {
+ fprintf(stderr, "Error: desktop portal capture is not supported on X11\n");
+ _exit(1);
+ }
+
+ gsr_capture_portal_params portal_params;
+ portal_params.egl = egl;
+ portal_params.color_depth = color_depth;
+ portal_params.color_range = color_range;
+ portal_params.record_cursor = record_cursor;
+ portal_params.restore_portal_session = restore_portal_session;
+ portal_params.portal_session_token_filepath = portal_session_token_filepath;
+ capture = gsr_capture_portal_create(&portal_params);
+ if(!capture)
+ _exit(1);
+#else
+ fprintf(stderr, "Error: option '-w portal' used but GPU Screen Recorder was compiled without desktop portal support\n");
+ _exit(2);
+#endif
+ } else if(contains_non_hex_number(window_str.c_str())) {
+ if(monitor_capture_use_drm(egl, wayland)) {
+ const bool is_x11 = gsr_egl_get_display_server(egl) == GSR_DISPLAY_SERVER_X11;
+ const gsr_connection_type connection_type = is_x11 ? GSR_CONNECTION_X11 : GSR_CONNECTION_DRM;
+
+ if(strcmp(window_str.c_str(), "screen") == 0) {
FirstOutputCallback first_output;
first_output.output_name = NULL;
- for_each_active_monitor_output(egl, GSR_CONNECTION_DRM, get_first_output, &first_output);
+ for_each_active_monitor_output(egl, connection_type, get_first_output, &first_output);
if(first_output.output_name) {
window_str = first_output.output_name;
} else {
- fprintf(stderr, "Error: no available output found\n");
+ fprintf(stderr, "Error: no usable output found\n");
+ _exit(1);
+ }
+ } else {
+ gsr_monitor gmon;
+ if(!get_monitor_by_name(egl, connection_type, window_str.c_str(), &gmon)) {
+ fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str.c_str());
+ fprintf(stderr, " \"screen\"\n");
+ for_each_active_monitor_output(egl, connection_type, monitor_output_callback_print, NULL);
_exit(1);
}
- }
-
- gsr_monitor gmon;
- if(!get_monitor_by_name(egl, GSR_CONNECTION_DRM, window_str, &gmon)) {
- fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str);
- fprintf(stderr, " \"screen\"\n");
- for_each_active_monitor_output(egl, GSR_CONNECTION_DRM, monitor_output_callback_print, NULL);
- _exit(1);
}
} else {
- if(strcmp(window_str, "screen") != 0 && strcmp(window_str, "screen-direct") != 0 && strcmp(window_str, "screen-direct-force") != 0) {
+ if(strcmp(window_str.c_str(), "screen") != 0 && strcmp(window_str.c_str(), "screen-direct") != 0 && strcmp(window_str.c_str(), "screen-direct-force") != 0) {
gsr_monitor gmon;
- if(!get_monitor_by_name(egl, GSR_CONNECTION_X11, window_str, &gmon)) {
+ if(!get_monitor_by_name(egl, GSR_CONNECTION_X11, window_str.c_str(), &gmon)) {
const int screens_width = XWidthOfScreen(DefaultScreenOfDisplay(egl->x11.dpy));
 const int screens_height = XHeightOfScreen(DefaultScreenOfDisplay(egl->x11.dpy));
- fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str);
+ fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str.c_str());
fprintf(stderr, " \"screen\" (%dx%d+%d+%d)\n", screens_width, screens_height, 0, 0);
fprintf(stderr, " \"screen-direct\" (%dx%d+%d+%d)\n", screens_width, screens_height, 0, 0);
fprintf(stderr, " \"screen-direct-force\" (%dx%d+%d+%d)\n", screens_width, screens_height, 0, 0);
@@ -1629,70 +2120,47 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
}
}
- if(use_software_video_encoder && (wayland || egl->gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA)) {
+ if(egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA && !wayland) {
+ const char *capture_target = window_str.c_str();
+ bool direct_capture = strcmp(window_str.c_str(), "screen-direct") == 0;
+ if(direct_capture) {
+ capture_target = "screen";
+ // TODO: Temporarily disable direct capture because the push model causes stuttering when direct capturing. This might be an nvfbc bug. This does not happen when using a compositor.
+ direct_capture = false;
+ fprintf(stderr, "Warning: screen-direct has temporarily been disabled as it causes stuttering. This is likely an NvFBC bug. Falling back to \"screen\".\n");
+ }
+
+ if(strcmp(window_str.c_str(), "screen-direct-force") == 0) {
+ direct_capture = true;
+ capture_target = "screen";
+ }
+
+ gsr_capture_nvfbc_params nvfbc_params;
+ nvfbc_params.egl = egl;
+ nvfbc_params.display_to_capture = capture_target;
+ nvfbc_params.fps = fps;
+ nvfbc_params.pos = { 0, 0 };
+ nvfbc_params.size = { 0, 0 };
+ nvfbc_params.direct_capture = direct_capture;
+ nvfbc_params.color_depth = color_depth;
+ nvfbc_params.color_range = color_range;
+ nvfbc_params.record_cursor = record_cursor;
+ nvfbc_params.use_software_video_encoder = use_software_video_encoder;
+ capture = gsr_capture_nvfbc_create(&nvfbc_params);
+ if(!capture)
+ _exit(1);
+ } else {
gsr_capture_kms_params kms_params;
kms_params.egl = egl;
- kms_params.display_to_capture = window_str;
- kms_params.hdr = video_codec_is_hdr(video_codec);
+ kms_params.display_to_capture = window_str.c_str();
+ kms_params.color_depth = color_depth;
kms_params.color_range = color_range;
kms_params.record_cursor = record_cursor;
+ kms_params.hdr = video_codec_is_hdr(video_codec);
+ kms_params.fps = fps;
capture = gsr_capture_kms_create(&kms_params);
if(!capture)
_exit(1);
- } else {
- if(egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA) {
- if(wayland) {
- gsr_capture_kms_params kms_params;
- kms_params.egl = egl;
- kms_params.display_to_capture = window_str;
- kms_params.hdr = video_codec_is_hdr(video_codec);
- kms_params.color_range = color_range;
- kms_params.record_cursor = record_cursor;
- capture = gsr_capture_kms_create(&kms_params);
- if(!capture)
- _exit(1);
- } else {
- const char *capture_target = window_str;
- bool direct_capture = strcmp(window_str, "screen-direct") == 0;
- if(direct_capture) {
- capture_target = "screen";
- // TODO: Temporary disable direct capture because push model causes stuttering when it's direct capturing. This might be a nvfbc bug. This does not happen when using a compositor.
- direct_capture = false;
- fprintf(stderr, "Warning: screen-direct has temporary been disabled as it causes stuttering. This is likely a NvFBC bug. Falling back to \"screen\".\n");
- }
-
- if(strcmp(window_str, "screen-direct-force") == 0) {
- direct_capture = true;
- capture_target = "screen";
- }
-
- gsr_capture_nvfbc_params nvfbc_params;
- nvfbc_params.egl = egl;
- nvfbc_params.display_to_capture = capture_target;
- nvfbc_params.fps = fps;
- nvfbc_params.pos = { 0, 0 };
- nvfbc_params.size = { 0, 0 };
- nvfbc_params.direct_capture = direct_capture;
- nvfbc_params.overclock = overclock;
- nvfbc_params.hdr = video_codec_is_hdr(video_codec);
- nvfbc_params.color_range = color_range;
- nvfbc_params.record_cursor = record_cursor;
- nvfbc_params.use_software_video_encoder = use_software_video_encoder;
- capture = gsr_capture_nvfbc_create(&nvfbc_params);
- if(!capture)
- _exit(1);
- }
- } else {
- gsr_capture_kms_params kms_params;
- kms_params.egl = egl;
- kms_params.display_to_capture = window_str;
- kms_params.hdr = video_codec_is_hdr(video_codec);
- kms_params.color_range = color_range;
- kms_params.record_cursor = record_cursor;
- capture = gsr_capture_kms_create(&kms_params);
- if(!capture)
- _exit(1);
- }
}
} else {
if(wayland) {
@@ -1701,9 +2169,9 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
}
errno = 0;
- src_window_id = strtol(window_str, nullptr, 0);
+ src_window_id = strtol(window_str.c_str(), nullptr, 0);
if(src_window_id == None || errno == EINVAL) {
- fprintf(stderr, "Invalid window number %s\n", window_str);
+ fprintf(stderr, "Invalid window number %s\n", window_str.c_str());
usage();
}
}
@@ -1716,7 +2184,7 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
xcomposite_params.region_size = region_size;
xcomposite_params.color_range = color_range;
xcomposite_params.record_cursor = record_cursor;
- xcomposite_params.track_damage = track_damage;
+ xcomposite_params.color_depth = color_depth;
capture = gsr_capture_xcomposite_create(&xcomposite_params);
if(!capture)
_exit(1);
@@ -1725,44 +2193,14 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
return capture;
}
-static gsr_video_encoder* create_video_encoder(gsr_egl *egl, bool overclock, bool hdr, bool use_software_video_encoder) {
- gsr_video_encoder *video_encoder = nullptr;
-
- if(use_software_video_encoder) {
- gsr_video_encoder_software_params params;
- params.egl = egl;
- params.hdr = hdr;
- video_encoder = gsr_video_encoder_software_create(&params);
- return video_encoder;
- }
-
- switch(egl->gpu_info.vendor) {
- case GSR_GPU_VENDOR_AMD:
- case GSR_GPU_VENDOR_INTEL: {
- gsr_video_encoder_vaapi_params params;
- params.egl = egl;
- params.hdr = hdr;
- video_encoder = gsr_video_encoder_vaapi_create(&params);
- break;
- }
- case GSR_GPU_VENDOR_NVIDIA: {
- gsr_video_encoder_cuda_params params;
- params.egl = egl;
- params.overclock = overclock;
- params.hdr = hdr;
- video_encoder = gsr_video_encoder_cuda_create(&params);
- break;
- }
- }
-
- return video_encoder;
-}
-
-static AVPixelFormat get_pixel_format(gsr_gpu_vendor vendor, bool use_software_video_encoder) {
+static AVPixelFormat get_pixel_format(VideoCodec video_codec, gsr_gpu_vendor vendor, bool use_software_video_encoder) {
if(use_software_video_encoder) {
return AV_PIX_FMT_NV12;
} else {
- return vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_CUDA : AV_PIX_FMT_VAAPI;
+ if(video_codec_is_vulkan(video_codec))
+ return AV_PIX_FMT_VULKAN;
+ else
+ return vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_CUDA : AV_PIX_FMT_VAAPI;
}
}
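
For reference, the mapping above collapses to a small decision: software encoding always produces NV12 frames, vulkan codecs use the vulkan hwframe format, and otherwise the GPU vendor picks between CUDA and VAAPI. A self-contained sketch (enum names shortened; the real code returns the ffmpeg AV_PIX_FMT_* constants directly):

    #include <cassert>

    enum class Vendor { AMD, INTEL, NVIDIA };
    enum class PixFmt { NV12, CUDA, VAAPI, VULKAN };

    // Mirrors get_pixel_format() above: cpu encoding uses NV12, vulkan codecs
    // use the vulkan hwframe format, otherwise the gpu vendor decides.
    static PixFmt pix_fmt_for(bool software, bool vulkan_codec, Vendor v) {
        if(software) return PixFmt::NV12;
        if(vulkan_codec) return PixFmt::VULKAN;
        return v == Vendor::NVIDIA ? PixFmt::CUDA : PixFmt::VAAPI;
    }

    int main() {
        assert(pix_fmt_for(true,  false, Vendor::NVIDIA) == PixFmt::NV12);   // -encoder cpu
        assert(pix_fmt_for(false, true,  Vendor::AMD)    == PixFmt::VULKAN);
        assert(pix_fmt_for(false, false, Vendor::INTEL)  == PixFmt::VAAPI);
        return 0;
    }
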
@@ -1778,6 +2216,350 @@ struct Arg {
}
};
+// Manually check if the audio inputs we give exist. This is only needed for pipewire, not pulseaudio.
+// Pipewire silently falls back to the default audio input when the requested device doesn't exist,
+// which hides typos in the device name instead of failing with an error.
+static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &audio_devices, const Arg &audio_input_arg, bool &uses_amix) {
+ std::vector<MergedAudioInputs> requested_audio_inputs;
+ uses_amix = false;
+
+ for(const char *audio_input : audio_input_arg.values) {
+ if(!audio_input || audio_input[0] == '\0')
+ continue;
+
+ requested_audio_inputs.push_back({parse_audio_input_arg(audio_input)});
+ if(requested_audio_inputs.back().audio_inputs.size() > 1)
+ uses_amix = true;
+
+ for(AudioInput &request_audio_input : requested_audio_inputs.back().audio_inputs) {
+ bool match = false;
+
+ if(!audio_devices.default_output.empty() && request_audio_input.name == "default_output") {
+ request_audio_input.name = audio_devices.default_output;
+ if(request_audio_input.description.empty())
+ request_audio_input.description = "gsr-Default output";
+ match = true;
+ }
+
+ if(!audio_devices.default_input.empty() && request_audio_input.name == "default_input") {
+ request_audio_input.name = audio_devices.default_input;
+ if(request_audio_input.description.empty())
+ request_audio_input.description = "gsr-Default input";
+ match = true;
+ }
+
+ for(const auto &existing_audio_input : audio_devices.audio_inputs) {
+ if(request_audio_input.name == existing_audio_input.name) {
+ if(request_audio_input.description.empty())
+ request_audio_input.description = "gsr-" + existing_audio_input.description;
+
+ match = true;
+ break;
+ }
+ }
+
+ if(!match) {
+ fprintf(stderr, "Error: Audio input device '%s' is not a valid audio device, expected one of:\n", request_audio_input.name.c_str());
+ if(!audio_devices.default_output.empty())
+ fprintf(stderr, " default_output (Default output)\n");
+ if(!audio_devices.default_input.empty())
+ fprintf(stderr, " default_input (Default input)\n");
+ for(const auto &existing_audio_input : audio_devices.audio_inputs) {
+ fprintf(stderr, " %s (%s)\n", existing_audio_input.name.c_str(), existing_audio_input.description.c_str());
+ }
+ _exit(2);
+ }
+ }
+ }
+
+ return requested_audio_inputs;
+}
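
parse_audio_input_arg() is not shown in this hunk; as a rough standalone sketch of the shape the merged-input handling above implies (the '|' merge delimiter is an assumption here, not taken from this diff):

    #include <string>
    #include <vector>

    struct AudioInput { std::string name; std::string description; };

    // Hypothetical stand-in for parse_audio_input_arg(): split one -a value into
    // the devices to be mixed into a single track. More than one entry => amix.
    static std::vector<AudioInput> split_audio_input(const std::string &arg) {
        std::vector<AudioInput> inputs;
        size_t start = 0;
        for(;;) {
            const size_t end = arg.find('|', start);
            const std::string name = arg.substr(start, end == std::string::npos ? std::string::npos : end - start);
            if(!name.empty())
                inputs.push_back({name, ""});
            if(end == std::string::npos)
                break;
            start = end + 1;
        }
        return inputs;
    }

With that shape, uses_amix in parse_audio_inputs() is simply whether any single -a value split into more than one device.
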
+
+static AudioCodec select_audio_codec_with_fallback(AudioCodec audio_codec, const std::string &file_extension, bool uses_amix) {
+ switch(audio_codec) {
+ case AudioCodec::AAC: {
+ if(file_extension == "webm") {
+ //audio_codec_to_use = "opus";
+ audio_codec = AudioCodec::OPUS;
+ fprintf(stderr, "Warning: .webm files only support opus audio codec, changing audio codec from aac to opus\n");
+ }
+ break;
+ }
+ case AudioCodec::OPUS: {
+ // TODO: Also check mpegts?
+ if(file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm") {
+ //audio_codec_to_use = "aac";
+ audio_codec = AudioCodec::AAC;
+ fprintf(stderr, "Warning: opus audio codec is only supported by .mp4, .mkv and .webm files, falling back to aac instead\n");
+ }
+ break;
+ }
+ case AudioCodec::FLAC: {
+ // TODO: Also check mpegts?
+ if(file_extension == "webm") {
+ //audio_codec_to_use = "opus";
+ audio_codec = AudioCodec::OPUS;
+ fprintf(stderr, "Warning: .webm files only support opus audio codec, changing audio codec from flac to opus\n");
+ } else if(file_extension != "mp4" && file_extension != "mkv") {
+ //audio_codec_to_use = "aac";
+ audio_codec = AudioCodec::AAC;
+ fprintf(stderr, "Warning: flac audio codec is only supported by .mp4 and .mkv files, falling back to aac instead\n");
+ } else if(uses_amix) {
+ // TODO: remove this? is it true anymore?
+ //audio_codec_to_use = "opus";
+ audio_codec = AudioCodec::OPUS;
+ fprintf(stderr, "Warning: flac audio codec is not supported when mixing audio sources, falling back to opus instead\n");
+ }
+ break;
+ }
+ }
+ return audio_codec;
+}
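
The switch above reduces to a container/codec compatibility lookup; a condensed sketch of the same rules (the flac-with-amix special case is omitted and noted instead):

    #include <cstring>

    // Same fallback rules as select_audio_codec_with_fallback(), minus warnings.
    // Note: flac additionally falls back to opus when mixing sources (amix).
    static const char* effective_audio_codec(const char *requested, const char *ext) {
        const bool mp4_mkv = strcmp(ext, "mp4") == 0 || strcmp(ext, "mkv") == 0;
        const bool webm = strcmp(ext, "webm") == 0;
        if(strcmp(requested, "opus") == 0) return (mp4_mkv || webm) ? "opus" : "aac";
        if(strcmp(requested, "flac") == 0) return webm ? "opus" : (mp4_mkv ? "flac" : "aac");
        return webm ? "opus" : "aac"; // aac: only webm forces a change
    }
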
+
+static const char* video_codec_to_string(VideoCodec video_codec) {
+ switch(video_codec) {
+ case VideoCodec::H264: return "h264";
+ case VideoCodec::HEVC: return "hevc";
+ case VideoCodec::HEVC_HDR: return "hevc_hdr";
+ case VideoCodec::HEVC_10BIT: return "hevc_10bit";
+ case VideoCodec::AV1: return "av1";
+ case VideoCodec::AV1_HDR: return "av1_hdr";
+ case VideoCodec::AV1_10BIT: return "av1_10bit";
+ case VideoCodec::VP8: return "vp8";
+ case VideoCodec::VP9: return "vp9";
+ case VideoCodec::H264_VULKAN: return "h264_vulkan";
+ case VideoCodec::HEVC_VULKAN: return "hevc_vulkan";
+ }
+ return "";
+}
+
+static bool video_codec_only_supports_low_power_mode(const gsr_supported_video_codecs &supported_video_codecs, VideoCodec video_codec) {
+ switch(video_codec) {
+ case VideoCodec::H264: return supported_video_codecs.h264.low_power;
+ case VideoCodec::HEVC: return supported_video_codecs.hevc.low_power;
+ case VideoCodec::HEVC_HDR: return supported_video_codecs.hevc_hdr.low_power;
+ case VideoCodec::HEVC_10BIT: return supported_video_codecs.hevc_10bit.low_power;
+ case VideoCodec::AV1: return supported_video_codecs.av1.low_power;
+ case VideoCodec::AV1_HDR: return supported_video_codecs.av1_hdr.low_power;
+ case VideoCodec::AV1_10BIT: return supported_video_codecs.av1_10bit.low_power;
+ case VideoCodec::VP8: return supported_video_codecs.vp8.low_power;
+ case VideoCodec::VP9: return supported_video_codecs.vp9.low_power;
+ case VideoCodec::H264_VULKAN: return supported_video_codecs.h264.low_power;
+ case VideoCodec::HEVC_VULKAN: return supported_video_codecs.hevc.low_power; // TODO: hdr, 10 bit
+ }
+ return false;
+}
+
+static const AVCodec* pick_video_codec(VideoCodec *video_codec, gsr_egl *egl, bool use_software_video_encoder, bool video_codec_auto, const char *video_codec_to_use, bool is_flv, bool *low_power) {
+ // TODO: software encoder for hevc, av1, vp8 and vp9
+ *low_power = false;
+
+ gsr_supported_video_codecs supported_video_codecs;
+ if(!get_supported_video_codecs(egl, *video_codec, use_software_video_encoder, true, &supported_video_codecs)) {
+ fprintf(stderr, "Error: failed to query for supported video codecs\n");
+ _exit(11);
+ }
+
+ const AVCodec *video_codec_f = nullptr;
+
+ switch(*video_codec) {
+ case VideoCodec::H264: {
+ if(use_software_video_encoder)
+ video_codec_f = avcodec_find_encoder_by_name("libx264");
+ else if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC: {
+ if(supported_video_codecs.hevc.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC_HDR: {
+ if(supported_video_codecs.hevc_hdr.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC_10BIT: {
+ if(supported_video_codecs.hevc_10bit.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::AV1: {
+ if(supported_video_codecs.av1.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::AV1_HDR: {
+ if(supported_video_codecs.av1_hdr.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::AV1_10BIT: {
+ if(supported_video_codecs.av1_10bit.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::VP8: {
+ if(supported_video_codecs.vp8.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::VP9: {
+ if(supported_video_codecs.vp9.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::H264_VULKAN: {
+ if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC_VULKAN: {
+ // TODO: hdr, 10 bit
+ if(supported_video_codecs.hevc.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ }
+
+ if(!video_codec_auto && !video_codec_f && !is_flv) {
+ switch(*video_codec) {
+ case VideoCodec::H264: {
+ fprintf(stderr, "Warning: selected video codec h264 is not supported, trying hevc instead\n");
+ video_codec_to_use = "hevc";
+ if(supported_video_codecs.hevc.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC:
+ case VideoCodec::HEVC_HDR:
+ case VideoCodec::HEVC_10BIT: {
+ fprintf(stderr, "Warning: selected video codec hevc is not supported, trying h264 instead\n");
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::AV1:
+ case VideoCodec::AV1_HDR:
+ case VideoCodec::AV1_10BIT: {
+ fprintf(stderr, "Warning: selected video codec av1 is not supported, trying h264 instead\n");
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::VP8:
+ case VideoCodec::VP9:
+ // TODO: Can't fall back to another codec because webm only supports vp8/vp9
+ break;
+ case VideoCodec::H264_VULKAN: {
+ fprintf(stderr, "Warning: selected video codec h264_vulkan is not supported, trying h264 instead\n");
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ // Need to do a query again because this time it's without vulkan
+ if(!get_supported_video_codecs(egl, *video_codec, use_software_video_encoder, true, &supported_video_codecs)) {
+ fprintf(stderr, "Error: failed to query for supported video codecs\n");
+ _exit(11);
+ }
+ if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC_VULKAN: {
+ fprintf(stderr, "Warning: selected video codec hevc_vulkan is not supported, trying hevc instead\n");
+ video_codec_to_use = "hevc";
+ *video_codec = VideoCodec::HEVC;
+ // Need to do a query again because this time it's without vulkan
+ if(!get_supported_video_codecs(egl, *video_codec, use_software_video_encoder, true, &supported_video_codecs)) {
+ fprintf(stderr, "Error: failed to query for supported video codecs\n");
+ _exit(11);
+ }
+ if(supported_video_codecs.hevc.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ }
+ }
+
+ (void)video_codec_to_use;
+
+ if(!video_codec_f) {
+ const char *video_codec_name = video_codec_to_string(*video_codec);
+ fprintf(stderr, "Error: your gpu does not support '%s' video codec. If you are sure that your gpu does support '%s' video encoding and you are using an AMD/Intel GPU,\n"
+ " then make sure you have installed the GPU specific vaapi packages (intel-media-driver, libva-intel-driver, libva-mesa-driver and linux-firmware).\n"
+ " It's also possible that your distro has disabled hardware accelerated video encoding for '%s' video codec.\n"
+ " This may be the case on corporate distros such as Manjaro, Fedora or OpenSUSE.\n"
+ " You can test this by running 'vainfo | grep VAEntrypointEncSlice' to see if it matches any H264/HEVC/AV1/VP8/VP9 profile.\n"
+ " On such distros, you need to manually install mesa from source to enable H264/HEVC hardware acceleration, or use a more user friendly distro. Alternatively record with AV1 if supported by your GPU.\n"
+ " You can alternatively use the flatpak version of GPU Screen Recorder (https://flathub.org/apps/com.dec05eba.gpu_screen_recorder) which bypasses system issues with patented H264/HEVC codecs.\n"
+ " Make sure you have mesa-extra freedesktop runtime installed when using the flatpak (this should be the default), which can be installed with this command:\n"
+ " flatpak install --system org.freedesktop.Platform.GL.default//23.08-extra\n"
+ " If your GPU doesn't support hardware accelerated video encoding then you can use '-encoder cpu' option to encode with your cpu instead.\n", video_codec_name, video_codec_name, video_codec_name);
+ _exit(2);
+ }
+
+ *low_power = video_codec_only_supports_low_power_mode(supported_video_codecs, *video_codec);
+
+ return video_codec_f;
+}
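
The fallback branches in pick_video_codec() form a simple chain. A sketch of just that ordering (hdr/10-bit variants collapsed into their base codec, ffmpeg probing omitted):

    enum class Codec { H264, HEVC, AV1, VP8, VP9, H264_VULKAN, HEVC_VULKAN };

    // Which codec pick_video_codec() tries next when the requested one is
    // unsupported: hevc/av1 drop to h264, h264 tries hevc, vulkan variants
    // retry their non-vulkan counterpart, vp8/vp9 have nowhere to go (webm).
    static Codec fallback_codec(Codec c) {
        switch(c) {
            case Codec::H264:        return Codec::HEVC;
            case Codec::HEVC:        return Codec::H264;
            case Codec::AV1:         return Codec::H264;
            case Codec::H264_VULKAN: return Codec::H264;
            case Codec::HEVC_VULKAN: return Codec::HEVC;
            default:                 return c;
        }
    }
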
+
+static const AVCodec* select_video_codec_with_fallback(VideoCodec *video_codec, const char *video_codec_to_use, const char *file_extension, bool use_software_video_encoder, gsr_egl *egl, bool *low_power) {
+ const bool video_codec_auto = strcmp(video_codec_to_use, "auto") == 0;
+ if(video_codec_auto) {
+ if(strcmp(file_extension, "webm") == 0) {
+ fprintf(stderr, "Info: using vp8 encoder because a codec was not specified and the file extension is .webm\n");
+ video_codec_to_use = "vp8";
+ *video_codec = VideoCodec::VP8;
+ } else {
+ fprintf(stderr, "Info: using h264 encoder because a codec was not specified\n");
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ }
+ }
+
+ // TODO: Allow hevc, vp9 and av1 in (enhanced) flv (supported since ffmpeg 6.1)
+ const bool is_flv = strcmp(file_extension, "flv") == 0;
+ if(is_flv) {
+ if(*video_codec != VideoCodec::H264) {
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ fprintf(stderr, "Warning: hevc/av1 is not compatible with flv, falling back to h264 instead.\n");
+ }
+
+ // if(audio_codec != AudioCodec::AAC) {
+ // audio_codec_to_use = "aac";
+ // audio_codec = AudioCodec::AAC;
+ // fprintf(stderr, "Warning: flv only supports aac, falling back to aac instead.\n");
+ // }
+ }
+
+ const bool is_hls = strcmp(file_extension, "m3u8") == 0;
+ if(is_hls) {
+ if(video_codec_is_av1(*video_codec)) {
+ video_codec_to_use = "hevc";
+ *video_codec = VideoCodec::HEVC;
+ fprintf(stderr, "Warning: av1 is not compatible with hls (m3u8), falling back to hevc instead.\n");
+ }
+
+ // if(audio_codec != AudioCodec::AAC) {
+ // audio_codec_to_use = "aac";
+ // audio_codec = AudioCodec::AAC;
+ // fprintf(stderr, "Warning: hls (m3u8) only supports aac, falling back to aac instead.\n");
+ // }
+ }
+
+ if(use_software_video_encoder && *video_codec != VideoCodec::H264) {
+ fprintf(stderr, "Error: \"-encoder cpu\" option is currently only available when using h264 codec option (-k)\n");
+ usage();
+ }
+
+ return pick_video_codec(video_codec, egl, use_software_video_encoder, video_codec_auto, video_codec_to_use, is_flv, low_power);
+}
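
Before any encoder probing, the function gates the codec on the container, as sketched here (string-based simplification of the same rules; the real code switches on the VideoCodec enum):

    #include <cstring>

    // flv only accepts h264 (enhanced flv is a TODO above); hls rejects av1.
    static const char* gate_video_codec(const char *codec, const char *ext) {
        if(strcmp(ext, "flv") == 0) return "h264";
        if(strcmp(ext, "m3u8") == 0 && strncmp(codec, "av1", 3) == 0) return "hevc";
        return codec;
    }
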
+
int main(int argc, char **argv) {
signal(SIGINT, stop_handler);
signal(SIGUSR1, save_replay_handler);
@@ -1803,8 +2585,18 @@ int main(int argc, char **argv) {
if(argc == 2 && (strcmp(argv[1], "-h") == 0 || strcmp(argv[1], "--help") == 0))
usage_full();
- if(argc == 2 && strcmp(argv[1], "--list-supported-video-codecs") == 0) {
- list_supported_video_codecs();
+ if(argc == 2 && strcmp(argv[1], "--info") == 0) {
+ info_command();
+ _exit(0);
+ }
+
+ if(argc == 2 && strcmp(argv[1], "--list-audio-devices") == 0) {
+ list_audio_devices_command();
+ _exit(0);
+ }
+
+ if(argc == 2 && strcmp(argv[1], "--version") == 0) {
+ puts(GSR_VERSION);
_exit(0);
}
@@ -1824,14 +2616,17 @@ int main(int argc, char **argv) {
{ "-ab", Arg { {}, true, false } },
{ "-oc", Arg { {}, true, false } },
{ "-fm", Arg { {}, true, false } },
+ { "-bm", Arg { {}, true, false } },
{ "-pixfmt", Arg { {}, true, false } },
{ "-v", Arg { {}, true, false } },
- { "-mf", Arg { {}, true, false } },
+ { "-mf", Arg { {}, true, false } }, // TODO: Remove, this exists for backwards compatibility. -df should be used instead
+ { "-df", Arg { {}, true, false } },
{ "-sc", Arg { {}, true, false } },
{ "-cr", Arg { {}, true, false } },
{ "-cursor", Arg { {}, true, false } },
- { "-gopm", Arg { {}, true, false } }, // deprecated, used keyint instead
{ "-keyint", Arg { {}, true, false } },
+ { "-restore-portal-session", Arg { {}, true, false } },
+ { "-portal-session-token-filepath", Arg { {}, true, false } },
{ "-encoder", Arg { {}, true, false } },
};
@@ -1873,12 +2668,24 @@ int main(int argc, char **argv) {
video_codec = VideoCodec::HEVC;
} else if(strcmp(video_codec_to_use, "hevc_hdr") == 0) {
video_codec = VideoCodec::HEVC_HDR;
+ } else if(strcmp(video_codec_to_use, "hevc_10bit") == 0) {
+ video_codec = VideoCodec::HEVC_10BIT;
} else if(strcmp(video_codec_to_use, "av1") == 0) {
video_codec = VideoCodec::AV1;
} else if(strcmp(video_codec_to_use, "av1_hdr") == 0) {
video_codec = VideoCodec::AV1_HDR;
+ } else if(strcmp(video_codec_to_use, "av1_10bit") == 0) {
+ video_codec = VideoCodec::AV1_10BIT;
+ } else if(strcmp(video_codec_to_use, "vp8") == 0) {
+ video_codec = VideoCodec::VP8;
+ } else if(strcmp(video_codec_to_use, "vp9") == 0) {
+ video_codec = VideoCodec::VP9;
+ //} else if(strcmp(video_codec_to_use, "h264_vulkan") == 0) {
+ // video_codec = VideoCodec::H264_VULKAN;
+ //} else if(strcmp(video_codec_to_use, "hevc_vulkan") == 0) {
+ // video_codec = VideoCodec::HEVC_VULKAN;
} else if(strcmp(video_codec_to_use, "auto") != 0) {
- fprintf(stderr, "Error: -k should either be either 'auto', 'h264', 'hevc', 'hevc_hdr', 'av1' or 'av1_hdr', got: '%s'\n", video_codec_to_use);
+ fprintf(stderr, "Error: -k should either be either 'auto', 'h264', 'hevc', 'av1', 'vp8', 'vp9', 'hevc_hdr', 'av1_hdr', 'hevc_10bit' or 'av1_10bit', got: '%s'\n", video_codec_to_use);
usage();
}
@@ -1982,20 +2789,48 @@ int main(int argc, char **argv) {
usage();
}
- bool make_folders = false;
- const char *make_folders_str = args["-mf"].value();
- if(!make_folders_str)
- make_folders_str = "no";
+ bool date_folders = false;
+ const char *date_folders_str = args["-df"].value();
+ if(!date_folders_str) {
+ date_folders_str = args["-mf"].value();
+ if(date_folders_str)
+ fprintf(stderr, "Warning: -mf is deprecated, use -df instead\n");
+ }
+ if(!date_folders_str)
+ date_folders_str = "no";
+
+ if(strcmp(date_folders_str, "yes") == 0) {
+ date_folders = true;
+ } else if(strcmp(date_folders_str, "no") == 0) {
+ date_folders = false;
+ } else {
+ fprintf(stderr, "Error: -df should either be either 'yes' or 'no', got: '%s'\n", date_folders_str);
+ usage();
+ }
+
+ bool restore_portal_session = false;
+ const char *restore_portal_session_str = args["-restore-portal-session"].value();
+ if(!restore_portal_session_str)
+ restore_portal_session_str = "no";
- if(strcmp(make_folders_str, "yes") == 0) {
- make_folders = true;
- } else if(strcmp(make_folders_str, "no") == 0) {
- make_folders = false;
+ if(strcmp(restore_portal_session_str, "yes") == 0) {
+ restore_portal_session = true;
+ } else if(strcmp(restore_portal_session_str, "no") == 0) {
+ restore_portal_session = false;
} else {
- fprintf(stderr, "Error: -mf should either be either 'yes' or 'no', got: '%s'\n", make_folders_str);
+ fprintf(stderr, "Error: -restore-portal-session should either be either 'yes' or 'no', got: '%s'\n", restore_portal_session_str);
usage();
}
+ const char *portal_session_token_filepath = args["-portal-session-token-filepath"].value();
+ if(portal_session_token_filepath) {
+ int len = strlen(portal_session_token_filepath);
+ if(len > 0 && portal_session_token_filepath[len - 1] == '/') {
+ fprintf(stderr, "Error: -portal-session-token-filepath should be a path to a file but it ends with a /: %s\n", portal_session_token_filepath);
+ _exit(1);
+ }
+ }
+
const char *recording_saved_script = args["-sc"].value();
if(recording_saved_script) {
struct stat buf;
@@ -2025,44 +2860,12 @@ int main(int argc, char **argv) {
}
const Arg &audio_input_arg = args["-a"];
- std::vector<AudioInput> audio_inputs;
+ AudioDevices audio_devices;
if(!audio_input_arg.values.empty())
- audio_inputs = get_pulseaudio_inputs();
- std::vector<MergedAudioInputs> requested_audio_inputs;
- bool uses_amix = false;
+ audio_devices = get_pulseaudio_inputs();
- // Manually check if the audio inputs we give exist. This is only needed for pipewire, not pulseaudio.
- // Pipewire instead DEFAULTS TO THE DEFAULT AUDIO INPUT. THAT'S RETARDED.
- // OH, YOU MISSPELLED THE AUDIO INPUT? FUCK YOU
- for(const char *audio_input : audio_input_arg.values) {
- if(!audio_input || audio_input[0] == '\0')
- continue;
-
- requested_audio_inputs.push_back({parse_audio_input_arg(audio_input)});
- if(requested_audio_inputs.back().audio_inputs.size() > 1)
- uses_amix = true;
-
- for(AudioInput &request_audio_input : requested_audio_inputs.back().audio_inputs) {
- bool match = false;
- for(const auto &existing_audio_input : audio_inputs) {
- if(strcmp(request_audio_input.name.c_str(), existing_audio_input.name.c_str()) == 0) {
- if(request_audio_input.description.empty())
- request_audio_input.description = "gsr-" + existing_audio_input.description;
-
- match = true;
- break;
- }
- }
-
- if(!match) {
- fprintf(stderr, "Error: Audio input device '%s' is not a valid audio device, expected one of:\n", request_audio_input.name.c_str());
- for(const auto &existing_audio_input : audio_inputs) {
- fprintf(stderr, " %s (%s)\n", existing_audio_input.name.c_str(), existing_audio_input.description.c_str());
- }
- _exit(2);
- }
- }
- }
+ bool uses_amix = false;
+ std::vector<MergedAudioInputs> requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg, uses_amix);
const char *container_format = args["-c"].value();
if(container_format && strcmp(container_format, "mkv") == 0)
@@ -2105,7 +2908,12 @@ int main(int argc, char **argv) {
replay_buffer_size_secs += std::ceil(keyint); // Add a few seconds to account for packets lost when non-keyframe packets are skipped
}
- const char *window_str = strdup(args["-w"].value());
+ std::string window_str = args["-w"].value();
+ const bool is_portal_capture = strcmp(window_str.c_str(), "portal") == 0;
+
+ if(!restore_portal_session && is_portal_capture) {
+ fprintf(stderr, "gsr info: option '-w portal' was used without '-restore-portal-session yes'. The previous screencast session will be ignored\n");
+ }
bool wayland = false;
Display *dpy = XOpenDisplay(nullptr);
@@ -2120,18 +2928,36 @@ int main(int argc, char **argv) {
if(!wayland)
wayland = is_xwayland(dpy);
+ if(!wayland && is_using_prime_run()) {
+ // Disable prime-run and similar options since they don't work here; the monitor to capture has to be driven by the same device.
+ // This is fine on wayland since nvidia uses drm interface there and the monitor query checks the monitors connected
+ // to the drm device.
+ fprintf(stderr, "Warning: use of prime-run on X11 is not supported. Disabling prime-run\n");
+ disable_prime_run();
+ }
+
+ if(is_portal_capture && is_using_prime_run()) {
+ fprintf(stderr, "Warning: use of prime-run with -w portal option is currently not supported. Disabling prime-run\n");
+ disable_prime_run();
+ }
+
if(video_codec_is_hdr(video_codec) && !wayland) {
fprintf(stderr, "Error: hdr video codec option %s is not available on X11\n", video_codec_to_use);
_exit(1);
}
- const bool is_monitor_capture = strcmp(window_str, "focused") != 0 && contains_non_hex_number(window_str);
+ const bool is_monitor_capture = strcmp(window_str.c_str(), "focused") != 0 && !is_portal_capture && contains_non_hex_number(window_str.c_str());
gsr_egl egl;
if(!gsr_egl_load(&egl, dpy, wayland, is_monitor_capture)) {
fprintf(stderr, "gsr error: failed to load opengl\n");
_exit(1);
}
+ if(egl.gpu_info.is_steam_deck) {
+ fprintf(stderr, "gsr warning: steam deck has multiple driver issues. One of them has been reported here: https://github.com/ValveSoftware/SteamOS/issues/1609\n"
+ "If you have issues with GPU Screen Recorder on steam deck that you don't have on a desktop computer then report the issue to Valve and/or AMD.\n");
+ }
+
bool very_old_gpu = false;
if(egl.gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA && egl.gpu_info.gpu_version != 0 && egl.gpu_info.gpu_version < 900) {
@@ -2150,14 +2976,19 @@ int main(int argc, char **argv) {
}
egl.card_path[0] = '\0';
- if(wayland || egl.gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA) {
+ if(monitor_capture_use_drm(&egl, wayland)) {
// TODO: Allow specifying another card, and in other places
if(!gsr_get_valid_card_path(&egl, egl.card_path, is_monitor_capture)) {
- fprintf(stderr, "Error: no /dev/dri/cardX device found. If you are running GPU Screen Recorder with prime-run then try running without it. Also make sure that you have at least one connected monitor or record a single window instead on X11\n");
+ fprintf(stderr, "Error: no /dev/dri/cardX device found. Make sure that you have at least one monitor connected or record a single window instead on X11 or record with the -w portal option\n");
_exit(2);
}
}
+ // if(wayland && is_monitor_capture) {
+ // fprintf(stderr, "gsr warning: it's not possible to sync video to recorded monitor exactly on wayland when recording a monitor."
+ // " If you experience stutter in the video then record with portal capture option instead (-w portal) or use X11 instead\n");
+ // }
+
// TODO: Fix constant framerate not working properly on amd/intel because capture framerate gets locked to the same framerate as
// game framerate, which doesn't work well when you need to encode multiple duplicate frames (AMD/Intel is slow at encoding!).
// It also appears to skip audio frames on nvidia wayland? why? that should be fine, but it causes video stuttering because of audio/video sync.
@@ -2177,11 +3008,35 @@ int main(int argc, char **argv) {
usage();
}
- if(framerate_mode == FramerateMode::CONTENT && (wayland || is_monitor_capture)) {
- fprintf(stderr, "Error: -fm 'content' is currently only supported on X11 and when capturing a single window.\n");
+ if(framerate_mode == FramerateMode::CONTENT && wayland && !is_portal_capture) {
+ fprintf(stderr, "Error: -fm 'content' is currently only supported on X11 or when using portal capture option\n");
+ usage();
+ }
+
+ BitrateMode bitrate_mode = BitrateMode::QP;
+ const char *bitrate_mode_str = args["-bm"].value();
+ if(!bitrate_mode_str)
+ bitrate_mode_str = "auto";
+
+ if(strcmp(bitrate_mode_str, "qp") == 0) {
+ bitrate_mode = BitrateMode::QP;
+ } else if(strcmp(bitrate_mode_str, "vbr") == 0) {
+ bitrate_mode = BitrateMode::VBR;
+ } else if(strcmp(bitrate_mode_str, "auto") != 0) {
+ fprintf(stderr, "Error: -bm should either be either 'auto', 'qp', 'vbr', got: '%s'\n", bitrate_mode_str);
usage();
}
+ if(strcmp(bitrate_mode_str, "auto") == 0) {
+ // QP is broken on steam deck, see https://github.com/ValveSoftware/SteamOS/issues/1609
+ bitrate_mode = egl.gpu_info.is_steam_deck ? BitrateMode::VBR : BitrateMode::QP;
+ }
+
+ if(use_software_video_encoder && bitrate_mode != BitrateMode::QP) {
+ fprintf(stderr, "Warning: bitrate mode has been forcefully set to qp because software encoding option doesn't support vbr option\n");
+ bitrate_mode = BitrateMode::QP;
+ }
+
gsr_color_range color_range = GSR_COLOR_RANGE_LIMITED;
const char *color_range_str = args["-cr"].value();
if(!color_range_str)
@@ -2198,7 +3053,7 @@ int main(int argc, char **argv) {
const char *screen_region = args["-s"].value();
- if(screen_region && strcmp(window_str, "focused") != 0) {
+ if(screen_region && strcmp(window_str.c_str(), "focused") != 0) {
fprintf(stderr, "Error: option -s is only available when using -w focused\n");
usage();
}
@@ -2215,7 +3070,7 @@ int main(int argc, char **argv) {
} else {
if(replay_buffer_size_secs == -1) {
char directory_buf[PATH_MAX];
- strcpy(directory_buf, filename);
+ snprintf(directory_buf, sizeof(directory_buf), "%s", filename);
char *directory = dirname(directory_buf);
if(strcmp(directory, ".") != 0 && strcmp(directory, "/") != 0) {
if(create_directory_recursive(directory) != 0) {
@@ -2275,172 +3130,19 @@ int main(int argc, char **argv) {
}
const bool force_no_audio_offset = is_livestream || is_output_piped || (file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm");
-
- switch(audio_codec) {
- case AudioCodec::AAC: {
- if(file_extension == "webm") {
- audio_codec_to_use = "opus";
- audio_codec = AudioCodec::OPUS;
- fprintf(stderr, "Warning: .webm files only support opus audio codec, changing audio codec from aac to opus\n");
- }
- break;
- }
- case AudioCodec::OPUS: {
- // TODO: Also check mpegts?
- if(file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm") {
- audio_codec_to_use = "aac";
- audio_codec = AudioCodec::AAC;
- fprintf(stderr, "Warning: opus audio codec is only supported by .mp4, .mkv and .webm files, falling back to aac instead\n");
- }
- break;
- }
- case AudioCodec::FLAC: {
- // TODO: Also check mpegts?
- if(file_extension == "webm") {
- audio_codec_to_use = "opus";
- audio_codec = AudioCodec::OPUS;
- fprintf(stderr, "Warning: .webm files only support opus audio codec, changing audio codec from flac to opus\n");
- } else if(file_extension != "mp4" && file_extension != "mkv") {
- audio_codec_to_use = "aac";
- audio_codec = AudioCodec::AAC;
- fprintf(stderr, "Warning: flac audio codec is only supported by .mp4 and .mkv files, falling back to aac instead\n");
- } else if(uses_amix) {
- // TODO: remove this? is it true anymore?
- audio_codec_to_use = "opus";
- audio_codec = AudioCodec::OPUS;
- fprintf(stderr, "Warning: flac audio codec is not supported when mixing audio sources, falling back to opus instead\n");
- }
- break;
- }
- }
-
const double target_fps = 1.0 / (double)fps;
- const bool video_codec_auto = strcmp(video_codec_to_use, "auto") == 0;
- if(video_codec_auto) {
- fprintf(stderr, "Info: using h264 encoder because a codec was not specified\n");
- video_codec_to_use = "h264";
- video_codec = VideoCodec::H264;
+ if(video_codec_is_hdr(video_codec) && is_portal_capture) {
+ fprintf(stderr, "Warning: portal capture option doesn't support hdr yet (pipewire doesn't support hdr), the video will be tonemapped from hdr to sdr\n");
+ video_codec = hdr_video_codec_to_sdr_video_codec(video_codec);
}
- // TODO: Allow hevc, vp9 and av1 in (enhanced) flv (supported since ffmpeg 6.1)
- const bool is_flv = strcmp(file_extension.c_str(), "flv") == 0;
- if(is_flv) {
- if(video_codec != VideoCodec::H264) {
- video_codec_to_use = "h264";
- video_codec = VideoCodec::H264;
- fprintf(stderr, "Warning: hevc/av1 is not compatible with flv, falling back to h264 instead.\n");
- }
+ audio_codec = select_audio_codec_with_fallback(audio_codec, file_extension, uses_amix);
+ bool low_power = false;
+ const AVCodec *video_codec_f = select_video_codec_with_fallback(&video_codec, video_codec_to_use, file_extension.c_str(), use_software_video_encoder, &egl, &low_power);
- if(audio_codec != AudioCodec::AAC) {
- audio_codec_to_use = "aac";
- audio_codec = AudioCodec::AAC;
- fprintf(stderr, "Warning: flv only supports aac, falling back to aac instead.\n");
- }
- }
-
- const bool is_hls = strcmp(file_extension.c_str(), "m3u8") == 0;
- if(is_hls) {
- if(video_codec == VideoCodec::AV1 || video_codec == VideoCodec::AV1_HDR) {
- video_codec_to_use = "hevc";
- video_codec = VideoCodec::HEVC;
- fprintf(stderr, "Warning: av1 is not compatible with hls (m3u8), falling back to hevc instead.\n");
- }
-
- if(audio_codec != AudioCodec::AAC) {
- audio_codec_to_use = "aac";
- audio_codec = AudioCodec::AAC;
- fprintf(stderr, "Warning: hls (m3u8) only supports aac, falling back to aac instead.\n");
- }
- }
-
- if(use_software_video_encoder && video_codec != VideoCodec::H264) {
- fprintf(stderr, "Error: \"-encoder cpu\" option is currently only available when using h264 codec option (-k)\n");
- usage();
- }
-
- const AVCodec *video_codec_f = nullptr;
- switch(video_codec) {
- case VideoCodec::H264: {
- if(use_software_video_encoder) {
- video_codec_f = find_h264_software_encoder();
- } else {
- video_codec_f = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
- }
- break;
- }
- case VideoCodec::HEVC:
- case VideoCodec::HEVC_HDR:
- // TODO: software encoder
- video_codec_f = find_hevc_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- case VideoCodec::AV1:
- case VideoCodec::AV1_HDR:
- // TODO: software encoder
- video_codec_f = find_av1_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- }
-
- if(!video_codec_auto && !video_codec_f && !is_flv) {
- switch(video_codec) {
- case VideoCodec::H264: {
- fprintf(stderr, "Warning: selected video codec h264 is not supported, trying hevc instead\n");
- video_codec_to_use = "hevc";
- video_codec = VideoCodec::HEVC;
- video_codec_f = find_hevc_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- }
- case VideoCodec::HEVC:
- case VideoCodec::HEVC_HDR: {
- fprintf(stderr, "Warning: selected video codec hevc is not supported, trying h264 instead\n");
- video_codec_to_use = "h264";
- video_codec = VideoCodec::H264;
- video_codec_f = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- }
- case VideoCodec::AV1:
- case VideoCodec::AV1_HDR: {
- fprintf(stderr, "Warning: selected video codec av1 is not supported, trying h264 instead\n");
- video_codec_to_use = "h264";
- video_codec = VideoCodec::H264;
- video_codec_f = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- }
- }
- }
-
- if(!video_codec_f) {
- const char *video_codec_name = "";
- switch(video_codec) {
- case VideoCodec::H264: {
- video_codec_name = "h264";
- break;
- }
- case VideoCodec::HEVC:
- case VideoCodec::HEVC_HDR: {
- video_codec_name = "hevc";
- break;
- }
- case VideoCodec::AV1:
- case VideoCodec::AV1_HDR: {
- video_codec_name = "av1";
- break;
- }
- }
-
- fprintf(stderr, "Error: your gpu does not support '%s' video codec. If you are sure that your gpu does support '%s' video encoding and you are using an AMD/Intel GPU,\n"
- " then make sure you have installed the GPU specific vaapi packages (intel-media-driver, libva-intel-driver or libva-mesa-driver).\n"
- " It's also possible that your distro has disabled hardware accelerated video encoding for '%s' video codec.\n"
- " This may be the case on corporate distros such as Manjaro, Fedora or OpenSUSE.\n"
- " You can test this by running 'vainfo | grep VAEntrypointEncSlice' to see if it matches any H264/HEVC profile.\n"
- " On such distros, you need to manually install mesa from source to enable H264/HEVC hardware acceleration, or use a more user friendly distro. Alternatively record with AV1 if supported by your GPU.\n"
- " You can alternatively use the flatpak version of GPU Screen Recorder (https://flathub.org/apps/com.dec05eba.gpu_screen_recorder) which bypasses system issues with patented H264/HEVC codecs.\n"
- " Make sure you have mesa-extra freedesktop runtime installed when using the flatpak (this should be the default), which can be installed with this command:\n"
- " flatpak install --system org.freedesktop.Platform.GL.default//23.08-extra", video_codec_name, video_codec_name, video_codec_name);
- _exit(2);
- }
-
- gsr_capture *capture = create_capture_impl(window_str, screen_region, wayland, &egl, fps, overclock, video_codec, color_range, record_cursor, framerate_mode == FramerateMode::CONTENT, use_software_video_encoder);
+ const gsr_color_depth color_depth = video_codec_to_bit_depth(video_codec);
+ gsr_capture *capture = create_capture_impl(window_str, screen_region, wayland, &egl, fps, video_codec, color_range, record_cursor, use_software_video_encoder, restore_portal_session, portal_session_token_filepath, color_depth);
// (Some?) livestreaming services require at least one audio track to work.
// If no audio is provided then create one silent audio track.
@@ -2461,7 +3163,8 @@ int main(int argc, char **argv) {
const bool hdr = video_codec_is_hdr(video_codec);
const bool low_latency_recording = is_livestream || is_output_piped;
- AVCodecContext *video_codec_context = create_video_codec_context(get_pixel_format(egl.gpu_info.vendor, use_software_video_encoder), quality, fps, video_codec_f, low_latency_recording, egl.gpu_info.vendor, framerate_mode, hdr, color_range, keyint);
+ const enum AVPixelFormat video_pix_fmt = get_pixel_format(video_codec, egl.gpu_info.vendor, use_software_video_encoder);
+ AVCodecContext *video_codec_context = create_video_codec_context(video_pix_fmt, quality, fps, video_codec_f, low_latency_recording, egl.gpu_info.vendor, framerate_mode, hdr, color_range, keyint, use_software_video_encoder, bitrate_mode, video_codec);
if(replay_buffer_size_secs == -1)
video_stream = create_stream(av_format_context, video_codec_context);
@@ -2485,7 +3188,7 @@ int main(int argc, char **argv) {
_exit(capture_result);
}
- gsr_video_encoder *video_encoder = create_video_encoder(&egl, overclock, hdr, use_software_video_encoder);
+ gsr_video_encoder *video_encoder = create_video_encoder(&egl, overclock, color_depth, use_software_video_encoder, video_codec);
if(!video_encoder) {
fprintf(stderr, "Error: failed to create video encoder\n");
_exit(1);
@@ -2513,9 +3216,9 @@ int main(int argc, char **argv) {
gsr_color_conversion_clear(&color_conversion);
if(use_software_video_encoder) {
- open_video_software(video_codec_context, quality, pixel_format, hdr);
+ open_video_software(video_codec_context, quality, pixel_format, hdr, color_depth, bitrate_mode);
} else {
- open_video_hardware(video_codec_context, quality, very_old_gpu, egl.gpu_info.vendor, pixel_format, hdr);
+ open_video_hardware(video_codec_context, quality, very_old_gpu, egl.gpu_info.vendor, pixel_format, hdr, color_depth, bitrate_mode, video_codec, low_power);
}
if(video_stream)
avcodec_parameters_from_context(video_stream->codecpar, video_codec_context);
@@ -2561,7 +3264,7 @@ int main(int argc, char **argv) {
const double audio_startup_time_seconds = force_no_audio_offset ? 0 : audio_codec_get_desired_delay(audio_codec, fps);// * ((double)audio_codec_context->frame_size / 1024.0);
const double num_audio_frames_shift = audio_startup_time_seconds / timeout_sec;
- std::vector<AudioDevice> audio_devices;
+ std::vector<AudioDevice> audio_track_audio_devices;
for(size_t i = 0; i < merged_audio_inputs.audio_inputs.size(); ++i) {
auto &audio_input = merged_audio_inputs.audio_inputs[i];
AVFilterContext *src_ctx = nullptr;
@@ -2585,13 +3288,13 @@ int main(int argc, char **argv) {
audio_device.frame = create_audio_frame(audio_codec_context);
audio_device.frame->pts = -audio_codec_context->frame_size * num_audio_frames_shift;
- audio_devices.push_back(std::move(audio_device));
+ audio_track_audio_devices.push_back(std::move(audio_device));
}
AudioTrack audio_track;
audio_track.codec_context = audio_codec_context;
audio_track.stream = audio_stream;
- audio_track.audio_devices = std::move(audio_devices);
+ audio_track.audio_devices = std::move(audio_track_audio_devices);
audio_track.graph = graph;
audio_track.sink = sink;
audio_track.stream_index = audio_stream_index;
@@ -2627,7 +3330,7 @@ int main(int argc, char **argv) {
}
double fps_start_time = clock_get_monotonic_seconds();
- double frame_timer_start = fps_start_time - target_fps; // We want to capture the first frame immediately
+ double frame_timer_start = fps_start_time;
int fps_counter = 0;
int damage_fps_counter = 0;
@@ -2706,7 +3409,7 @@ int main(int argc, char **argv) {
if(paused) {
if(!audio_device.sound_device.handle)
- usleep(timeout_ms * 1000);
+ av_usleep(timeout_ms * 1000);
continue;
}
@@ -2769,7 +3472,7 @@ int main(int argc, char **argv) {
}
if(!audio_device.sound_device.handle)
- usleep(timeout_ms * 1000);
+ av_usleep(timeout_ms * 1000);
if(got_audio_data) {
// TODO: Instead of converting audio, get float audio from alsa. Or does alsa do conversion internally to get this format?
@@ -2806,57 +3509,105 @@ int main(int argc, char **argv) {
}
}
+ std::thread amix_thread;
+ if(uses_amix) {
+ amix_thread = std::thread([&]() {
+ AVFrame *aframe = av_frame_alloc();
+ while(running) {
+ {
+ std::lock_guard<std::mutex> lock(audio_filter_mutex);
+ for(AudioTrack &audio_track : audio_tracks) {
+ if(!audio_track.sink)
+ continue;
+
+ int err = 0;
+ while ((err = av_buffersink_get_frame(audio_track.sink, aframe)) >= 0) {
+ aframe->pts = audio_track.pts;
+ err = avcodec_send_frame(audio_track.codec_context, aframe);
+ if(err >= 0){
+ // TODO: Move to separate thread because this could write to network (for example when livestreaming)
+ receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, aframe->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex, paused_time_offset);
+ } else {
+ fprintf(stderr, "Failed to encode audio!\n");
+ }
+ av_frame_unref(aframe);
+ audio_track.pts += audio_track.codec_context->frame_size;
+ }
+ }
+ }
+ av_usleep(5 * 1000); // 5 milliseconds
+ }
+ av_frame_free(&aframe);
+ });
+ }
+
// Set update_fps to 24 to test if duplicate/delayed frames cause video/audio desync or too fast/slow video.
const double update_fps = fps + 190;
bool should_stop_error = false;
- AVFrame *aframe = av_frame_alloc();
-
int64_t video_pts_counter = 0;
int64_t video_prev_pts = 0;
+ bool hdr_metadata_set = false;
+
+ double damage_timeout_seconds = framerate_mode == FramerateMode::CONTENT ? 0.5 : 0.1;
+ damage_timeout_seconds = std::max(damage_timeout_seconds, target_fps);
+
+ bool use_damage_tracking = false;
+ gsr_damage damage;
+ memset(&damage, 0, sizeof(damage));
+ if(gsr_egl_get_display_server(&egl) == GSR_DISPLAY_SERVER_X11) {
+ gsr_damage_init(&damage, &egl, record_cursor);
+ use_damage_tracking = true;
+ }
+
+ if(is_monitor_capture)
+ gsr_damage_set_target_monitor(&damage, window_str.c_str());
+
while(running) {
- double frame_start = clock_get_monotonic_seconds();
+ const double frame_start = clock_get_monotonic_seconds();
+
+ while(gsr_egl_process_event(&egl)) {
+ gsr_damage_on_event(&damage, gsr_egl_get_event_data(&egl));
+ gsr_capture_on_event(capture, &egl);
+ }
+ gsr_damage_tick(&damage);
+ gsr_capture_tick(capture);
+
+ if(!is_monitor_capture) {
+ Window damage_target_window = 0;
+ if(capture->get_window_id)
+ damage_target_window = capture->get_window_id(capture);
+
+ if(damage_target_window != 0)
+ gsr_damage_set_target_window(&damage, damage_target_window);
+ }
- gsr_capture_tick(capture, video_codec_context);
should_stop_error = false;
if(gsr_capture_should_stop(capture, &should_stop_error)) {
running = 0;
break;
}
- // TODO: Move to another thread, since this shouldn't be locked to video encoding fps
- {
- std::lock_guard<std::mutex> lock(audio_filter_mutex);
- for(AudioTrack &audio_track : audio_tracks) {
- if(!audio_track.sink)
- continue;
+ bool damaged = false;
+ if(use_damage_tracking)
+ damaged = gsr_damage_is_damaged(&damage);
+ else if(capture->is_damaged)
+ damaged = capture->is_damaged(capture);
+ else
+ damaged = true;
- int err = 0;
- while ((err = av_buffersink_get_frame(audio_track.sink, aframe)) >= 0) {
- aframe->pts = audio_track.pts;
- err = avcodec_send_frame(audio_track.codec_context, aframe);
- if(err >= 0){
- // TODO: Move to separate thread because this could write to network (for example when livestreaming)
- receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, aframe->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex, paused_time_offset);
- } else {
- fprintf(stderr, "Failed to encode audio!\n");
- }
- av_frame_unref(aframe);
- audio_track.pts += audio_track.codec_context->frame_size;
- }
- }
- }
+ // TODO: Re-add the wayland sync warning when removing this
+ if(framerate_mode != FramerateMode::CONTENT)
+ damaged = true;
- const bool damaged = !capture->is_damaged || capture->is_damaged(capture);
- if(damaged) {
+ if(damaged)
++damage_fps_counter;
- }
++fps_counter;
- double time_now = clock_get_monotonic_seconds();
- double frame_timer_elapsed = time_now - frame_timer_start;
- double elapsed = time_now - fps_start_time;
+ const double time_now = clock_get_monotonic_seconds();
+ const double frame_timer_elapsed = time_now - frame_timer_start;
+ const double elapsed = time_now - fps_start_time;
if (elapsed >= 1.0) {
if(verbose) {
fprintf(stderr, "update fps: %d, damage fps: %d\n", fps_counter, damage_fps_counter);
@@ -2867,10 +3618,11 @@ int main(int argc, char **argv) {
}
double frame_time_overflow = frame_timer_elapsed - target_fps;
- if (frame_time_overflow >= 0.0 && damaged) {
+ if ((frame_time_overflow >= 0.0 || video_pts_counter == 0) && damaged) {
+ gsr_damage_clear(&damage);
if(capture->clear_damage)
capture->clear_damage(capture);
- frame_time_overflow = std::min(frame_time_overflow, target_fps);
+ frame_time_overflow = std::min(std::max(0.0, frame_time_overflow), target_fps);
frame_timer_start = time_now - frame_time_overflow;
const double this_video_frame_time = clock_get_monotonic_seconds() - paused_time_offset;
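
The clamped overflow above keeps the frame timer on a fixed grid: the time a frame runs long is subtracted from the next interval, but never by more than one frame, so a long stall can't trigger a burst of catch-up frames. A small worked example at 60 fps (note that target_fps holds a frame duration, 1/fps seconds):

    #include <algorithm>

    int main() {
        const double target_fps = 1.0 / 60.0;      // frame duration: ~16.67 ms
        double frame_timer_start = 0.0;
        const double time_now = 0.020;             // this frame ran 3.33 ms long
        double overflow = (time_now - frame_timer_start) - target_fps;  // 0.00333
        overflow = std::min(std::max(0.0, overflow), target_fps);       // clamp to one frame
        frame_timer_start = time_now - overflow;   // 0.01667: excess carried forward
        (void)frame_timer_start;                   // next frame fires ~13.3 ms later
        return 0;
    }
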
@@ -2878,8 +3630,13 @@ int main(int argc, char **argv) {
const int num_frames = framerate_mode == FramerateMode::CONSTANT ? std::max((int64_t)0LL, expected_frames - video_pts_counter) : 1;
if(num_frames > 0 && !paused) {
+ egl.glClear(0);
gsr_capture_capture(capture, video_frame, &color_conversion);
- gsr_video_encoder_copy_textures_to_frame(video_encoder, video_frame);
+ gsr_egl_swap_buffers(&egl);
+ gsr_video_encoder_copy_textures_to_frame(video_encoder, video_frame, &color_conversion);
+
+ if(hdr && !hdr_metadata_set && replay_buffer_size_secs == -1 && add_hdr_metadata_to_video_stream(capture, video_stream))
+ hdr_metadata_set = true;
// TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
for(int i = 0; i < num_frames; ++i) {
@@ -2903,7 +3660,6 @@ int main(int argc, char **argv) {
}
}
- gsr_capture_capture_end(capture, video_frame);
video_pts_counter += num_frames;
}
}
@@ -2934,14 +3690,14 @@ int main(int argc, char **argv) {
if(save_replay == 1 && !save_replay_thread.valid() && replay_buffer_size_secs != -1) {
save_replay = 0;
- save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex, make_folders);
+ save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex, date_folders, hdr, capture);
}
double frame_end = clock_get_monotonic_seconds();
double frame_sleep_fps = 1.0 / update_fps;
double sleep_time = frame_sleep_fps - (frame_end - frame_start);
if(sleep_time > 0.0)
- usleep(sleep_time * 1000.0 * 1000.0);
+ av_usleep(sleep_time * 1000.0 * 1000.0);
}
running = 0;
@@ -2963,7 +3719,8 @@ int main(int argc, char **argv) {
}
}
- av_frame_free(&aframe);
+ if(amix_thread.joinable())
+ amix_thread.join();
if (replay_buffer_size_secs == -1 && av_write_trailer(av_format_context) != 0) {
fprintf(stderr, "Failed to write trailer\n");
@@ -2972,6 +3729,7 @@ int main(int argc, char **argv) {
if(replay_buffer_size_secs == -1 && !(output_format->flags & AVFMT_NOFILE))
avio_close(av_format_context->pb);
+ gsr_damage_deinit(&damage);
gsr_color_conversion_deinit(&color_conversion);
gsr_video_encoder_destroy(video_encoder, video_codec_context);
gsr_capture_destroy(capture, video_codec_context);
@@ -2985,7 +3743,6 @@ int main(int argc, char **argv) {
}
//av_frame_free(&video_frame);
- free((void*)window_str);
free(empty_audio);
// We do an _exit here because cuda uses at_exit to do _something_ that causes the program to freeze,
// but only on some nvidia driver versions on some gpus (RTX?), and _exit exits the program without calling
diff --git a/src/overclock.c b/src/overclock.c
index 2cba623..df2ae66 100644
--- a/src/overclock.c
+++ b/src/overclock.c
@@ -4,12 +4,10 @@
#include <string.h>
#include <stdlib.h>
-// HACK!!!: When a program uses cuda (including nvenc) then the nvidia driver drops to performance level 2 (memory transfer rate is dropped and possibly graphics clock).
+// HACK!!!: When a program uses cuda (including nvenc), the nvidia driver drops to one step below the maximum performance level (memory transfer rate is reduced and possibly the graphics clock too).
// Nvidia does this because in some very extreme cases of cuda there can be memory corruption when running at max memory transfer rate.
// So to get around this we overclock memory transfer rate (maybe this should also be done for graphics clock?) to the best performance level while GPU Screen Recorder is running.
-// TODO: Does it always drop to performance level 2?
-
static int min_int(int a, int b) {
return a < b ? a : b;
}
diff --git a/src/pipewire.c b/src/pipewire.c
new file mode 100644
index 0000000..3bf54db
--- /dev/null
+++ b/src/pipewire.c
@@ -0,0 +1,788 @@
+#include "../include/pipewire.h"
+#include "../include/egl.h"
+#include "../include/utils.h"
+
+#include <pipewire/pipewire.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/debug/types.h>
+
+#include <libdrm/drm_fourcc.h>
+
+#include <fcntl.h>
+#include <unistd.h>
+
+/* This code is partially based on xr-video-player's pipewire implementation, which in turn is based on obs-studio's pipewire implementation */
+
+/* TODO: Make gsr_pipewire_init asynchronous */
+/* TODO: Support 10-bit capture (hdr) when pipewire supports it */
+/* TODO: Test all of the image formats */
+
+#ifndef SPA_POD_PROP_FLAG_DONT_FIXATE
+#define SPA_POD_PROP_FLAG_DONT_FIXATE (1 << 4)
+#endif
+
+#define CURSOR_META_SIZE(width, height) \
+ (sizeof(struct spa_meta_cursor) + sizeof(struct spa_meta_bitmap) + \
+ (width) * (height) * 4)
+
+static bool parse_pw_version(gsr_pipewire_data_version *dst, const char *version) {
+ const int n_matches = sscanf(version, "%d.%d.%d", &dst->major, &dst->minor, &dst->micro);
+ return n_matches == 3;
+}
+
+static bool check_pw_version(const gsr_pipewire_data_version *pw_version, int major, int minor, int micro) {
+ if (pw_version->major != major)
+ return pw_version->major > major;
+ if (pw_version->minor != minor)
+ return pw_version->minor > minor;
+ return pw_version->micro >= micro;
+}
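
A tiny standalone harness for the two helpers above; the 0.3.33 threshold is only an illustrative value, not one this file checks:

    #include <cstdio>

    struct pw_ver { int major, minor, micro; };

    // Same parse/compare logic as parse_pw_version()/check_pw_version() above.
    static bool parse_ver(pw_ver *dst, const char *s) {
        return sscanf(s, "%d.%d.%d", &dst->major, &dst->minor, &dst->micro) == 3;
    }
    static bool ver_at_least(const pw_ver *v, int major, int minor, int micro) {
        if(v->major != major) return v->major > major;
        if(v->minor != minor) return v->minor > minor;
        return v->micro >= micro;
    }

    int main() {
        pw_ver v;
        if(parse_ver(&v, "0.3.49"))
            printf("%s\n", ver_at_least(&v, 0, 3, 33) ? "new enough" : "too old"); // new enough
        return 0;
    }
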
+
+static void update_pw_versions(gsr_pipewire *self, const char *version) {
+ fprintf(stderr, "gsr info: pipewire: server version: %s\n", version);
+ fprintf(stderr, "gsr info: pipewire: library version: %s\n", pw_get_library_version());
+ fprintf(stderr, "gsr info: pipewire: header version: %s\n", pw_get_headers_version());
+ if(!parse_pw_version(&self->server_version, version))
+ fprintf(stderr, "gsr error: pipewire: failed to parse server version\n");
+}
+
+static void on_core_info_cb(void *user_data, const struct pw_core_info *info) {
+ gsr_pipewire *self = user_data;
+ update_pw_versions(self, info->version);
+}
+
+static void on_core_error_cb(void *user_data, uint32_t id, int seq, int res, const char *message) {
+ gsr_pipewire *self = user_data;
+ fprintf(stderr, "gsr error: pipewire: error id:%u seq:%d res:%d: %s\n", id, seq, res, message);
+ pw_thread_loop_signal(self->thread_loop, false);
+}
+
+static void on_core_done_cb(void *user_data, uint32_t id, int seq) {
+ gsr_pipewire *self = user_data;
+ if (id == PW_ID_CORE && self->server_version_sync == seq)
+ pw_thread_loop_signal(self->thread_loop, false);
+}
+
+static bool is_cursor_format_supported(const enum spa_video_format format) {
+ switch(format) {
+ case SPA_VIDEO_FORMAT_RGBx: return true;
+ case SPA_VIDEO_FORMAT_BGRx: return true;
+ case SPA_VIDEO_FORMAT_xRGB: return true;
+ case SPA_VIDEO_FORMAT_xBGR: return true;
+ case SPA_VIDEO_FORMAT_RGBA: return true;
+ case SPA_VIDEO_FORMAT_BGRA: return true;
+ case SPA_VIDEO_FORMAT_ARGB: return true;
+ case SPA_VIDEO_FORMAT_ABGR: return true;
+ default: break;
+ }
+ return false;
+}
+
+static const struct pw_core_events core_events = {
+ PW_VERSION_CORE_EVENTS,
+ .info = on_core_info_cb,
+ .done = on_core_done_cb,
+ .error = on_core_error_cb,
+};
+
+static void on_process_cb(void *user_data) {
+ gsr_pipewire *self = user_data;
+ struct spa_meta_cursor *cursor = NULL;
+ //struct spa_meta *video_damage = NULL;
+
+ /* Find the most recent buffer */
+ struct pw_buffer *pw_buf = NULL;
+ for(;;) {
+ struct pw_buffer *aux = pw_stream_dequeue_buffer(self->stream);
+ if(!aux)
+ break;
+ if(pw_buf)
+ pw_stream_queue_buffer(self->stream, pw_buf);
+ pw_buf = aux;
+ }
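+
+ /* Every stale buffer that was dequeued above is requeued immediately, so
+ * only the newest frame is processed; this keeps latency low when the
+ * consumer falls behind the compositor. */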
+
+ if(!pw_buf) {
+ fprintf(stderr, "gsr info: pipewire: out of buffers!\n");
+ return;
+ }
+
+ struct spa_buffer *buffer = pw_buf->buffer;
+ const bool has_buffer = buffer->datas[0].chunk->size != 0;
+ if(!has_buffer)
+ goto read_metadata;
+
+ pthread_mutex_lock(&self->mutex);
+
+ if(buffer->datas[0].type == SPA_DATA_DmaBuf) {
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ if(self->dmabuf_data[i].fd > 0) {
+ close(self->dmabuf_data[i].fd);
+ self->dmabuf_data[i].fd = -1;
+ }
+ }
+
+ self->dmabuf_num_planes = buffer->n_datas;
+ if(self->dmabuf_num_planes > GSR_PIPEWIRE_DMABUF_MAX_PLANES)
+ self->dmabuf_num_planes = GSR_PIPEWIRE_DMABUF_MAX_PLANES;
+
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ self->dmabuf_data[i].fd = dup(buffer->datas[i].fd);
+ self->dmabuf_data[i].offset = buffer->datas[i].chunk->offset;
+ self->dmabuf_data[i].stride = buffer->datas[i].chunk->stride;
+ }
+
+ self->damaged = true;
+ } else {
+ // TODO:
+ }
+
+ // TODO: Move down to read_metadata
+ struct spa_meta_region *region = spa_buffer_find_meta_data(buffer, SPA_META_VideoCrop, sizeof(*region));
+ if(region && spa_meta_region_is_valid(region)) {
+ // fprintf(stderr, "gsr info: pipewire: crop Region available (%dx%d+%d+%d)\n",
+ // region->region.position.x, region->region.position.y,
+ // region->region.size.width, region->region.size.height);
+ self->crop.x = region->region.position.x;
+ self->crop.y = region->region.position.y;
+ self->crop.width = region->region.size.width;
+ self->crop.height = region->region.size.height;
+ self->crop.valid = true;
+ } else {
+ self->crop.valid = false;
+ }
+
+ pthread_mutex_unlock(&self->mutex);
+
+read_metadata:
+
+ // video_damage = spa_buffer_find_meta(buffer, SPA_META_VideoDamage);
+ // if(video_damage) {
+ // struct spa_meta_region *r = spa_meta_first(video_damage);
+ // if(spa_meta_check(r, video_damage)) {
+ // //fprintf(stderr, "damage: %d,%d %ux%u\n", r->region.position.x, r->region.position.y, r->region.size.width, r->region.size.height);
+ // pthread_mutex_lock(&self->mutex);
+ // self->damaged = true;
+ // pthread_mutex_unlock(&self->mutex);
+ // }
+ // }
+
+ cursor = spa_buffer_find_meta_data(buffer, SPA_META_Cursor, sizeof(*cursor));
+ self->cursor.valid = cursor && spa_meta_cursor_is_valid(cursor);
+
+ if (self->cursor.visible && self->cursor.valid) {
+ pthread_mutex_lock(&self->mutex);
+
+ struct spa_meta_bitmap *bitmap = NULL;
+ if (cursor->bitmap_offset)
+ bitmap = SPA_MEMBER(cursor, cursor->bitmap_offset, struct spa_meta_bitmap);
+
+ if (bitmap && bitmap->size.width > 0 && bitmap->size.height > 0 && is_cursor_format_supported(bitmap->format)) {
+ const uint8_t *bitmap_data = SPA_MEMBER(bitmap, bitmap->offset, uint8_t);
+ fprintf(stderr, "gsr info: pipewire: cursor bitmap update, size: %dx%d, format: %s\n",
+ (int)bitmap->size.width, (int)bitmap->size.height, spa_debug_type_find_name(spa_type_video_format, bitmap->format));
+
+ const size_t bitmap_size = bitmap->size.width * bitmap->size.height * 4;
+ uint8_t *new_bitmap_data = realloc(self->cursor.data, bitmap_size);
+ if(new_bitmap_data) {
+ self->cursor.data = new_bitmap_data;
+ /* TODO: Convert bgr and other image formats to rgb here */
+ memcpy(self->cursor.data, bitmap_data, bitmap_size);
+ }
+
+ self->cursor.hotspot_x = cursor->hotspot.x;
+ self->cursor.hotspot_y = cursor->hotspot.y;
+ self->cursor.width = bitmap->size.width;
+ self->cursor.height = bitmap->size.height;
+ }
+
+ self->cursor.x = cursor->position.x;
+ self->cursor.y = cursor->position.y;
+ pthread_mutex_unlock(&self->mutex);
+
+ //fprintf(stderr, "gsr info: pipewire: cursor: %d %d %d %d\n", cursor->hotspot.x, cursor->hotspot.y, cursor->position.x, cursor->position.y);
+ }
+
+ pw_stream_queue_buffer(self->stream, pw_buf);
+}
+
+static void on_param_changed_cb(void *user_data, uint32_t id, const struct spa_pod *param) {
+ gsr_pipewire *self = user_data;
+
+ if (!param || id != SPA_PARAM_Format)
+ return;
+
+ int result = spa_format_parse(param, &self->format.media_type, &self->format.media_subtype);
+ if (result < 0)
+ return;
+
+ if (self->format.media_type != SPA_MEDIA_TYPE_video || self->format.media_subtype != SPA_MEDIA_SUBTYPE_raw)
+ return;
+
+ pthread_mutex_lock(&self->mutex);
+ spa_format_video_raw_parse(param, &self->format.info.raw);
+ pthread_mutex_unlock(&self->mutex);
+
+ uint32_t buffer_types = 0;
+ const bool has_modifier = spa_pod_find_prop(param, NULL, SPA_FORMAT_VIDEO_modifier) != NULL;
+ if(has_modifier || check_pw_version(&self->server_version, 0, 3, 24))
+ buffer_types |= 1 << SPA_DATA_DmaBuf;
+
+ fprintf(stderr, "gsr info: pipewire: negotiated format:\n");
+
+ fprintf(stderr, "gsr info: pipewire: Format: %d (%s)\n",
+ self->format.info.raw.format,
+ spa_debug_type_find_name(spa_type_video_format, self->format.info.raw.format));
+
+ if(has_modifier) {
+ fprintf(stderr, "gsr info: pipewire: Modifier: 0x%" PRIx64 "\n", self->format.info.raw.modifier);
+ }
+
+ fprintf(stderr, "gsr info: pipewire: Size: %dx%d\n", self->format.info.raw.size.width, self->format.info.raw.size.height);
+ fprintf(stderr, "gsr info: pipewire: Framerate: %d/%d\n", self->format.info.raw.framerate.num, self->format.info.raw.framerate.denom);
+
+ uint8_t params_buffer[1024];
+ struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+ const struct spa_pod *params[4];
+
+ params[0] = spa_pod_builder_add_object(
+ &pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
+ SPA_PARAM_META_size,
+ SPA_POD_Int(sizeof(struct spa_meta_region)));
+
+ params[1] = spa_pod_builder_add_object(
+ &pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoDamage),
+ SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(
+ sizeof(struct spa_meta_region) * 16,
+ sizeof(struct spa_meta_region) * 1,
+ sizeof(struct spa_meta_region) * 16));
+
+ params[2] = spa_pod_builder_add_object(
+ &pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
+ SPA_PARAM_META_size,
+ SPA_POD_CHOICE_RANGE_Int(CURSOR_META_SIZE(64, 64),
+ CURSOR_META_SIZE(1, 1),
+ CURSOR_META_SIZE(1024, 1024)));
+
+ params[3] = spa_pod_builder_add_object(
+ &pod_builder, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+ SPA_PARAM_BUFFERS_dataType, SPA_POD_Int(buffer_types));
+
+ pw_stream_update_params(self->stream, params, 4);
+ self->negotiated = true;
+}
+
+static void on_state_changed_cb(void *user_data, enum pw_stream_state old, enum pw_stream_state state, const char *error) {
+ (void)old;
+ gsr_pipewire *self = user_data;
+
+ fprintf(stderr, "gsr info: pipewire: stream %p state: \"%s\" (error: %s)\n",
+ (void*)self->stream, pw_stream_state_as_string(state),
+ error ? error : "none");
+}
+
+static const struct pw_stream_events stream_events = {
+ PW_VERSION_STREAM_EVENTS,
+ .state_changed = on_state_changed_cb,
+ .param_changed = on_param_changed_cb,
+ .process = on_process_cb,
+};
+
+static inline struct spa_pod *build_format(struct spa_pod_builder *b,
+ const gsr_pipewire_video_info *ovi,
+ uint32_t format, const uint64_t *modifiers,
+ size_t modifier_count)
+{
+ struct spa_pod_frame format_frame;
+
+ spa_pod_builder_push_object(b, &format_frame, SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat);
+ spa_pod_builder_add(b, SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video), 0);
+ spa_pod_builder_add(b, SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw), 0);
+
+ spa_pod_builder_add(b, SPA_FORMAT_VIDEO_format, SPA_POD_Id(format), 0);
+
+ if (modifier_count > 0) {
+ struct spa_pod_frame modifier_frame;
+
+ spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_modifier, SPA_POD_PROP_FLAG_MANDATORY | SPA_POD_PROP_FLAG_DONT_FIXATE);
+ spa_pod_builder_push_choice(b, &modifier_frame, SPA_CHOICE_Enum, 0);
+
+ /* The first element of choice pods is the preferred value. Here
+ * we arbitrarily pick the first modifier as the preferred one.
+ */
+ // TODO:
+ spa_pod_builder_long(b, modifiers[0]);
+
+ for(uint32_t i = 0; i < modifier_count; i++)
+ spa_pod_builder_long(b, modifiers[i]);
+
+ spa_pod_builder_pop(b, &modifier_frame);
+ }
+
+ spa_pod_builder_add(b, SPA_FORMAT_VIDEO_size,
+ SPA_POD_CHOICE_RANGE_Rectangle(
+ &SPA_RECTANGLE(32, 32),
+ &SPA_RECTANGLE(1, 1),
+ &SPA_RECTANGLE(16384, 16384)),
+ SPA_FORMAT_VIDEO_framerate,
+ SPA_POD_CHOICE_RANGE_Fraction(
+ &SPA_FRACTION(ovi->fps_num, ovi->fps_den),
+ &SPA_FRACTION(0, 1), &SPA_FRACTION(500, 1)),
+ 0);
+ return spa_pod_builder_pop(b, &format_frame);
+}
+
+/* https://gstreamer.freedesktop.org/documentation/additional/design/mediatype-video-raw.html?gi-language=c#formats */
+/* Gstreamer/spa formats name the bytes in memory order while drm formats name the channels of a little-endian packed value, which is why the names appear to be in opposite order */
+static int64_t spa_video_format_to_drm_format(const enum spa_video_format format) {
+ switch(format) {
+ case SPA_VIDEO_FORMAT_RGBx: return DRM_FORMAT_XBGR8888;
+ case SPA_VIDEO_FORMAT_BGRx: return DRM_FORMAT_XRGB8888;
+ case SPA_VIDEO_FORMAT_RGBA: return DRM_FORMAT_ABGR8888;
+ case SPA_VIDEO_FORMAT_BGRA: return DRM_FORMAT_ARGB8888;
+ case SPA_VIDEO_FORMAT_RGB: return DRM_FORMAT_XBGR8888;
+ case SPA_VIDEO_FORMAT_BGR: return DRM_FORMAT_XRGB8888;
+ default: break;
+ }
+ return DRM_FORMAT_INVALID;
+}
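+
+/* Concrete example: SPA_VIDEO_FORMAT_RGBA stores the bytes R,G,B,A in memory,
+ * and DRM_FORMAT_ABGR8888 is the little-endian packed word A:B:G:R, which is
+ * the same R,G,B,A byte order in memory. */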
+
+static const enum spa_video_format video_formats[] = {
+ SPA_VIDEO_FORMAT_BGRA,
+ SPA_VIDEO_FORMAT_BGRx,
+ SPA_VIDEO_FORMAT_BGR,
+ SPA_VIDEO_FORMAT_RGBx,
+ SPA_VIDEO_FORMAT_RGBA,
+ SPA_VIDEO_FORMAT_RGB,
+};
+
+static bool gsr_pipewire_build_format_params(gsr_pipewire *self, struct spa_pod_builder *pod_builder, struct spa_pod **params, uint32_t *num_params) {
+ *num_params = 0;
+
+ if(!check_pw_version(&self->server_version, 0, 3, 33))
+ return false;
+
+ for(size_t i = 0; i < GSR_PIPEWIRE_NUM_VIDEO_FORMATS; i++) {
+ if(self->supported_video_formats[i].modifiers_size == 0)
+ continue;
+ params[*num_params] = build_format(pod_builder, &self->video_info, self->supported_video_formats[i].format, self->modifiers + self->supported_video_formats[i].modifiers_index, self->supported_video_formats[i].modifiers_size);
+ ++(*num_params);
+ }
+
+ return true;
+}
+
+static void renegotiate_format(void *data, uint64_t expirations) {
+ (void)expirations;
+ gsr_pipewire *self = (gsr_pipewire*)data;
+
+ pw_thread_loop_lock(self->thread_loop);
+
+ struct spa_pod *params[GSR_PIPEWIRE_NUM_VIDEO_FORMATS];
+ uint32_t num_video_formats = 0;
+ uint8_t params_buffer[2048];
+ struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+ if (!gsr_pipewire_build_format_params(self, &pod_builder, params, &num_video_formats)) {
+ pw_thread_loop_unlock(self->thread_loop);
+ return;
+ }
+
+ pw_stream_update_params(self->stream, (const struct spa_pod**)params, num_video_formats);
+ pw_thread_loop_unlock(self->thread_loop);
+}
+
+static bool spa_video_format_get_modifiers(gsr_pipewire *self, const enum spa_video_format format, uint64_t *modifiers, int32_t max_modifiers, int32_t *num_modifiers) {
+ *num_modifiers = 0;
+
+ if(max_modifiers == 0) {
+ fprintf(stderr, "gsr error: spa_video_format_get_modifiers: no space for modifiers left\n");
+ //modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+ //modifiers[1] = DRM_FORMAT_MOD_INVALID;
+ //*num_modifiers = 2;
+ return false;
+ }
+
+ if(!self->egl->eglQueryDmaBufModifiersEXT) {
+ fprintf(stderr, "gsr error: spa_video_format_get_modifiers: failed to initialize modifiers because eglQueryDmaBufModifiersEXT is not available\n");
+ //modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+ //modifiers[1] = DRM_FORMAT_MOD_INVALID;
+ //*num_modifiers = 2;
+ return false;
+ }
+
+ const int64_t drm_format = spa_video_format_to_drm_format(format);
+ if(!self->egl->eglQueryDmaBufModifiersEXT(self->egl->egl_display, drm_format, max_modifiers, modifiers, NULL, num_modifiers)) {
+ fprintf(stderr, "gsr error: spa_video_format_get_modifiers: eglQueryDmaBufModifiersEXT failed with drm format %d, %" PRIi64 "\n", (int)format, drm_format);
+ //modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+ //modifiers[1] = DRM_FORMAT_MOD_INVALID;
+ //*num_modifiers = 2;
+ *num_modifiers = 0;
+ return false;
+ }
+
+ // if(*num_modifiers + 2 <= max_modifiers) {
+ // modifiers[*num_modifiers + 0] = DRM_FORMAT_MOD_LINEAR;
+ // modifiers[*num_modifiers + 1] = DRM_FORMAT_MOD_INVALID;
+ // *num_modifiers += 2;
+ // }
+ return true;
+}
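+
+/* eglQueryDmaBufModifiersEXT comes from EGL_EXT_image_dma_buf_import_modifiers
+ * and reports which DRM format modifiers the EGL implementation can import for
+ * a given DRM format; these are what gets advertised to the compositor in the
+ * stream's EnumFormat pods. */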
+
+static void gsr_pipewire_init_modifiers(gsr_pipewire *self) {
+ for(size_t i = 0; i < GSR_PIPEWIRE_NUM_VIDEO_FORMATS; i++) {
+ self->supported_video_formats[i].format = video_formats[i];
+ int32_t num_modifiers = 0;
+ spa_video_format_get_modifiers(self, self->supported_video_formats[i].format, self->modifiers + self->num_modifiers, GSR_PIPEWIRE_MAX_MODIFIERS - self->num_modifiers, &num_modifiers);
+ self->supported_video_formats[i].modifiers_index = self->num_modifiers;
+ self->supported_video_formats[i].modifiers_size = num_modifiers;
+ // Advance the offset into the shared modifier array so the next format gets its own slice
+ self->num_modifiers += num_modifiers;
+ }
+}
+
+static bool gsr_pipewire_setup_stream(gsr_pipewire *self) {
+ struct spa_pod *params[GSR_PIPEWIRE_NUM_VIDEO_FORMATS];
+ uint32_t num_video_formats = 0;
+ uint8_t params_buffer[2048];
+ struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+
+ self->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL);
+ if(!self->thread_loop) {
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to create pipewire thread\n");
+ goto error;
+ }
+
+ self->context = pw_context_new(pw_thread_loop_get_loop(self->thread_loop), NULL, 0);
+ if(!self->context) {
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to create pipewire context\n");
+ goto error;
+ }
+
+ if(pw_thread_loop_start(self->thread_loop) < 0) {
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to start thread\n");
+ goto error;
+ }
+
+ pw_thread_loop_lock(self->thread_loop);
+
+ // The third argument to F_DUPFD_CLOEXEC is the lowest fd number to use for the duplicate, presumably 5 to keep clear of the standard streams
+ self->core = pw_context_connect_fd(self->context, fcntl(self->fd, F_DUPFD_CLOEXEC, 5), NULL, 0);
+ if(!self->core) {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to connect to fd %d\n", self->fd);
+ goto error;
+ }
+
+ // TODO: Error check
+ pw_core_add_listener(self->core, &self->core_listener, &core_events, self);
+
+ gsr_pipewire_init_modifiers(self);
+
+ // TODO: Cleanup?
+ self->reneg = pw_loop_add_event(pw_thread_loop_get_loop(self->thread_loop), renegotiate_format, self);
+ if(!self->reneg) {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: pw_loop_add_event failed\n");
+ goto error;
+ }
+
+ self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, 0);
+ pw_thread_loop_wait(self->thread_loop);
+
+ self->stream = pw_stream_new(self->core, "com.dec05eba.gpu_screen_recorder",
+ pw_properties_new(PW_KEY_MEDIA_TYPE, "Video",
+ PW_KEY_MEDIA_CATEGORY, "Capture",
+ PW_KEY_MEDIA_ROLE, "Screen", NULL));
+ if(!self->stream) {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to create stream\n");
+ goto error;
+ }
+ pw_stream_add_listener(self->stream, &self->stream_listener, &stream_events, self);
+
+ if(!gsr_pipewire_build_format_params(self, &pod_builder, params, &num_video_formats)) {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to build format params\n");
+ goto error;
+ }
+
+ if(pw_stream_connect(
+ self->stream, PW_DIRECTION_INPUT, self->node,
+ PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_MAP_BUFFERS, (const struct spa_pod**)params,
+ num_video_formats) < 0)
+ {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to connect stream\n");
+ goto error;
+ }
+
+ pw_thread_loop_unlock(self->thread_loop);
+ return true;
+
+ error:
+ if(self->thread_loop) {
+ //pw_thread_loop_wait(self->thread_loop);
+ pw_thread_loop_stop(self->thread_loop);
+ }
+
+ if(self->stream) {
+ pw_stream_disconnect(self->stream);
+ pw_stream_destroy(self->stream);
+ self->stream = NULL;
+ }
+
+ if(self->core) {
+ pw_core_disconnect(self->core);
+ self->core = NULL;
+ }
+
+ if(self->context) {
+ pw_context_destroy(self->context);
+ self->context = NULL;
+ }
+
+ if(self->thread_loop) {
+ pw_thread_loop_destroy(self->thread_loop);
+ self->thread_loop = NULL;
+ }
+ return false;
+}
+
+static int pw_init_counter = 0;
+bool gsr_pipewire_init(gsr_pipewire *self, int pipewire_fd, uint32_t pipewire_node, int fps, bool capture_cursor, gsr_egl *egl) {
+ if(pw_init_counter == 0)
+ pw_init(NULL, NULL);
+ ++pw_init_counter;
+
+ memset(self, 0, sizeof(*self));
+ self->egl = egl;
+ self->fd = pipewire_fd;
+ self->node = pipewire_node;
+ if(pthread_mutex_init(&self->mutex, NULL) != 0) {
+ fprintf(stderr, "gsr error: gsr_pipewire_init: failed to initialize mutex\n");
+ gsr_pipewire_deinit(self);
+ return false;
+ }
+ self->mutex_initialized = true;
+ self->video_info.fps_num = fps;
+ self->video_info.fps_den = 1;
+ self->cursor.visible = capture_cursor;
+
+ if(!gsr_pipewire_setup_stream(self)) {
+ gsr_pipewire_deinit(self);
+ return false;
+ }
+
+ return true;
+}
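+
+/* Minimal usage sketch (illustrative only; error handling and the per-frame
+ * arguments to gsr_pipewire_map_texture are elided):
+ *
+ * gsr_pipewire pw;
+ * if(gsr_pipewire_init(&pw, pipewire_fd, pipewire_node, 60, true, egl)) {
+ * // each frame: gsr_pipewire_map_texture(&pw, ...) imports the latest buffer
+ * gsr_pipewire_deinit(&pw);
+ * }
+ */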
+
+void gsr_pipewire_deinit(gsr_pipewire *self) {
+ if(self->thread_loop) {
+ //pw_thread_loop_wait(self->thread_loop);
+ pw_thread_loop_stop(self->thread_loop);
+ }
+
+ if(self->stream) {
+ pw_stream_disconnect(self->stream);
+ pw_stream_destroy(self->stream);
+ self->stream = NULL;
+ }
+
+ if(self->core) {
+ pw_core_disconnect(self->core);
+ self->core = NULL;
+ }
+
+ if(self->context) {
+ pw_context_destroy(self->context);
+ self->context = NULL;
+ }
+
+ if(self->thread_loop) {
+ pw_thread_loop_destroy(self->thread_loop);
+ self->thread_loop = NULL;
+ }
+
+ if(self->fd > 0) {
+ close(self->fd);
+ self->fd = -1;
+ }
+
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ if(self->dmabuf_data[i].fd > 0) {
+ close(self->dmabuf_data[i].fd);
+ self->dmabuf_data[i].fd = -1;
+ }
+ }
+ self->dmabuf_num_planes = 0;
+
+ self->negotiated = false;
+
+ if(self->mutex_initialized) {
+ pthread_mutex_destroy(&self->mutex);
+ self->mutex_initialized = false;
+ }
+
+ if(self->cursor.data) {
+ free(self->cursor.data);
+ self->cursor.data = NULL;
+ }
+
+ --pw_init_counter;
+ if(pw_init_counter == 0) {
+#if PW_CHECK_VERSION(0, 3, 49)
+ pw_deinit();
+#endif
+ }
+}
+
+static EGLImage gsr_pipewire_create_egl_image(gsr_pipewire *self, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, bool use_modifiers) {
+ intptr_t img_attr[47]; // setup_dma_buf_attrs writes at most 47 entries (4 planes with modifiers)
+ setup_dma_buf_attrs(img_attr, spa_video_format_to_drm_format(self->format.info.raw.format), self->format.info.raw.size.width, self->format.info.raw.size.height,
+ fds, offsets, pitches, modifiers, self->dmabuf_num_planes, use_modifiers);
+ while(self->egl->eglGetError() != EGL_SUCCESS){}
+ EGLImage image = self->egl->eglCreateImage(self->egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
+ if(!image || self->egl->eglGetError() != EGL_SUCCESS) {
+ if(image)
+ self->egl->eglDestroyImage(self->egl->egl_display, image);
+ return NULL;
+ }
+ return image;
+}
+
+static EGLImage gsr_pipewire_create_egl_image_with_fallback(gsr_pipewire *self) {
+ int fds[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ uint32_t offsets[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ uint32_t pitches[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ uint64_t modifiers[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ fds[i] = self->dmabuf_data[i].fd;
+ offsets[i] = self->dmabuf_data[i].offset;
+ pitches[i] = self->dmabuf_data[i].stride;
+ modifiers[i] = self->format.info.raw.modifier;
+ }
+
+ EGLImage image = NULL;
+ if(self->no_modifiers_fallback) {
+ image = gsr_pipewire_create_egl_image(self, fds, offsets, pitches, modifiers, false);
+ } else {
+ image = gsr_pipewire_create_egl_image(self, fds, offsets, pitches, modifiers, true);
+ if(!image) {
+ fprintf(stderr, "gsr error: gsr_pipewire_create_egl_image_with_fallback: failed to create egl image with modifiers, trying without modifiers\n");
+ self->no_modifiers_fallback = true;
+ image = gsr_pipewire_create_egl_image(self, fds, offsets, pitches, modifiers, false);
+ }
+ }
+ return image;
+}
+
+static bool gsr_pipewire_bind_image_to_texture(gsr_pipewire *self, EGLImage image, unsigned int texture_id, bool external_texture) {
+ const int texture_target = external_texture ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+ while(self->egl->glGetError() != 0){}
+ self->egl->glBindTexture(texture_target, texture_id);
+ self->egl->glEGLImageTargetTexture2DOES(texture_target, image);
+ const bool success = self->egl->glGetError() == 0;
+ self->egl->glBindTexture(texture_target, 0);
+ return success;
+}
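+
+/* Some drivers can only sample certain dmabuf layouts (for example tiled
+ * buffers) through GL_TEXTURE_EXTERNAL_OES rather than GL_TEXTURE_2D, which
+ * is why the fallback below retries with an external texture. */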
+
+static void gsr_pipewire_bind_image_to_texture_with_fallback(gsr_pipewire *self, gsr_texture_map texture_map, EGLImage image) {
+ if(self->external_texture_fallback) {
+ gsr_pipewire_bind_image_to_texture(self, image, texture_map.external_texture_id, true);
+ } else {
+ if(!gsr_pipewire_bind_image_to_texture(self, image, texture_map.texture_id, false)) {
+ fprintf(stderr, "gsr error: gsr_pipewire_map_texture: failed to bind image to texture, trying with external texture\n");
+ self->external_texture_fallback = true;
+ gsr_pipewire_bind_image_to_texture(self, image, texture_map.external_texture_id, true);
+ }
+ }
+}
+
+static void gsr_pipewire_update_cursor_texture(gsr_pipewire *self, gsr_texture_map texture_map) {
+ if(!self->cursor.data)
+ return;
+
+ self->egl->glBindTexture(GL_TEXTURE_2D, texture_map.cursor_texture_id);
+ // TODO: glTextureSubImage2D if same size
+ self->egl->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self->cursor.width, self->cursor.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, self->cursor.data);
+ self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ free(self->cursor.data);
+ self->cursor.data = NULL;
+}
+
+bool gsr_pipewire_map_texture(gsr_pipewire *self, gsr_texture_map texture_map, gsr_pipewire_region *region, gsr_pipewire_region *cursor_region, gsr_pipewire_dmabuf_data *dmabuf_data, int *num_dmabuf_data, uint32_t *fourcc, uint64_t *modifiers, bool *using_external_image) {
+ for(int i = 0; i < GSR_PIPEWIRE_DMABUF_MAX_PLANES; ++i) {
+ memset(&dmabuf_data[i], 0, sizeof(gsr_pipewire_dmabuf_data));
+ }
+ *num_dmabuf_data = 0;
+ *using_external_image = self->external_texture_fallback;
+ *fourcc = 0;
+ *modifiers = 0;
+ pthread_mutex_lock(&self->mutex);
+
+ if(!self->negotiated || self->dmabuf_data[0].fd <= 0) {
+ pthread_mutex_unlock(&self->mutex);
+ return false;
+ }
+
+ EGLImage image = gsr_pipewire_create_egl_image_with_fallback(self);
+ if(image) {
+ gsr_pipewire_bind_image_to_texture_with_fallback(self, texture_map, image);
+ *using_external_image = self->external_texture_fallback;
+ self->egl->eglDestroyImage(self->egl->egl_display, image);
+ }
+
+ gsr_pipewire_update_cursor_texture(self, texture_map);
+
+ region->x = 0;
+ region->y = 0;
+
+ region->width = self->format.info.raw.size.width;
+ region->height = self->format.info.raw.size.height;
+
+ if(self->crop.valid) {
+ region->x = self->crop.x;
+ region->y = self->crop.y;
+
+ region->width = self->crop.width;
+ region->height = self->crop.height;
+ }
+
+ /* TODO: Test if cursor hotspot is correct */
+ cursor_region->x = self->cursor.x - self->cursor.hotspot_x;
+ cursor_region->y = self->cursor.y - self->cursor.hotspot_y;
+
+ cursor_region->width = self->cursor.width;
+ cursor_region->height = self->cursor.height;
+
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ dmabuf_data[i] = self->dmabuf_data[i];
+ self->dmabuf_data[i].fd = -1;
+ }
+ *num_dmabuf_data = self->dmabuf_num_planes;
+ *fourcc = spa_video_format_to_drm_format(self->format.info.raw.format);
+ *modifiers = self->format.info.raw.modifier;
+ self->dmabuf_num_planes = 0;
+
+ pthread_mutex_unlock(&self->mutex);
+ return true;
+}
+
+bool gsr_pipewire_is_damaged(gsr_pipewire *self) {
+ bool damaged = false;
+ pthread_mutex_lock(&self->mutex);
+ damaged = self->damaged;
+ pthread_mutex_unlock(&self->mutex);
+ return damaged;
+}
+
+void gsr_pipewire_clear_damage(gsr_pipewire *self) {
+ pthread_mutex_lock(&self->mutex);
+ self->damaged = false;
+ pthread_mutex_unlock(&self->mutex);
+}
diff --git a/src/sound.cpp b/src/sound.cpp
index 53000bd..d0f2a80 100644
--- a/src/sound.cpp
+++ b/src/sound.cpp
@@ -327,12 +327,66 @@ static void pa_sourcelist_cb(pa_context *ctx, const pa_source_info *source_info,
if(eol > 0)
return;
- std::vector<AudioInput> *inputs = (std::vector<AudioInput>*)userdata;
- inputs->push_back({ source_info->name, source_info->description });
+ AudioDevices *audio_devices = (AudioDevices*)userdata;
+ audio_devices->audio_inputs.push_back({ source_info->name, source_info->description });
}
-std::vector<AudioInput> get_pulseaudio_inputs() {
- std::vector<AudioInput> inputs;
+static void pa_server_info_cb(pa_context*, const pa_server_info *server_info, void *userdata) {
+ AudioDevices *audio_devices = (AudioDevices*)userdata;
+ if(server_info->default_sink_name)
+ audio_devices->default_output = std::string(server_info->default_sink_name) + ".monitor";
+ if(server_info->default_source_name)
+ audio_devices->default_input = server_info->default_source_name;
+}
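+
+// PulseAudio exposes each sink's playback stream as a recordable "<sink>.monitor"
+// source, which is why ".monitor" is appended to the default sink name above.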
+
+static void get_pulseaudio_default_inputs(AudioDevices &audio_devices) {
+ pa_mainloop *main_loop = pa_mainloop_new();
+
+ pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder-gtk");
+ pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL);
+ int state = 0;
+ int pa_ready = 0;
+ pa_context_set_state_callback(ctx, pa_state_cb, &pa_ready);
+
+ pa_operation *pa_op = NULL;
+
+ for(;;) {
+ // Not ready
+ if(pa_ready == 0) {
+ pa_mainloop_iterate(main_loop, 1, NULL);
+ continue;
+ }
+
+ switch(state) {
+ case 0: {
+ pa_op = pa_context_get_server_info(ctx, pa_server_info_cb, &audio_devices);
+ ++state;
+ break;
+ }
+ }
+
+ // Connection to the server failed, or the server info request completed
+ if(pa_ready == 2 || (state == 1 && pa_op && pa_operation_get_state(pa_op) == PA_OPERATION_DONE)) {
+ if(pa_op)
+ pa_operation_unref(pa_op);
+ pa_context_disconnect(ctx);
+ pa_context_unref(ctx);
+ pa_mainloop_free(main_loop);
+ return;
+ }
+
+ pa_mainloop_iterate(main_loop, 1, NULL);
+ }
+
+ pa_mainloop_free(main_loop);
+}
+
+AudioDevices get_pulseaudio_inputs() {
+ AudioDevices audio_devices;
+
+ // TODO: Do this in the same connection below instead of two separate connections
+ get_pulseaudio_default_inputs(audio_devices);
+
pa_mainloop *main_loop = pa_mainloop_new();
pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder");
@@ -352,7 +406,7 @@ std::vector<AudioInput> get_pulseaudio_inputs() {
switch(state) {
case 0: {
- pa_op = pa_context_get_source_info_list(ctx, pa_sourcelist_cb, &inputs);
+ pa_op = pa_context_get_source_info_list(ctx, pa_sourcelist_cb, &audio_devices);
++state;
break;
}
@@ -371,5 +425,5 @@ std::vector<AudioInput> get_pulseaudio_inputs() {
}
pa_mainloop_free(main_loop);
- return inputs;
+ return audio_devices;
}
diff --git a/src/utils.c b/src/utils.c
index e00f3c5..42f4c40 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -1,13 +1,23 @@
#include "../include/utils.h"
+
#include <time.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <assert.h>
+
#include <xf86drmMode.h>
#include <xf86drm.h>
-#include <stdlib.h>
+#include <libdrm/drm_fourcc.h>
#include <X11/Xatom.h>
+#include <X11/extensions/Xrandr.h>
+#include <va/va_drmcommon.h>
+#include <libavcodec/avcodec.h>
+#include <libavutil/hwcontext_vaapi.h>
double clock_get_monotonic_seconds(void) {
struct timespec ts;
@@ -17,6 +27,16 @@ double clock_get_monotonic_seconds(void) {
return (double)ts.tv_sec + (double)ts.tv_nsec * 0.000000001;
}
+static gsr_monitor_rotation wayland_transform_to_gsr_rotation(int32_t rot) {
+ switch(rot) {
+ case 0: return GSR_MONITOR_ROT_0;
+ case 1: return GSR_MONITOR_ROT_90;
+ case 2: return GSR_MONITOR_ROT_180;
+ case 3: return GSR_MONITOR_ROT_270;
+ }
+ return GSR_MONITOR_ROT_0;
+}
+
static const XRRModeInfo* get_mode_info(const XRRScreenResources *sr, RRMode id) {
for(int i = 0; i < sr->nmode; ++i) {
if(sr->modes[i].id == id)
@@ -35,16 +55,6 @@ static gsr_monitor_rotation x11_rotation_to_gsr_rotation(int rot) {
return GSR_MONITOR_ROT_0;
}
-static gsr_monitor_rotation wayland_transform_to_gsr_rotation(int32_t rot) {
- switch(rot) {
- case 0: return GSR_MONITOR_ROT_0;
- case 1: return GSR_MONITOR_ROT_90;
- case 2: return GSR_MONITOR_ROT_180;
- case 3: return GSR_MONITOR_ROT_270;
- }
- return GSR_MONITOR_ROT_0;
-}
-
static uint32_t x11_output_get_connector_id(Display *dpy, RROutput output, Atom randr_connector_id_atom) {
Atom type = 0;
int format = 0;
@@ -61,7 +71,7 @@ static uint32_t x11_output_get_connector_id(Display *dpy, RROutput output, Atom
return result;
}
-void for_each_active_monitor_output_x11(Display *display, active_monitor_callback callback, void *userdata) {
+void for_each_active_monitor_output_x11_not_cached(Display *display, active_monitor_callback callback, void *userdata) {
XRRScreenResources *screen_res = XRRGetScreenResources(display, DefaultRootWindow(display));
if(!screen_res)
return;
@@ -76,18 +86,15 @@ void for_each_active_monitor_output_x11(Display *display, active_monitor_callbac
if(crt_info && crt_info->mode) {
const XRRModeInfo *mode_info = get_mode_info(screen_res, crt_info->mode);
if(mode_info && out_info->nameLen < (int)sizeof(display_name)) {
- memcpy(display_name, out_info->name, out_info->nameLen);
- display_name[out_info->nameLen] = '\0';
-
+ snprintf(display_name, sizeof(display_name), "%.*s", (int)out_info->nameLen, out_info->name);
const gsr_monitor monitor = {
.name = display_name,
.name_len = out_info->nameLen,
.pos = { .x = crt_info->x, .y = crt_info->y },
.size = { .x = (int)crt_info->width, .y = (int)crt_info->height },
- .crt_info = crt_info,
.connector_id = x11_output_get_connector_id(display, screen_res->outputs[i], randr_connector_id_atom),
.rotation = x11_rotation_to_gsr_rotation(crt_info->rotation),
- .monitor_identifier = 0
+ .monitor_identifier = out_info->crtc
};
callback(&monitor, userdata);
}
@@ -102,6 +109,22 @@ void for_each_active_monitor_output_x11(Display *display, active_monitor_callbac
XRRFreeScreenResources(screen_res);
}
+void for_each_active_monitor_output_x11(const gsr_egl *egl, active_monitor_callback callback, void *userdata) {
+ for(int i = 0; i < egl->x11.num_outputs; ++i) {
+ const gsr_x11_output *output = &egl->x11.outputs[i];
+ const gsr_monitor monitor = {
+ .name = output->name,
+ .name_len = strlen(output->name),
+ .pos = output->pos,
+ .size = output->size,
+ .connector_id = output->connector_id,
+ .rotation = output->rotation,
+ .monitor_identifier = output->monitor_identifier
+ };
+ callback(&monitor, userdata);
+ }
+}
+
typedef struct {
int type;
int count;
@@ -142,7 +165,7 @@ static bool connector_get_property_by_name(int drmfd, drmModeConnectorPtr props,
return false;
}
-/* TODO: Support more connector types*/
+/* TODO: Support more connector types */
static int get_connector_type_by_name(const char *name) {
int len = strlen(name);
if(len >= 5 && strncmp(name, "HDMI-", 5) == 0)
@@ -185,7 +208,6 @@ static void for_each_active_monitor_output_wayland(const gsr_egl *egl, active_mo
.name_len = strlen(output->name),
.pos = { .x = output->pos.x, .y = output->pos.y },
.size = { .x = output->size.x, .y = output->size.y },
- .crt_info = NULL,
.connector_id = 0,
.rotation = wayland_transform_to_gsr_rotation(output->transform),
.monitor_identifier = connector_type ? monitor_identifier_from_type_and_count(connector_type_index, connector_type->count_active) : 0
@@ -233,12 +255,11 @@ static void for_each_active_monitor_output_drm(const gsr_egl *egl, active_monito
if(connector_type && crtc_id > 0 && crtc && connection_name_len + 5 < (int)sizeof(display_name)) {
const int display_name_len = snprintf(display_name, sizeof(display_name), "%s-%d", connection_name, connector_type->count);
const int connector_type_index_name = get_connector_type_by_name(display_name);
- const gsr_monitor monitor = {
+ gsr_monitor monitor = {
.name = display_name,
.name_len = display_name_len,
.pos = { .x = crtc->x, .y = crtc->y },
.size = { .x = (int)crtc->width, .y = (int)crtc->height },
- .crt_info = NULL,
.connector_id = connector->connector_id,
.rotation = GSR_MONITOR_ROT_0,
.monitor_identifier = connector_type_index_name != -1 ? monitor_identifier_from_type_and_count(connector_type_index_name, connector_type->count_active) : 0
@@ -260,7 +281,7 @@ static void for_each_active_monitor_output_drm(const gsr_egl *egl, active_monito
void for_each_active_monitor_output(const gsr_egl *egl, gsr_connection_type connection_type, active_monitor_callback callback, void *userdata) {
switch(connection_type) {
case GSR_CONNECTION_X11:
- for_each_active_monitor_output_x11(egl->x11.dpy, callback, userdata);
+ for_each_active_monitor_output_x11(egl, callback, userdata);
break;
case GSR_CONNECTION_WAYLAND:
for_each_active_monitor_output_wayland(egl, callback, userdata);
@@ -322,7 +343,7 @@ static void get_monitor_by_connector_id_callback(const gsr_monitor *monitor, voi
}
gsr_monitor_rotation drm_monitor_get_display_server_rotation(const gsr_egl *egl, const gsr_monitor *monitor) {
- if(egl->wayland.dpy) {
+ if(gsr_egl_get_display_server(egl) == GSR_DISPLAY_SERVER_WAYLAND) {
{
get_monitor_by_connector_id_userdata userdata;
userdata.monitor = monitor;
@@ -345,7 +366,7 @@ gsr_monitor_rotation drm_monitor_get_display_server_rotation(const gsr_egl *egl,
userdata.monitor = monitor;
userdata.rotation = GSR_MONITOR_ROT_0;
userdata.match_found = false;
- for_each_active_monitor_output_x11(egl->x11.dpy, get_monitor_by_connector_id_callback, &userdata);
+ for_each_active_monitor_output_x11(egl, get_monitor_by_connector_id_callback, &userdata);
return userdata.rotation;
}
@@ -359,6 +380,7 @@ bool gl_get_gpu_info(gsr_egl *egl, gsr_gpu_info *info) {
const unsigned char *gl_renderer = egl->glGetString(GL_RENDERER);
info->gpu_version = 0;
+ info->is_steam_deck = false;
if(!gl_vendor) {
fprintf(stderr, "gsr error: failed to get gpu vendor\n");
@@ -391,6 +413,7 @@ bool gl_get_gpu_info(gsr_egl *egl, gsr_gpu_info *info) {
if(gl_renderer) {
if(info->vendor == GSR_GPU_VENDOR_NVIDIA)
sscanf((const char*)gl_renderer, "%*s %*s %*s %d", &info->gpu_version);
+ info->is_steam_deck = strstr((const char*)gl_renderer, "vangogh") != NULL;
}
end:
@@ -480,3 +503,384 @@ bool gsr_card_path_get_render_path(const char *card_path, char *render_path) {
close(fd);
return false;
}
+
+int create_directory_recursive(char *path) {
+ int path_len = strlen(path);
+ char *p = path;
+ char *end = path + path_len;
+ for(;;) {
+ char *slash_p = strchr(p, '/');
+
+ // Skips first '/', we don't want to try and create the root directory
+ if(slash_p == path) {
+ ++p;
+ continue;
+ }
+
+ if(!slash_p)
+ slash_p = end;
+
+ char prev_char = *slash_p;
+ *slash_p = '\0';
+ int err = mkdir(path, S_IRWXU);
+ *slash_p = prev_char;
+
+ if(err == -1 && errno != EEXIST)
+ return err;
+
+ if(slash_p == end)
+ break;
+ else
+ p = slash_p + 1;
+ }
+ return 0;
+}
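+
+/* Illustrative example: create_directory_recursive("/tmp/a/b") calls
+ * mkdir("/tmp"), mkdir("/tmp/a") and mkdir("/tmp/a/b"), treating EEXIST as
+ * success. The path buffer is temporarily modified in place and restored. */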
+
+void setup_dma_buf_attrs(intptr_t *img_attr, uint32_t format, uint32_t width, uint32_t height, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, int num_planes, bool use_modifier) {
+ size_t img_attr_index = 0;
+
+ img_attr[img_attr_index++] = EGL_LINUX_DRM_FOURCC_EXT;
+ img_attr[img_attr_index++] = format;
+
+ img_attr[img_attr_index++] = EGL_WIDTH;
+ img_attr[img_attr_index++] = width;
+
+ img_attr[img_attr_index++] = EGL_HEIGHT;
+ img_attr[img_attr_index++] = height;
+
+ if(num_planes >= 1) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_FD_EXT;
+ img_attr[img_attr_index++] = fds[0];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT;
+ img_attr[img_attr_index++] = offsets[0];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_PITCH_EXT;
+ img_attr[img_attr_index++] = pitches[0];
+
+ if(use_modifier) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
+ img_attr[img_attr_index++] = modifiers[0] & 0xFFFFFFFFULL;
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
+ img_attr[img_attr_index++] = modifiers[0] >> 32ULL;
+ }
+ }
+
+ if(num_planes >= 2) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_FD_EXT;
+ img_attr[img_attr_index++] = fds[1];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_OFFSET_EXT;
+ img_attr[img_attr_index++] = offsets[1];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_PITCH_EXT;
+ img_attr[img_attr_index++] = pitches[1];
+
+ if(use_modifier) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT;
+ img_attr[img_attr_index++] = modifiers[1] & 0xFFFFFFFFULL;
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT;
+ img_attr[img_attr_index++] = modifiers[1] >> 32ULL;
+ }
+ }
+
+ if(num_planes >= 3) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_FD_EXT;
+ img_attr[img_attr_index++] = fds[2];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_OFFSET_EXT;
+ img_attr[img_attr_index++] = offsets[2];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_PITCH_EXT;
+ img_attr[img_attr_index++] = pitches[2];
+
+ if(use_modifier) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT;
+ img_attr[img_attr_index++] = modifiers[2] & 0xFFFFFFFFULL;
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT;
+ img_attr[img_attr_index++] = modifiers[2] >> 32ULL;
+ }
+ }
+
+ if(num_planes >= 4) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_FD_EXT;
+ img_attr[img_attr_index++] = fds[3];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_OFFSET_EXT;
+ img_attr[img_attr_index++] = offsets[3];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_PITCH_EXT;
+ img_attr[img_attr_index++] = pitches[3];
+
+ if(use_modifier) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT;
+ img_attr[img_attr_index++] = modifiers[3] & 0xFFFFFFFFULL;
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT;
+ img_attr[img_attr_index++] = modifiers[3] >> 32ULL;
+ }
+ }
+
+ img_attr[img_attr_index++] = EGL_NONE;
+ // 3 header pairs + up to 4 planes * 5 pairs + the EGL_NONE terminator = at most 47 entries
+ assert(img_attr_index <= 47);
+}
+
+static VADisplay video_codec_context_get_vaapi_display(AVCodecContext *video_codec_context) {
+ AVBufferRef *hw_frames_ctx = video_codec_context->hw_frames_ctx;
+ if(!hw_frames_ctx)
+ return NULL;
+
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)hw_frames_ctx->data;
+ AVHWDeviceContext *device_context = (AVHWDeviceContext*)hw_frame_context->device_ctx;
+ if(device_context->type != AV_HWDEVICE_TYPE_VAAPI)
+ return NULL;
+
+ AVVAAPIDeviceContext *vactx = device_context->hwctx;
+ return vactx->display;
+}
+
+bool video_codec_context_is_vaapi(AVCodecContext *video_codec_context) {
+ AVBufferRef *hw_frames_ctx = video_codec_context->hw_frames_ctx;
+ if(!hw_frames_ctx)
+ return false;
+
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)hw_frames_ctx->data;
+ AVHWDeviceContext *device_context = (AVHWDeviceContext*)hw_frame_context->device_ctx;
+ return device_context->type == AV_HWDEVICE_TYPE_VAAPI;
+}
+
+static uint32_t drm_fourcc_to_va_fourcc(uint32_t drm_fourcc) {
+ switch(drm_fourcc) {
+ case DRM_FORMAT_XRGB8888: return VA_FOURCC_BGRX;
+ case DRM_FORMAT_XBGR8888: return VA_FOURCC_RGBX;
+ case DRM_FORMAT_RGBX8888: return VA_FOURCC_XBGR;
+ case DRM_FORMAT_BGRX8888: return VA_FOURCC_XRGB;
+ case DRM_FORMAT_ARGB8888: return VA_FOURCC_BGRA;
+ case DRM_FORMAT_ABGR8888: return VA_FOURCC_RGBA;
+ case DRM_FORMAT_RGBA8888: return VA_FOURCC_ABGR;
+ case DRM_FORMAT_BGRA8888: return VA_FOURCC_ARGB;
+ default: return drm_fourcc;
+ }
+}
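+
+/* VA fourccs name the bytes in memory order while DRM fourccs name the
+ * channels of a little-endian packed value, so e.g. DRM_FORMAT_ARGB8888
+ * (bytes B,G,R,A in memory) maps to VA_FOURCC_BGRA. */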
+
+bool vaapi_copy_drm_planes_to_video_surface(AVCodecContext *video_codec_context, AVFrame *video_frame, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, uint32_t format, vec2i size, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, int num_planes) {
+ VAConfigID config_id = 0;
+ VAContextID context_id = 0;
+ VASurfaceID input_surface_id = 0;
+ VABufferID buffer_id = 0;
+ bool success = true;
+
+ VADisplay va_dpy = video_codec_context_get_vaapi_display(video_codec_context);
+ if(!va_dpy) {
+ success = false;
+ goto done;
+ }
+
+ VAStatus va_status = vaCreateConfig(va_dpy, VAProfileNone, VAEntrypointVideoProc, NULL, 0, &config_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaCreateConfig failed, error: %s\n", vaErrorStr(va_status));
+ success = false;
+ goto done;
+ }
+
+ VASurfaceID output_surface_id = (uintptr_t)video_frame->data[3];
+ va_status = vaCreateContext(va_dpy, config_id, size.x, size.y, VA_PROGRESSIVE, &output_surface_id, 1, &context_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaCreateContext failed, error: %s\n", vaErrorStr(va_status));
+ success = false;
+ goto done;
+ }
+
+ VADRMPRIMESurfaceDescriptor buf = {0};
+ buf.fourcc = drm_fourcc_to_va_fourcc(format); // TODO: VA_FOURCC_BGRA, VA_FOURCC_X2R10G10B10
+ buf.width = size.x;
+ buf.height = size.y;
+ buf.num_objects = num_planes;
+ buf.num_layers = 1;
+ buf.layers[0].drm_format = format;
+ buf.layers[0].num_planes = buf.num_objects;
+ for(int i = 0; i < num_planes; ++i) {
+ buf.objects[i].fd = fds[i];
+ buf.objects[i].size = size.y * pitches[i]; // TODO:
+ buf.objects[i].drm_format_modifier = modifiers[i];
+
+ buf.layers[0].object_index[i] = i;
+ buf.layers[0].offset[i] = offsets[i];
+ buf.layers[0].pitch[i] = pitches[i];
+ }
+
+ VASurfaceAttrib attribs[2] = {0};
+ attribs[0].type = VASurfaceAttribMemoryType;
+ attribs[0].flags = VA_SURFACE_ATTRIB_SETTABLE;
+ attribs[0].value.type = VAGenericValueTypeInteger;
+ attribs[0].value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2;
+ attribs[1].type = VASurfaceAttribExternalBufferDescriptor;
+ attribs[1].flags = VA_SURFACE_ATTRIB_SETTABLE;
+ attribs[1].value.type = VAGenericValueTypePointer;
+ attribs[1].value.value.p = &buf;
+
+ // TODO: RT_FORMAT with 10 bit/hdr, VA_RT_FORMAT_RGB32_10
+ // TODO: Max size same as source_size
+ va_status = vaCreateSurfaces(va_dpy, VA_RT_FORMAT_RGB32, size.x, size.y, &input_surface_id, 1, attribs, 2);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaCreateSurfaces failed, error: %s\n", vaErrorStr(va_status));
+ success = false;
+ goto done;
+ }
+
+ const VARectangle source_region = {
+ .x = source_pos.x,
+ .y = source_pos.y,
+ .width = source_size.x,
+ .height = source_size.y
+ };
+
+ const VARectangle output_region = {
+ .x = dest_pos.x,
+ .y = dest_pos.y,
+ .width = dest_size.x,
+ .height = dest_size.y
+ };
+
+ // Copying a surface to another surface will automatically perform the color conversion. Thanks vaapi!
+ VAProcPipelineParameterBuffer params = {0};
+ params.surface = input_surface_id;
+ params.surface_region = &source_region;
+ params.output_region = &output_region;
+ params.output_background_color = 0;
+ params.filter_flags = VA_FRAME_PICTURE;
+ params.pipeline_flags = VA_PROC_PIPELINE_FAST;
+
+ params.input_color_properties.colour_primaries = 1;
+ params.input_color_properties.transfer_characteristics = 1;
+ params.input_color_properties.matrix_coefficients = 1;
+ params.surface_color_standard = VAProcColorStandardBT709; // TODO:
+ params.input_color_properties.color_range = video_frame->color_range == AVCOL_RANGE_JPEG ? VA_SOURCE_RANGE_FULL : VA_SOURCE_RANGE_REDUCED;
+
+ params.output_color_properties.colour_primaries = 1;
+ params.output_color_properties.transfer_characteristics = 1;
+ params.output_color_properties.matrix_coefficients = 1;
+ params.output_color_standard = VAProcColorStandardBT709; // TODO:
+ params.output_color_properties.color_range = video_frame->color_range == AVCOL_RANGE_JPEG ? VA_SOURCE_RANGE_FULL : VA_SOURCE_RANGE_REDUCED;
+
+ params.processing_mode = VAProcPerformanceMode;
+
+ // VAProcPipelineCaps pipeline_caps = {0};
+ // va_status = vaQueryVideoProcPipelineCaps(self->va_dpy,
+ // self->context_id,
+ // NULL, 0,
+ // &pipeline_caps);
+ // if(va_status == VA_STATUS_SUCCESS) {
+ // fprintf(stderr, "pipeline_caps: %u, %u\n", (unsigned int)pipeline_caps.rotation_flags, pipeline_caps.blend_flags);
+ // }
+
+ // TODO: params.output_hdr_metadata
+
+ // TODO:
+ // if (first surface to render)
+ // pipeline_param->output_background_color = 0xff000000; // black
+
+ va_status = vaCreateBuffer(va_dpy, context_id, VAProcPipelineParameterBufferType, sizeof(params), 1, &params, &buffer_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaCreateBuffer failed, error: %d\n", va_status);
+ success = false;
+ goto done;
+ }
+
+ va_status = vaBeginPicture(va_dpy, context_id, output_surface_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaBeginPicture failed, error: %d\n", va_status);
+ success = false;
+ goto done;
+ }
+
+ va_status = vaRenderPicture(va_dpy, context_id, &buffer_id, 1);
+ if(va_status != VA_STATUS_SUCCESS) {
+ vaEndPicture(va_dpy, context_id);
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaRenderPicture failed, error: %d\n", va_status);
+ success = false;
+ goto done;
+ }
+
+ va_status = vaEndPicture(va_dpy, context_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaEndPicture failed, error: %d\n", va_status);
+ success = false;
+ goto done;
+ }
+
+ // vaSyncBuffer(va_dpy, buffer_id, 1000 * 1000 * 1000);
+ // vaSyncSurface(va_dpy, input_surface_id);
+ // vaSyncSurface(va_dpy, output_surface_id);
+
+ done:
+ if(buffer_id)
+ vaDestroyBuffer(va_dpy, buffer_id);
+
+ if(input_surface_id)
+ vaDestroySurfaces(va_dpy, &input_surface_id, 1);
+
+ if(context_id)
+ vaDestroyContext(va_dpy, context_id);
+
+ if(config_id)
+ vaDestroyConfig(va_dpy, config_id);
+
+ return success;
+}
+
+bool vaapi_copy_egl_image_to_video_surface(gsr_egl *egl, EGLImage image, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, AVCodecContext *video_codec_context, AVFrame *video_frame) {
+ if(!image)
+ return false;
+
+ int texture_fourcc = 0;
+ int texture_num_planes = 0;
+ uint64_t texture_modifiers = 0;
+ if(!egl->eglExportDMABUFImageQueryMESA(egl->egl_display, image, &texture_fourcc, &texture_num_planes, &texture_modifiers)) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: eglExportDMABUFImageQueryMESA failed\n");
+ return false;
+ }
+
+ if(texture_num_planes <= 0 || texture_num_planes > 8) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: expected planes size to be 0<planes<8 for drm buf, got %d planes\n", texture_num_planes);
+ return false;
+ }
+
+ int texture_fds[8];
+ int32_t texture_strides[8];
+ int32_t texture_offsets[8];
+
+ while(egl->eglGetError() != EGL_SUCCESS){}
+ if(!egl->eglExportDMABUFImageMESA(egl->egl_display, image, texture_fds, texture_strides, texture_offsets)) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: eglExportDMABUFImageMESA failed, error: %d\n", egl->eglGetError());
+ return false;
+ }
+
+ int fds[8];
+ uint32_t offsets[8];
+ uint32_t pitches[8];
+ uint64_t modifiers[8];
+ for(int i = 0; i < texture_num_planes; ++i) {
+ fds[i] = texture_fds[i];
+ offsets[i] = texture_offsets[i];
+ pitches[i] = texture_strides[i];
+ modifiers[i] = texture_modifiers;
+
+ if(fds[i] == -1)
+ texture_num_planes = i;
+ }
+ const bool success = texture_num_planes > 0 && vaapi_copy_drm_planes_to_video_surface(video_codec_context, video_frame, source_pos, source_size, dest_pos, dest_size, texture_fourcc, source_size, fds, offsets, pitches, modifiers, texture_num_planes);
+
+ for(int i = 0; i < texture_num_planes; ++i) {
+ if(texture_fds[i] > 0) {
+ close(texture_fds[i]);
+ texture_fds[i] = -1;
+ }
+ }
+
+ return success;
+}
diff --git a/src/window_texture.c b/src/window_texture.c
index 0f4aa2c..8eef4c9 100644
--- a/src/window_texture.c
+++ b/src/window_texture.c
@@ -16,6 +16,7 @@ int window_texture_init(WindowTexture *window_texture, Display *display, Window
window_texture->display = display;
window_texture->window = window;
window_texture->pixmap = None;
+ window_texture->image = NULL;
window_texture->texture_id = 0;
window_texture->redirected = 0;
window_texture->egl = egl;
@@ -34,6 +35,11 @@ static void window_texture_cleanup(WindowTexture *self, int delete_texture) {
self->texture_id = 0;
}
+ if(self->image) {
+ self->egl->eglDestroyImage(self->egl->egl_display, self->image);
+ self->image = NULL;
+ }
+
if(self->pixmap) {
XFreePixmap(self->display, self->pixmap);
self->pixmap = None;
@@ -101,14 +107,14 @@ int window_texture_on_resize(WindowTexture *self) {
self->pixmap = pixmap;
self->texture_id = texture_id;
+ self->image = image;
cleanup:
self->egl->glBindTexture(GL_TEXTURE_2D, 0);
- if(image)
- self->egl->eglDestroyImage(self->egl->egl_display, image);
-
if(result != 0) {
+ if(image)
+ self->egl->eglDestroyImage(self->egl->egl_display, image);
if(texture_id != 0)
self->egl->glDeleteTextures(1, &texture_id);
if(pixmap)