From 112640282d236587dff0fd09e5137bfbc4cfec06 Mon Sep 17 00:00:00 2001 From: dec05eba Date: Sat, 26 Oct 2024 20:57:57 +0200 Subject: Add option to change output resolution (-s) --- src/capture/kms.c | 54 ++++++++++++++++++++++++++++++++++-------------- src/capture/nvfbc.c | 24 +++++++++++++++++++-- src/capture/portal.c | 36 +++++++++++++++++++++++--------- src/capture/xcomposite.c | 38 +++++++++++++++++++++------------- src/main.cpp | 51 ++++++++++++++++++++++++++------------------- src/utils.c | 21 ++++++++++++++++++- 6 files changed, 161 insertions(+), 63 deletions(-) (limited to 'src') diff --git a/src/capture/kms.c b/src/capture/kms.c index 8b16ec9..fdb1ced 100644 --- a/src/capture/kms.c +++ b/src/capture/kms.c @@ -214,8 +214,15 @@ static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_c /* Disable vsync */ self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0); - video_codec_context->width = FFALIGN(self->capture_size.x, 2); - video_codec_context->height = FFALIGN(self->capture_size.y, 2); + if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) { + self->params.output_resolution = self->capture_size; + video_codec_context->width = FFALIGN(self->capture_size.x, 2); + video_codec_context->height = FFALIGN(self->capture_size.y, 2); + } else { + self->params.output_resolution = scale_keep_aspect_ratio(self->capture_size, self->params.output_resolution); + video_codec_context->width = FFALIGN(self->params.output_resolution.x, 2); + video_codec_context->height = FFALIGN(self->params.output_resolution.y, 2); + } frame->width = video_codec_context->width; frame->height = video_codec_context->height; @@ -429,7 +436,12 @@ static gsr_kms_response_item* find_cursor_drm_if_on_monitor(gsr_capture_kms *sel return cursor_drm_fd; } -static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, const gsr_kms_response_item *cursor_drm_fd, vec2i target_pos, float texture_rotation) { +static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, const gsr_kms_response_item *cursor_drm_fd, vec2i target_pos, float texture_rotation, vec2i output_size) { + const vec2d scale = { + self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x, + self->capture_size.y == 0 ? 
0 : (double)output_size.y / (double)self->capture_size.y + }; + const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA; const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height}; @@ -458,6 +470,9 @@ static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color break; } + cursor_pos.x *= scale.x; + cursor_pos.y *= scale.y; + cursor_pos.x += target_pos.x; cursor_pos.y += target_pos.y; @@ -487,32 +502,37 @@ static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color self->params.egl->eglDestroyImage(self->params.egl->egl_display, cursor_image); self->params.egl->glEnable(GL_SCISSOR_TEST); - self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y); + self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y); gsr_color_conversion_draw(color_conversion, self->cursor_texture_id, - cursor_pos, cursor_size, + cursor_pos, (vec2i){cursor_size.x * scale.x, cursor_size.y * scale.y}, (vec2i){0, 0}, cursor_size, texture_rotation, cursor_texture_id_is_external); self->params.egl->glDisable(GL_SCISSOR_TEST); } -static void render_x11_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i capture_pos, vec2i target_pos) { +static void render_x11_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i capture_pos, vec2i target_pos, vec2i output_size) { if(!self->x11_cursor.visible) return; + const vec2d scale = { + self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x, + self->capture_size.y == 0 ? 0 : (double)output_size.y / (double)self->capture_size.y + }; + gsr_cursor_tick(&self->x11_cursor, DefaultRootWindow(self->params.egl->x11.dpy)); const vec2i cursor_pos = { - target_pos.x + self->x11_cursor.position.x - self->x11_cursor.hotspot.x - capture_pos.x, - target_pos.y + self->x11_cursor.position.y - self->x11_cursor.hotspot.y - capture_pos.y + target_pos.x + (self->x11_cursor.position.x - self->x11_cursor.hotspot.x) * scale.x - capture_pos.x, + target_pos.y + (self->x11_cursor.position.y - self->x11_cursor.hotspot.y) * scale.y - capture_pos.y }; self->params.egl->glEnable(GL_SCISSOR_TEST); - self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y); + self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y); gsr_color_conversion_draw(color_conversion, self->x11_cursor.texture_id, - cursor_pos, self->x11_cursor.size, + cursor_pos, (vec2i){self->x11_cursor.size.x * scale.x, self->x11_cursor.size.y * scale.y}, (vec2i){0, 0}, self->x11_cursor.size, 0.0f, false); @@ -562,8 +582,12 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c " If you are experience performance problems in the video then record a single window on X11 or use portal capture option instead\n"); } + const bool is_scaled = self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0; + vec2i output_size = is_scaled ? 
self->params.output_resolution : self->capture_size; + output_size = scale_keep_aspect_ratio(self->capture_size, output_size); + const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation); - const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) }; + const vec2i target_pos = { max_int(0, frame->width / 2 - output_size.x / 2), max_int(0, frame->height / 2 - output_size.y / 2) }; self->capture_size = rotate_capture_size_if_rotated(self, (vec2i){ drm_fd->src_w, drm_fd->src_h }); gsr_capture_kms_update_capture_size_change(self, color_conversion, target_pos, drm_fd); @@ -586,7 +610,7 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c pitches[i] = drm_fd->dma_buf[i].pitch; modifiers[i] = drm_fd->modifier; } - if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){capture_pos.x, capture_pos.y}, self->capture_size, target_pos, self->capture_size, drm_fd->pixel_format, (vec2i){drm_fd->width, drm_fd->height}, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs)) { + if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){capture_pos.x, capture_pos.y}, self->capture_size, target_pos, output_size, drm_fd->pixel_format, (vec2i){drm_fd->width, drm_fd->height}, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs)) { fprintf(stderr, "gsr error: gsr_capture_kms_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n"); self->fast_path_failed = true; } @@ -602,7 +626,7 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c } gsr_color_conversion_draw(color_conversion, self->external_texture_fallback ? self->external_input_texture_id : self->input_texture_id, - target_pos, self->capture_size, + target_pos, output_size, capture_pos, self->capture_size, texture_rotation, self->external_texture_fallback); } @@ -613,9 +637,9 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c // the cursor plane is not available when the cursor is on the monitor controlled by the nvidia device. 
if(self->is_x11) { const vec2i cursor_monitor_offset = self->capture_pos; - render_x11_cursor(self, color_conversion, cursor_monitor_offset, target_pos); + render_x11_cursor(self, color_conversion, cursor_monitor_offset, target_pos, output_size); } else if(cursor_drm_fd) { - render_drm_cursor(self, color_conversion, cursor_drm_fd, target_pos, texture_rotation); + render_drm_cursor(self, color_conversion, cursor_drm_fd, target_pos, texture_rotation, output_size); } } diff --git a/src/capture/nvfbc.c b/src/capture/nvfbc.c index ee77a20..96f3894 100644 --- a/src/capture/nvfbc.c +++ b/src/capture/nvfbc.c @@ -240,6 +240,11 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *self) { } } + if(!self->capture_region) { + self->width = self->tracking_width; + self->height = self->tracking_height; + } + return 0; error_cleanup: @@ -351,6 +356,14 @@ static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec video_codec_context->height = FFALIGN(self->tracking_height, 2); } + if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) { + self->params.output_resolution = (vec2i){video_codec_context->width, video_codec_context->height}; + } else { + self->params.output_resolution = scale_keep_aspect_ratio((vec2i){video_codec_context->width, video_codec_context->height}, self->params.output_resolution); + video_codec_context->width = FFALIGN(self->params.output_resolution.x, 2); + video_codec_context->height = FFALIGN(self->params.output_resolution.y, 2); + } + frame->width = video_codec_context->width; frame->height = video_codec_context->height; @@ -390,6 +403,13 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color } } + const vec2i frame_size = (vec2i){self->width, self->height}; + const bool is_scaled = self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0; + vec2i output_size = is_scaled ? 
self->params.output_resolution : frame_size; + output_size = scale_keep_aspect_ratio(frame_size, output_size); + + const vec2i target_pos = { max_int(0, frame->width / 2 - output_size.x / 2), max_int(0, frame->height / 2 - output_size.y / 2) }; + NVFBC_FRAME_GRAB_INFO frame_info; memset(&frame_info, 0, sizeof(frame_info)); @@ -412,8 +432,8 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color self->params.egl->glFinish(); gsr_color_conversion_draw(color_conversion, self->setup_params.dwTextures[grab_params.dwTextureIndex], - (vec2i){0, 0}, (vec2i){frame->width, frame->height}, - (vec2i){0, 0}, (vec2i){frame->width, frame->height}, + target_pos, (vec2i){output_size.x, output_size.y}, + (vec2i){0, 0}, frame_size, 0.0f, false); self->params.egl->glFlush(); diff --git a/src/capture/portal.c b/src/capture/portal.c index 9ab7e8b..b04d5e7 100644 --- a/src/capture/portal.c +++ b/src/capture/portal.c @@ -300,8 +300,15 @@ static int gsr_capture_portal_start(gsr_capture *cap, AVCodecContext *video_code /* Disable vsync */ self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0); - video_codec_context->width = FFALIGN(self->capture_size.x, 2); - video_codec_context->height = FFALIGN(self->capture_size.y, 2); + if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) { + self->params.output_resolution = self->capture_size; + video_codec_context->width = FFALIGN(self->capture_size.x, 2); + video_codec_context->height = FFALIGN(self->capture_size.y, 2); + } else { + self->params.output_resolution = scale_keep_aspect_ratio(self->capture_size, self->params.output_resolution); + video_codec_context->width = FFALIGN(self->params.output_resolution.x, 2); + video_codec_context->height = FFALIGN(self->params.output_resolution.y, 2); + } frame->width = video_codec_context->width; frame->height = video_codec_context->height; @@ -334,8 +341,12 @@ static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_colo } else { return 0; } + + const bool is_scaled = self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0; + vec2i output_size = is_scaled ? self->params.output_resolution : self->capture_size; + output_size = scale_keep_aspect_ratio(self->capture_size, output_size); - const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) }; + const vec2i target_pos = { max_int(0, frame->width / 2 - output_size.x / 2), max_int(0, frame->height / 2 - output_size.y / 2) }; self->params.egl->glFlush(); self->params.egl->glFinish(); @@ -354,7 +365,7 @@ static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_colo pitches[i] = self->dmabuf_data[i].stride; modifiers[i] = pipewire_modifiers; } - if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){region.x, region.y}, self->capture_size, target_pos, self->capture_size, pipewire_fourcc, self->capture_size, fds, offsets, pitches, modifiers, self->num_dmabuf_data)) { + if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){region.x, region.y}, self->capture_size, target_pos, output_size, pipewire_fourcc, self->capture_size, fds, offsets, pitches, modifiers, self->num_dmabuf_data)) { fprintf(stderr, "gsr error: gsr_capture_portal_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. 
Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n"); self->fast_path_failed = true; } @@ -364,21 +375,26 @@ static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_colo if(self->fast_path_failed) { gsr_color_conversion_draw(color_conversion, using_external_image ? self->texture_map.external_texture_id : self->texture_map.texture_id, - target_pos, self->capture_size, + target_pos, output_size, (vec2i){region.x, region.y}, self->capture_size, 0.0f, using_external_image); } - if(self->params.record_cursor) { + if(self->params.record_cursor && self->texture_map.cursor_texture_id > 0 && cursor_region.width > 0) { + const vec2d scale = { + self->capture_size.x == 0 ? 0 : (double)output_size.x / (double)self->capture_size.x, + self->capture_size.y == 0 ? 0 : (double)output_size.y / (double)self->capture_size.y + }; + const vec2i cursor_pos = { - target_pos.x + cursor_region.x, - target_pos.y + cursor_region.y + target_pos.x + (cursor_region.x * scale.x), + target_pos.y + (cursor_region.y * scale.y) }; self->params.egl->glEnable(GL_SCISSOR_TEST); - self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y); + self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y); gsr_color_conversion_draw(color_conversion, self->texture_map.cursor_texture_id, - (vec2i){cursor_pos.x, cursor_pos.y}, (vec2i){cursor_region.width, cursor_region.height}, + (vec2i){cursor_pos.x, cursor_pos.y}, (vec2i){cursor_region.width * scale.x, cursor_region.height * scale.y}, (vec2i){0, 0}, (vec2i){cursor_region.width, cursor_region.height}, 0.0f, false); self->params.egl->glDisable(GL_SCISSOR_TEST); diff --git a/src/capture/xcomposite.c b/src/capture/xcomposite.c index 9e208d6..6a3be16 100644 --- a/src/capture/xcomposite.c +++ b/src/capture/xcomposite.c @@ -113,13 +113,14 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_ self->params.egl->glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &self->texture_size.y); self->params.egl->glBindTexture(GL_TEXTURE_2D, 0); - vec2i video_size = self->texture_size; - - if(self->params.region_size.x > 0 && self->params.region_size.y > 0) - video_size = self->params.region_size; - - video_codec_context->width = FFALIGN(video_size.x, 2); - video_codec_context->height = FFALIGN(video_size.y, 2); + if(self->params.output_resolution.x == 0 && self->params.output_resolution.y == 0) { + self->params.output_resolution = self->texture_size; + video_codec_context->width = FFALIGN(self->texture_size.x, 2); + video_codec_context->height = FFALIGN(self->texture_size.y, 2); + } else { + video_codec_context->width = FFALIGN(self->params.output_resolution.x, 2); + video_codec_context->height = FFALIGN(self->params.output_resolution.y, 2); + } frame->width = video_codec_context->width; frame->height = video_codec_context->height; @@ -257,14 +258,18 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_ gsr_color_conversion_clear(color_conversion); } - const vec2i target_pos = { max_int(0, frame->width / 2 - self->texture_size.x / 2), max_int(0, frame->height / 2 - self->texture_size.y / 2) }; + const bool is_scaled = self->params.output_resolution.x > 0 && self->params.output_resolution.y > 0; + vec2i output_size = is_scaled ? 
self->params.output_resolution : self->texture_size; + output_size = scale_keep_aspect_ratio(self->texture_size, output_size); + + const vec2i target_pos = { max_int(0, frame->width / 2 - output_size.x / 2), max_int(0, frame->height / 2 - output_size.y / 2) }; self->params.egl->glFlush(); self->params.egl->glFinish(); /* Fast opengl free path */ if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) { - if(!vaapi_copy_egl_image_to_video_surface(self->params.egl, self->window_texture.image, (vec2i){0, 0}, self->texture_size, target_pos, self->texture_size, self->video_codec_context, frame)) { + if(!vaapi_copy_egl_image_to_video_surface(self->params.egl, self->window_texture.image, (vec2i){0, 0}, self->texture_size, target_pos, output_size, self->video_codec_context, frame)) { fprintf(stderr, "gsr error: gsr_capture_xcomposite_capture: vaapi_copy_egl_image_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n"); self->fast_path_failed = true; } @@ -274,24 +279,29 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_ if(self->fast_path_failed) { gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture), - target_pos, self->texture_size, + target_pos, output_size, (vec2i){0, 0}, self->texture_size, 0.0f, false); } if(self->params.record_cursor && self->cursor.visible) { + const vec2d scale = { + self->texture_size.x == 0 ? 0 : (double)output_size.x / (double)self->texture_size.x, + self->texture_size.y == 0 ? 0 : (double)output_size.y / (double)self->texture_size.y + }; + gsr_cursor_tick(&self->cursor, self->window); const vec2i cursor_pos = { - target_pos.x + self->cursor.position.x - self->cursor.hotspot.x, - target_pos.y + self->cursor.position.y - self->cursor.hotspot.y + target_pos.x + (self->cursor.position.x - self->cursor.hotspot.x) * scale.x, + target_pos.y + (self->cursor.position.y - self->cursor.hotspot.y) * scale.y }; self->params.egl->glEnable(GL_SCISSOR_TEST); - self->params.egl->glScissor(target_pos.x, target_pos.y, self->texture_size.x, self->texture_size.y); + self->params.egl->glScissor(target_pos.x, target_pos.y, output_size.x, output_size.y); gsr_color_conversion_draw(color_conversion, self->cursor.texture_id, - cursor_pos, self->cursor.size, + cursor_pos, (vec2i){self->cursor.size.x * scale.x, self->cursor.size.y * scale.y}, (vec2i){0, 0}, self->cursor.size, 0.0f, false); diff --git a/src/main.cpp b/src/main.cpp index a47e4fc..f4c1086 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -1082,7 +1082,9 @@ static void usage_full() { fprintf(stderr, " If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n"); fprintf(stderr, " Only containers that support h264, hevc, av1, vp8 or vp9 are supported, which means that only mp4, mkv, flv, webm (and some others) are supported.\n"); fprintf(stderr, "\n"); - fprintf(stderr, " -s The size (area) to record at in the format WxH, for example 1920x1080. This option is only supported (and required) when -w is \"focused\".\n"); + fprintf(stderr, " -s The output resolution of the video in the format WxH, for example 1920x1080. If this is 0x0 then the original resolution is used. Optional, except when -w is \"focused\".\n"); + fprintf(stderr, " Note: the captured content is scaled to this size. 
The output resolution might not be exactly as specified by this option. The original aspect ratio is respected so the resolution will match that.\n"); + fprintf(stderr, " The video encoder might also need to add padding, which will result in black bars on the sides of the video. This is especially an issue on AMD.\n"); fprintf(stderr, "\n"); fprintf(stderr, " -f Frame rate to record at. Recording will only capture frames at this target frame rate.\n"); fprintf(stderr, " For constant frame rate mode this option is the frame rate every frame will be captured at and if the capture frame rate is below this target frame rate then the frames will be duplicated.\n"); @@ -1198,6 +1200,7 @@ static void usage_full() { fprintf(stderr, "\n"); fprintf(stderr, "EXAMPLES:\n"); fprintf(stderr, " %s -w screen -f 60 -a default_output -o \"$HOME/Videos/video.mp4\"\n", program_name); + fprintf(stderr, " %s -w screen -f 60 -a default_output -a default_input -o \"$HOME/Videos/video.mp4\"\n", program_name); fprintf(stderr, " %s -w screen -f 60 -a \"default_output|default_input\" -o \"$HOME/Videos/video.mp4\"\n", program_name); fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -r 60 -o \"$HOME/Videos\"\n", program_name); fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -sc script.sh -r 60 -o \"$HOME/Videos\"\n", program_name); @@ -2075,11 +2078,10 @@ static void list_audio_devices_command() { _exit(0); } -static gsr_capture* create_capture_impl(std::string &window_str, const char *screen_region, bool wayland, gsr_egl *egl, int fps, VideoCodec video_codec, gsr_color_range color_range, +static gsr_capture* create_capture_impl(std::string &window_str, vec2i output_resolution, bool wayland, gsr_egl *egl, int fps, VideoCodec video_codec, gsr_color_range color_range, bool record_cursor, bool use_software_video_encoder, bool restore_portal_session, const char *portal_session_token_filepath, gsr_color_depth color_depth) { - vec2i region_size = { 0, 0 }; Window src_window_id = None; bool follow_focused = false; @@ -2090,18 +2092,8 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr _exit(2); } - if(!screen_region) { - fprintf(stderr, "Error: option -s is required when using -w focused\n"); - usage(); - } - - if(sscanf(screen_region, "%dx%d", ®ion_size.x, ®ion_size.y) != 2) { - fprintf(stderr, "Error: invalid value for option -s '%s', expected a value in format WxH\n", screen_region); - usage(); - } - - if(region_size.x <= 0 || region_size.y <= 0) { - fprintf(stderr, "Error: invalud value for option -s '%s', expected width and height to be greater than 0\n", screen_region); + if(output_resolution.x <= 0 || output_resolution.y <= 0) { + fprintf(stderr, "Error: invalid value for option -s '%dx%d' when using -w focused option. 
expected width and height to be greater than 0\n", output_resolution.x, output_resolution.y); usage(); } @@ -2121,6 +2113,7 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr portal_params.record_cursor = record_cursor; portal_params.restore_portal_session = restore_portal_session; portal_params.portal_session_token_filepath = portal_session_token_filepath; + portal_params.output_resolution = output_resolution; capture = gsr_capture_portal_create(&portal_params); if(!capture) _exit(1); @@ -2195,6 +2188,7 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr nvfbc_params.color_range = color_range; nvfbc_params.record_cursor = record_cursor; nvfbc_params.use_software_video_encoder = use_software_video_encoder; + nvfbc_params.output_resolution = output_resolution; capture = gsr_capture_nvfbc_create(&nvfbc_params); if(!capture) _exit(1); @@ -2207,6 +2201,7 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr kms_params.record_cursor = record_cursor; kms_params.hdr = video_codec_is_hdr(video_codec); kms_params.fps = fps; + kms_params.output_resolution = output_resolution; capture = gsr_capture_kms_create(&kms_params); if(!capture) _exit(1); @@ -2230,10 +2225,10 @@ static gsr_capture* create_capture_impl(std::string &window_str, const char *scr xcomposite_params.egl = egl; xcomposite_params.window = src_window_id; xcomposite_params.follow_focused = follow_focused; - xcomposite_params.region_size = region_size; xcomposite_params.color_range = color_range; xcomposite_params.record_cursor = record_cursor; xcomposite_params.color_depth = color_depth; + xcomposite_params.output_resolution = output_resolution; capture = gsr_capture_xcomposite_create(&xcomposite_params); if(!capture) _exit(1); @@ -2607,6 +2602,8 @@ static const AVCodec* select_video_codec_with_fallback(VideoCodec *video_codec, } int main(int argc, char **argv) { + setlocale(LC_ALL, "C"); // Sigh... 
stupid C + signal(SIGINT, stop_handler); signal(SIGUSR1, save_replay_handler); signal(SIGUSR2, toggle_pause_handler); @@ -3144,13 +3141,25 @@ int main(int argc, char **argv) { usage(); } - const char *screen_region = args["-s"].value(); - - if(screen_region && strcmp(window_str.c_str(), "focused") != 0) { - fprintf(stderr, "Error: option -s is only available when using -w focused\n"); + const char *output_resolution_str = args["-s"].value(); + if(!output_resolution_str && strcmp(window_str.c_str(), "focused") == 0) { + fprintf(stderr, "Error: option -s is required when using -w focused option\n"); usage(); } + vec2i output_resolution = {0, 0}; + if(output_resolution_str) { + if(sscanf(output_resolution_str, "%dx%d", &output_resolution.x, &output_resolution.y) != 2) { + fprintf(stderr, "Error: invalid value for option -s '%s', expected a value in format WxH\n", output_resolution_str); + usage(); + } + + if(output_resolution.x < 0 || output_resolution.y < 0) { + fprintf(stderr, "Error: invalud value for option -s '%s', expected width and height to be greater or equal to 0\n", output_resolution_str); + usage(); + } + } + bool is_livestream = false; const char *filename = args["-o"].value(); if(filename) { @@ -3235,7 +3244,7 @@ int main(int argc, char **argv) { const AVCodec *video_codec_f = select_video_codec_with_fallback(&video_codec, video_codec_to_use, file_extension.c_str(), use_software_video_encoder, &egl, &low_power); const gsr_color_depth color_depth = video_codec_to_bit_depth(video_codec); - gsr_capture *capture = create_capture_impl(window_str, screen_region, wayland, &egl, fps, video_codec, color_range, record_cursor, use_software_video_encoder, restore_portal_session, portal_session_token_filepath, color_depth); + gsr_capture *capture = create_capture_impl(window_str, output_resolution, wayland, &egl, fps, video_codec, color_range, record_cursor, use_software_video_encoder, restore_portal_session, portal_session_token_filepath, color_depth); // (Some?) livestreaming services require at least one audio track to work. // If not audio is provided then create one silent audio track. diff --git a/src/utils.c b/src/utils.c index c28208d..bad16d9 100644 --- a/src/utils.c +++ b/src/utils.c @@ -738,6 +738,8 @@ bool vaapi_copy_drm_planes_to_video_surface(AVCodecContext *video_codec_context, .height = dest_size.y }; + const bool scaled = dest_size.x != source_size.x || dest_size.y != source_size.y; + // Copying a surface to another surface will automatically perform the color conversion. Thanks vaapi! VAProcPipelineParameterBuffer params = {0}; params.surface = input_surface_id; @@ -745,7 +747,7 @@ bool vaapi_copy_drm_planes_to_video_surface(AVCodecContext *video_codec_context, params.surface_region = &source_region; params.output_region = &output_region; params.output_background_color = 0; - params.filter_flags = VA_FRAME_PICTURE; + params.filter_flags = scaled ? 
(VA_FILTER_SCALING_HQ | VA_FILTER_INTERPOLATION_BILINEAR) : 0; params.pipeline_flags = VA_PROC_PIPELINE_FAST; params.input_color_properties.colour_primaries = 1; @@ -877,3 +879,20 @@ bool vaapi_copy_egl_image_to_video_surface(gsr_egl *egl, EGLImage image, vec2i s return success; } + +vec2i scale_keep_aspect_ratio(vec2i from, vec2i to) { + if(from.x == 0 || from.y == 0) + return (vec2i){0, 0}; + + const double height_to_width_ratio = (double)from.y / (double)from.x; + from.x = to.x; + from.y = from.x * height_to_width_ratio; + + if(from.y > to.y) { + const double width_height_ratio = (double)from.x / (double)from.y; + from.y = to.y; + from.x = from.y * width_height_ratio; + } + + return from; +} -- cgit v1.2.3
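
The aspect-ratio handling above is centralized in the new scale_keep_aspect_ratio() helper added to src/utils.c: the requested -s WxH size is treated as a bounding box and the capture size is fitted inside it. Below is a minimal standalone sketch of how it behaves; vec2i is redeclared locally only so the example compiles on its own (the project provides it in its own vector headers), and the sizes are just illustrative.

```c
#include <stdio.h>

/* Local stand-in for the project's vec2i type, only for this example. */
typedef struct { int x, y; } vec2i;

/* Same logic as the scale_keep_aspect_ratio() added to src/utils.c in this patch:
   fit `from` (the capture size) inside `to` (the requested -s size),
   keeping the original aspect ratio. */
static vec2i scale_keep_aspect_ratio(vec2i from, vec2i to) {
    if(from.x == 0 || from.y == 0)
        return (vec2i){0, 0};

    const double height_to_width_ratio = (double)from.y / (double)from.x;
    from.x = to.x;
    from.y = from.x * height_to_width_ratio;

    if(from.y > to.y) {
        const double width_height_ratio = (double)from.x / (double)from.y;
        from.y = to.y;
        from.x = from.y * width_height_ratio;
    }

    return from;
}

int main(void) {
    /* 16:9 capture into a 16:9 box: fills the whole box -> 1920x1080. */
    vec2i a = scale_keep_aspect_ratio((vec2i){2560, 1440}, (vec2i){1920, 1080});
    /* 16:10 capture into a 16:9 box: height is the limiting dimension -> 1728x1080. */
    vec2i b = scale_keep_aspect_ratio((vec2i){1920, 1200}, (vec2i){1920, 1080});
    printf("%dx%d\n%dx%d\n", a.x, a.y, b.x, b.y);
    return 0;
}
```

The same helper is used both when choosing the encoder resolution at start and again at capture time in kms.c, nvfbc.c, portal.c and xcomposite.c, so the drawn size stays aspect-correct even if the capture size changes while recording.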
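When the output is scaled, the cursor can no longer be drawn at its native position and size, so each backend derives a per-axis scale factor from the capture size and the fitted output size and applies it to the cursor rectangle (render_x11_cursor and render_drm_cursor in kms.c, and the equivalent code in portal.c and xcomposite.c). The sketch below mirrors that math with a hypothetical helper and made-up sizes; scale_cursor and its parameters are illustrative, not part of the patch.

```c
#include <stdio.h>

/* Stand-ins for the project's vector types, only for this example. */
typedef struct { int x, y; } vec2i;
typedef struct { double x, y; } vec2d;

/* Hypothetical helper mirroring the cursor-scaling math added in this patch:
   scale the cursor position (relative to the capture origin) and the cursor
   size by output_size / capture_size, then offset by where the scaled image
   is placed in the frame (target_pos). */
static void scale_cursor(vec2i capture_size, vec2i output_size, vec2i target_pos,
                         vec2i cursor_pos_in, vec2i cursor_size_in,
                         vec2i *cursor_pos_out, vec2i *cursor_size_out) {
    const vec2d scale = {
        capture_size.x == 0 ? 0 : (double)output_size.x / (double)capture_size.x,
        capture_size.y == 0 ? 0 : (double)output_size.y / (double)capture_size.y
    };

    cursor_pos_out->x = target_pos.x + cursor_pos_in.x * scale.x;
    cursor_pos_out->y = target_pos.y + cursor_pos_in.y * scale.y;
    cursor_size_out->x = cursor_size_in.x * scale.x;
    cursor_size_out->y = cursor_size_in.y * scale.y;
}

int main(void) {
    vec2i pos, size;
    /* 2560x1440 capture fitted to 1920x1080, image drawn at the frame origin,
       cursor at (1000, 500) with a 24x24 cursor image. */
    scale_cursor((vec2i){2560, 1440}, (vec2i){1920, 1080}, (vec2i){0, 0},
                 (vec2i){1000, 500}, (vec2i){24, 24}, &pos, &size);
    printf("cursor at %d,%d size %dx%d\n", pos.x, pos.y, size.x, size.y);
    return 0;
}
```

The backends also shrink their glScissor rectangle to output_size instead of the capture size, so the scaled cursor cannot be drawn outside the scaled image area.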