diff options
-rw-r--r-- | README.md | 7 | ||||
-rw-r--r-- | include/NvFBCLibrary.hpp | 8 | ||||
-rw-r--r-- | src/main.cpp | 51 |
3 files changed, 43 insertions, 23 deletions
@@ -8,7 +8,8 @@ where only the last few seconds are saved. ## Note Might not work when using a compositor such as picom when using the glx backend. Using the xrender backend with picom fixes this issue.\ Does not work when using gtk client side decorations (such as on Pop OS). Either disable those (if possible), install gtk-nocsd or record the whole monitor/screen if you have NvFBC.\ -NvFBC doesn't work with PRIME, so if you are using PRIME then you can't record the monitor/screen, you have to record a single window. +NvFBC doesn't work with PRIME, so if you are using PRIME then you can't record the monitor/screen, you have to record a single window.\ +If you are using a variable refresh rate monitor, then choose to record "screen-direct". This will allow variable refresh rate to work when recording fullscreen applications. Note that some applications such as mpv will not work in fullscreen mode. A fix is being developed for this. # Performance When recording a 4k game, fps drops from 30 to 7 when using OBS Studio, however when using this screen recorder @@ -42,4 +43,6 @@ libraries at compile-time. * Clean up the code! * Fix segfault in debug mode (happens because audio codec becomes NULL?) * Dynamically change bitrate/resolution to match desired fps. This would be helpful when streaming for example, where the encode output speed also depends on upload speed to the streaming service. -* Show cursor when recording a single window. Currently the cursor can only be recorded when recording the monitor/screen (nvfbc).
\ No newline at end of file +* Show cursor when recording. Currently the cursor is not visible when recording a window and it's disabled when recording screen-direct to allow direct nvfbc capture for fullscreen windows, which allows for better performance and variable refresh rate monitors to work. +* Implement opengl injection to capture texture. This fixes composition issues and (VRR) without having to use NvFBC direct capture. +* Always use direct capture with NvFBC once the capture issue in mpv fullscreen has been resolved.
\ No newline at end of file diff --git a/include/NvFBCLibrary.hpp b/include/NvFBCLibrary.hpp index 1d7402e..e26264d 100644 --- a/include/NvFBCLibrary.hpp +++ b/include/NvFBCLibrary.hpp @@ -58,7 +58,7 @@ public: } // If |display_to_capture| is "screen", then the entire x11 screen is captured (all displays) - bool create(const char *display_to_capture, uint32_t fps, /*out*/ uint32_t *display_width, /*out*/ uint32_t *display_height, uint32_t x = 0, uint32_t y = 0, uint32_t width = 0, uint32_t height = 0) { + bool create(const char *display_to_capture, uint32_t fps, /*out*/ uint32_t *display_width, /*out*/ uint32_t *display_height, uint32_t x = 0, uint32_t y = 0, uint32_t width = 0, uint32_t height = 0, bool direct_capture = false) { if(!library || !display_to_capture || !display_width || !display_height || fbc_handle_created) return false; @@ -122,7 +122,7 @@ public: memset(&create_capture_params, 0, sizeof(create_capture_params)); create_capture_params.dwVersion = NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER; create_capture_params.eCaptureType = NVFBC_CAPTURE_SHARED_CUDA; - create_capture_params.bWithCursor = NVFBC_TRUE; + create_capture_params.bWithCursor = direct_capture ? NVFBC_FALSE : NVFBC_TRUE; if(capture_region) { create_capture_params.captureBox = { x, y, width, height }; *display_width = width; @@ -131,8 +131,8 @@ public: create_capture_params.eTrackingType = tracking_type; create_capture_params.dwSamplingRateMs = 1000 / fps; // Cant use this, it breaks when a compositor is used - //create_capture_params.bAllowDirectCapture = NVFBC_TRUE; - //create_capture_params.bPushModel = NVFBC_TRUE; + create_capture_params.bAllowDirectCapture = direct_capture ? NVFBC_TRUE : NVFBC_FALSE; + create_capture_params.bPushModel = direct_capture ? 
NVFBC_TRUE : NVFBC_FALSE; if(tracking_type == NVFBC_TRACKING_OUTPUT) create_capture_params.dwOutputId = output_id; diff --git a/src/main.cpp b/src/main.cpp index 11bc57d..d492878 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -61,6 +61,9 @@ extern "C" { //#include <CL/cl.h> +// TODO: REMOVE!!! +static bool direct_capture_sound_hack = false; + static thread_local char av_error_buffer[AV_ERROR_MAX_STRING_SIZE]; static char* av_error_to_string(int err) { @@ -328,14 +331,18 @@ static void receive_frames(AVCodecContext *av_codec_context, AVStream *stream, av_packet.size = 0; int res = avcodec_receive_packet(av_codec_context, &av_packet); if (res == 0) { // we have a packet, send the packet to the muxer - //av_packet_rescale_ts(&av_packet, av_codec_context->time_base, - // stream->time_base); - if(av_packet.pts != AV_NOPTS_VALUE) - av_packet.pts = av_rescale_q(av_packet.pts, av_codec_context->time_base, stream->time_base); - if(av_packet.dts != AV_NOPTS_VALUE) - av_packet.dts = av_rescale_q(av_packet.dts, av_codec_context->time_base, stream->time_base); + if(direct_capture_sound_hack) { + av_packet_rescale_ts(&av_packet, av_codec_context->time_base, stream->time_base); + //av_packet.dts = AV_NOPTS_VALUE; + } else { + if(av_packet.pts != AV_NOPTS_VALUE) + av_packet.pts = av_rescale_q(av_packet.pts, av_codec_context->time_base, stream->time_base); + if(av_packet.dts != AV_NOPTS_VALUE) + av_packet.dts = av_rescale_q(av_packet.dts, av_codec_context->time_base, stream->time_base); + } + av_packet.stream_index = stream->index; - //av_packet.dts = AV_NOPTS_VALUE; + std::lock_guard<std::mutex> lock(write_output_mutex); if(replay_buffer_size_secs != -1) { double time_now = glfwGetTime(); @@ -629,7 +636,8 @@ static void close_video(AVStream *video_stream, AVFrame *frame) { static void usage() { fprintf(stderr, "usage: gpu-screen-recorder -w <window_id> -c <container_format> -f <fps> [-a <audio_input>] [-q <quality>] [-r <replay_buffer_size_sec>] [-o <output_file>]\n"); 
fprintf(stderr, "OPTIONS:\n"); - fprintf(stderr, " -w Window to record or a display or \"screen\". The display is the display name in xrandr and if \"screen\" is selected then all displays are recorded and they are recorded in h265 (aka hevc). Recording a display requires a gpu with NvFBC support.\n"); + fprintf(stderr, " -w Window to record or a display, \"screen\" or \"screen-direct\". The display is the display name in xrandr and if \"screen\" or \"screen-direct\" is selected then all displays are recorded and they are recorded in h265 (aka hevc)." + "\"screen-direct\" skips one texture copy for fullscreen applications so it may lead to better performance and it works with VRR monitors when recording fullscreen application but may break some applications, such as mpv in fullscreen mode. Recording a display requires a gpu with NvFBC support.\n"); fprintf(stderr, " -s The size (area) to record at in the format WxH, for example 1920x1080. Usually you want to set this to the size of the window. Optional, by default the size of the window, monitor or screen is used (which is passed to -w).\n"); fprintf(stderr, " -c Container format for output file, for example mp4, or flv.\n"); fprintf(stderr, " -f Framerate to record at. 
Clamped to [1,250].\n"); @@ -792,7 +800,13 @@ int main(int argc, char **argv) { if(!nv_fbc_library.load()) return 1; - if(!nv_fbc_library.create(window_str, fps, &window_width, &window_height, region_x, region_y, region_width, region_height)) + const char *capture_target = window_str; + const bool direct_capture = strcmp(window_str, "screen-direct") == 0; + direct_capture_sound_hack = direct_capture; + if(direct_capture) + capture_target = "screen"; + + if(!nv_fbc_library.create(capture_target, fps, &window_width, &window_height, region_x, region_y, region_width, region_height, direct_capture)) return 1; } else { errno = 0; @@ -923,7 +937,7 @@ int main(int argc, char **argv) { av_format_context->flags |= AVFMT_FLAG_GENPTS; const AVOutputFormat *output_format = av_format_context->oformat; - bool use_hevc = strcmp(window_str, "screen") == 0; + bool use_hevc = strcmp(window_str, "screen") == 0 || strcmp(window_str, "screen-direct") == 0; if(use_hevc && strcmp(container_format, "flv") == 0) { use_hevc = false; fprintf(stderr, "Warning: hevc is not compatible with flv, falling back to h264 instead.\n"); @@ -1066,6 +1080,7 @@ int main(int argc, char **argv) { bool frames_erased = false; SoundDevice sound_device; + uint8_t *audio_frame_buf; if(audio_input_arg.value) { if(sound_device_get_by_name(&sound_device, audio_input_arg.value, audio_codec_context->channels, audio_codec_context->frame_size) != 0) { fprintf(stderr, "failed to get 'pulse' sound device\n"); @@ -1073,7 +1088,7 @@ int main(int argc, char **argv) { } int audio_buffer_size = av_samples_get_buffer_size(NULL, audio_codec_context->channels, audio_codec_context->frame_size, audio_codec_context->sample_fmt, 1); - uint8_t *audio_frame_buf = (uint8_t *)av_malloc(audio_buffer_size); + audio_frame_buf = (uint8_t *)av_malloc(audio_buffer_size); avcodec_fill_audio_frame(audio_frame, audio_codec_context->channels, audio_codec_context->sample_fmt, (const uint8_t*)audio_frame_buf, audio_buffer_size, 1); audio_thread = 
std::thread([record_start_time, replay_buffer_size_secs, &frame_data_queue, &frames_erased, audio_codec_context, &frame_count](AVFormatContext *av_format_context, AVStream *audio_stream, uint8_t *audio_frame_buf, SoundDevice *sound_device, AVFrame *audio_frame, std::mutex *write_output_mutex) mutable { @@ -1097,18 +1112,20 @@ int main(int argc, char **argv) { void *sound_buffer; int sound_buffer_size = sound_device_read_next_chunk(sound_device, &sound_buffer); if(sound_buffer_size >= 0) { - // TODO: Instead of converting audio, get float audio from alsa. Or does alsa do conversion internally to get this format? - swr_convert(swr, &audio_frame_buf, audio_frame->nb_samples, (const uint8_t**)&sound_buffer, sound_buffer_size); - audio_frame->extended_data = &audio_frame_buf; - const int64_t pts = frame_count; - if(pts == prev_frame_count) { + if(!direct_capture_sound_hack && pts == prev_frame_count) { prev_frame_count = pts; continue; } prev_frame_count = pts; - audio_frame->pts = pts; + // TODO: Instead of converting audio, get float audio from alsa. Or does alsa do conversion internally to get this format? + swr_convert(swr, &audio_frame_buf, audio_frame->nb_samples, (const uint8_t**)&sound_buffer, sound_buffer_size); + audio_frame->extended_data = &audio_frame_buf; + + if(!direct_capture_sound_hack) + audio_frame->pts = pts; + int ret = avcodec_send_frame(audio_codec_context, audio_frame); if(ret < 0){ printf("Failed to encode!\n"); |