-rw-r--r--  README.md                                |  156
-rw-r--r--  TODO                                     |   84
-rw-r--r--  external/nvEncodeAPI.h                   | 4285
-rw-r--r--  extra/gpu-screen-recorder.env            |    6
-rw-r--r--  extra/gpu-screen-recorder.service        |    7
-rwxr-xr-x  extra/install_preserve_video_memory.sh   |    8
-rwxr-xr-x  extra/meson_post_install.sh              |   16
-rw-r--r--  include/capture/capture.h                |   20
-rw-r--r--  include/capture/kms.h                    |    2
-rw-r--r--  include/capture/nvfbc.h                  |    3
-rw-r--r--  include/capture/portal.h                 |   18
-rw-r--r--  include/capture/xcomposite.h             |    2
-rw-r--r--  include/codec_query/codec_query.h        |   23
-rw-r--r--  include/codec_query/nvenc.h              |    8
-rw-r--r--  include/codec_query/vaapi.h              |    8
-rw-r--r--  include/codec_query/vulkan.h             |    8
-rw-r--r--  include/color_conversion.h               |    5
-rw-r--r--  include/cursor.h                         |    4
-rw-r--r--  include/damage.h                         |   51
-rw-r--r--  include/dbus.h                           |   45
-rw-r--r--  include/defs.h                           |    3
-rw-r--r--  include/egl.h                            |   55
-rw-r--r--  include/encoder/video/cuda.h             |    2
-rw-r--r--  include/encoder/video/software.h         |    2
-rw-r--r--  include/encoder/video/vaapi.h            |    2
-rw-r--r--  include/encoder/video/video.h            |    4
-rw-r--r--  include/encoder/video/vulkan.h           |   15
-rw-r--r--  include/pipewire.h                       |  112
-rw-r--r--  include/sound.hpp                        |    8
-rw-r--r--  include/utils.h                          |   18
-rw-r--r--  include/window_texture.h                 |    1
-rwxr-xr-x  install.sh                               |    3
-rw-r--r--  kms/client/kms_client.c                  |   50
-rw-r--r--  kms/kms_shared.h                         |   25
-rw-r--r--  kms/server/kms_server.c                  |  148
-rw-r--r--  meson.build                              |   34
-rw-r--r--  meson_options.txt                        |    6
-rw-r--r--  project.conf                             |   15
-rwxr-xr-x  scripts/interactive.sh                   |    4
-rwxr-xr-x  scripts/record-application-name.sh       |    2
-rwxr-xr-x  scripts/record-save-application-name.sh  |    2
-rwxr-xr-x  scripts/replay-application-name.sh       |    2
-rwxr-xr-x  scripts/replay.sh                        |    6
-rwxr-xr-x  scripts/start-recording.sh               |    5
-rwxr-xr-x  scripts/start-replay.sh                  |    3
-rwxr-xr-x  scripts/start-stop-recording.sh          |   10
-rwxr-xr-x  scripts/toggle-recording-selected.sh     |    2
-rwxr-xr-x  scripts/twitch-stream-local-copy.sh      |    2
-rwxr-xr-x  scripts/twitch-stream.sh                 |    2
-rwxr-xr-x  scripts/youtube-hls-stream.sh            |    2
-rw-r--r--  src/capture/capture.c                    |   22
-rw-r--r--  src/capture/kms.c                        |  724
-rw-r--r--  src/capture/nvfbc.c                      |  226
-rw-r--r--  src/capture/portal.c                     |  458
-rw-r--r--  src/capture/xcomposite.c                 |  242
-rw-r--r--  src/codec_query/nvenc.c                  |  235
-rw-r--r--  src/codec_query/vaapi.c                  |  203
-rw-r--r--  src/codec_query/vulkan.c                 |  156
-rw-r--r--  src/cursor.c                             |   62
-rw-r--r--  src/damage.c                             |  324
-rw-r--r--  src/dbus.c                               |  876
-rw-r--r--  src/egl.c                                |  144
-rw-r--r--  src/encoder/video/cuda.c                 |   67
-rw-r--r--  src/encoder/video/software.c             |   31
-rw-r--r--  src/encoder/video/vaapi.c                |  108
-rw-r--r--  src/encoder/video/video.c                |    4
-rw-r--r--  src/encoder/video/vulkan.c               |  313
-rw-r--r--  src/main.cpp                             | 2359
-rw-r--r--  src/overclock.c                          |    4
-rw-r--r--  src/pipewire.c                           |  788
-rw-r--r--  src/sound.cpp                            |   66
-rw-r--r--  src/utils.c                              |  452
-rw-r--r--  src/window_texture.c                     |   12
73 files changed, 11359 insertions, 1821 deletions
diff --git a/README.md b/README.md
index 097a1c2..4b9f2e1 100644
--- a/README.md
+++ b/README.md
@@ -9,29 +9,32 @@ where only the last few minutes are saved.
Supported video codecs:
* H264 (default)
-* HEVC
-* AV1 (not currently supported on NVIDIA if you use GPU Screen Recorder flatpak)
+* HEVC (Optionally with HDR)
+* AV1 (Optionally with HDR. Not currently supported on NVIDIA if you use GPU Screen Recorder flatpak)
+* VP8
+* VP9
Supported audio codecs:
* Opus (default)
* AAC
## Note
-This software works with x11 and wayland, but when using Wayland then only monitors can be recorded.
+This software works on X11 and Wayland on AMD, Intel and NVIDIA.
### TEMPORARY ISSUES
1) screen-direct capture has been temporarily disabled as it causes issues with stuttering. This might be an nvfbc bug.
2) Videos are in variable framerate format. Use MPV to play such videos, otherwise you might experience stuttering in the video if you are using a buggy video player. You can try saving the video into a .mkv file instead as some software may have better support for .mkv files (such as kdenlive). You can use the "-fm cfr" option to use constant framerate mode.
-3) HDR capture is supported (on wayland), but all GPU drivers have bugs that ignore HDR metadata so the HDR metadata will be missing in the video file. I will eventually patch the video file to workaround these GPU driver issues.
-4) FLAC audio codec is disabled at the moment because of temporary issues.
+3) FLAC audio codec is disabled at the moment because of temporary issues.
### AMD/Intel/Wayland root permission
-When recording a window under AMD/Intel no special user permission is required, however when recording a monitor (or when using wayland) the program needs root permission (to access KMS).\
+When recording a window or when using the `-w portal` option under AMD/Intel no special user permission is required,
+however when recording a monitor (or when using wayland) the program needs root permission (to access KMS).\
This is safe in GPU Screen Recorder as the part that needs root access has been moved to its own small program that only does one thing.\
For you as a user this only means that if you installed GPU Screen Recorder as a flatpak then a prompt asking for root password will show up when you start recording.
# Performance
On a system with a i5 4690k CPU and a GTX 1080 GPU:\
When recording Legend of Zelda Breath of the Wild at 4k, fps drops from 30 to 7 when using OBS Studio + nvenc, however when using this screen recorder the fps remains at 30.\
-When recording GTA V at 4k on highest settings, fps drops from 60 to 23 when using obs-nvfbc + nvenc, however when using this screen recorder the fps only drops to 58. The quality is also much better when using gpu screen recorder.\
+When recording GTA V at 4k on highest settings, fps drops from 60 to 23 when using obs-nvfbc + nvenc, however when using this screen recorder the fps only drops to 58.\
GPU Screen Recorder also produces much smoother videos than OBS when GPU utilization is close to 100%, see comparison here: [https://www.youtube.com/watch?v=zfj4sNVLLLg](https://www.youtube.com/watch?v=zfj4sNVLLLg).\
+GPU Screen Recorder has much better performance than OBS Studio even with version 30.2 that does "zero-copy" recording and encoding, see: [https://www.youtube.com/watch?v=jdroRjibsDw](https://www.youtube.com/watch?v=jdroRjibsDw).\
It is recommended to save the video to an SSD because of the large file size, which a slow HDD might not be fast enough to handle. Using variable framerate mode (-fm vfr), which is the default, is also recommended as this reduces encoding load. Ultra quality is also overkill most of the time; very high (the default) or lower quality is usually enough.
## Note about optimal performance on NVIDIA
The NVIDIA driver has a "feature" (read: bug) where it will downclock the memory transfer rate when a program uses cuda (or nvenc, which uses cuda), such as GPU Screen Recorder. To work around this bug, GPU Screen Recorder can overclock your GPU memory transfer rate to its normal optimal level.\
@@ -52,55 +55,63 @@ from one of the official sources before reporting it as an issue.
If you install the GPU Screen Recorder flatpak, which is the gtk gui version, then you can still run the GPU Screen Recorder command line by using the flatpak command option, for example `flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder -w screen -f 60 -o video.mp4`. Note that if you want to record your monitor on AMD/Intel then you need to install the flatpak system-wide (like so: `flatpak install flathub --system com.dec05eba.gpu_screen_recorder`).
# Dependencies
-GPU Screen Recorder uses meson build system so you need to install `meson`. There are additional dependencies depending on your graphics card:
-## AMD
-libglvnd (which provides libgl and libegl)\
-mesa\
-ffmpeg (libavcodec, libavformat, libavutil, libswresample, libavfilter)\
-x11 (libx11, libxcomposite, libxrandr, libxfixes, libxdamage, libxi)\
-libpulse\
-vaapi (libva, libva-mesa-driver)\
-libdrm\
-libcap\
-wayland-client
-## Intel
-libglvnd (which provides libgl and libegl)\
-mesa\
-ffmpeg (libavcodec, libavformat, libavutil, libswresample, libavfilter)\
-x11 (libx11, libxcomposite, libxrandr, libxfixes, libxdamage, libxi)\
-libpulse\
-vaapi (libva, intel-media-driver/libva-intel-driver)\
-libdrm\
-libcap\
-wayland-client
-## NVIDIA
-libglvnd (which provides libgl and libegl)\
-ffmpeg (libavcodec, libavformat, libavutil, libswresample, libavfilter)\
-x11 (libx11, libxcomposite, libxrandr, libxfixes, libxdamage, libxi)\
-libpulse\
-cuda runtime (libcuda.so.1) (libnvidia-compute)\
-nvenc (libnvidia-encode)\
-libva\
-libdrm\
-libcap\
-wayland-client\
-nvfbc (libnvidia-fbc1, when recording the screen on x11)\
-xnvctrl (libxnvctrl0, when using the `-oc` option)
+GPU Screen Recorder uses the meson build system, so you need to install `meson` to build GPU Screen Recorder.
+
+## Build dependencies
+These are the dependencies needed to build GPU Screen Recorder:
+
+* libglvnd (which provides libgl, libglx and libegl)
+* vulkan-headers
+* ffmpeg (libavcodec, libavformat, libavutil, libswresample, libavfilter)
+* x11 (libx11, libxcomposite, libxrandr, libxfixes, libxdamage)
+* libpulse
+* libva (and libva-drm)
+* libdrm
+* libcap
+* wayland-client
+* wayland-egl
+
+## Runtime dependencies
+There are also additional dependencies needed at runtime depending on your GPU vendor:
+
+### AMD
+* mesa
+* vaapi (libva-mesa-driver)
+
+### Intel
+* mesa
+* vaapi (intel-media-driver/libva-intel-driver/linux-firmware, depending on which intel iGPU you have)
+
+### NVIDIA
+* cuda runtime (libcuda.so.1) (libnvidia-compute)
+* nvenc (libnvidia-encode)
+* nvfbc (libnvidia-fbc1, when recording the screen on x11)
+* xnvctrl (libxnvctrl0, when using the `-oc` option)
+
+## Optional dependencies
+When compiling GPU Screen Recorder with portal support (`-Dportal=true`, which is enabled by default) these dependencies are also needed:
+* libdbus
+* libpipewire (and libspa which is usually part of libpipewire)
# How to use
-Run `gpu-screen-recorder --help` to see all options.
+Run `gpu-screen-recorder --help` to see all options and also examples.
## Recording
-Here is an example of how to record all monitors and the default audio output: `gpu-screen-recorder -w screen -f 60 -a "$(pactl get-default-sink).monitor" -o ~/Videos/test_video.mp4` then stop the screen recorder with `Ctrl+C`, which will also save the recording. You can record a single monitor if you change `-w screen` to the name of a monitor, which you can find if you run the `xrandr`. An example of a monitor name is HDMI-1.
+Here is an example of how to record your monitor and the default audio output: `gpu-screen-recorder -w screen -f 60 -a default_output -o ~/Videos/test_video.mp4`.
+You can stop and save the recording with `Ctrl+C` or by running `killall -SIGINT gpu-screen-recorder`.
+You can see a list of monitor names to record if you use an invalid monitor name, for example: `gpu-screen-recorder -w invalid -f 60 -o video.mp4`.
## Streaming
-Streaming works the same as recording, but the `-o` argument should be path to the live streaming service you want to use (including your live streaming key). Take a look at scripts/twitch-stream.sh to see an example of how to stream to twitch.
+Streaming works the same as recording, but the `-o` argument should be path to the live streaming service you want to use (including your live streaming key). Take a look at `scripts/twitch-stream.sh` to see an example of how to stream to twitch.
## Replay mode
Run `gpu-screen-recorder` with the `-c mp4` and `-r` option, for example: `gpu-screen-recorder -w screen -f 60 -r 30 -c mp4 -o ~/Videos`. Note that in this case, `-o` should point to a directory.\
-If `-mf yes` is set, replays are save in folders based on the date.
+If `-df yes` is set, replays are saved in folders based on the date.
+The file path to the saved replay is output to stdout. All other output from GPU Screen Recorder is output to stderr.
+You can also use the `-sc` option to specify a script that should be run (asynchronously) when the video has been saved; the script will receive the location of the saved file as its first argument.
+This can be used for example to show a notification when a replay has been saved, to rename the video with a title that matches the game played (see `scripts/record-save-application-name.sh` as an example of how to do this on X11) or to re-encode the video.\
+The replay buffer is stored in ram (as encoded video), so don't use too long a replay time and/or too high a video quality unless you have enough ram to store it.
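As a sketch of what such a `-sc` script can look like (this is only an illustration, assuming `notify-send` from libnotify is installed; it is not a script that ships with GPU Screen Recorder):

```sh
#!/bin/sh
# Hypothetical -sc script: gpu-screen-recorder runs it with the path
# of the saved video as the first argument ($1).
notify-send "Video saved" "$1"
```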
+## Controlling GPU Screen Recorder remotely
To save a video in replay mode, you need to send signal SIGUSR1 to gpu screen recorder. You can do this by running `killall -SIGUSR1 gpu-screen-recorder`.\
-To stop recording send SIGINT to gpu screen recorder. You can do this by running `killall -SIGINT gpu-screen-recorder` or pressing `Ctrl-C` in the terminal that runs gpu screen recorder.\
+To stop recording send SIGINT to gpu screen recorder. You can do this by running `killall -SIGINT gpu-screen-recorder` or pressing `Ctrl-C` in the terminal that runs gpu screen recorder. When recording a regular non-replay video this will also save the video.\
To pause/unpause recording send SIGUSR2 to gpu screen recorder. You can do this by running `killall -SIGUSR2 gpu-screen-recorder`. This is only applicable and useful when recording (not streaming nor replay).\
-The file path to the saved replay is output to stdout. All other output from GPU Screen Recorder is output to stderr.\
-The replay buffer is stored in ram (as encoded video), so don't use a too large replay time and/or video quality unless you have enough ram to store it.
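Since all remote control is done with signals, as shown above, it can be wrapped in a small dispatch script; the following is only a sketch, not a script that ships with GPU Screen Recorder:

```sh
#!/bin/sh
# Sketch: control a running gpu-screen-recorder instance via signals.
case "$1" in
  save-replay) killall -SIGUSR1 gpu-screen-recorder ;; # save the replay buffer
  stop)        killall -SIGINT  gpu-screen-recorder ;; # stop (and save a regular recording)
  pause)       killall -SIGUSR2 gpu-screen-recorder ;; # pause/unpause (recording only)
  *)           echo "usage: $0 save-replay|stop|pause" >&2; exit 1 ;;
esac
```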
## Finding audio device name
You can find the default output audio device (headset, speakers (in other words, desktop audio)) with the command `pactl get-default-sink`. Add `monitor` to the end of that to use that as an audio input in gpu screen recorder.\
You can find the default input audio device (microphone) with the command `pactl get-default-source`. This input should not have `monitor` added to the end when used in gpu screen recorder.\
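Putting this together, a command that records both desktop audio and the microphone might look like the following sketch (assuming a PulseAudio or PipeWire setup where `pactl` is available; each `-a` option here adds one audio input):

```sh
gpu-screen-recorder -w screen -f 60 \
  -a "$(pactl get-default-sink).monitor" \
  -a "$(pactl get-default-source)" \
  -o ~/Videos/video.mp4
```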
@@ -118,60 +129,43 @@ hotkey settings on your system and choose a hotkey to run the script `scripts/sa
If you installed GPU Screen Recorder from AUR or from source and you are running a distro that uses systemd then you will have a systemd service installed that can be started with `systemctl enable --now --user gpu-screen-recorder`. This systemd service runs GPU Screen Recorder on system startup.\
It's configured with `$HOME/.config/gpu-screen-recorder.env` (create it if it doesn't exist). You can look at [extra/gpu-screen-recorder.env](https://git.dec05eba.com/gpu-screen-recorder/plain/extra/gpu-screen-recorder.env) to see an example.
You can see which variables you can use in the `gpu-screen-recorder.env` file by looking at the `extra/gpu-screen-recorder.service` file. Note that all of the variables are optional; you only have to set the ones that you are interested in.
-You can use the `scripts/save-replay.sh` script to save a replay and by default the systemd service saves videos in `$HOME/Videos`.\
-If you are using a NVIDIA GPU then it's recommended to set PreserveVideoMemoryAllocations=1 as mentioned in the section below.
-## Examples
-Look at the [scripts](https://git.dec05eba.com/gpu-screen-recorder/tree/scripts) directory for script examples. For example if you want to automatically save a recording/replay into a folder with the same name as the game you are recording.
+You can use the `scripts/save-replay.sh` script to save a replay and by default the systemd service saves videos in `$HOME/Videos`.
# Issues
## NVIDIA
-Nvidia drivers have an issue where CUDA breaks if CUDA is running when suspend/hibernation happens, and it remains broken until you reload the nvidia driver. To fix this, either disable suspend or tell the NVIDIA driver to preserve video memory on suspend/hibernate by using the `NVreg_PreserveVideoMemoryAllocations=1` option. You can run `sudo extra/install_preserve_video_memory.sh` to automatically add that option to your system.
+Nvidia drivers have an issue where CUDA breaks if CUDA is running when suspend/hibernation happens, and it remains broken until you reload the nvidia driver. `extra/gsr-nvidia.conf` will be installed by default when you install GPU Screen Recorder and that should fix this issue. If this doesn't fix the issue for you then your distro may use a different path for modprobe files. In that case you have to install that `extra/gsr-nvidia.conf` yourself into that location.
+You have to reboot your computer after installing GPU Screen Recorder for the first time for the fix to have any effect.
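The modprobe file presumably just sets the `NVreg_PreserveVideoMemoryAllocations` driver option that the old workaround script used; a sketch of what such a file would contain:

```
options nvidia NVreg_PreserveVideoMemoryAllocations=1
```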
+# Examples
+Look at the [scripts](https://git.dec05eba.com/gpu-screen-recorder/tree/scripts) directory for script examples. For example if you want to automatically save a recording/replay into a folder with the same name as the game you are recording.
# Reporting bugs
-Issues are reported on this Github page: [https://github.com/dec05eba/gpu-screen-recorder-issues/issues](https://github.com/dec05eba/gpu-screen-recorder-issues/issues)
+Issues are reported on this Github page: [https://github.com/dec05eba/gpu-screen-recorder-issues/issues](https://github.com/dec05eba/gpu-screen-recorder-issues/issues).
# Contributing patches
-See [https://git.dec05eba.com/?p=about](https://git.dec05eba.com/?p=about)
+See [https://git.dec05eba.com/?p=about](https://git.dec05eba.com/?p=about) for contribution steps.
+# Donations
+See [https://git.dec05eba.com/?p=about](https://git.dec05eba.com/?p=about) for donation options.
# Demo
[![Click here to watch a demo video on youtube](https://img.youtube.com/vi/n5tm0g01n6A/0.jpg)](https://www.youtube.com/watch?v=n5tm0g01n6A)
# FAQ
-## How is this different from using OBS with nvenc?
-OBS only uses the gpu for video encoding, but the window image that is encoded is copied from the GPU to the CPU and then back to the GPU (video encoding unit). These operations are very slow and causes all of the fps drops when using OBS. OBS only uses the GPU efficiently on Windows 10 and Nvidia.\
-This gpu screen recorder keeps the window image on the GPU and sends it directly to the video encoding unit on the GPU by using CUDA. This means that CPU usage remains at around 0% when using this screen recorder.
-## How is this different from using OBS NvFBC plugin?
-The plugin does everything on the GPU and gives the texture to OBS, but OBS does not know how to use the texture directly on the GPU so it copies the texture to the CPU and then back to the GPU (video encoding unit). These operations are very slow and causes a lot of fps drops unless you have a fast CPU. This is especially noticable when recording at higher resolutions than 1080p.
-## How is this different from using FFMPEG with x11grab and nvenc?
-FFMPEG only uses the GPU with CUDA when doing transcoding from an input video to an output video, and not when recording the screen when using x11grab. So FFMPEG has the same fps drop issues that OBS has.
## It tells me that my AMD/Intel GPU is not supported or that my GPU doesn't support h264/hevc, but that's not true!
Some linux distros (such as manjaro and fedora) disable hardware accelerated h264/hevc on AMD/Intel because of "patent license issues". If you are using an arch-based distro then you can install mesa-git instead of mesa and if you are using another distro then you may have to switch to a better distro. On fedora based distros you can follow this: [Hardware Accelerated Codec](https://rpmfusion.org/Howto/Multimedia).\
If you installed GPU Screen Recorder flatpak then you can try installing mesa-extra freedesktop runtime by running this command: `flatpak install --system org.freedesktop.Platform.GL.default//23.08-extra`
## I have an old nvidia GPU that supports nvenc but I get a cuda error when trying to record
Newer ffmpeg versions don't support older nvidia cards. Try installing GPU Screen Recorder flatpak from [flathub](https://flathub.org/apps/details/com.dec05eba.gpu_screen_recorder) instead. It comes with an older ffmpeg version which might work for your GPU.
## I get a black screen/glitches while live streaming
-It seems like ffmpeg earlier than version 6.1 has some type of bug. Install ffmpeg 6.1 and then reinstall GPU Screen Recorder to fix this issue. The flatpak version of GPU Screen Recorder comes with ffmpeg 6.1 so no extra steps are needed.
+It seems like ffmpeg versions earlier than 6.1 have some type of bug. Install ffmpeg version 6.1 or later and then reinstall GPU Screen Recorder to fix this issue. The flatpak version of GPU Screen Recorder comes with a newer version of ffmpeg so no extra steps are needed.
## I can't play the video in my browser directly or in discord
Browsers and discord don't support hevc video codec at the moment. Choose h264 video codec instead with the -k h264 option.
Note that websites such as youtube support hevc, so there is no need to choose the h264 video codec if you intend to upload the video to youtube, play the video locally or
edit the video with a video editor. Hevc allows for better video quality (especially at lower file sizes) so hevc (or av1) is recommended for source videos.
-## I get a black bar/distorted colors on the right/bottom in the video
-This is mostly an issue on AMD. For av1 it's a hardware issue, see: https://gitlab.freedesktop.org/mesa/mesa/-/issues/9185. For hevc it's a software issue that has been fixed but not released yet, see: https://gitlab.freedesktop.org/mesa/mesa/-/issues/10985.
-If you get this issue then a workaround is to record with h264 video codec instead (using the -k h264 option).
-## The video is glitched, looks like checkerboard pattern
-This is an issue on some intel integrated gpus on wayland caused by power saving option. Right now the only way to fix this is to record on X11 instead.
+## I get a black bar/distorted colors on the sides in the video
+This is mostly an issue on AMD. For av1 it's a hardware issue, see: https://gitlab.freedesktop.org/mesa/mesa/-/issues/9185. For hevc it's a software issue in the AMD driver that hasn't been fixed yet. This issue happens at certain video resolutions. If you get this issue then a workaround is to record with the h264 video codec instead (using the `-k h264` option).
## The video doesn't display or has a green/yellow overlay
This can happen if your video player is missing the H264/HEVC video codecs. Either install the codecs or use mpv.
## I get stutter in the video
Try recording to an SSD and make sure it's not using NTFS file system. Also record in variable framerate format.
-## I get a black screen when recording
-This can happen if you use software such as prime-run to run GPU Screen Recorder. Such software should not be used to run GPU Screen Recorder.
-GPU Screen Recorder needs to run on the same GPU that you use to display your monitors graphics to work.
-
-# Donations
-If you want to donate you can donate via bitcoin or monero.
-* Bitcoin: bc1qqvuqnwrdyppf707ge27fqz2n9y9gu7lf5ypyuf
-* Monero: 4An9kp2qW1C9Gah7ewv4JzcNFQ5TAX7ineGCqXWK6vQnhsGGcRpNgcn8r9EC3tMcgY7vqCKs3nSRXhejMHBaGvFdN2egYet
-
-# TODO
-* Dynamically change bitrate/resolution to match desired fps. This would be helpful when streaming for example, where the encode output speed also depends on upload speed to the streaming service.
-* Implement opengl injection to capture texture. This fixes VRR without having to use NvFBC direct capture.
-* Always use direct capture with NvFBC once the capture issue in mpv fullscreen has been resolved (maybe detect if direct capture fails in nvfbc and switch to non-direct recording. NvFBC says if direct capture fails).
+## The colors look washed out when recording a monitor with HDR enabled
+You have to either record in HDR mode (the `-k hevc_hdr` or `-k av1_hdr` option) to record an HDR video, or record with the desktop portal option (`-w portal`) to turn the HDR content into SDR.
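For example, a recording command for a true HDR video might look like this (the monitor name `DP-1` is hypothetical; use your own monitor name as described in the Recording section):

```sh
gpu-screen-recorder -w DP-1 -f 60 -k hevc_hdr -a default_output -o ~/Videos/hdr_video.mp4
```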
+## GPU Screen Recorder records night light when recording in HDR mode
+You can record with the desktop portal option (`-w portal`) instead, which ignores night light, if you are ok with recording without HDR.
\ No newline at end of file
diff --git a/TODO b/TODO
index 1051253..6e535c7 100644
--- a/TODO
+++ b/TODO
@@ -6,9 +6,6 @@ Allow setting a different output resolution than the input resolution.
Use mov+faststart.
Allow recording all monitors/selected monitor without nvfbc by recording the compositor proxy window and only recording the part that matches the monitor(s).
Allow recording a region by recording the compositor proxy window / nvfbc window and copying part of it.
-Use nvenc directly, which allows removing the use of cuda.
-Handle xrandr monitor change in nvfbc.
-Implement follow focused in drm.
Support amf and qsv.
Disable flipping on nvidia? This might fix some stuttering issues on some setups. See NvCtrlGetAttribute/NvCtrlSetAttributeAndGetStatus NV_CTRL_SYNC_TO_VBLANK https://github.com/NVIDIA/nvidia-settings/blob/d5f022976368cbceb2f20b838ddb0bf992f0cfb9/src/gtk%2B-2.x/ctkopengl.c.
Replays seem to have some issues with audio/video. Why?
@@ -20,7 +17,7 @@ Reverse engineer nvapi so we can disable "force p2 state" on linux too (nvapi pr
Support yuv444p on amd/intel.
fix yuv444 for hevc.
Do not allow streaming if yuv444.
-Re-enable yuv444.
+Re-enable yuv444 and allow yuv444 for software encoding. Good for remote desktop. But for remote desktop it's more ideal to use yuv420 and, when the image is not moving, send a png image instead, for a clear image while the screen is static.
Support 10 bit output because of better gradients. May even be smaller file size. Better supported on hevc (not supported at all on h264 on my gpu).
Add nvidia/(amd/intel) specific install script for ubuntu. User should run install_ubuntu.sh but it should run different install dep script depending on if /proc/driver/nvidia/version exists or not. But what about switchable graphics setup?
Test different combinations of switchable graphics. Intel hybrid mode (running intel but possible to run specific applications with prime-run), running pure intel. Detect switchable graphics.
@@ -31,7 +28,6 @@ https://djdallmann.github.io/GamingPCSetup/CONTENT/RESEARCH/FINDINGS/registrykey
The video output will be black if the system is suspended on nvidia and NVreg_PreserveVideoMemoryAllocations is not set to 1. This happens because, I think, the driver invalidates textures/cuda buffers. To fix this we could try and recreate gsr capture when gsr_capture_capture fails (with a timeout to retry again).
NVreg_RegistryDwords.
-Restore nvfbc screen recording on monitor reconfiguration.
Window capture doesn't work properly in _control_ game after going from pause menu to in-game (and back to pause menu). There might be some x11 event we need to catch. Same for vr-video-player.
Monitor capture on steam deck is slightly below the game fps, but only when capturing on the steam deck screen. If capturing on another monitor, there is no issue.
@@ -44,23 +40,14 @@ Intel is a bit weird with monitor capture and multiple monitors. If one of the m
Is that only the case when the primary monitor is rotated? Also the primary monitor becomes position 0, 0 so crtc (x11 randr) position doesn't match the drm pos. Maybe get monitor position and size from drm instead.
How about if multiple monitors are rotated?
-Support vp8/vp9. This is especially important on amd which on some distros (such as Manjaro) where hardware accelerated h264/hevc is disabled in the mesa package.
-
Support screen (all monitors) capture on amd/intel and nvidia wayland when no combined plane is found. Right now screen just takes the first output.
Use separate plane (which has offset and pitch) from combined plane instead of the combined plane.
Both twitch and youtube support variable bitrate but twitch recommends constant bitrate to reduce stream buffering/dropped frames when going from low motion to high motion: https://help.twitch.tv/s/article/broadcasting-guidelines?language=en_US. Info for youtube: https://support.google.com/youtube/answer/2853702?hl=en#zippy=%2Cvariable-bitrate-with-custom-stream-keys-in-live-control-room%2Ck-p-fps%2Cp-fps.
-Limit fps recording with x damage. This is good when running replay mode 24/7 and being afk or when not much is happening on the screen.
-
On nvidia some games apparently appear to stutter (without dropping fps) when recording a monitor, but not
when using direct screen capture. Observed in Deus Ex and Apex Legends.
-Support "screen" (all monitors) capture on wayland. This should be done by getting all drm fds and multiple EGL_DMA_BUF_PLANEX_FD_EXT to create one egl image with all fds combined.
-
-Support pipewire screen capture?
-CPU usage is pretty high on AMD/Intel/(Nvidia(wayland)), why? opening and closing fds, creating egl, cuda association, is slow when done every frame. Test if desktop portal screencast has better performance.
-
Capture is broken on amd on wlroots. It's disabled at the moment and instead uses kms capture. Find out why we get a black screen in wlroots.
Support vulkan video encoding. That might work around the forced p2 state nvidia driver "bug". Ffmpeg supports vulkan video encoding if it's built with --enable-vulkan
@@ -84,10 +71,6 @@ Use SRC_W and SRC_H for screen plane instead of crtc_w and crtc_h.
Make it possible to select which /dev/dri/card* to use, but that requires opengl to also use the same card. Not sure if that is possible for amd, intel and nvidia without using vulkan instead.
-Support intel display framebuffer compression (I915_FORMAT_MOD_Y_TILED_CCS modifier) (and other power saving modifiers, see https://trac.ffmpeg.org/ticket/8542). The only fix may be to use desktop portal for recording. This issue doesn't appear on x11 since these modifiers are not used by xorg server.
-This issue only appears on some intel iGPUs, such as Intel Iris Xe, see: https://github.com/dec05eba/gpu-screen-recorder-issues/issues/1.
-Intel dedicated GPU (intel arc a750) can have a similar issue, but it's not related to compression. In that case the modifier is I915_FORMAT_MOD_4_TILED.
-
Test if p2 state can be worked around by using pure nvenc api and overwriting cuInit/cuCtxCreate* to not do anything. Cuda might be loaded when using nvenc but it might not be used, with certain record options? (such as h264 p5).
nvenc uses cuda when using b frames and rgb->yuv conversion, so convert the image ourselves instead.
@@ -95,7 +78,7 @@ Mesa doesn't support global headers (AV_CODEC_FLAG_GLOBAL_HEADER) with h264... w
Drop frames if live streaming cant keep up with target fps, or dynamically change resolution/quality.
-Support low power option (does it even work with vaapi in ffmpeg??). Would be very useful for steam deck.
+Support low power option.
Instead of sending a big list of drm data back to kms client, send the monitor we want to record to kms server and the server should respond with only the matching monitor, and cursor.
@@ -109,8 +92,6 @@ Setup hardware video context so we can query constraints and capabilities for be
Use CAP_SYS_NICE in flatpak too on the main gpu screen recorder binary. It makes recording smoother, especially with constant framerate.
-Show error when using compressed kms plane which isn't supported. Also do that in the gui.
-
Modify ffmpeg to accept opengl texture for nvenc encoding. Removes extra buffers and copies.
When vulkan encode is added, mention minimum nvidia driver required. (550.54.14?).
@@ -124,8 +105,6 @@ Go back to using pure vaapi without opengl for video encoding? rotation (transpo
Implement scaling and use lanczos resampling for better quality. Lanczos resampling can also be used for YUV chroma for better color quality on small text.
-Try fixing HDR by passing HDR+10 data as well, and in the packet. Run "ffprobe -loglevel quiet -read_intervals "%+#2" -select_streams v:0 -show_entries side_data video.mp4" to test if the file has correct metadata.
-
Flac is disabled because the frame sizes are too large which causes big audio/video desync.
Add 10-bit capture option. This is good because it reduces banding and quality in very dark areas while reducing the file size compared to doing the same thing with 8-bits.
@@ -136,14 +115,10 @@ Support vfr matching games exact fps all the time. On x11 use damage tracking, o
Another method to track damage that works regardless of the display server would be to do a diff between frames with a shader.
Support selecting which gpu to use. This can be done in egl with eglQueryDevicesEXT and then eglGetPlatformDisplayEXT. This will automatically work on AMD and Intel as vaapi uses the same device. On nvidia we need to use eglQueryDeviceAttribEXT with EGL_CUDA_DEVICE_NV.
- Maybe on glx (nvidia x11 nvfbc) we need to use __NV_PRIME_RENDER_OFFLOAD_PROVIDER and __GLX_VENDOR_LIBRARY_NAME instead.
-
-Remove is_damaged and clear_damage and return a value from capture function instead that states if the image has been updated or not.
+ Maybe on glx (nvidia x11 nvfbc) we need to use __NV_PRIME_RENDER_OFFLOAD, __NV_PRIME_RENDER_OFFLOAD_PROVIDER, __GLX_VENDOR_LIBRARY_NAME, __VK_LAYER_NV_optimus, VK_ICD_FILENAMES instead. Just look at prime-run (/usr/bin/prime-run).
-Test install intel-hybrid-codec-driver-git for vp8 encoding on intel.
When adding support for steam deck, add option to send video to another computer.
New gpu screen recorder gui should have the option to cut the video directly, maybe running an ffmpeg command or implementing that ourselves. Only support gpu screen recorder video files.
-Add hdr metadata to encoder settings metadata.
Check if is software renderer by using eglQueryDisplayAttribEXT(egl_display, EGL_DEVICE_EXT..) eglQueryDeviceStringEXT(egl_device, EGL_EXTENSIONS) and check for "EGL_MESA_device_software".
@@ -151,4 +126,55 @@ Use MapTexture2DINTEL for software encoding on intel.
To test vulkan encode on amd set the environment variable RADV_PERFTEST=video_encode before running a program that uses vulkan encode (or queries for it, such as vulkaninfo).
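A sketch of such a test (the `grep` filter is only a guess at how to spot the relevant vulkaninfo output):

```sh
RADV_PERFTEST=video_encode vulkaninfo | grep -i "video_encode"
```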
-Support hevc/av1 for software encoder and hdr support at the same time. Need support for yuv420p shader for that. Use libx265 for hevc and libsvtav1 for av1 (libsvtav1 is the fastest software av1 video encoder). Also support vp8/vp9 since we are not limited by hardware.
\ No newline at end of file
+Support hevc/av1 for software encoder and hdr support at the same time. Need support for yuv420p shader for that. Use libx265 for hevc and libsvtav1 for av1 (libsvtav1 is the fastest software av1 video encoder). Also support vp8/vp9 since we are not limited by hardware.
+
+Cleanup pipewire code and add more error checks.
+
+Make dbus code and pipewire setup non blocking.
+
+Support portal (pipewire) hdr capture when pipewire adds support for it. Maybe use the result of SelectSources and then query the hdr metadata with drm.
+
+HDR support on x11?
+
+Move most kms data to kms client. We don't need root access for everything that is served from the kms server right now, such as hdr metadata and drm plane properties. Only the drm plane fd really needs root access.
+
+Show rotated window size in monitor list when using incorrect monitor name.
+
+Desktop portal capture on kde plasma makes notifications not show up unless the notification is set as urgent. How do we fix this? Do we have to make our own notification system?
+
+Explicit sync is done with the drm property IN_FENCE_FD (see https://drmdb.emersion.fr/properties/4008636142/IN_FENCE_FD). Check if this needs to be used on wayland (especially on nvidia) when capturing a monitor directly without desktop portal.
+
+The update fps appears to be lower when recording a monitor instead of using portal on intel. Does this reflect in the game framerate?
+
+Fix glitches when using prime-run with desktop portal. It happens when moving a window around. It's probably a syncing issue.
+
+Allow prime-run on x11 if monitor capture and the prime gpu is not nvidia.
+
+Enable 2-pass encoding.
+
+Add cbr option.
+
+Restart replay/update video resolution if monitor resolution changes.
+
+Fix pure vaapi copy on intel.
+
+ffmpeg supports vulkan encoding now (h264!). Doesn't work on amd yet because mesa is missing VK_KHR_video_maintenance1, see https://gitlab.freedesktop.org/mesa/mesa/-/issues/11857. Test on nvidia!
+
+Test vaapi low latency mode (setenv("AMD_DEBUG", "lowlatencyenc", true);), added in mesa 24.1.4, released on july 17, 2024. Note that this forces gpu power usage to max at all times, even when recording at 2 fps.
+Use nvidia low latency options for better encoding times.
+
+Test ideal async_depth value. Increasing async_depth also increased gpu memory usage a lot (from 100mb to 500mb when moving from async_depth 2 to 16) at 4k resolution. Setting it to 8 increases it by 200mb which might be ok.
+
+Replace -encoder cpu with -k h264_software?
+
+Change vp8/vp9 quality options, right now the file size is too large (for vp9 at least at very_high quality).
+
+Support recording while in replay mode. This will be needed when enabling replay on system startup with systemd service and wanting to record a video besides that.
+ The harder and more bloat solution for this would be to make an IPC.
+ The simple solution would be to use SIGUSR2 for starting/stopping recording since SIGUSR2 is unused for replays. That would mean SIGUSR2 for pausing recording would be ignored.
+ It also means that the video will be created in the same directory as the replay (or have option to specify another location for that) but the filename would have to be generated automatically.
+ To rename the file you would have to use -sc to rename it with a script, or add an option to provide a template for the name.
+
+Dynamically change bitrate/resolution to match desired fps. This would be helpful when streaming for example, where the encode output speed also depends on upload speed to the streaming service.
+Implement opengl injection to capture texture. This fixes VRR without having to use NvFBC direct capture and also allows perfect frame timing.
+Always use direct capture with NvFBC once the capture issue in mpv fullscreen has been resolved (maybe detect if direct capture fails in nvfbc and switch to non-direct recording. NvFBC says if direct capture fails).
\ No newline at end of file
diff --git a/external/nvEncodeAPI.h b/external/nvEncodeAPI.h
new file mode 100644
index 0000000..281464c
--- /dev/null
+++ b/external/nvEncodeAPI.h
@@ -0,0 +1,4285 @@
+/*
+ * This copyright notice applies to this header file only:
+ *
+ * Copyright (c) 2010-2022 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the software, and to permit persons to whom the
+ * software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file nvEncodeAPI.h
+ * NVIDIA GPUs - beginning with the Kepler generation - contain a hardware-based encoder
+ * (referred to as NVENC) which provides fully-accelerated hardware-based video encoding.
+ * NvEncodeAPI provides the interface for NVIDIA video encoder (NVENC).
+ * \date 2011-2022
+ * This file contains the interface constants, structure definitions and function prototypes.
+ */
+
+#ifndef _NV_ENCODEAPI_H_
+#define _NV_ENCODEAPI_H_
+
+#include <stdlib.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#ifdef _MSC_VER
+#ifndef _STDINT
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t;
+typedef unsigned short uint16_t;
+#endif
+#else
+#include <stdint.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \addtogroup ENCODER_STRUCTURE NvEncodeAPI Data structures
+ * @{
+ */
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+#define NVENCAPI __stdcall
+#else
+#define NVENCAPI
+#endif
+
+#ifdef _WIN32
+typedef RECT NVENC_RECT;
+#else
+#define NVENCAPI
+// =========================================================================================
+#if !defined(GUID) && !defined(GUID_DEFINED)
+#define GUID_DEFINED
+/*!
+ * \struct GUID
+ * Abstracts the GUID structure for non-windows platforms.
+ */
+// =========================================================================================
+typedef struct _GUID
+{
+ uint32_t Data1; /**< [in]: Specifies the first 8 hexadecimal digits of the GUID. */
+ uint16_t Data2; /**< [in]: Specifies the first group of 4 hexadecimal digits. */
+ uint16_t Data3; /**< [in]: Specifies the second group of 4 hexadecimal digits. */
+ uint8_t Data4[8]; /**< [in]: Array of 8 bytes. The first 2 bytes contain the third group of 4 hexadecimal digits.
+ The remaining 6 bytes contain the final 12 hexadecimal digits. */
+} GUID, *LPGUID;
+#endif // GUID
+
+/**
+ * \struct _NVENC_RECT
+ * Defines a Rectangle. Used in ::NV_ENC_PREPROCESS_FRAME.
+ */
+typedef struct _NVENC_RECT
+{
+ uint32_t left; /**< [in]: X coordinate of the upper left corner of rectangular area to be specified. */
+ uint32_t top; /**< [in]: Y coordinate of the upper left corner of the rectangular area to be specified. */
+ uint32_t right; /**< [in]: X coordinate of the bottom right corner of the rectangular area to be specified. */
+ uint32_t bottom; /**< [in]: Y coordinate of the bottom right corner of the rectangular area to be specified. */
+} NVENC_RECT;
+
+#endif // _WIN32
+
+/** @} */ /* End of GUID and NVENC_RECT structure grouping*/
+
+typedef void* NV_ENC_INPUT_PTR; /**< NVENCODE API input buffer */
+typedef void* NV_ENC_OUTPUT_PTR; /**< NVENCODE API output buffer*/
+typedef void* NV_ENC_REGISTERED_PTR; /**< A Resource that has been registered with NVENCODE API*/
+typedef void* NV_ENC_CUSTREAM_PTR; /**< Pointer to CUstream*/
+
+#define NVENCAPI_MAJOR_VERSION 12
+#define NVENCAPI_MINOR_VERSION 0
+
+#define NVENCAPI_VERSION (NVENCAPI_MAJOR_VERSION | (NVENCAPI_MINOR_VERSION << 24))
+
+/**
+ * Macro to generate per-structure version for use with API.
+ */
+#define NVENCAPI_STRUCT_VERSION(ver) ((uint32_t)NVENCAPI_VERSION | ((ver)<<16) | (0x7 << 28))
+
+
+#define NVENC_INFINITE_GOPLENGTH 0xffffffff
+
+#define NV_MAX_SEQ_HDR_LEN (512)
+
+#ifdef __GNUC__
+#define NV_ENC_DEPRECATED __attribute__ ((deprecated("WILL BE REMOVED IN A FUTURE VIDEO CODEC SDK VERSION")))
+#elif defined(_MSC_VER)
+#define NV_ENC_DEPRECATED __declspec(deprecated("WILL BE REMOVED IN A FUTURE VIDEO CODEC SDK VERSION"))
+#endif
+
+// =========================================================================================
+// Encode Codec GUIDS supported by the NvEncodeAPI interface.
+// =========================================================================================
+
+// {6BC82762-4E63-4ca4-AA85-1E50F321F6BF}
+static const GUID NV_ENC_CODEC_H264_GUID =
+{ 0x6bc82762, 0x4e63, 0x4ca4, { 0xaa, 0x85, 0x1e, 0x50, 0xf3, 0x21, 0xf6, 0xbf } };
+
+// {790CDC88-4522-4d7b-9425-BDA9975F7603}
+static const GUID NV_ENC_CODEC_HEVC_GUID =
+{ 0x790cdc88, 0x4522, 0x4d7b, { 0x94, 0x25, 0xbd, 0xa9, 0x97, 0x5f, 0x76, 0x3 } };
+
+// {0A352289-0AA7-4759-862D-5D15CD16D254}
+static const GUID NV_ENC_CODEC_AV1_GUID =
+{ 0x0a352289, 0x0aa7, 0x4759, { 0x86, 0x2d, 0x5d, 0x15, 0xcd, 0x16, 0xd2, 0x54 } };
+
+
+
+// =========================================================================================
+// * Encode Profile GUIDS supported by the NvEncodeAPI interface.
+// =========================================================================================
+
+// {BFD6F8E7-233C-4341-8B3E-4818523803F4}
+static const GUID NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID =
+{ 0xbfd6f8e7, 0x233c, 0x4341, { 0x8b, 0x3e, 0x48, 0x18, 0x52, 0x38, 0x3, 0xf4 } };
+
+// {0727BCAA-78C4-4c83-8C2F-EF3DFF267C6A}
+static const GUID NV_ENC_H264_PROFILE_BASELINE_GUID =
+{ 0x727bcaa, 0x78c4, 0x4c83, { 0x8c, 0x2f, 0xef, 0x3d, 0xff, 0x26, 0x7c, 0x6a } };
+
+// {60B5C1D4-67FE-4790-94D5-C4726D7B6E6D}
+static const GUID NV_ENC_H264_PROFILE_MAIN_GUID =
+{ 0x60b5c1d4, 0x67fe, 0x4790, { 0x94, 0xd5, 0xc4, 0x72, 0x6d, 0x7b, 0x6e, 0x6d } };
+
+// {E7CBC309-4F7A-4b89-AF2A-D537C92BE310}
+static const GUID NV_ENC_H264_PROFILE_HIGH_GUID =
+{ 0xe7cbc309, 0x4f7a, 0x4b89, { 0xaf, 0x2a, 0xd5, 0x37, 0xc9, 0x2b, 0xe3, 0x10 } };
+
+// {7AC663CB-A598-4960-B844-339B261A7D52}
+static const GUID NV_ENC_H264_PROFILE_HIGH_444_GUID =
+{ 0x7ac663cb, 0xa598, 0x4960, { 0xb8, 0x44, 0x33, 0x9b, 0x26, 0x1a, 0x7d, 0x52 } };
+
+// {40847BF5-33F7-4601-9084-E8FE3C1DB8B7}
+static const GUID NV_ENC_H264_PROFILE_STEREO_GUID =
+{ 0x40847bf5, 0x33f7, 0x4601, { 0x90, 0x84, 0xe8, 0xfe, 0x3c, 0x1d, 0xb8, 0xb7 } };
+
+// {B405AFAC-F32B-417B-89C4-9ABEED3E5978}
+static const GUID NV_ENC_H264_PROFILE_PROGRESSIVE_HIGH_GUID =
+{ 0xb405afac, 0xf32b, 0x417b, { 0x89, 0xc4, 0x9a, 0xbe, 0xed, 0x3e, 0x59, 0x78 } };
+
+// {AEC1BD87-E85B-48f2-84C3-98BCA6285072}
+static const GUID NV_ENC_H264_PROFILE_CONSTRAINED_HIGH_GUID =
+{ 0xaec1bd87, 0xe85b, 0x48f2, { 0x84, 0xc3, 0x98, 0xbc, 0xa6, 0x28, 0x50, 0x72 } };
+
+// {B514C39A-B55B-40fa-878F-F1253B4DFDEC}
+static const GUID NV_ENC_HEVC_PROFILE_MAIN_GUID =
+{ 0xb514c39a, 0xb55b, 0x40fa, { 0x87, 0x8f, 0xf1, 0x25, 0x3b, 0x4d, 0xfd, 0xec } };
+
+// {fa4d2b6c-3a5b-411a-8018-0a3f5e3c9be5}
+static const GUID NV_ENC_HEVC_PROFILE_MAIN10_GUID =
+{ 0xfa4d2b6c, 0x3a5b, 0x411a, { 0x80, 0x18, 0x0a, 0x3f, 0x5e, 0x3c, 0x9b, 0xe5 } };
+
+// For HEVC Main 444 8 bit and HEVC Main 444 10 bit profiles only
+// {51ec32b5-1b4c-453c-9cbd-b616bd621341}
+static const GUID NV_ENC_HEVC_PROFILE_FREXT_GUID =
+{ 0x51ec32b5, 0x1b4c, 0x453c, { 0x9c, 0xbd, 0xb6, 0x16, 0xbd, 0x62, 0x13, 0x41 } };
+
+// {5f2a39f5-f14e-4f95-9a9e-b76d568fcf97}
+static const GUID NV_ENC_AV1_PROFILE_MAIN_GUID =
+{ 0x5f2a39f5, 0xf14e, 0x4f95, { 0x9a, 0x9e, 0xb7, 0x6d, 0x56, 0x8f, 0xcf, 0x97 } };
+
+// =========================================================================================
+// * Preset GUIDS supported by the NvEncodeAPI interface.
+// =========================================================================================
+// {B2DFB705-4EBD-4C49-9B5F-24A777D3E587}
+NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_DEFAULT_GUID =
+{ 0xb2dfb705, 0x4ebd, 0x4c49, { 0x9b, 0x5f, 0x24, 0xa7, 0x77, 0xd3, 0xe5, 0x87 } };
+
+// {60E4C59F-E846-4484-A56D-CD45BE9FDDF6}
+NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_HP_GUID =
+{ 0x60e4c59f, 0xe846, 0x4484, { 0xa5, 0x6d, 0xcd, 0x45, 0xbe, 0x9f, 0xdd, 0xf6 } };
+
+// {34DBA71D-A77B-4B8F-9C3E-B6D5DA24C012}
+NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_HQ_GUID =
+{ 0x34dba71d, 0xa77b, 0x4b8f, { 0x9c, 0x3e, 0xb6, 0xd5, 0xda, 0x24, 0xc0, 0x12 } };
+
+// {82E3E450-BDBB-4e40-989C-82A90DF9EF32}
+NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_BD_GUID =
+{ 0x82e3e450, 0xbdbb, 0x4e40, { 0x98, 0x9c, 0x82, 0xa9, 0xd, 0xf9, 0xef, 0x32 } };
+
+// {49DF21C5-6DFA-4feb-9787-6ACC9EFFB726}
+NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOW_LATENCY_DEFAULT_GUID =
+{ 0x49df21c5, 0x6dfa, 0x4feb, { 0x97, 0x87, 0x6a, 0xcc, 0x9e, 0xff, 0xb7, 0x26 } };
+
+// {C5F733B9-EA97-4cf9-BEC2-BF78A74FD105}
+NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOW_LATENCY_HQ_GUID =
+{ 0xc5f733b9, 0xea97, 0x4cf9, { 0xbe, 0xc2, 0xbf, 0x78, 0xa7, 0x4f, 0xd1, 0x5 } };
+
+// {67082A44-4BAD-48FA-98EA-93056D150A58}
+NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOW_LATENCY_HP_GUID =
+{ 0x67082a44, 0x4bad, 0x48fa, { 0x98, 0xea, 0x93, 0x5, 0x6d, 0x15, 0xa, 0x58 } };
+
+// {D5BFB716-C604-44e7-9BB8-DEA5510FC3AC}
+NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOSSLESS_DEFAULT_GUID =
+{ 0xd5bfb716, 0xc604, 0x44e7, { 0x9b, 0xb8, 0xde, 0xa5, 0x51, 0xf, 0xc3, 0xac } };
+
+// {149998E7-2364-411d-82EF-179888093409}
+NV_ENC_DEPRECATED static const GUID NV_ENC_PRESET_LOSSLESS_HP_GUID =
+{ 0x149998e7, 0x2364, 0x411d, { 0x82, 0xef, 0x17, 0x98, 0x88, 0x9, 0x34, 0x9 } };
+
+// Performance degrades and quality improves as we move from P1 to P7. Presets P3 to P7 for H264 and Presets P2 to P7 for HEVC have B frames enabled by default
+// for HIGH_QUALITY and LOSSLESS tuning info, and will not work with Weighted Prediction enabled. In case Weighted Prediction is required, disable B frames by
+// setting frameIntervalP = 1
+// {FC0A8D3E-45F8-4CF8-80C7-298871590EBF}
+static const GUID NV_ENC_PRESET_P1_GUID =
+{ 0xfc0a8d3e, 0x45f8, 0x4cf8, { 0x80, 0xc7, 0x29, 0x88, 0x71, 0x59, 0xe, 0xbf } };
+
+// {F581CFB8-88D6-4381-93F0-DF13F9C27DAB}
+static const GUID NV_ENC_PRESET_P2_GUID =
+{ 0xf581cfb8, 0x88d6, 0x4381, { 0x93, 0xf0, 0xdf, 0x13, 0xf9, 0xc2, 0x7d, 0xab } };
+
+// {36850110-3A07-441F-94D5-3670631F91F6}
+static const GUID NV_ENC_PRESET_P3_GUID =
+{ 0x36850110, 0x3a07, 0x441f, { 0x94, 0xd5, 0x36, 0x70, 0x63, 0x1f, 0x91, 0xf6 } };
+
+// {90A7B826-DF06-4862-B9D2-CD6D73A08681}
+static const GUID NV_ENC_PRESET_P4_GUID =
+{ 0x90a7b826, 0xdf06, 0x4862, { 0xb9, 0xd2, 0xcd, 0x6d, 0x73, 0xa0, 0x86, 0x81 } };
+
+// {21C6E6B4-297A-4CBA-998F-B6CBDE72ADE3}
+static const GUID NV_ENC_PRESET_P5_GUID =
+{ 0x21c6e6b4, 0x297a, 0x4cba, { 0x99, 0x8f, 0xb6, 0xcb, 0xde, 0x72, 0xad, 0xe3 } };
+
+// {8E75C279-6299-4AB6-8302-0B215A335CF5}
+static const GUID NV_ENC_PRESET_P6_GUID =
+{ 0x8e75c279, 0x6299, 0x4ab6, { 0x83, 0x2, 0xb, 0x21, 0x5a, 0x33, 0x5c, 0xf5 } };
+
+// {84848C12-6F71-4C13-931B-53E283F57974}
+static const GUID NV_ENC_PRESET_P7_GUID =
+{ 0x84848c12, 0x6f71, 0x4c13, { 0x93, 0x1b, 0x53, 0xe2, 0x83, 0xf5, 0x79, 0x74 } };
+
+/**
+ * \addtogroup ENCODER_STRUCTURE NvEncodeAPI Data structures
+ * @{
+ */
+
+/**
+ * Input frame encode modes
+ */
+typedef enum _NV_ENC_PARAMS_FRAME_FIELD_MODE
+{
+ NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME = 0x01, /**< Frame mode */
+ NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD = 0x02, /**< Field mode */
+ NV_ENC_PARAMS_FRAME_FIELD_MODE_MBAFF = 0x03 /**< MB adaptive frame/field */
+} NV_ENC_PARAMS_FRAME_FIELD_MODE;
+
+/**
+ * Rate Control Modes
+ */
+typedef enum _NV_ENC_PARAMS_RC_MODE
+{
+ NV_ENC_PARAMS_RC_CONSTQP = 0x0, /**< Constant QP mode */
+ NV_ENC_PARAMS_RC_VBR = 0x1, /**< Variable bitrate mode */
+ NV_ENC_PARAMS_RC_CBR = 0x2, /**< Constant bitrate mode */
+ NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ = 0x8, /**< Deprecated, use NV_ENC_PARAMS_RC_CBR + NV_ENC_TWO_PASS_QUARTER_RESOLUTION / NV_ENC_TWO_PASS_FULL_RESOLUTION +
+ lowDelayKeyFrameScale=1 */
+ NV_ENC_PARAMS_RC_CBR_HQ = 0x10, /**< Deprecated, use NV_ENC_PARAMS_RC_CBR + NV_ENC_TWO_PASS_QUARTER_RESOLUTION / NV_ENC_TWO_PASS_FULL_RESOLUTION */
+ NV_ENC_PARAMS_RC_VBR_HQ = 0x20 /**< Deprecated, use NV_ENC_PARAMS_RC_VBR + NV_ENC_TWO_PASS_QUARTER_RESOLUTION / NV_ENC_TWO_PASS_FULL_RESOLUTION */
+} NV_ENC_PARAMS_RC_MODE;
+
+/**
+ * Multi Pass encoding
+ */
+typedef enum _NV_ENC_MULTI_PASS
+{
+ NV_ENC_MULTI_PASS_DISABLED = 0x0, /**< Single Pass */
+ NV_ENC_TWO_PASS_QUARTER_RESOLUTION = 0x1, /**< Two Pass encoding is enabled where first Pass is quarter resolution */
+ NV_ENC_TWO_PASS_FULL_RESOLUTION = 0x2, /**< Two Pass encoding is enabled where first Pass is full resolution */
+} NV_ENC_MULTI_PASS;
+
+/**
+ * Emphasis Levels
+ */
+typedef enum _NV_ENC_EMPHASIS_MAP_LEVEL
+{
+ NV_ENC_EMPHASIS_MAP_LEVEL_0 = 0x0, /**< Emphasis Map Level 0, for zero Delta QP value */
+ NV_ENC_EMPHASIS_MAP_LEVEL_1 = 0x1, /**< Emphasis Map Level 1, for very low Delta QP value */
+ NV_ENC_EMPHASIS_MAP_LEVEL_2 = 0x2, /**< Emphasis Map Level 2, for low Delta QP value */
+ NV_ENC_EMPHASIS_MAP_LEVEL_3 = 0x3, /**< Emphasis Map Level 3, for medium Delta QP value */
+ NV_ENC_EMPHASIS_MAP_LEVEL_4 = 0x4, /**< Emphasis Map Level 4, for high Delta QP value */
+ NV_ENC_EMPHASIS_MAP_LEVEL_5 = 0x5 /**< Emphasis Map Level 5, for very high Delta QP value */
+} NV_ENC_EMPHASIS_MAP_LEVEL;
+
+/**
+ * QP MAP MODE
+ */
+typedef enum _NV_ENC_QP_MAP_MODE
+{
+ NV_ENC_QP_MAP_DISABLED = 0x0, /**< Value in NV_ENC_PIC_PARAMS::qpDeltaMap have no effect. */
+ NV_ENC_QP_MAP_EMPHASIS = 0x1, /**< Value in NV_ENC_PIC_PARAMS::qpDeltaMap will be treated as Emphasis level. Currently this is only supported for H264 */
+ NV_ENC_QP_MAP_DELTA = 0x2, /**< Value in NV_ENC_PIC_PARAMS::qpDeltaMap will be treated as QP delta map. */
+ NV_ENC_QP_MAP = 0x3, /**< Currently This is not supported. Value in NV_ENC_PIC_PARAMS::qpDeltaMap will be treated as QP value. */
+} NV_ENC_QP_MAP_MODE;
+
+#define NV_ENC_PARAMS_RC_VBR_MINQP (NV_ENC_PARAMS_RC_MODE)0x4 /**< Deprecated */
+#define NV_ENC_PARAMS_RC_2_PASS_QUALITY NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ /**< Deprecated */
+#define NV_ENC_PARAMS_RC_2_PASS_FRAMESIZE_CAP NV_ENC_PARAMS_RC_CBR_HQ /**< Deprecated */
+#define NV_ENC_PARAMS_RC_2_PASS_VBR NV_ENC_PARAMS_RC_VBR_HQ /**< Deprecated */
+#define NV_ENC_PARAMS_RC_CBR2 NV_ENC_PARAMS_RC_CBR /**< Deprecated */
+
+/**
+ * Input picture structure
+ */
+typedef enum _NV_ENC_PIC_STRUCT
+{
+ NV_ENC_PIC_STRUCT_FRAME = 0x01, /**< Progressive frame */
+ NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM = 0x02, /**< Field encoding top field first */
+ NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP = 0x03 /**< Field encoding bottom field first */
+} NV_ENC_PIC_STRUCT;
+
+/**
+ * Display picture structure
+ * Currently, this enum is only used for deciding the number of clock timestamp sets in Picture Timing SEI / Time Code SEI
+ * Otherwise, this has no impact on encoder behavior
+ */
+typedef enum _NV_ENC_DISPLAY_PIC_STRUCT
+{
+ NV_ENC_PIC_STRUCT_DISPLAY_FRAME = 0x00, /**< Field encoding top field first */
+ NV_ENC_PIC_STRUCT_DISPLAY_FIELD_TOP_BOTTOM = 0x01, /**< Field encoding top field first */
+ NV_ENC_PIC_STRUCT_DISPLAY_FIELD_BOTTOM_TOP = 0x02, /**< Field encoding bottom field first */
+ NV_ENC_PIC_STRUCT_DISPLAY_FRAME_DOUBLING = 0x03, /**< Frame doubling */
+ NV_ENC_PIC_STRUCT_DISPLAY_FRAME_TRIPLING = 0x04 /**< Field tripling */
+} NV_ENC_DISPLAY_PIC_STRUCT;
+
+/**
+ * Input picture type
+ */
+typedef enum _NV_ENC_PIC_TYPE
+{
+ NV_ENC_PIC_TYPE_P = 0x0, /**< Forward predicted */
+ NV_ENC_PIC_TYPE_B = 0x01, /**< Bi-directionally predicted picture */
+ NV_ENC_PIC_TYPE_I = 0x02, /**< Intra predicted picture */
+ NV_ENC_PIC_TYPE_IDR = 0x03, /**< IDR picture */
+ NV_ENC_PIC_TYPE_BI = 0x04, /**< Bi-directionally predicted with only Intra MBs */
+ NV_ENC_PIC_TYPE_SKIPPED = 0x05, /**< Picture is skipped */
+ NV_ENC_PIC_TYPE_INTRA_REFRESH = 0x06, /**< First picture in intra refresh cycle */
+ NV_ENC_PIC_TYPE_NONREF_P = 0x07, /**< Non reference P picture */
+ NV_ENC_PIC_TYPE_UNKNOWN = 0xFF /**< Picture type unknown */
+} NV_ENC_PIC_TYPE;
+
+/**
+ * Motion vector precisions
+ */
+typedef enum _NV_ENC_MV_PRECISION
+{
+ NV_ENC_MV_PRECISION_DEFAULT = 0x0, /**< Driver selects Quarter-Pel motion vector precision by default */
+ NV_ENC_MV_PRECISION_FULL_PEL = 0x01, /**< Full-Pel motion vector precision */
+ NV_ENC_MV_PRECISION_HALF_PEL = 0x02, /**< Half-Pel motion vector precision */
+ NV_ENC_MV_PRECISION_QUARTER_PEL = 0x03 /**< Quarter-Pel motion vector precision */
+} NV_ENC_MV_PRECISION;
+
+
+/**
+ * Input buffer formats
+ */
+typedef enum _NV_ENC_BUFFER_FORMAT
+{
+ NV_ENC_BUFFER_FORMAT_UNDEFINED = 0x00000000, /**< Undefined buffer format */
+
+ NV_ENC_BUFFER_FORMAT_NV12 = 0x00000001, /**< Semi-Planar YUV [Y plane followed by interleaved UV plane] */
+ NV_ENC_BUFFER_FORMAT_YV12 = 0x00000010, /**< Planar YUV [Y plane followed by V and U planes] */
+ NV_ENC_BUFFER_FORMAT_IYUV = 0x00000100, /**< Planar YUV [Y plane followed by U and V planes] */
+ NV_ENC_BUFFER_FORMAT_YUV444 = 0x00001000, /**< Planar YUV [Y plane followed by U and V planes] */
+ NV_ENC_BUFFER_FORMAT_YUV420_10BIT = 0x00010000, /**< 10 bit Semi-Planar YUV [Y plane followed by interleaved UV plane]. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */
+ NV_ENC_BUFFER_FORMAT_YUV444_10BIT = 0x00100000, /**< 10 bit Planar YUV444 [Y plane followed by U and V planes]. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */
+ NV_ENC_BUFFER_FORMAT_ARGB = 0x01000000, /**< 8 bit Packed A8R8G8B8. This is a word-ordered format
+ where a pixel is represented by a 32-bit word with B
+ in the lowest 8 bits, G in the next 8 bits, R in the
+ 8 bits after that and A in the highest 8 bits. */
+ NV_ENC_BUFFER_FORMAT_ARGB10 = 0x02000000, /**< 10 bit Packed A2R10G10B10. This is a word-ordered format
+ where a pixel is represented by a 32-bit word with B
+ in the lowest 10 bits, G in the next 10 bits, R in the
+ 10 bits after that and A in the highest 2 bits. */
+ NV_ENC_BUFFER_FORMAT_AYUV = 0x04000000, /**< 8 bit Packed A8Y8U8V8. This is a word-ordered format
+ where a pixel is represented by a 32-bit word with V
+ in the lowest 8 bits, U in the next 8 bits, Y in the
+ 8 bits after that and A in the highest 8 bits. */
+ NV_ENC_BUFFER_FORMAT_ABGR = 0x10000000, /**< 8 bit Packed A8B8G8R8. This is a word-ordered format
+ where a pixel is represented by a 32-bit word with R
+ in the lowest 8 bits, G in the next 8 bits, B in the
+ 8 bits after that and A in the highest 8 bits. */
+ NV_ENC_BUFFER_FORMAT_ABGR10 = 0x20000000, /**< 10 bit Packed A2B10G10R10. This is a word-ordered format
+ where a pixel is represented by a 32-bit word with R
+ in the lowest 10 bits, G in the next 10 bits, B in the
+ 10 bits after that and A in the highest 2 bits. */
+ NV_ENC_BUFFER_FORMAT_U8 = 0x40000000, /**< Buffer format representing one-dimensional buffer.
+ This format should be used only when registering the
+ resource as output buffer, which will be used to write
+ the encoded bit stream or H.264 ME only mode output. */
+} NV_ENC_BUFFER_FORMAT;
+
+#define NV_ENC_BUFFER_FORMAT_NV12_PL NV_ENC_BUFFER_FORMAT_NV12
+#define NV_ENC_BUFFER_FORMAT_YV12_PL NV_ENC_BUFFER_FORMAT_YV12
+#define NV_ENC_BUFFER_FORMAT_IYUV_PL NV_ENC_BUFFER_FORMAT_IYUV
+#define NV_ENC_BUFFER_FORMAT_YUV444_PL NV_ENC_BUFFER_FORMAT_YUV444
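+
+/*
+ * Illustrative sketch (not part of the NVENC API): how the word-ordered packed
+ * formats above map to a 32-bit value. For NV_ENC_BUFFER_FORMAT_ARGB, B sits
+ * in the lowest 8 bits and A in the highest 8 bits, so a pixel can be
+ * assembled as:
+ *
+ *   uint32_t argb = (uint32_t)b | ((uint32_t)g << 8) |
+ *                   ((uint32_t)r << 16) | ((uint32_t)a << 24);
+ *
+ * On a little-endian machine this word is stored as the byte sequence B, G, R, A.
+ */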
+
+/**
+ * Encoding levels
+ */
+typedef enum _NV_ENC_LEVEL
+{
+ NV_ENC_LEVEL_AUTOSELECT = 0,
+
+ NV_ENC_LEVEL_H264_1 = 10,
+ NV_ENC_LEVEL_H264_1b = 9,
+ NV_ENC_LEVEL_H264_11 = 11,
+ NV_ENC_LEVEL_H264_12 = 12,
+ NV_ENC_LEVEL_H264_13 = 13,
+ NV_ENC_LEVEL_H264_2 = 20,
+ NV_ENC_LEVEL_H264_21 = 21,
+ NV_ENC_LEVEL_H264_22 = 22,
+ NV_ENC_LEVEL_H264_3 = 30,
+ NV_ENC_LEVEL_H264_31 = 31,
+ NV_ENC_LEVEL_H264_32 = 32,
+ NV_ENC_LEVEL_H264_4 = 40,
+ NV_ENC_LEVEL_H264_41 = 41,
+ NV_ENC_LEVEL_H264_42 = 42,
+ NV_ENC_LEVEL_H264_5 = 50,
+ NV_ENC_LEVEL_H264_51 = 51,
+ NV_ENC_LEVEL_H264_52 = 52,
+ NV_ENC_LEVEL_H264_60 = 60,
+ NV_ENC_LEVEL_H264_61 = 61,
+ NV_ENC_LEVEL_H264_62 = 62,
+
+ NV_ENC_LEVEL_HEVC_1 = 30,
+ NV_ENC_LEVEL_HEVC_2 = 60,
+ NV_ENC_LEVEL_HEVC_21 = 63,
+ NV_ENC_LEVEL_HEVC_3 = 90,
+ NV_ENC_LEVEL_HEVC_31 = 93,
+ NV_ENC_LEVEL_HEVC_4 = 120,
+ NV_ENC_LEVEL_HEVC_41 = 123,
+ NV_ENC_LEVEL_HEVC_5 = 150,
+ NV_ENC_LEVEL_HEVC_51 = 153,
+ NV_ENC_LEVEL_HEVC_52 = 156,
+ NV_ENC_LEVEL_HEVC_6 = 180,
+ NV_ENC_LEVEL_HEVC_61 = 183,
+ NV_ENC_LEVEL_HEVC_62 = 186,
+
+ NV_ENC_TIER_HEVC_MAIN = 0,
+ NV_ENC_TIER_HEVC_HIGH = 1,
+
+ NV_ENC_LEVEL_AV1_2 = 0,
+ NV_ENC_LEVEL_AV1_21 = 1,
+ NV_ENC_LEVEL_AV1_22 = 2,
+ NV_ENC_LEVEL_AV1_23 = 3,
+ NV_ENC_LEVEL_AV1_3 = 4,
+ NV_ENC_LEVEL_AV1_31 = 5,
+ NV_ENC_LEVEL_AV1_32 = 6,
+ NV_ENC_LEVEL_AV1_33 = 7,
+ NV_ENC_LEVEL_AV1_4 = 8,
+ NV_ENC_LEVEL_AV1_41 = 9,
+ NV_ENC_LEVEL_AV1_42 = 10,
+ NV_ENC_LEVEL_AV1_43 = 11,
+ NV_ENC_LEVEL_AV1_5 = 12,
+ NV_ENC_LEVEL_AV1_51 = 13,
+ NV_ENC_LEVEL_AV1_52 = 14,
+ NV_ENC_LEVEL_AV1_53 = 15,
+ NV_ENC_LEVEL_AV1_6 = 16,
+ NV_ENC_LEVEL_AV1_61 = 17,
+ NV_ENC_LEVEL_AV1_62 = 18,
+ NV_ENC_LEVEL_AV1_63 = 19,
+ NV_ENC_LEVEL_AV1_7 = 20,
+ NV_ENC_LEVEL_AV1_71 = 21,
+ NV_ENC_LEVEL_AV1_72 = 22,
+ NV_ENC_LEVEL_AV1_73 = 23,
+    NV_ENC_LEVEL_AV1_AUTOSELECT,
+
+ NV_ENC_TIER_AV1_0 = 0,
+ NV_ENC_TIER_AV1_1 = 1
+} NV_ENC_LEVEL;
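+
+/*
+ * Note (editor's addition, derived from the values above): H.264 level values
+ * follow level_idc (level 4.1 -> 41, with level 1b = 9 as a special case),
+ * HEVC level values follow general_level_idc (30 x the level number, e.g.
+ * level 5.1 -> 153), and AV1 level values follow seq_level_idx
+ * (level 2.0 -> 0, 2.1 -> 1, ..., 7.3 -> 23).
+ */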
+
+/**
+ * Error Codes
+ */
+typedef enum _NVENCSTATUS
+{
+ /**
+ * This indicates that API call returned with no errors.
+ */
+ NV_ENC_SUCCESS,
+
+ /**
+ * This indicates that no encode capable devices were detected.
+ */
+ NV_ENC_ERR_NO_ENCODE_DEVICE,
+
+ /**
+     * This indicates that the device passed by the client is not supported.
+ */
+ NV_ENC_ERR_UNSUPPORTED_DEVICE,
+
+ /**
+ * This indicates that the encoder device supplied by the client is not
+ * valid.
+ */
+ NV_ENC_ERR_INVALID_ENCODERDEVICE,
+
+ /**
+     * This indicates that the device passed to the API call is invalid.
+ */
+ NV_ENC_ERR_INVALID_DEVICE,
+
+ /**
+     * This indicates that the device passed to the API call is no longer available and
+     * needs to be reinitialized. The client needs to destroy the current encoder
+     * session by freeing the allocated input/output buffers and destroying the device,
+     * and then create a new encoding session.
+ */
+ NV_ENC_ERR_DEVICE_NOT_EXIST,
+
+ /**
+ * This indicates that one or more of the pointers passed to the API call
+ * is invalid.
+ */
+ NV_ENC_ERR_INVALID_PTR,
+
+ /**
+     * This indicates that the completion event passed in the ::NvEncEncodePicture()
+     * call is invalid.
+ */
+ NV_ENC_ERR_INVALID_EVENT,
+
+ /**
+     * This indicates that one or more of the parameters passed to the API call
+     * are invalid.
+ */
+ NV_ENC_ERR_INVALID_PARAM,
+
+ /**
+ * This indicates that an API call was made in wrong sequence/order.
+ */
+ NV_ENC_ERR_INVALID_CALL,
+
+ /**
+ * This indicates that the API call failed because it was unable to allocate
+ * enough memory to perform the requested operation.
+ */
+ NV_ENC_ERR_OUT_OF_MEMORY,
+
+ /**
+ * This indicates that the encoder has not been initialized with
+ * ::NvEncInitializeEncoder() or that initialization has failed.
+ * The client cannot allocate input or output buffers or do any encoding
+ * related operation before successfully initializing the encoder.
+ */
+ NV_ENC_ERR_ENCODER_NOT_INITIALIZED,
+
+ /**
+ * This indicates that an unsupported parameter was passed by the client.
+ */
+ NV_ENC_ERR_UNSUPPORTED_PARAM,
+
+ /**
+ * This indicates that the ::NvEncLockBitstream() failed to lock the output
+     * buffer. This happens when the client makes a non-blocking lock call to
+     * access the output bitstream by passing the NV_ENC_LOCK_BITSTREAM::doNotWait flag.
+     * This is not a fatal error; the client should retry the same operation after
+     * a few milliseconds.
+ */
+ NV_ENC_ERR_LOCK_BUSY,
+
+ /**
+ * This indicates that the size of the user buffer passed by the client is
+ * insufficient for the requested operation.
+ */
+ NV_ENC_ERR_NOT_ENOUGH_BUFFER,
+
+ /**
+ * This indicates that an invalid struct version was used by the client.
+ */
+ NV_ENC_ERR_INVALID_VERSION,
+
+ /**
+ * This indicates that ::NvEncMapInputResource() API failed to map the client
+ * provided input resource.
+ */
+ NV_ENC_ERR_MAP_FAILED,
+
+ /**
+     * This indicates that the encode driver requires more input buffers to produce an output
+ * bitstream. If this error is returned from ::NvEncEncodePicture() API, this
+ * is not a fatal error. If the client is encoding with B frames then,
+ * ::NvEncEncodePicture() API might be buffering the input frame for re-ordering.
+ *
+ * A client operating in synchronous mode cannot call ::NvEncLockBitstream()
+ * API on the output bitstream buffer if ::NvEncEncodePicture() returned the
+ * ::NV_ENC_ERR_NEED_MORE_INPUT error code.
+ * The client must continue providing input frames until encode driver returns
+ * ::NV_ENC_SUCCESS. After receiving ::NV_ENC_SUCCESS status the client can call
+ * ::NvEncLockBitstream() API on the output buffers in the same order in which
+ * it has called ::NvEncEncodePicture().
+ */
+ NV_ENC_ERR_NEED_MORE_INPUT,
+
+ /**
+     * This indicates that the HW encoder is busy encoding and is unable to encode
+     * the input. The client should call ::NvEncEncodePicture() again after a few
+     * milliseconds.
+ */
+ NV_ENC_ERR_ENCODER_BUSY,
+
+ /**
+ * This indicates that the completion event passed in ::NvEncEncodePicture()
+ * API has not been registered with encoder driver using ::NvEncRegisterAsyncEvent().
+ */
+ NV_ENC_ERR_EVENT_NOT_REGISTERD,
+
+ /**
+ * This indicates that an unknown internal error has occurred.
+ */
+ NV_ENC_ERR_GENERIC,
+
+ /**
+ * This indicates that the client is attempting to use a feature
+ * that is not available for the license type for the current system.
+ */
+ NV_ENC_ERR_INCOMPATIBLE_CLIENT_KEY,
+
+ /**
+ * This indicates that the client is attempting to use a feature
+ * that is not implemented for the current version.
+ */
+ NV_ENC_ERR_UNIMPLEMENTED,
+
+ /**
+ * This indicates that the ::NvEncRegisterResource API failed to register the resource.
+ */
+ NV_ENC_ERR_RESOURCE_REGISTER_FAILED,
+
+ /**
+ * This indicates that the client is attempting to unregister a resource
+ * that has not been successfully registered.
+ */
+ NV_ENC_ERR_RESOURCE_NOT_REGISTERED,
+
+ /**
+ * This indicates that the client is attempting to unmap a resource
+ * that has not been successfully mapped.
+ */
+ NV_ENC_ERR_RESOURCE_NOT_MAPPED,
+
+} NVENCSTATUS;
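+
+/*
+ * Illustrative sketch (not part of the NVENC API): handling the non-fatal
+ * status codes documented above in a synchronous encode loop. `nvenc` is
+ * assumed to be a populated NV_ENCODE_API_FUNCTION_LIST and `encoder` an open
+ * session handle (both declared later in this header).
+ *
+ *   NVENCSTATUS status = nvenc.nvEncEncodePicture(encoder, &picParams);
+ *   if (status == NV_ENC_ERR_NEED_MORE_INPUT) {
+ *       // Not an error: keep submitting frames; once NV_ENC_SUCCESS is
+ *       // returned, lock the queued output buffers in submission order.
+ *   } else if (status != NV_ENC_SUCCESS) {
+ *       // NV_ENC_ERR_ENCODER_BUSY (and NV_ENC_ERR_LOCK_BUSY from
+ *       // nvEncLockBitstream) should be retried after a few milliseconds;
+ *       // anything else is treated as a real failure.
+ *   }
+ */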
+
+/**
+ * Encode Picture encode flags.
+ */
+typedef enum _NV_ENC_PIC_FLAGS
+{
+ NV_ENC_PIC_FLAG_FORCEINTRA = 0x1, /**< Encode the current picture as an Intra picture */
+ NV_ENC_PIC_FLAG_FORCEIDR = 0x2, /**< Encode the current picture as an IDR picture.
+ This flag is only valid when Picture type decision is taken by the Encoder
+ [_NV_ENC_INITIALIZE_PARAMS::enablePTD == 1]. */
+ NV_ENC_PIC_FLAG_OUTPUT_SPSPPS = 0x4, /**< Write the sequence and picture header in encoded bitstream of the current picture */
+ NV_ENC_PIC_FLAG_EOS = 0x8, /**< Indicates end of the input stream */
+} NV_ENC_PIC_FLAGS;
+
+/**
+ * Memory heap to allocate input and output buffers.
+ */
+typedef enum _NV_ENC_MEMORY_HEAP
+{
+ NV_ENC_MEMORY_HEAP_AUTOSELECT = 0, /**< Memory heap to be decided by the encoder driver based on the usage */
+ NV_ENC_MEMORY_HEAP_VID = 1, /**< Memory heap is in local video memory */
+ NV_ENC_MEMORY_HEAP_SYSMEM_CACHED = 2, /**< Memory heap is in cached system memory */
+ NV_ENC_MEMORY_HEAP_SYSMEM_UNCACHED = 3 /**< Memory heap is in uncached system memory */
+} NV_ENC_MEMORY_HEAP;
+
+/**
+ * B-frame used as reference modes
+ */
+typedef enum _NV_ENC_BFRAME_REF_MODE
+{
+ NV_ENC_BFRAME_REF_MODE_DISABLED = 0x0, /**< B frame is not used for reference */
+ NV_ENC_BFRAME_REF_MODE_EACH = 0x1, /**< Each B-frame will be used for reference */
+    NV_ENC_BFRAME_REF_MODE_MIDDLE    = 0x2,          /**< Only the ((number of B-frames)/2)-th B-frame will be used for reference */
+} NV_ENC_BFRAME_REF_MODE;
+
+/**
+ * H.264 entropy coding modes.
+ */
+typedef enum _NV_ENC_H264_ENTROPY_CODING_MODE
+{
+ NV_ENC_H264_ENTROPY_CODING_MODE_AUTOSELECT = 0x0, /**< Entropy coding mode is auto selected by the encoder driver */
+ NV_ENC_H264_ENTROPY_CODING_MODE_CABAC = 0x1, /**< Entropy coding mode is CABAC */
+ NV_ENC_H264_ENTROPY_CODING_MODE_CAVLC = 0x2 /**< Entropy coding mode is CAVLC */
+} NV_ENC_H264_ENTROPY_CODING_MODE;
+
+/**
+ * H.264 specific BDirect modes
+ */
+typedef enum _NV_ENC_H264_BDIRECT_MODE
+{
+ NV_ENC_H264_BDIRECT_MODE_AUTOSELECT = 0x0, /**< BDirect mode is auto selected by the encoder driver */
+ NV_ENC_H264_BDIRECT_MODE_DISABLE = 0x1, /**< Disable BDirect mode */
+ NV_ENC_H264_BDIRECT_MODE_TEMPORAL = 0x2, /**< Temporal BDirect mode */
+ NV_ENC_H264_BDIRECT_MODE_SPATIAL = 0x3 /**< Spatial BDirect mode */
+} NV_ENC_H264_BDIRECT_MODE;
+
+/**
+ * H.264 specific FMO usage
+ */
+typedef enum _NV_ENC_H264_FMO_MODE
+{
+ NV_ENC_H264_FMO_AUTOSELECT = 0x0, /**< FMO usage is auto selected by the encoder driver */
+ NV_ENC_H264_FMO_ENABLE = 0x1, /**< Enable FMO */
+ NV_ENC_H264_FMO_DISABLE = 0x2, /**< Disable FMO */
+} NV_ENC_H264_FMO_MODE;
+
+/**
+ * H.264 specific Adaptive Transform modes
+ */
+typedef enum _NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE
+{
+ NV_ENC_H264_ADAPTIVE_TRANSFORM_AUTOSELECT = 0x0, /**< Adaptive Transform 8x8 mode is auto selected by the encoder driver*/
+ NV_ENC_H264_ADAPTIVE_TRANSFORM_DISABLE = 0x1, /**< Adaptive Transform 8x8 mode disabled */
+ NV_ENC_H264_ADAPTIVE_TRANSFORM_ENABLE = 0x2, /**< Adaptive Transform 8x8 mode should be used */
+} NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE;
+
+/**
+ * Stereo frame packing modes.
+ */
+typedef enum _NV_ENC_STEREO_PACKING_MODE
+{
+ NV_ENC_STEREO_PACKING_MODE_NONE = 0x0, /**< No Stereo packing required */
+ NV_ENC_STEREO_PACKING_MODE_CHECKERBOARD = 0x1, /**< Checkerboard mode for packing stereo frames */
+ NV_ENC_STEREO_PACKING_MODE_COLINTERLEAVE = 0x2, /**< Column Interleave mode for packing stereo frames */
+ NV_ENC_STEREO_PACKING_MODE_ROWINTERLEAVE = 0x3, /**< Row Interleave mode for packing stereo frames */
+ NV_ENC_STEREO_PACKING_MODE_SIDEBYSIDE = 0x4, /**< Side-by-side mode for packing stereo frames */
+ NV_ENC_STEREO_PACKING_MODE_TOPBOTTOM = 0x5, /**< Top-Bottom mode for packing stereo frames */
+ NV_ENC_STEREO_PACKING_MODE_FRAMESEQ = 0x6 /**< Frame Sequential mode for packing stereo frames */
+} NV_ENC_STEREO_PACKING_MODE;
+
+/**
+ * Input Resource type
+ */
+typedef enum _NV_ENC_INPUT_RESOURCE_TYPE
+{
+ NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX = 0x0, /**< input resource type is a directx9 surface*/
+ NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR = 0x1, /**< input resource type is a cuda device pointer surface*/
+ NV_ENC_INPUT_RESOURCE_TYPE_CUDAARRAY = 0x2, /**< input resource type is a cuda array surface.
+ This array must be a 2D array and the CUDA_ARRAY3D_SURFACE_LDST
+ flag must have been specified when creating it. */
+ NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX = 0x3 /**< input resource type is an OpenGL texture */
+} NV_ENC_INPUT_RESOURCE_TYPE;
+
+/**
+ * Buffer usage
+ */
+typedef enum _NV_ENC_BUFFER_USAGE
+{
+ NV_ENC_INPUT_IMAGE = 0x0, /**< Registered surface will be used for input image */
+ NV_ENC_OUTPUT_MOTION_VECTOR = 0x1, /**< Registered surface will be used for output of H.264 ME only mode.
+ This buffer usage type is not supported for HEVC ME only mode. */
+ NV_ENC_OUTPUT_BITSTREAM = 0x2, /**< Registered surface will be used for output bitstream in encoding */
+} NV_ENC_BUFFER_USAGE;
+
+/**
+ * Encoder Device type
+ */
+typedef enum _NV_ENC_DEVICE_TYPE
+{
+ NV_ENC_DEVICE_TYPE_DIRECTX = 0x0, /**< encode device type is a directx9 device */
+ NV_ENC_DEVICE_TYPE_CUDA = 0x1, /**< encode device type is a cuda device */
+ NV_ENC_DEVICE_TYPE_OPENGL = 0x2 /**< encode device type is an OpenGL device.
+ Use of this device type is supported only on Linux */
+} NV_ENC_DEVICE_TYPE;
+
+/**
+ * Number of reference frames
+ */
+typedef enum _NV_ENC_NUM_REF_FRAMES
+{
+ NV_ENC_NUM_REF_FRAMES_AUTOSELECT = 0x0, /**< Number of reference frames is auto selected by the encoder driver */
+ NV_ENC_NUM_REF_FRAMES_1 = 0x1, /**< Number of reference frames equal to 1 */
+ NV_ENC_NUM_REF_FRAMES_2 = 0x2, /**< Number of reference frames equal to 2 */
+ NV_ENC_NUM_REF_FRAMES_3 = 0x3, /**< Number of reference frames equal to 3 */
+ NV_ENC_NUM_REF_FRAMES_4 = 0x4, /**< Number of reference frames equal to 4 */
+ NV_ENC_NUM_REF_FRAMES_5 = 0x5, /**< Number of reference frames equal to 5 */
+ NV_ENC_NUM_REF_FRAMES_6 = 0x6, /**< Number of reference frames equal to 6 */
+ NV_ENC_NUM_REF_FRAMES_7 = 0x7 /**< Number of reference frames equal to 7 */
+} NV_ENC_NUM_REF_FRAMES;
+
+/**
+ * Encoder capabilities enumeration.
+ */
+typedef enum _NV_ENC_CAPS
+{
+ /**
+ * Maximum number of B-Frames supported.
+ */
+ NV_ENC_CAPS_NUM_MAX_BFRAMES,
+
+ /**
+ * Rate control modes supported.
+ * \n The API return value is a bitmask of the values in NV_ENC_PARAMS_RC_MODE.
+ */
+ NV_ENC_CAPS_SUPPORTED_RATECONTROL_MODES,
+
+ /**
+ * Indicates HW support for field mode encoding.
+ * \n 0 : Interlaced mode encoding is not supported.
+ * \n 1 : Interlaced field mode encoding is supported.
+ * \n 2 : Interlaced frame encoding and field mode encoding are both supported.
+ */
+ NV_ENC_CAPS_SUPPORT_FIELD_ENCODING,
+
+ /**
+ * Indicates HW support for monochrome mode encoding.
+ * \n 0 : Monochrome mode not supported.
+ * \n 1 : Monochrome mode supported.
+ */
+ NV_ENC_CAPS_SUPPORT_MONOCHROME,
+
+ /**
+ * Indicates HW support for FMO.
+ * \n 0 : FMO not supported.
+ * \n 1 : FMO supported.
+ */
+ NV_ENC_CAPS_SUPPORT_FMO,
+
+ /**
+ * Indicates HW capability for Quarter pel motion estimation.
+ * \n 0 : Quarter-Pel Motion Estimation not supported.
+ * \n 1 : Quarter-Pel Motion Estimation supported.
+ */
+ NV_ENC_CAPS_SUPPORT_QPELMV,
+
+ /**
+ * H.264 specific. Indicates HW support for BDirect modes.
+ * \n 0 : BDirect mode encoding not supported.
+ * \n 1 : BDirect mode encoding supported.
+ */
+ NV_ENC_CAPS_SUPPORT_BDIRECT_MODE,
+
+ /**
+ * H264 specific. Indicates HW support for CABAC entropy coding mode.
+ * \n 0 : CABAC entropy coding not supported.
+ * \n 1 : CABAC entropy coding supported.
+ */
+ NV_ENC_CAPS_SUPPORT_CABAC,
+
+ /**
+ * Indicates HW support for Adaptive Transform.
+ * \n 0 : Adaptive Transform not supported.
+ * \n 1 : Adaptive Transform supported.
+ */
+ NV_ENC_CAPS_SUPPORT_ADAPTIVE_TRANSFORM,
+
+ /**
+ * Indicates HW support for Multi View Coding.
+ * \n 0 : Multi View Coding not supported.
+ * \n 1 : Multi View Coding supported.
+ */
+ NV_ENC_CAPS_SUPPORT_STEREO_MVC,
+
+ /**
+ * Indicates HW support for encoding Temporal layers.
+ * \n 0 : Encoding Temporal layers not supported.
+ * \n 1 : Encoding Temporal layers supported.
+ */
+ NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS,
+
+ /**
+ * Indicates HW support for Hierarchical P frames.
+ * \n 0 : Hierarchical P frames not supported.
+ * \n 1 : Hierarchical P frames supported.
+ */
+ NV_ENC_CAPS_SUPPORT_HIERARCHICAL_PFRAMES,
+
+ /**
+ * Indicates HW support for Hierarchical B frames.
+ * \n 0 : Hierarchical B frames not supported.
+ * \n 1 : Hierarchical B frames supported.
+ */
+ NV_ENC_CAPS_SUPPORT_HIERARCHICAL_BFRAMES,
+
+ /**
+ * Maximum Encoding level supported (See ::NV_ENC_LEVEL for details).
+ */
+ NV_ENC_CAPS_LEVEL_MAX,
+
+ /**
+ * Minimum Encoding level supported (See ::NV_ENC_LEVEL for details).
+ */
+ NV_ENC_CAPS_LEVEL_MIN,
+
+ /**
+ * Indicates HW support for separate colour plane encoding.
+ * \n 0 : Separate colour plane encoding not supported.
+ * \n 1 : Separate colour plane encoding supported.
+ */
+ NV_ENC_CAPS_SEPARATE_COLOUR_PLANE,
+
+ /**
+ * Maximum output width supported.
+ */
+ NV_ENC_CAPS_WIDTH_MAX,
+
+ /**
+ * Maximum output height supported.
+ */
+ NV_ENC_CAPS_HEIGHT_MAX,
+
+ /**
+ * Indicates Temporal Scalability Support.
+ * \n 0 : Temporal SVC encoding not supported.
+ * \n 1 : Temporal SVC encoding supported.
+ */
+ NV_ENC_CAPS_SUPPORT_TEMPORAL_SVC,
+
+ /**
+ * Indicates Dynamic Encode Resolution Change Support.
+ * Support added from NvEncodeAPI version 2.0.
+ * \n 0 : Dynamic Encode Resolution Change not supported.
+ * \n 1 : Dynamic Encode Resolution Change supported.
+ */
+ NV_ENC_CAPS_SUPPORT_DYN_RES_CHANGE,
+
+ /**
+ * Indicates Dynamic Encode Bitrate Change Support.
+ * Support added from NvEncodeAPI version 2.0.
+ * \n 0 : Dynamic Encode bitrate change not supported.
+ * \n 1 : Dynamic Encode bitrate change supported.
+ */
+ NV_ENC_CAPS_SUPPORT_DYN_BITRATE_CHANGE,
+
+ /**
+ * Indicates Forcing Constant QP On The Fly Support.
+ * Support added from NvEncodeAPI version 2.0.
+ * \n 0 : Forcing constant QP on the fly not supported.
+ * \n 1 : Forcing constant QP on the fly supported.
+ */
+ NV_ENC_CAPS_SUPPORT_DYN_FORCE_CONSTQP,
+
+ /**
+ * Indicates Dynamic rate control mode Change Support.
+ * \n 0 : Dynamic rate control mode change not supported.
+ * \n 1 : Dynamic rate control mode change supported.
+ */
+ NV_ENC_CAPS_SUPPORT_DYN_RCMODE_CHANGE,
+
+ /**
+ * Indicates Subframe readback support for slice-based encoding. If this feature is supported, it can be enabled by setting enableSubFrameWrite = 1.
+ * \n 0 : Subframe readback not supported.
+ * \n 1 : Subframe readback supported.
+ */
+ NV_ENC_CAPS_SUPPORT_SUBFRAME_READBACK,
+
+ /**
+ * Indicates Constrained Encoding mode support.
+ * Support added from NvEncodeAPI version 2.0.
+ * \n 0 : Constrained encoding mode not supported.
+ * \n 1 : Constrained encoding mode supported.
+     * If this mode is supported, the client can enable it during initialization.
+ * Client can then force a picture to be coded as constrained picture where
+ * in-loop filtering is disabled across slice boundaries and prediction vectors for inter
+ * macroblocks in each slice will be restricted to the slice region.
+ */
+ NV_ENC_CAPS_SUPPORT_CONSTRAINED_ENCODING,
+
+ /**
+ * Indicates Intra Refresh Mode Support.
+ * Support added from NvEncodeAPI version 2.0.
+ * \n 0 : Intra Refresh Mode not supported.
+ * \n 1 : Intra Refresh Mode supported.
+ */
+ NV_ENC_CAPS_SUPPORT_INTRA_REFRESH,
+
+ /**
+ * Indicates Custom VBV Buffer Size support. It can be used for capping frame size.
+ * Support added from NvEncodeAPI version 2.0.
+ * \n 0 : Custom VBV buffer size specification from client, not supported.
+ * \n 1 : Custom VBV buffer size specification from client, supported.
+ */
+ NV_ENC_CAPS_SUPPORT_CUSTOM_VBV_BUF_SIZE,
+
+ /**
+ * Indicates Dynamic Slice Mode Support.
+ * Support added from NvEncodeAPI version 2.0.
+ * \n 0 : Dynamic Slice Mode not supported.
+ * \n 1 : Dynamic Slice Mode supported.
+ */
+ NV_ENC_CAPS_SUPPORT_DYNAMIC_SLICE_MODE,
+
+ /**
+ * Indicates Reference Picture Invalidation Support.
+ * Support added from NvEncodeAPI version 2.0.
+ * \n 0 : Reference Picture Invalidation not supported.
+ * \n 1 : Reference Picture Invalidation supported.
+ */
+ NV_ENC_CAPS_SUPPORT_REF_PIC_INVALIDATION,
+
+ /**
+ * Indicates support for Pre-Processing.
+ * The API return value is a bitmask of the values defined in ::NV_ENC_PREPROC_FLAGS
+ */
+ NV_ENC_CAPS_PREPROC_SUPPORT,
+
+ /**
+     * Indicates support for Async encode mode.
+ * \n 0 : Async Encode mode not supported.
+ * \n 1 : Async Encode mode supported.
+ */
+ NV_ENC_CAPS_ASYNC_ENCODE_SUPPORT,
+
+ /**
+ * Maximum MBs per frame supported.
+ */
+ NV_ENC_CAPS_MB_NUM_MAX,
+
+ /**
+ * Maximum aggregate throughput in MBs per sec.
+ */
+ NV_ENC_CAPS_MB_PER_SEC_MAX,
+
+ /**
+ * Indicates HW support for YUV444 mode encoding.
+ * \n 0 : YUV444 mode encoding not supported.
+ * \n 1 : YUV444 mode encoding supported.
+ */
+ NV_ENC_CAPS_SUPPORT_YUV444_ENCODE,
+
+ /**
+ * Indicates HW support for lossless encoding.
+ * \n 0 : lossless encoding not supported.
+ * \n 1 : lossless encoding supported.
+ */
+ NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE,
+
+ /**
+ * Indicates HW support for Sample Adaptive Offset.
+ * \n 0 : SAO not supported.
+ * \n 1 : SAO encoding supported.
+ */
+ NV_ENC_CAPS_SUPPORT_SAO,
+
+ /**
+ * Indicates HW support for Motion Estimation Only Mode.
+ * \n 0 : MEOnly Mode not supported.
+ * \n 1 : MEOnly Mode supported for I and P frames.
+ * \n 2 : MEOnly Mode supported for I, P and B frames.
+ */
+ NV_ENC_CAPS_SUPPORT_MEONLY_MODE,
+
+ /**
+ * Indicates HW support for lookahead encoding (enableLookahead=1).
+ * \n 0 : Lookahead not supported.
+ * \n 1 : Lookahead supported.
+ */
+ NV_ENC_CAPS_SUPPORT_LOOKAHEAD,
+
+ /**
+ * Indicates HW support for temporal AQ encoding (enableTemporalAQ=1).
+ * \n 0 : Temporal AQ not supported.
+ * \n 1 : Temporal AQ supported.
+ */
+ NV_ENC_CAPS_SUPPORT_TEMPORAL_AQ,
+ /**
+ * Indicates HW support for 10 bit encoding.
+ * \n 0 : 10 bit encoding not supported.
+ * \n 1 : 10 bit encoding supported.
+ */
+ NV_ENC_CAPS_SUPPORT_10BIT_ENCODE,
+ /**
+ * Maximum number of Long Term Reference frames supported
+ */
+ NV_ENC_CAPS_NUM_MAX_LTR_FRAMES,
+
+ /**
+ * Indicates HW support for Weighted Prediction.
+ * \n 0 : Weighted Prediction not supported.
+ * \n 1 : Weighted Prediction supported.
+ */
+ NV_ENC_CAPS_SUPPORT_WEIGHTED_PREDICTION,
+
+
+ /**
+ * On managed (vGPU) platforms (Windows only), this API, in conjunction with other GRID Management APIs, can be used
+ * to estimate the residual capacity of the hardware encoder on the GPU as a percentage of the total available encoder capacity.
+ * This API can be called at any time; i.e. during the encode session or before opening the encode session.
+ * If the available encoder capacity is returned as zero, applications may choose to switch to software encoding
+ * and continue to call this API (e.g. polling once per second) until capacity becomes available.
+ *
+     * On bare metal (non-virtualized GPU) and Linux platforms, this API always returns 100.
+ */
+ NV_ENC_CAPS_DYNAMIC_QUERY_ENCODER_CAPACITY,
+
+ /**
+ * Indicates B as reference support.
+ * \n 0 : B as reference is not supported.
+     * \n 1 : Each B-frame as reference is supported.
+     * \n 2 : Only the middle B-frame as reference is supported.
+ */
+ NV_ENC_CAPS_SUPPORT_BFRAME_REF_MODE,
+
+ /**
+ * Indicates HW support for Emphasis Level Map based delta QP computation.
+ * \n 0 : Emphasis Level Map based delta QP not supported.
+ * \n 1 : Emphasis Level Map based delta QP is supported.
+ */
+ NV_ENC_CAPS_SUPPORT_EMPHASIS_LEVEL_MAP,
+
+ /**
+ * Minimum input width supported.
+ */
+ NV_ENC_CAPS_WIDTH_MIN,
+
+ /**
+ * Minimum input height supported.
+ */
+ NV_ENC_CAPS_HEIGHT_MIN,
+
+ /**
+ * Indicates HW support for multiple reference frames.
+ */
+ NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES,
+
+ /**
+ * Indicates HW support for HEVC with alpha encoding.
+ * \n 0 : HEVC with alpha encoding not supported.
+ * \n 1 : HEVC with alpha encoding is supported.
+ */
+ NV_ENC_CAPS_SUPPORT_ALPHA_LAYER_ENCODING,
+
+ /**
+ * Indicates number of Encoding engines present on GPU.
+ */
+ NV_ENC_CAPS_NUM_ENCODER_ENGINES,
+
+ /**
+ * Indicates single slice intra refresh support.
+ */
+ NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH,
+
+ /**
+ * Reserved - Not to be used by clients.
+ */
+ NV_ENC_CAPS_EXPOSED_COUNT
+
+} NV_ENC_CAPS;
+
+/**
+ * HEVC CU SIZE
+ */
+typedef enum _NV_ENC_HEVC_CUSIZE
+{
+ NV_ENC_HEVC_CUSIZE_AUTOSELECT = 0,
+ NV_ENC_HEVC_CUSIZE_8x8 = 1,
+ NV_ENC_HEVC_CUSIZE_16x16 = 2,
+ NV_ENC_HEVC_CUSIZE_32x32 = 3,
+ NV_ENC_HEVC_CUSIZE_64x64 = 4,
+}NV_ENC_HEVC_CUSIZE;
+
+/**
+* AV1 PART SIZE
+*/
+typedef enum _NV_ENC_AV1_PART_SIZE
+{
+ NV_ENC_AV1_PART_SIZE_AUTOSELECT = 0,
+ NV_ENC_AV1_PART_SIZE_4x4 = 1,
+ NV_ENC_AV1_PART_SIZE_8x8 = 2,
+ NV_ENC_AV1_PART_SIZE_16x16 = 3,
+ NV_ENC_AV1_PART_SIZE_32x32 = 4,
+ NV_ENC_AV1_PART_SIZE_64x64 = 5,
+}NV_ENC_AV1_PART_SIZE;
+
+/**
+* Enums related to fields in VUI parameters.
+*/
+typedef enum _NV_ENC_VUI_VIDEO_FORMAT
+{
+ NV_ENC_VUI_VIDEO_FORMAT_COMPONENT = 0,
+ NV_ENC_VUI_VIDEO_FORMAT_PAL = 1,
+ NV_ENC_VUI_VIDEO_FORMAT_NTSC = 2,
+ NV_ENC_VUI_VIDEO_FORMAT_SECAM = 3,
+ NV_ENC_VUI_VIDEO_FORMAT_MAC = 4,
+ NV_ENC_VUI_VIDEO_FORMAT_UNSPECIFIED = 5,
+}NV_ENC_VUI_VIDEO_FORMAT;
+
+typedef enum _NV_ENC_VUI_COLOR_PRIMARIES
+{
+ NV_ENC_VUI_COLOR_PRIMARIES_UNDEFINED = 0,
+ NV_ENC_VUI_COLOR_PRIMARIES_BT709 = 1,
+ NV_ENC_VUI_COLOR_PRIMARIES_UNSPECIFIED = 2,
+ NV_ENC_VUI_COLOR_PRIMARIES_RESERVED = 3,
+ NV_ENC_VUI_COLOR_PRIMARIES_BT470M = 4,
+ NV_ENC_VUI_COLOR_PRIMARIES_BT470BG = 5,
+ NV_ENC_VUI_COLOR_PRIMARIES_SMPTE170M = 6,
+ NV_ENC_VUI_COLOR_PRIMARIES_SMPTE240M = 7,
+ NV_ENC_VUI_COLOR_PRIMARIES_FILM = 8,
+ NV_ENC_VUI_COLOR_PRIMARIES_BT2020 = 9,
+ NV_ENC_VUI_COLOR_PRIMARIES_SMPTE428 = 10,
+ NV_ENC_VUI_COLOR_PRIMARIES_SMPTE431 = 11,
+ NV_ENC_VUI_COLOR_PRIMARIES_SMPTE432 = 12,
+ NV_ENC_VUI_COLOR_PRIMARIES_JEDEC_P22 = 22,
+}NV_ENC_VUI_COLOR_PRIMARIES;
+
+typedef enum _NV_ENC_VUI_TRANSFER_CHARACTERISTIC
+{
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_UNDEFINED = 0,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT709 = 1,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_UNSPECIFIED = 2,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_RESERVED = 3,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT470M = 4,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT470BG = 5,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SMPTE170M = 6,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SMPTE240M = 7,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_LINEAR = 8,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_LOG = 9,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_LOG_SQRT = 10,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_IEC61966_2_4 = 11,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT1361_ECG = 12,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SRGB = 13,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT2020_10 = 14,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT2020_12 = 15,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SMPTE2084 = 16,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SMPTE428 = 17,
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC_ARIB_STD_B67 = 18,
+}NV_ENC_VUI_TRANSFER_CHARACTERISTIC;
+
+typedef enum _NV_ENC_VUI_MATRIX_COEFFS
+{
+ NV_ENC_VUI_MATRIX_COEFFS_RGB = 0,
+ NV_ENC_VUI_MATRIX_COEFFS_BT709 = 1,
+ NV_ENC_VUI_MATRIX_COEFFS_UNSPECIFIED = 2,
+ NV_ENC_VUI_MATRIX_COEFFS_RESERVED = 3,
+ NV_ENC_VUI_MATRIX_COEFFS_FCC = 4,
+ NV_ENC_VUI_MATRIX_COEFFS_BT470BG = 5,
+ NV_ENC_VUI_MATRIX_COEFFS_SMPTE170M = 6,
+ NV_ENC_VUI_MATRIX_COEFFS_SMPTE240M = 7,
+ NV_ENC_VUI_MATRIX_COEFFS_YCGCO = 8,
+ NV_ENC_VUI_MATRIX_COEFFS_BT2020_NCL = 9,
+ NV_ENC_VUI_MATRIX_COEFFS_BT2020_CL = 10,
+ NV_ENC_VUI_MATRIX_COEFFS_SMPTE2085 = 11,
+}NV_ENC_VUI_MATRIX_COEFFS;
+
+/**
+ * Input struct for querying Encoding capabilities.
+ */
+typedef struct _NV_ENC_CAPS_PARAM
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CAPS_PARAM_VER */
+    NV_ENC_CAPS  capsToQuery;       /**< [in]: Specifies the encode capability to be queried. Client should pass a member of the ::NV_ENC_CAPS enum. */
+ uint32_t reserved[62]; /**< [in]: Reserved and must be set to 0 */
+} NV_ENC_CAPS_PARAM;
+
+/** NV_ENC_CAPS_PARAM struct version. */
+#define NV_ENC_CAPS_PARAM_VER NVENCAPI_STRUCT_VERSION(1)
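+
+/*
+ * Illustrative sketch (not part of the NVENC API): querying a single
+ * capability with the struct above, assuming `nvenc` and `encoder` as in the
+ * earlier sketch. NV_ENC_CODEC_H264_GUID is defined elsewhere in this header.
+ *
+ *   NV_ENC_CAPS_PARAM capsParam = { 0 };
+ *   int maxBFrames = 0;
+ *   capsParam.version = NV_ENC_CAPS_PARAM_VER;
+ *   capsParam.capsToQuery = NV_ENC_CAPS_NUM_MAX_BFRAMES;
+ *   nvenc.nvEncGetEncodeCaps(encoder, NV_ENC_CODEC_H264_GUID,
+ *                            &capsParam, &maxBFrames);
+ */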
+
+
+/**
+ * Encoder Output parameters
+ */
+typedef struct _NV_ENC_ENCODE_OUT_PARAMS
+{
+ uint32_t version; /**< [out]: Struct version. */
+ uint32_t bitstreamSizeInBytes; /**< [out]: Encoded bitstream size in bytes */
+ uint32_t reserved[62]; /**< [out]: Reserved and must be set to 0 */
+} NV_ENC_ENCODE_OUT_PARAMS;
+
+/** NV_ENC_ENCODE_OUT_PARAMS struct version. */
+#define NV_ENC_ENCODE_OUT_PARAMS_VER NVENCAPI_STRUCT_VERSION(1)
+
+/**
+ * Creation parameters for input buffer.
+ */
+typedef struct _NV_ENC_CREATE_INPUT_BUFFER
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CREATE_INPUT_BUFFER_VER */
+ uint32_t width; /**< [in]: Input frame width */
+ uint32_t height; /**< [in]: Input frame height */
+ NV_ENC_MEMORY_HEAP memoryHeap; /**< [in]: Deprecated. Do not use */
+ NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Input buffer format */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0 */
+ NV_ENC_INPUT_PTR inputBuffer; /**< [out]: Pointer to input buffer */
+ void* pSysMemBuffer; /**< [in]: Pointer to existing system memory buffer */
+ uint32_t reserved1[57]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[63]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CREATE_INPUT_BUFFER;
+
+/** NV_ENC_CREATE_INPUT_BUFFER struct version. */
+#define NV_ENC_CREATE_INPUT_BUFFER_VER NVENCAPI_STRUCT_VERSION(1)
+
+/**
+ * Creation parameters for output bitstream buffer.
+ */
+typedef struct _NV_ENC_CREATE_BITSTREAM_BUFFER
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CREATE_BITSTREAM_BUFFER_VER */
+ uint32_t size; /**< [in]: Deprecated. Do not use */
+ NV_ENC_MEMORY_HEAP memoryHeap; /**< [in]: Deprecated. Do not use */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0 */
+ NV_ENC_OUTPUT_PTR bitstreamBuffer; /**< [out]: Pointer to the output bitstream buffer */
+ void* bitstreamBufferPtr; /**< [out]: Reserved and should not be used */
+ uint32_t reserved1[58]; /**< [in]: Reserved and should be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and should be set to NULL */
+} NV_ENC_CREATE_BITSTREAM_BUFFER;
+
+/** NV_ENC_CREATE_BITSTREAM_BUFFER struct version. */
+#define NV_ENC_CREATE_BITSTREAM_BUFFER_VER NVENCAPI_STRUCT_VERSION(1)
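+
+/*
+ * Illustrative sketch (not part of the NVENC API): creating an output
+ * bitstream buffer with the struct above, assuming `nvenc` and `encoder` as
+ * in the earlier sketches.
+ *
+ *   NV_ENC_CREATE_BITSTREAM_BUFFER createParams = { 0 };
+ *   createParams.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
+ *   if (nvenc.nvEncCreateBitstreamBuffer(encoder, &createParams) == NV_ENC_SUCCESS) {
+ *       NV_ENC_OUTPUT_PTR bitstream = createParams.bitstreamBuffer;
+ *       // pass `bitstream` as the output buffer when encoding
+ *   }
+ */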
+
+/**
+ * Structs needed for ME only mode.
+ */
+typedef struct _NV_ENC_MVECTOR
+{
+ int16_t mvx; /**< the x component of MV in quarter-pel units */
+ int16_t mvy; /**< the y component of MV in quarter-pel units */
+} NV_ENC_MVECTOR;
+
+/**
+ * Motion vector structure per macroblock for H264 motion estimation.
+ */
+typedef struct _NV_ENC_H264_MV_DATA
+{
+ NV_ENC_MVECTOR mv[4]; /**< up to 4 vectors for 8x8 partition */
+ uint8_t mbType; /**< 0 (I), 1 (P), 2 (IPCM), 3 (B) */
+ uint8_t partitionType; /**< Specifies the block partition type. 0:16x16, 1:8x8, 2:16x8, 3:8x16 */
+ uint16_t reserved; /**< reserved padding for alignment */
+    uint32_t       mbCost;            /**< Motion search cost of the macroblock (undocumented upstream; meaning inferred from the field name) */
+} NV_ENC_H264_MV_DATA;
+
+/**
+ * Motion vector structure per CU for HEVC motion estimation.
+ */
+typedef struct _NV_ENC_HEVC_MV_DATA
+{
+ NV_ENC_MVECTOR mv[4]; /**< up to 4 vectors within a CU */
+ uint8_t cuType; /**< 0 (I), 1(P) */
+ uint8_t cuSize; /**< 0: 8x8, 1: 16x16, 2: 32x32, 3: 64x64 */
+ uint8_t partitionMode; /**< The CU partition mode
+ 0 (2Nx2N), 1 (2NxN), 2(Nx2N), 3 (NxN),
+ 4 (2NxnU), 5 (2NxnD), 6(nLx2N), 7 (nRx2N) */
+ uint8_t lastCUInCTB; /**< Marker to separate CUs in the current CTB from CUs in the next CTB */
+} NV_ENC_HEVC_MV_DATA;
+
+/**
+ * Creation parameters for output motion vector buffer for ME only mode.
+ */
+typedef struct _NV_ENC_CREATE_MV_BUFFER
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to NV_ENC_CREATE_MV_BUFFER_VER */
+ NV_ENC_OUTPUT_PTR mvBuffer; /**< [out]: Pointer to the output motion vector buffer */
+ uint32_t reserved1[255]; /**< [in]: Reserved and should be set to 0 */
+ void* reserved2[63]; /**< [in]: Reserved and should be set to NULL */
+} NV_ENC_CREATE_MV_BUFFER;
+
+/** NV_ENC_CREATE_MV_BUFFER struct version*/
+#define NV_ENC_CREATE_MV_BUFFER_VER NVENCAPI_STRUCT_VERSION(1)
+
+/**
+ * QP value for frames
+ */
+typedef struct _NV_ENC_QP
+{
+ uint32_t qpInterP; /**< [in]: Specifies QP value for P-frame. Even though this field is uint32_t for legacy reasons, the client should treat this as a signed parameter(int32_t) for cases in which negative QP values are to be specified. */
+ uint32_t qpInterB; /**< [in]: Specifies QP value for B-frame. Even though this field is uint32_t for legacy reasons, the client should treat this as a signed parameter(int32_t) for cases in which negative QP values are to be specified. */
+ uint32_t qpIntra; /**< [in]: Specifies QP value for Intra Frame. Even though this field is uint32_t for legacy reasons, the client should treat this as a signed parameter(int32_t) for cases in which negative QP values are to be specified. */
+} NV_ENC_QP;
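+
+/*
+ * Illustrative sketch (not part of the NVENC API): storing a negative QP in
+ * the legacy uint32_t fields above, treating them as signed as the comments
+ * recommend.
+ *
+ *   NV_ENC_QP qp;
+ *   qp.qpIntra  = (uint32_t)(int32_t)-2;  // read back as (int32_t)qp.qpIntra
+ *   qp.qpInterP = (uint32_t)(int32_t)-2;
+ *   qp.qpInterB = (uint32_t)(int32_t)-2;
+ */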
+
+/**
+ * Rate Control Configuration Parameters
+ */
+ typedef struct _NV_ENC_RC_PARAMS
+ {
+ uint32_t version;
+ NV_ENC_PARAMS_RC_MODE rateControlMode; /**< [in]: Specifies the rate control mode. Check support for various rate control modes using ::NV_ENC_CAPS_SUPPORTED_RATECONTROL_MODES caps. */
+ NV_ENC_QP constQP; /**< [in]: Specifies the initial QP to be used for encoding, these values would be used for all frames if in Constant QP mode. */
+ uint32_t averageBitRate; /**< [in]: Specifies the average bitrate(in bits/sec) used for encoding. */
+ uint32_t maxBitRate; /**< [in]: Specifies the maximum bitrate for the encoded output. This is used for VBR and ignored for CBR mode. */
+ uint32_t vbvBufferSize; /**< [in]: Specifies the VBV(HRD) buffer size. in bits. Set 0 to use the default VBV buffer size. */
+ uint32_t vbvInitialDelay; /**< [in]: Specifies the VBV(HRD) initial delay in bits. Set 0 to use the default VBV initial delay .*/
+    uint32_t        enableMinQP          :1;                     /**< [in]: Set this to 1 if a minimum QP is used for rate control. */
+    uint32_t        enableMaxQP          :1;                     /**< [in]: Set this to 1 if a maximum QP is used for rate control. */
+    uint32_t        enableInitialRCQP    :1;                     /**< [in]: Set this to 1 if a user-supplied initial QP is used for rate control. */
+ uint32_t enableAQ :1; /**< [in]: Set this to 1 to enable adaptive quantization (Spatial). */
+ uint32_t reservedBitField1 :1; /**< [in]: Reserved bitfields and must be set to 0. */
+ uint32_t enableLookahead :1; /**< [in]: Set this to 1 to enable lookahead with depth <lookaheadDepth> (if lookahead is enabled, input frames must remain available to the encoder until encode completion) */
+ uint32_t disableIadapt :1; /**< [in]: Set this to 1 to disable adaptive I-frame insertion at scene cuts (only has an effect when lookahead is enabled) */
+ uint32_t disableBadapt :1; /**< [in]: Set this to 1 to disable adaptive B-frame decision (only has an effect when lookahead is enabled) */
+ uint32_t enableTemporalAQ :1; /**< [in]: Set this to 1 to enable temporal AQ */
+ uint32_t zeroReorderDelay :1; /**< [in]: Set this to 1 to indicate zero latency operation (no reordering delay, num_reorder_frames=0) */
+ uint32_t enableNonRefP :1; /**< [in]: Set this to 1 to enable automatic insertion of non-reference P-frames (no effect if enablePTD=0) */
+ uint32_t strictGOPTarget :1; /**< [in]: Set this to 1 to minimize GOP-to-GOP rate fluctuations */
+ uint32_t aqStrength :4; /**< [in]: When AQ (Spatial) is enabled (i.e. NV_ENC_RC_PARAMS::enableAQ is set), this field is used to specify AQ strength. AQ strength scale is from 1 (low) - 15 (aggressive).
+ If not set, strength is auto selected by driver. */
+ uint32_t reservedBitFields :16; /**< [in]: Reserved bitfields and must be set to 0 */
+ NV_ENC_QP minQP; /**< [in]: Specifies the minimum QP used for rate control. Client must set NV_ENC_CONFIG::enableMinQP to 1. */
+ NV_ENC_QP maxQP; /**< [in]: Specifies the maximum QP used for rate control. Client must set NV_ENC_CONFIG::enableMaxQP to 1. */
+ NV_ENC_QP initialRCQP; /**< [in]: Specifies the initial QP used for rate control. Client must set NV_ENC_CONFIG::enableInitialRCQP to 1. */
+ uint32_t temporallayerIdxMask; /**< [in]: Specifies the temporal layers (as a bitmask) whose QPs have changed. Valid max bitmask is [2^NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS - 1].
+ Applicable only for constant QP mode (NV_ENC_RC_PARAMS::rateControlMode = NV_ENC_PARAMS_RC_CONSTQP). */
+ uint8_t temporalLayerQP[8]; /**< [in]: Specifies the temporal layer QPs used for rate control. Temporal layer index is used as the array index.
+ Applicable only for constant QP mode (NV_ENC_RC_PARAMS::rateControlMode = NV_ENC_PARAMS_RC_CONSTQP). */
+ uint8_t targetQuality; /**< [in]: Target CQ (Constant Quality) level for VBR mode (range 0-51 with 0-automatic) */
+ uint8_t targetQualityLSB; /**< [in]: Fractional part of target quality (as 8.8 fixed point format) */
+ uint16_t lookaheadDepth; /**< [in]: Maximum depth of lookahead with range 0-(31 - number of B frames).
+ lookaheadDepth is only used if enableLookahead=1.*/
+ uint8_t lowDelayKeyFrameScale; /**< [in]: Specifies the ratio of I frame bits to P frame bits in case of single frame VBV and CBR rate control mode,
+ is set to 2 by default for low latency tuning info and 1 by default for ultra low latency tuning info */
+ int8_t yDcQPIndexOffset; /**< [in]: Specifies the value of 'deltaQ_y_dc' in AV1.*/
+ int8_t uDcQPIndexOffset; /**< [in]: Specifies the value of 'deltaQ_u_dc' in AV1.*/
+ int8_t vDcQPIndexOffset; /**< [in]: Specifies the value of 'deltaQ_v_dc' in AV1 (for future use only - deltaQ_v_dc is currently always internally set to same value as deltaQ_u_dc). */
+ NV_ENC_QP_MAP_MODE qpMapMode; /**< [in]: This flag is used to interpret values in array specified by NV_ENC_PIC_PARAMS::qpDeltaMap.
+ Set this to NV_ENC_QP_MAP_EMPHASIS to treat values specified by NV_ENC_PIC_PARAMS::qpDeltaMap as Emphasis Level Map.
+ Emphasis Level can be assigned any value specified in enum NV_ENC_EMPHASIS_MAP_LEVEL.
+ Emphasis Level Map is used to specify regions to be encoded at varying levels of quality.
+ The hardware encoder adjusts the quantization within the image as per the provided emphasis map,
+ by adjusting the quantization parameter (QP) assigned to each macroblock. This adjustment is commonly called "Delta QP".
+ The adjustment depends on the absolute QP decided by the rate control algorithm, and is applied after the rate control has decided each macroblock's QP.
+ Since the Delta QP overrides rate control, enabling Emphasis Level Map may violate bitrate and VBV buffer size constraints.
+                                                          Emphasis Level Map is useful in situations where the client has a priori knowledge of the image complexity (e.g. via use of NVFBC's Classification feature) and encoding those high-complexity areas at higher quality (lower QP) is important, even at the possible cost of violating bitrate/VBV buffer size constraints.
+                                                          This feature is not supported when AQ (Spatial/Temporal) is enabled.
+                                                          This feature is currently only supported for the H264 codec.
+
+ Set this to NV_ENC_QP_MAP_DELTA to treat values specified by NV_ENC_PIC_PARAMS::qpDeltaMap as QP Delta. This specifies QP modifier to be applied on top of the QP chosen by rate control
+
+ Set this to NV_ENC_QP_MAP_DISABLED to ignore NV_ENC_PIC_PARAMS::qpDeltaMap values. In this case, qpDeltaMap should be set to NULL.
+
+ Other values are reserved for future use.*/
+ NV_ENC_MULTI_PASS multiPass; /**< [in]: This flag is used to enable multi-pass encoding for a given ::NV_ENC_PARAMS_RC_MODE. This flag is not valid for H264 and HEVC MEOnly mode */
+ uint32_t alphaLayerBitrateRatio; /**< [in]: Specifies the ratio in which bitrate should be split between base and alpha layer. A value 'x' for this field will split the target bitrate in a ratio of x : 1 between base and alpha layer.
+ The default split ratio is 15.*/
+ int8_t cbQPIndexOffset; /**< [in]: Specifies the value of 'chroma_qp_index_offset' in H264 / 'pps_cb_qp_offset' in HEVC / 'deltaQ_u_ac' in AV1.*/
+ int8_t crQPIndexOffset; /**< [in]: Specifies the value of 'second_chroma_qp_index_offset' in H264 / 'pps_cr_qp_offset' in HEVC / 'deltaQ_v_ac' in AV1 (for future use only - deltaQ_v_ac is currently always internally set to same value as deltaQ_u_ac). */
+ uint16_t reserved2;
+ uint32_t reserved[4];
+ } NV_ENC_RC_PARAMS;
+
+/** macro for constructing the version field of ::_NV_ENC_RC_PARAMS */
+#define NV_ENC_RC_PARAMS_VER NVENCAPI_STRUCT_VERSION(1)
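+
+/*
+ * Illustrative sketch (not part of the NVENC API): a minimal constant-bitrate
+ * rate control setup using the struct above. NV_ENC_PARAMS_RC_CBR is declared
+ * earlier in this header; `config` is assumed to be an NV_ENC_CONFIG (declared
+ * later in this header) obtained from a preset.
+ *
+ *   config.rcParams.version         = NV_ENC_RC_PARAMS_VER;
+ *   config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR;
+ *   config.rcParams.averageBitRate  = 8000000;  // 8 Mbit/s
+ *   config.rcParams.vbvBufferSize   = 0;        // driver-default VBV size
+ *   config.rcParams.vbvInitialDelay = 0;        // driver-default initial delay
+ */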
+
+#define MAX_NUM_CLOCK_TS 3
+
+/**
+* Clock Timestamp set parameters
+* For H264, this structure is used to populate Picture Timing SEI when NV_ENC_CONFIG_H264::enableTimeCode is set to 1.
+* For HEVC, this structure is used to populate Time Code SEI when NV_ENC_CONFIG_HEVC::enableTimeCodeSEI is set to 1.
+* For more details, refer to Annex D of ITU-T Specification.
+*/
+
+typedef struct _NV_ENC_CLOCK_TIMESTAMP_SET
+{
+ uint32_t countingType : 1; /**< [in] Specifies the 'counting_type' */
+ uint32_t discontinuityFlag : 1; /**< [in] Specifies the 'discontinuity_flag' */
+ uint32_t cntDroppedFrames : 1; /**< [in] Specifies the 'cnt_dropped_flag' */
+ uint32_t nFrames : 8; /**< [in] Specifies the value of 'n_frames' */
+ uint32_t secondsValue : 6; /**< [in] Specifies the 'seconds_value' */
+ uint32_t minutesValue : 6; /**< [in] Specifies the 'minutes_value' */
+ uint32_t hoursValue : 5; /**< [in] Specifies the 'hours_value' */
+ uint32_t reserved2 : 4; /**< [in] Reserved and must be set to 0 */
+ uint32_t timeOffset; /**< [in] Specifies the 'time_offset_value' */
+} NV_ENC_CLOCK_TIMESTAMP_SET;
+
+typedef struct _NV_ENC_TIME_CODE
+{
+ NV_ENC_DISPLAY_PIC_STRUCT displayPicStruct; /**< [in] Display picStruct */
+ NV_ENC_CLOCK_TIMESTAMP_SET clockTimestamp[MAX_NUM_CLOCK_TS]; /**< [in] Clock Timestamp set */
+} NV_ENC_TIME_CODE;
+
+
+/**
+ * \struct _NV_ENC_CONFIG_H264_VUI_PARAMETERS
+ * H264 Video Usability Info parameters
+ */
+typedef struct _NV_ENC_CONFIG_H264_VUI_PARAMETERS
+{
+    uint32_t    overscanInfoPresentFlag;        /**< [in]: If set to 1, it specifies that the overscanInfo is present */
+ uint32_t overscanInfo; /**< [in]: Specifies the overscan info(as defined in Annex E of the ITU-T Specification). */
+ uint32_t videoSignalTypePresentFlag; /**< [in]: If set to 1, it specifies that the videoFormat, videoFullRangeFlag and colourDescriptionPresentFlag are present. */
+ NV_ENC_VUI_VIDEO_FORMAT videoFormat; /**< [in]: Specifies the source video format(as defined in Annex E of the ITU-T Specification).*/
+ uint32_t videoFullRangeFlag; /**< [in]: Specifies the output range of the luma and chroma samples(as defined in Annex E of the ITU-T Specification). */
+ uint32_t colourDescriptionPresentFlag; /**< [in]: If set to 1, it specifies that the colourPrimaries, transferCharacteristics and colourMatrix are present. */
+ NV_ENC_VUI_COLOR_PRIMARIES colourPrimaries; /**< [in]: Specifies color primaries for converting to RGB(as defined in Annex E of the ITU-T Specification) */
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC transferCharacteristics; /**< [in]: Specifies the opto-electronic transfer characteristics to use (as defined in Annex E of the ITU-T Specification) */
+ NV_ENC_VUI_MATRIX_COEFFS colourMatrix; /**< [in]: Specifies the matrix coefficients used in deriving the luma and chroma from the RGB primaries (as defined in Annex E of the ITU-T Specification). */
+    uint32_t    chromaSampleLocationFlag;       /**< [in]: If set to 1, it specifies that the chromaSampleLocationTop and chromaSampleLocationBot are present. */
+ uint32_t chromaSampleLocationTop; /**< [in]: Specifies the chroma sample location for top field(as defined in Annex E of the ITU-T Specification) */
+ uint32_t chromaSampleLocationBot; /**< [in]: Specifies the chroma sample location for bottom field(as defined in Annex E of the ITU-T Specification) */
+ uint32_t bitstreamRestrictionFlag; /**< [in]: If set to 1, it specifies the bitstream restriction parameters are present in the bitstream.*/
+    uint32_t    timingInfoPresentFlag;          /**< [in]: If set to 1, it specifies that the timingInfo is present and the 'numUnitInTicks' and 'timeScale' fields are specified by the application. */
+                                                /**< [in]: If not set, the timingInfo may still be present with timing related fields calculated internally based on the frame rate specified by the application. */
+    uint32_t    numUnitInTicks;                 /**< [in]: Specifies the number of time units of the clock (as defined in Annex E of the ITU-T Specification). */
+    uint32_t    timeScale;                      /**< [in]: Specifies the frequency of the clock (as defined in Annex E of the ITU-T Specification). */
+ uint32_t reserved[12]; /**< [in]: Reserved and must be set to 0 */
+}NV_ENC_CONFIG_H264_VUI_PARAMETERS;
+
+typedef NV_ENC_CONFIG_H264_VUI_PARAMETERS NV_ENC_CONFIG_HEVC_VUI_PARAMETERS;
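+
+/*
+ * Illustrative sketch (not part of the NVENC API): signalling HDR10-style
+ * colour metadata through the VUI struct above (the HEVC variant shares the
+ * same layout via the typedef).
+ *
+ *   NV_ENC_CONFIG_HEVC_VUI_PARAMETERS vui = { 0 };
+ *   vui.videoSignalTypePresentFlag   = 1;
+ *   vui.videoFullRangeFlag           = 0;  // limited range
+ *   vui.colourDescriptionPresentFlag = 1;
+ *   vui.colourPrimaries         = NV_ENC_VUI_COLOR_PRIMARIES_BT2020;
+ *   vui.transferCharacteristics = NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SMPTE2084;
+ *   vui.colourMatrix            = NV_ENC_VUI_MATRIX_COEFFS_BT2020_NCL;
+ */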
+
+/**
+ * \struct _NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE
+ * External motion vector hint counts per block type.
+ * H264 and AV1 support multiple hints while HEVC supports one hint for each valid candidate.
+ */
+typedef struct _NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE
+{
+    uint32_t   numCandsPerBlk16x16   : 4;   /**< [in]: Supported for H264, HEVC. Specifies the number of candidates per 16x16 block. */
+ uint32_t numCandsPerBlk16x8 : 4; /**< [in]: Supported for H264 only. Specifies the number of candidates per 16x8 block. */
+ uint32_t numCandsPerBlk8x16 : 4; /**< [in]: Supported for H264 only. Specifies the number of candidates per 8x16 block. */
+ uint32_t numCandsPerBlk8x8 : 4; /**< [in]: Supported for H264, HEVC. Specifies the number of candidates per 8x8 block. */
+ uint32_t numCandsPerSb : 8; /**< [in]: Supported for AV1 only. Specifies the number of candidates per SB. */
+ uint32_t reserved : 8; /**< [in]: Reserved for padding. */
+ uint32_t reserved1[3]; /**< [in]: Reserved for future use. */
+} NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE;
+
+
+/**
+ * \struct _NVENC_EXTERNAL_ME_HINT
+ * External Motion Vector hint structure for H264 and HEVC.
+ */
+typedef struct _NVENC_EXTERNAL_ME_HINT
+{
+    int32_t mvx         : 12;                    /**< [in]: Specifies the x component of integer pixel MV (relative to current MB) S12.0. */
+    int32_t mvy         : 10;                    /**< [in]: Specifies the y component of integer pixel MV (relative to current MB) S10.0. */
+    int32_t refidx      : 5;                     /**< [in]: Specifies the reference index (31=invalid). Currently only 1 reference frame per direction is supported for external hints, so \p refidx must be 0. */
+    int32_t dir         : 1;                     /**< [in]: Specifies the direction of motion estimation: 0=L0, 1=L1. */
+    int32_t partType    : 2;                     /**< [in]: Specifies the block partition type: 0=16x16, 1=16x8, 2=8x16, 3=8x8 (blocks in partition must be consecutive). */
+    int32_t lastofPart  : 1;                     /**< [in]: Set to 1 for the last MV of a (sub)partition. */
+    int32_t lastOfMB    : 1;                     /**< [in]: Set to 1 for the last MV of the macroblock. */
+} NVENC_EXTERNAL_ME_HINT;
+
+/**
+ * \struct _NVENC_EXTERNAL_ME_SB_HINT
+ * External Motion Vector SB hint structure for AV1
+ */
+typedef struct _NVENC_EXTERNAL_ME_SB_HINT
+{
+ int16_t refidx : 5; /**< [in]: Specifies the reference index (31=invalid) */
+    int16_t direction      : 1;                  /**< [in]: Specifies the direction of motion estimation: 0=L0, 1=L1. */
+    int16_t bi             : 1;                  /**< [in]: Specifies the reference mode: 0=single mv, 1=compound mv */
+    int16_t partition_type : 3;                  /**< [in]: Specifies the partition type: 0: 2NX2N, 1: 2NxN, 2: Nx2N (3 bits reserved for future modes) */
+    int16_t x8             : 3;                  /**< [in]: Specifies the current partition's top left x position in 8 pixel units */
+    int16_t last_of_cu     : 1;                  /**< [in]: Set to 1 for the last MV of the current CU */
+ int16_t last_of_sb : 1; /**< [in]: Set to 1 for the last MV of current SB */
+ int16_t reserved0 : 1; /**< [in]: Reserved and must be set to 0 */
+ int16_t mvx : 14; /**< [in]: Specifies the x component of integer pixel MV (relative to current MB) S12.2. */
+ int16_t cu_size : 2; /**< [in]: Specifies the CU size: 0: 8x8, 1: 16x16, 2:32x32, 3:64x64 */
+ int16_t mvy : 12; /**< [in]: Specifies the y component of integer pixel MV (relative to current MB) S10.2 .*/
+ int16_t y8 : 3; /**< [in]: Specifies the current partition's top left y position in 8 pixel unit */
+ int16_t reserved1 : 1; /**< [in]: Reserved and must be set to 0 */
+} NVENC_EXTERNAL_ME_SB_HINT;
+
+/**
+ * \struct _NV_ENC_CONFIG_H264
+ * H264 encoder configuration parameters
+ */
+typedef struct _NV_ENC_CONFIG_H264
+{
+    uint32_t enableTemporalSVC         :1;                          /**< [in]: Set to 1 to enable temporal SVC */
+ uint32_t enableStereoMVC :1; /**< [in]: Set to 1 to enable stereo MVC*/
+ uint32_t hierarchicalPFrames :1; /**< [in]: Set to 1 to enable hierarchical P Frames */
+ uint32_t hierarchicalBFrames :1; /**< [in]: Set to 1 to enable hierarchical B Frames */
+ uint32_t outputBufferingPeriodSEI :1; /**< [in]: Set to 1 to write SEI buffering period syntax in the bitstream */
+ uint32_t outputPictureTimingSEI :1; /**< [in]: Set to 1 to write SEI picture timing syntax in the bitstream. */
+ uint32_t outputAUD :1; /**< [in]: Set to 1 to write access unit delimiter syntax in bitstream */
+ uint32_t disableSPSPPS :1; /**< [in]: Set to 1 to disable writing of Sequence and Picture parameter info in bitstream */
+ uint32_t outputFramePackingSEI :1; /**< [in]: Set to 1 to enable writing of frame packing arrangement SEI messages to bitstream */
+ uint32_t outputRecoveryPointSEI :1; /**< [in]: Set to 1 to enable writing of recovery point SEI message */
+ uint32_t enableIntraRefresh :1; /**< [in]: Set to 1 to enable gradual decoder refresh or intra refresh. If the GOP structure uses B frames this will be ignored */
+ uint32_t enableConstrainedEncoding :1; /**< [in]: Set this to 1 to enable constrainedFrame encoding where each slice in the constrained picture is independent of other slices.
+ Constrained encoding works only with rectangular slices.
+ Check support for constrained encoding using ::NV_ENC_CAPS_SUPPORT_CONSTRAINED_ENCODING caps. */
+ uint32_t repeatSPSPPS :1; /**< [in]: Set to 1 to enable writing of Sequence and Picture parameter for every IDR frame */
+ uint32_t enableVFR :1; /**< [in]: Setting enableVFR=1 currently only sets the fixed_frame_rate_flag=0 in the VUI but otherwise
+ has no impact on the encoder behavior. For more details please refer to E.1 VUI syntax of H.264 standard. Note, however, that NVENC does not support VFR encoding and rate control. */
+ uint32_t enableLTR :1; /**< [in]: Set to 1 to enable LTR (Long Term Reference) frame support. LTR can be used in two modes: "LTR Trust" mode and "LTR Per Picture" mode.
+ LTR Trust mode: In this mode, ltrNumFrames pictures after IDR are automatically marked as LTR. This mode is enabled by setting ltrTrustMode = 1.
+ Use of LTR Trust mode is strongly discouraged as this mode may be deprecated in future.
+ LTR Per Picture mode: In this mode, client can control whether the current picture should be marked as LTR. Enable this mode by setting
+ ltrTrustMode = 0 and ltrMarkFrame = 1 for the picture to be marked as LTR. This is the preferred mode
+ for using LTR.
+ Note that LTRs are not supported if encoding session is configured with B-frames */
+ uint32_t qpPrimeYZeroTransformBypassFlag :1; /**< [in]: To enable lossless encode set this to 1, set QP to 0 and RC_mode to NV_ENC_PARAMS_RC_CONSTQP and profile to HIGH_444_PREDICTIVE_PROFILE.
+ Check support for lossless encoding using ::NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE caps. */
+ uint32_t useConstrainedIntraPred :1; /**< [in]: Set 1 to enable constrained intra prediction. */
+ uint32_t enableFillerDataInsertion :1; /**< [in]: Set to 1 to enable insertion of filler data in the bitstream.
+ This flag will take effect only when one of the CBR rate
+ control modes (NV_ENC_PARAMS_RC_CBR, NV_ENC_PARAMS_RC_CBR_HQ,
+ NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ) is in use and both
+ NV_ENC_INITIALIZE_PARAMS::frameRateNum and
+ NV_ENC_INITIALIZE_PARAMS::frameRateDen are set to non-zero
+ values. Setting this field when
+ NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is also set
+ is currently not supported and will make ::NvEncInitializeEncoder()
+ return an error. */
+ uint32_t disableSVCPrefixNalu :1; /**< [in]: Set to 1 to disable writing of SVC Prefix NALU preceding each slice in bitstream.
+ Applicable only when temporal SVC is enabled (NV_ENC_CONFIG_H264::enableTemporalSVC = 1). */
+ uint32_t enableScalabilityInfoSEI :1; /**< [in]: Set to 1 to enable writing of Scalability Information SEI message preceding each IDR picture in bitstream
+ Applicable only when temporal SVC is enabled (NV_ENC_CONFIG_H264::enableTemporalSVC = 1). */
+ uint32_t singleSliceIntraRefresh :1; /**< [in]: Set to 1 to maintain single slice in frames during intra refresh.
+ Check support for single slice intra refresh using ::NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH caps.
+ This flag will be ignored if the value returned for ::NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH caps is false. */
+ uint32_t enableTimeCode :1; /**< [in]: Set to 1 to enable writing of clock timestamp sets in picture timing SEI. Note that this flag will be ignored for D3D12 interface. */
+ uint32_t reservedBitFields :10; /**< [in]: Reserved bitfields and must be set to 0 */
+ uint32_t level; /**< [in]: Specifies the encoding level. Client is recommended to set this to NV_ENC_LEVEL_AUTOSELECT in order to enable the NvEncodeAPI interface to select the correct level. */
+    uint32_t idrPeriod;                    /**< [in]: Specifies the IDR interval. If not set, this is made equal to gopLength in NV_ENC_CONFIG. Low latency application clients can set the IDR interval to NVENC_INFINITE_GOPLENGTH so that IDR frames are not inserted automatically. */
+ uint32_t separateColourPlaneFlag; /**< [in]: Set to 1 to enable 4:4:4 separate colour planes */
+ uint32_t disableDeblockingFilterIDC; /**< [in]: Specifies the deblocking filter mode. Permissible value range: [0,2]. This flag corresponds
+ to the flag disable_deblocking_filter_idc specified in section 7.4.3 of H.264 specification,
+ which specifies whether the operation of the deblocking filter shall be disabled across some
+ block edges of the slice and specifies for which edges the filtering is disabled. See section
+ 7.4.3 of H.264 specification for more details.*/
+ uint32_t numTemporalLayers; /**< [in]: Specifies number of temporal layers to be used for hierarchical coding / temporal SVC. Valid value range is [1,::NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS] */
+ uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header */
+ uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header */
+ NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE adaptiveTransformMode; /**< [in]: Specifies the AdaptiveTransform Mode. Check support for AdaptiveTransform mode using ::NV_ENC_CAPS_SUPPORT_ADAPTIVE_TRANSFORM caps. */
+ NV_ENC_H264_FMO_MODE fmoMode; /**< [in]: Specified the FMO Mode. Check support for FMO using ::NV_ENC_CAPS_SUPPORT_FMO caps. */
+ NV_ENC_H264_BDIRECT_MODE bdirectMode; /**< [in]: Specifies the BDirect mode. Check support for BDirect mode using ::NV_ENC_CAPS_SUPPORT_BDIRECT_MODE caps.*/
+ NV_ENC_H264_ENTROPY_CODING_MODE entropyCodingMode; /**< [in]: Specifies the entropy coding mode. Check support for CABAC mode using ::NV_ENC_CAPS_SUPPORT_CABAC caps. */
+ NV_ENC_STEREO_PACKING_MODE stereoMode; /**< [in]: Specifies the stereo frame packing mode which is to be signaled in frame packing arrangement SEI */
+    uint32_t intraRefreshPeriod;           /**< [in]: Specifies the interval between successive intra refresh cycles; requires enableIntraRefresh to be set.
+ Will be disabled if NV_ENC_CONFIG::gopLength is not set to NVENC_INFINITE_GOPLENGTH. */
+ uint32_t intraRefreshCnt; /**< [in]: Specifies the length of intra refresh in number of frames for periodic intra refresh. This value should be smaller than intraRefreshPeriod */
+ uint32_t maxNumRefFrames; /**< [in]: Specifies the DPB size used for encoding. Setting it to 0 will let driver use the default DPB size.
+ The low latency application which wants to invalidate reference frame as an error resilience tool
+ is recommended to use a large DPB size so that the encoder can keep old reference frames which can be used if recent
+ frames are invalidated. */
+ uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices
+ sliceMode = 0 MB based slices, sliceMode = 1 Byte based slices, sliceMode = 2 MB row based slices, sliceMode = 3 numSlices in Picture.
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting
+ When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */
+ uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For:
+ sliceMode = 0, sliceModeData specifies # of MBs in each slice (except last slice)
+ sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice)
+ sliceMode = 2, sliceModeData specifies # of MB rows in each slice (except last slice)
+ sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
+ NV_ENC_CONFIG_H264_VUI_PARAMETERS h264VUIParameters; /**< [in]: Specifies the H264 video usability info parameters */
+ uint32_t ltrNumFrames; /**< [in]: Specifies the number of LTR frames. This parameter has different meaning in two LTR modes.
+ In "LTR Trust" mode (ltrTrustMode = 1), encoder will mark the first ltrNumFrames base layer reference frames within each IDR interval as LTR.
+ In "LTR Per Picture" mode (ltrTrustMode = 0 and ltrMarkFrame = 1), ltrNumFrames specifies maximum number of LTR frames in DPB. */
+ uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode. See comments near NV_ENC_CONFIG_H264::enableLTR for description of the two modes.
+                                                   Set to 1 to use "LTR Trust" mode of LTR operation. Clients are discouraged from using "LTR Trust" mode as this mode may
+ be deprecated in future releases.
+ Set to 0 when using "LTR Per Picture" mode of LTR operation. */
+ uint32_t chromaFormatIDC; /**< [in]: Specifies the chroma format. Should be set to 1 for yuv420 input, 3 for yuv444 input.
+ Check support for YUV444 encoding using ::NV_ENC_CAPS_SUPPORT_YUV444_ENCODE caps.*/
+ uint32_t maxTemporalLayers; /**< [in]: Specifies the max temporal layer used for temporal SVC / hierarchical coding.
+                                                   Default value of this field is NV_ENC_CAPS::NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS. Note that the value NV_ENC_CONFIG_H264::maxNumRefFrames should
+ be greater than or equal to (NV_ENC_CONFIG_H264::maxTemporalLayers - 2) * 2, for NV_ENC_CONFIG_H264::maxTemporalLayers >= 2.*/
+ NV_ENC_BFRAME_REF_MODE useBFramesAsRef; /**< [in]: Specifies the B-Frame as reference mode. Check support for useBFramesAsRef mode using ::NV_ENC_CAPS_SUPPORT_BFRAME_REF_MODE caps.*/
+ NV_ENC_NUM_REF_FRAMES numRefL0; /**< [in]: Specifies max number of reference frames in reference picture list L0, that can be used by hardware for prediction of a frame.
+ Check support for numRefL0 using ::NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES caps. */
+ NV_ENC_NUM_REF_FRAMES numRefL1; /**< [in]: Specifies max number of reference frames in reference picture list L1, that can be used by hardware for prediction of a frame.
+ Check support for numRefL1 using ::NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES caps. */
+
+ uint32_t reserved1[267]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG_H264;
+
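+/**
+ * Illustrative sketch (not part of the SDK documentation): one plausible way
+ * to fill the slice and IDR fields above. It assumes \p config is a
+ * zero-initialized ::_NV_ENC_CONFIG (defined later in this header) for a
+ * session set up for H.264; the concrete values are examples only.
+ * \code
+ *     NV_ENC_CONFIG_H264 *h264 = &config.encodeCodecConfig.h264Config;
+ *     h264->idrPeriod       = 250; // if left 0, follows NV_ENC_CONFIG::gopLength
+ *     h264->sliceMode       = 3;   // 3 = fixed number of slices per picture
+ *     h264->sliceModeData   = 4;   // with sliceMode = 3: 4 slices, split optimally by the driver
+ *     h264->maxNumRefFrames = 0;   // 0 = let the driver choose the DPB size
+ * \endcode
+ */
+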
+/**
+ * \struct _NV_ENC_CONFIG_HEVC
+ * HEVC encoder configuration parameters to be set during initialization.
+ */
+typedef struct _NV_ENC_CONFIG_HEVC
+{
+ uint32_t level; /**< [in]: Specifies the level of the encoded bitstream.*/
+ uint32_t tier; /**< [in]: Specifies the level tier of the encoded bitstream.*/
+ NV_ENC_HEVC_CUSIZE minCUSize; /**< [in]: Specifies the minimum size of luma coding unit.*/
+ NV_ENC_HEVC_CUSIZE maxCUSize; /**< [in]: Specifies the maximum size of luma coding unit. Currently NVENC SDK only supports maxCUSize equal to NV_ENC_HEVC_CUSIZE_32x32.*/
+ uint32_t useConstrainedIntraPred :1; /**< [in]: Set 1 to enable constrained intra prediction. */
+ uint32_t disableDeblockAcrossSliceBoundary :1; /**< [in]: Set 1 to disable in loop filtering across slice boundary.*/
+ uint32_t outputBufferingPeriodSEI :1; /**< [in]: Set 1 to write SEI buffering period syntax in the bitstream */
+ uint32_t outputPictureTimingSEI :1; /**< [in]: Set 1 to write SEI picture timing syntax in the bitstream */
+ uint32_t outputAUD :1; /**< [in]: Set 1 to write Access Unit Delimiter syntax. */
+ uint32_t enableLTR :1; /**< [in]: Set to 1 to enable LTR (Long Term Reference) frame support. LTR can be used in two modes: "LTR Trust" mode and "LTR Per Picture" mode.
+ LTR Trust mode: In this mode, ltrNumFrames pictures after IDR are automatically marked as LTR. This mode is enabled by setting ltrTrustMode = 1.
+ Use of LTR Trust mode is strongly discouraged as this mode may be deprecated in future releases.
+ LTR Per Picture mode: In this mode, client can control whether the current picture should be marked as LTR. Enable this mode by setting
+ ltrTrustMode = 0 and ltrMarkFrame = 1 for the picture to be marked as LTR. This is the preferred mode
+ for using LTR.
+                                                               Note that LTRs are not supported if the encoding session is configured with B-frames. */
+ uint32_t disableSPSPPS :1; /**< [in]: Set 1 to disable VPS, SPS and PPS signaling in the bitstream. */
+ uint32_t repeatSPSPPS :1; /**< [in]: Set 1 to output VPS,SPS and PPS for every IDR frame.*/
+ uint32_t enableIntraRefresh :1; /**< [in]: Set 1 to enable gradual decoder refresh or intra refresh. If the GOP structure uses B frames this will be ignored */
+ uint32_t chromaFormatIDC :2; /**< [in]: Specifies the chroma format. Should be set to 1 for yuv420 input, 3 for yuv444 input.*/
+ uint32_t pixelBitDepthMinus8 :3; /**< [in]: Specifies pixel bit depth minus 8. Should be set to 0 for 8 bit input, 2 for 10 bit input.*/
+ uint32_t enableFillerDataInsertion :1; /**< [in]: Set to 1 to enable insertion of filler data in the bitstream.
+ This flag will take effect only when one of the CBR rate
+ control modes (NV_ENC_PARAMS_RC_CBR, NV_ENC_PARAMS_RC_CBR_HQ,
+ NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ) is in use and both
+ NV_ENC_INITIALIZE_PARAMS::frameRateNum and
+ NV_ENC_INITIALIZE_PARAMS::frameRateDen are set to non-zero
+ values. Setting this field when
+ NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is also set
+ is currently not supported and will make ::NvEncInitializeEncoder()
+ return an error. */
+ uint32_t enableConstrainedEncoding :1; /**< [in]: Set this to 1 to enable constrainedFrame encoding where each slice in the constrained picture is independent of other slices.
+ Constrained encoding works only with rectangular slices.
+ Check support for constrained encoding using ::NV_ENC_CAPS_SUPPORT_CONSTRAINED_ENCODING caps. */
+ uint32_t enableAlphaLayerEncoding :1; /**< [in]: Set this to 1 to enable HEVC encode with alpha layer. */
+ uint32_t singleSliceIntraRefresh :1; /**< [in]: Set this to 1 to maintain single slice frames during intra refresh.
+ Check support for single slice intra refresh using ::NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH caps.
+ This flag will be ignored if the value returned for ::NV_ENC_CAPS_SINGLE_SLICE_INTRA_REFRESH caps is false. */
+ uint32_t outputRecoveryPointSEI :1; /**< [in]: Set to 1 to enable writing of recovery point SEI message */
+ uint32_t outputTimeCodeSEI :1; /**< [in]: Set 1 to write SEI time code syntax in the bitstream. Note that this flag will be ignored for D3D12 interface.*/
+ uint32_t reserved :12; /**< [in]: Reserved bitfields.*/
+ uint32_t idrPeriod; /**< [in]: Specifies the IDR interval. If not set, this is made equal to gopLength in NV_ENC_CONFIG. Low latency application client can set IDR interval to NVENC_INFINITE_GOPLENGTH so that IDR frames are not inserted automatically. */
+    uint32_t intraRefreshPeriod;                                    /**< [in]: Specifies the interval between successive intra-refresh cycles. Requires enableIntraRefresh to be set.
+ Will be disabled if NV_ENC_CONFIG::gopLength is not set to NVENC_INFINITE_GOPLENGTH. */
+ uint32_t intraRefreshCnt; /**< [in]: Specifies the length of intra refresh in number of frames for periodic intra refresh. This value should be smaller than intraRefreshPeriod */
+    uint32_t maxNumRefFramesInDPB;                                  /**< [in]: Specifies the maximum number of reference frames in the DPB.*/
+ uint32_t ltrNumFrames; /**< [in]: This parameter has different meaning in two LTR modes.
+ In "LTR Trust" mode (ltrTrustMode = 1), encoder will mark the first ltrNumFrames base layer reference frames within each IDR interval as LTR.
+ In "LTR Per Picture" mode (ltrTrustMode = 0 and ltrMarkFrame = 1), ltrNumFrames specifies maximum number of LTR frames in DPB.
+                                                                               ltrNumFrames acts as guidance to the encoder and is not necessarily honored. To achieve the right balance between the encoding
+                                                                               quality and keeping LTR frames in the DPB queue, the encoder can internally limit the number of LTR frames.
+                                                                               The number of LTR frames actually used depends upon the encoding preset being used; faster encoding presets will use fewer LTR frames.*/
+ uint32_t vpsId; /**< [in]: Specifies the VPS id of the video parameter set */
+ uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header */
+ uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header */
+ uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices
+ sliceMode = 0 CTU based slices, sliceMode = 1 Byte based slices, sliceMode = 2 CTU row based slices, sliceMode = 3, numSlices in Picture
+ When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */
+ uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For:
+ sliceMode = 0, sliceModeData specifies # of CTUs in each slice (except last slice)
+ sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice)
+ sliceMode = 2, sliceModeData specifies # of CTU rows in each slice (except last slice)
+ sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
+ uint32_t maxTemporalLayersMinus1; /**< [in]: Specifies the max temporal layer used for hierarchical coding. */
+ NV_ENC_CONFIG_HEVC_VUI_PARAMETERS hevcVUIParameters; /**< [in]: Specifies the HEVC video usability info parameters */
+ uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode. See comments near NV_ENC_CONFIG_HEVC::enableLTR for description of the two modes.
+                                                                   Set to 1 to use "LTR Trust" mode of LTR operation. Clients are discouraged from using "LTR Trust" mode as this mode may
+ be deprecated in future releases.
+ Set to 0 when using "LTR Per Picture" mode of LTR operation. */
+ NV_ENC_BFRAME_REF_MODE useBFramesAsRef; /**< [in]: Specifies the B-Frame as reference mode. Check support for useBFramesAsRef mode using ::NV_ENC_CAPS_SUPPORT_BFRAME_REF_MODE caps.*/
+ NV_ENC_NUM_REF_FRAMES numRefL0; /**< [in]: Specifies max number of reference frames in reference picture list L0, that can be used by hardware for prediction of a frame.
+ Check support for numRefL0 using ::NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES caps. */
+ NV_ENC_NUM_REF_FRAMES numRefL1; /**< [in]: Specifies max number of reference frames in reference picture list L1, that can be used by hardware for prediction of a frame.
+ Check support for numRefL1 using ::NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES caps. */
+ uint32_t reserved1[214]; /**< [in]: Reserved and must be set to 0.*/
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG_HEVC;
+
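+/**
+ * Illustrative sketch (not part of the SDK documentation): configuring the
+ * fields above for 10-bit 4:2:0 HEVC, again assuming \p config is a
+ * zero-initialized ::_NV_ENC_CONFIG for a session set up for HEVC. The values
+ * are examples only.
+ * \code
+ *     NV_ENC_CONFIG_HEVC *hevc = &config.encodeCodecConfig.hevcConfig;
+ *     hevc->chromaFormatIDC     = 1;   // 4:2:0 input
+ *     hevc->pixelBitDepthMinus8 = 2;   // 10-bit encode
+ *     hevc->repeatSPSPPS        = 1;   // emit VPS/SPS/PPS with every IDR frame
+ *     hevc->idrPeriod           = 250;
+ * \endcode
+ */
+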
+#define NV_MAX_TILE_COLS_AV1 64
+#define NV_MAX_TILE_ROWS_AV1 64
+
+/**
+ * \struct _NV_ENC_FILM_GRAIN_PARAMS_AV1
+ * AV1 Film Grain Parameters structure
+ */
+
+typedef struct _NV_ENC_FILM_GRAIN_PARAMS_AV1
+{
+ uint32_t applyGrain :1; /**< [in]: Set to 1 to specify film grain should be added to frame */
+ uint32_t chromaScalingFromLuma :1; /**< [in]: Set to 1 to specify the chroma scaling is inferred from luma scaling */
+ uint32_t overlapFlag :1; /**< [in]: Set to 1 to indicate that overlap between film grain blocks should be applied*/
+ uint32_t clipToRestrictedRange :1; /**< [in]: Set to 1 to clip values to restricted (studio) range after adding film grain */
+ uint32_t grainScalingMinus8 :2; /**< [in]: Represents the shift - 8 applied to the values of the chroma component */
+ uint32_t arCoeffLag :2; /**< [in]: Specifies the number of auto-regressive coefficients for luma and chroma */
+ uint32_t numYPoints :4; /**< [in]: Specifies the number of points for the piecewise linear scaling function of the luma component */
+ uint32_t numCbPoints :4; /**< [in]: Specifies the number of points for the piecewise linear scaling function of the cb component */
+ uint32_t numCrPoints :4; /**< [in]: Specifies the number of points for the piecewise linear scaling function of the cr component */
+ uint32_t arCoeffShiftMinus6 :2; /**< [in]: specifies the range of the auto-regressive coefficients */
+    uint32_t grainScaleShift           :2; /**< [in]: Specifies how much the Gaussian random numbers should be scaled down during the grain synthesis process */
+ uint32_t reserved1 :8; /**< [in]: Reserved bits field - should be set to 0 */
+ uint8_t pointYValue[14]; /**< [in]: pointYValue[i]: x coordinate for i-th point of luma piecewise linear scaling function. Values on a scale of 0...255 */
+ uint8_t pointYScaling[14]; /**< [in]: pointYScaling[i]: i-th point output value of luma piecewise linear scaling function */
+ uint8_t pointCbValue[10]; /**< [in]: pointCbValue[i]: x coordinate for i-th point of cb piecewise linear scaling function. Values on a scale of 0...255 */
+ uint8_t pointCbScaling[10]; /**< [in]: pointCbScaling[i]: i-th point output value of cb piecewise linear scaling function */
+ uint8_t pointCrValue[10]; /**< [in]: pointCrValue[i]: x coordinate for i-th point of cr piecewise linear scaling function. Values on a scale of 0...255 */
+ uint8_t pointCrScaling[10]; /**< [in]: pointCrScaling[i]: i-th point output value of cr piecewise linear scaling function */
+ uint8_t arCoeffsYPlus128[24]; /**< [in]: Specifies auto-regressive coefficients used for the Y plane */
+ uint8_t arCoeffsCbPlus128[25]; /**< [in]: Specifies auto-regressive coefficients used for the U plane */
+ uint8_t arCoeffsCrPlus128[25]; /**< [in]: Specifies auto-regressive coefficients used for the V plane */
+ uint8_t reserved2[2]; /**< [in]: Reserved bytes - should be set to 0 */
+ uint8_t cbMult; /**< [in]: Represents a multiplier for the cb component used in derivation of the input index to the cb component scaling function */
+    uint8_t  cbLumaMult;                   /**< [in]: Represents a multiplier for the average luma component used in derivation of the input index to the cb component scaling function. */
+ uint16_t cbOffset; /**< [in]: Represents an offset used in derivation of the input index to the cb component scaling function */
+ uint8_t crMult; /**< [in]: Represents a multiplier for the cr component used in derivation of the input index to the cr component scaling function */
+    uint8_t  crLumaMult;                   /**< [in]: Represents a multiplier for the average luma component used in derivation of the input index to the cr component scaling function. */
+ uint16_t crOffset; /**< [in]: Represents an offset used in derivation of the input index to the cr component scaling function */
+} NV_ENC_FILM_GRAIN_PARAMS_AV1;
+
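+/**
+ * Illustrative sketch (not part of the SDK documentation): a minimal film
+ * grain block with a flat two-point luma scaling function and no chroma
+ * grain. The scaling values are placeholders, not tuned recommendations.
+ * \code
+ *     NV_ENC_FILM_GRAIN_PARAMS_AV1 grain = { 0 };
+ *     grain.applyGrain     = 1;
+ *     grain.numYPoints     = 2;              // two-point piecewise linear luma scaling
+ *     grain.pointYValue[0] = 0;   grain.pointYScaling[0] = 32;
+ *     grain.pointYValue[1] = 255; grain.pointYScaling[1] = 32;
+ *     // referenced via NV_ENC_CONFIG_AV1::filmGrainParams with
+ *     // NV_ENC_CONFIG_AV1::enableFilmGrainParams set to 1 (see below)
+ * \endcode
+ */
+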
+/**
+* \struct _NV_ENC_CONFIG_AV1
+* AV1 encoder configuration parameters to be set during initialization.
+*/
+typedef struct _NV_ENC_CONFIG_AV1
+{
+ uint32_t level; /**< [in]: Specifies the level of the encoded bitstream.*/
+ uint32_t tier; /**< [in]: Specifies the level tier of the encoded bitstream.*/
+ NV_ENC_AV1_PART_SIZE minPartSize; /**< [in]: Specifies the minimum size of luma coding block partition.*/
+ NV_ENC_AV1_PART_SIZE maxPartSize; /**< [in]: Specifies the maximum size of luma coding block partition.*/
+ uint32_t outputAnnexBFormat : 1; /**< [in]: Set 1 to use Annex B format for bitstream output.*/
+ uint32_t enableTimingInfo : 1; /**< [in]: Set 1 to write Timing Info into sequence/frame headers */
+ uint32_t enableDecoderModelInfo : 1; /**< [in]: Set 1 to write Decoder Model Info into sequence/frame headers */
+ uint32_t enableFrameIdNumbers : 1; /**< [in]: Set 1 to write Frame id numbers in bitstream */
+ uint32_t disableSeqHdr : 1; /**< [in]: Set 1 to disable Sequence Header signaling in the bitstream. */
+ uint32_t repeatSeqHdr : 1; /**< [in]: Set 1 to output Sequence Header for every Key frame.*/
+ uint32_t enableIntraRefresh : 1; /**< [in]: Set 1 to enable gradual decoder refresh or intra refresh. If the GOP structure uses B frames this will be ignored */
+ uint32_t chromaFormatIDC : 2; /**< [in]: Specifies the chroma format. Should be set to 1 for yuv420 input (yuv444 input currently not supported).*/
+ uint32_t enableBitstreamPadding : 1; /**< [in]: Set 1 to enable bitstream padding. */
+ uint32_t enableCustomTileConfig : 1; /**< [in]: Set 1 to enable custom tile configuration: numTileColumns and numTileRows must have non zero values and tileWidths and tileHeights must point to a valid address */
+ uint32_t enableFilmGrainParams : 1; /**< [in]: Set 1 to enable custom film grain parameters: filmGrainParams must point to a valid address */
+ uint32_t inputPixelBitDepthMinus8 : 3; /**< [in]: Specifies pixel bit depth minus 8 of video input. Should be set to 0 for 8 bit input, 2 for 10 bit input.*/
+ uint32_t pixelBitDepthMinus8 : 3; /**< [in]: Specifies pixel bit depth minus 8 of encoded video. Should be set to 0 for 8 bit, 2 for 10 bit.
+                                                                     HW will do the bit depth conversion internally from inputPixelBitDepthMinus8 -> pixelBitDepthMinus8 if the bit depths differ.
+                                                                     Only conversion from 8-bit input to 10-bit encode is supported. */
+ uint32_t reserved : 14; /**< [in]: Reserved bitfields.*/
+    uint32_t idrPeriod;                                            /**< [in]: Specifies the IDR/Key frame interval. If not set, this is made equal to gopLength in NV_ENC_CONFIG. Low-latency application clients can set the IDR interval to NVENC_INFINITE_GOPLENGTH so that IDR frames are not inserted automatically. */
+    uint32_t intraRefreshPeriod;                                   /**< [in]: Specifies the interval between successive intra-refresh cycles. Requires enableIntraRefresh to be set.
+ Will be disabled if NV_ENC_CONFIG::gopLength is not set to NVENC_INFINITE_GOPLENGTH. */
+ uint32_t intraRefreshCnt; /**< [in]: Specifies the length of intra refresh in number of frames for periodic intra refresh. This value should be smaller than intraRefreshPeriod */
+    uint32_t maxNumRefFramesInDPB;                                 /**< [in]: Specifies the maximum number of reference frames in the DPB.*/
+ uint32_t numTileColumns; /**< [in]: This parameter in conjunction with the flag enableCustomTileConfig and the array tileWidths[] specifies the way in which the picture is divided into tile columns.
+ When enableCustomTileConfig == 0, the picture will be uniformly divided into numTileColumns tile columns. If numTileColumns is not a power of 2,
+                                                                   it will be rounded down to the next power of 2 value. If numTileColumns == 0, the picture will be coded with the smallest number of vertical tiles as allowed by the standard.
+                                                                   When enableCustomTileConfig == 1, numTileColumns must be > 0 and <= NV_MAX_TILE_COLS_AV1 and tileWidths must point to a valid array of numTileColumns entries.
+                                                                   Entry i specifies the width in 64x64 CTU units of tile column i. The sum of all the entries should be equal to the picture width in 64x64 CTU units. */
+    uint32_t numTileRows;                                          /**< [in]: This parameter in conjunction with the flag enableCustomTileConfig and the array tileHeights[] specifies the way in which the picture is divided into tile rows.
+                                                                   When enableCustomTileConfig == 0, the picture will be uniformly divided into numTileRows tile rows. If numTileRows is not a power of 2,
+                                                                   it will be rounded down to the next power of 2 value. If numTileRows == 0, the picture will be coded with the smallest number of horizontal tiles as allowed by the standard.
+                                                                   When enableCustomTileConfig == 1, numTileRows must be > 0 and <= NV_MAX_TILE_ROWS_AV1 and tileHeights must point to a valid array of numTileRows entries.
+                                                                   Entry i specifies the height in 64x64 CTU units of tile row i. The sum of all the entries should be equal to the picture height in 64x64 CTU units. */
+ uint32_t *tileWidths; /**< [in]: If enableCustomTileConfig == 1, tileWidths[i] specifies the width of tile column i in 64x64 CTU unit, with 0 <= i <= numTileColumns -1. */
+ uint32_t *tileHeights; /**< [in]: If enableCustomTileConfig == 1, tileHeights[i] specifies the height of tile row i in 64x64 CTU unit, with 0 <= i <= numTileRows -1. */
+ uint32_t maxTemporalLayersMinus1; /**< [in]: Specifies the max temporal layer used for hierarchical coding. */
+ NV_ENC_VUI_COLOR_PRIMARIES colorPrimaries; /**< [in]: as defined in section of ISO/IEC 23091-4/ITU-T H.273 */
+ NV_ENC_VUI_TRANSFER_CHARACTERISTIC transferCharacteristics; /**< [in]: as defined in section of ISO/IEC 23091-4/ITU-T H.273 */
+ NV_ENC_VUI_MATRIX_COEFFS matrixCoefficients; /**< [in]: as defined in section of ISO/IEC 23091-4/ITU-T H.273 */
+ uint32_t colorRange; /**< [in]: 0: studio swing representation - 1: full swing representation */
+ uint32_t chromaSamplePosition; /**< [in]: 0: unknown
+ 1: Horizontally collocated with luma (0,0) sample, between two vertical samples
+ 2: Co-located with luma (0,0) sample */
+ NV_ENC_BFRAME_REF_MODE useBFramesAsRef; /**< [in]: Specifies the B-Frame as reference mode. Check support for useBFramesAsRef mode using ::NV_ENC_CAPS_SUPPORT_BFRAME_REF_MODE caps.*/
+ NV_ENC_FILM_GRAIN_PARAMS_AV1 *filmGrainParams; /**< [in]: If enableFilmGrainParams == 1, filmGrainParams must point to a valid NV_ENC_FILM_GRAIN_PARAMS_AV1 structure */
+    NV_ENC_NUM_REF_FRAMES numFwdRefs;                              /**< [in]: Specifies the max number of forward reference frames used for prediction of a frame. It must be in the range 1-4 (Last, Last2, Last3 and Golden). It is a suggested value and is not necessarily always honored. */
+    NV_ENC_NUM_REF_FRAMES numBwdRefs;                              /**< [in]: Specifies the max number of L1 list reference frames used for prediction of a frame. It must be in the range 1-3 (Backward, Altref2, Altref). It is a suggested value and is not necessarily always honored. */
+ uint32_t reserved1[235]; /**< [in]: Reserved and must be set to 0.*/
+ void* reserved2[62]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG_AV1;
+
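+/**
+ * Illustrative sketch (not part of the SDK documentation): a custom two-column
+ * tile layout for a 1920x1080 picture, i.e. 30 x 17 units of 64x64. Per the
+ * field descriptions above, the width entries must sum to the picture width in
+ * 64x64 units and both tile arrays must be valid when enableCustomTileConfig is 1.
+ * \code
+ *     static uint32_t tileWidths[2]  = { 15, 15 };  // 1920 / 64 = 30 units total
+ *     static uint32_t tileHeights[1] = { 17 };      // ceil(1080 / 64) = 17 units
+ *     NV_ENC_CONFIG_AV1 *av1 = &config.encodeCodecConfig.av1Config;
+ *     av1->enableCustomTileConfig = 1;
+ *     av1->numTileColumns = 2;
+ *     av1->numTileRows    = 1;
+ *     av1->tileWidths     = tileWidths;
+ *     av1->tileHeights    = tileHeights;
+ * \endcode
+ */
+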
+/**
+ * \struct _NV_ENC_CONFIG_H264_MEONLY
+ * H264 encoder configuration parameters for ME only Mode
+ *
+ */
+typedef struct _NV_ENC_CONFIG_H264_MEONLY
+{
+ uint32_t disablePartition16x16 :1; /**< [in]: Disable Motion Estimation on 16x16 blocks*/
+ uint32_t disablePartition8x16 :1; /**< [in]: Disable Motion Estimation on 8x16 blocks*/
+ uint32_t disablePartition16x8 :1; /**< [in]: Disable Motion Estimation on 16x8 blocks*/
+ uint32_t disablePartition8x8 :1; /**< [in]: Disable Motion Estimation on 8x8 blocks*/
+ uint32_t disableIntraSearch :1; /**< [in]: Disable Intra search during Motion Estimation*/
+ uint32_t bStereoEnable :1; /**< [in]: Enable Stereo Mode for Motion Estimation where each view is independently executed*/
+ uint32_t reserved :26; /**< [in]: Reserved and must be set to 0 */
+ uint32_t reserved1 [255]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG_H264_MEONLY;
+
+
+/**
+ * \struct _NV_ENC_CONFIG_HEVC_MEONLY
+ * HEVC encoder configuration parameters for ME only Mode
+ *
+ */
+typedef struct _NV_ENC_CONFIG_HEVC_MEONLY
+{
+ uint32_t reserved [256]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved1[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG_HEVC_MEONLY;
+
+/**
+ * \struct _NV_ENC_CODEC_CONFIG
+ * Codec-specific encoder configuration parameters to be set during initialization.
+ */
+typedef union _NV_ENC_CODEC_CONFIG
+{
+ NV_ENC_CONFIG_H264 h264Config; /**< [in]: Specifies the H.264-specific encoder configuration. */
+ NV_ENC_CONFIG_HEVC hevcConfig; /**< [in]: Specifies the HEVC-specific encoder configuration. */
+ NV_ENC_CONFIG_AV1 av1Config; /**< [in]: Specifies the AV1-specific encoder configuration. */
+ NV_ENC_CONFIG_H264_MEONLY h264MeOnlyConfig; /**< [in]: Specifies the H.264-specific ME only encoder configuration. */
+ NV_ENC_CONFIG_HEVC_MEONLY hevcMeOnlyConfig; /**< [in]: Specifies the HEVC-specific ME only encoder configuration. */
+ uint32_t reserved[320]; /**< [in]: Reserved and must be set to 0 */
+} NV_ENC_CODEC_CONFIG;
+
+
+/**
+ * \struct _NV_ENC_CONFIG
+ * Encoder configuration parameters to be set during initialization.
+ */
+typedef struct _NV_ENC_CONFIG
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_CONFIG_VER. */
+ GUID profileGUID; /**< [in]: Specifies the codec profile GUID. If client specifies \p NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID the NvEncodeAPI interface will select the appropriate codec profile. */
+ uint32_t gopLength; /**< [in]: Specifies the number of pictures in one GOP. Low latency application client can set goplength to NVENC_INFINITE_GOPLENGTH so that keyframes are not inserted automatically. */
+ int32_t frameIntervalP; /**< [in]: Specifies the GOP pattern as follows: \p frameIntervalP = 0: I, 1: IPP, 2: IBP, 3: IBBP If goplength is set to NVENC_INFINITE_GOPLENGTH \p frameIntervalP should be set to 1. */
+ uint32_t monoChromeEncoding; /**< [in]: Set this to 1 to enable monochrome encoding for this session. */
+ NV_ENC_PARAMS_FRAME_FIELD_MODE frameFieldMode; /**< [in]: Specifies the frame/field mode.
+ Check support for field encoding using ::NV_ENC_CAPS_SUPPORT_FIELD_ENCODING caps.
+ Using a frameFieldMode other than NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME for RGB input is not supported. */
+ NV_ENC_MV_PRECISION mvPrecision; /**< [in]: Specifies the desired motion vector prediction precision. */
+ NV_ENC_RC_PARAMS rcParams; /**< [in]: Specifies the rate control parameters for the current encoding session. */
+ NV_ENC_CODEC_CONFIG encodeCodecConfig; /**< [in]: Specifies the codec specific config parameters through this union. */
+ uint32_t reserved [278]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_CONFIG;
+
+/** macro for constructing the version field of ::_NV_ENC_CONFIG */
+#define NV_ENC_CONFIG_VER (NVENCAPI_STRUCT_VERSION(8) | ( 1u<<31 ))
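+
+/**
+ * Illustrative sketch (not part of the SDK documentation): a minimal
+ * ::_NV_ENC_CONFIG that lets the interface pick the profile. rcParams is left
+ * zeroed here; in practice clients usually start from the preset defaults
+ * returned by ::NvEncGetEncodePresetConfig() (see NV_ENC_PRESET_CONFIG below).
+ * \code
+ *     NV_ENC_CONFIG config = { 0 };
+ *     config.version        = NV_ENC_CONFIG_VER;
+ *     config.profileGUID    = NV_ENC_CODEC_PROFILE_AUTOSELECT_GUID;
+ *     config.gopLength      = 120;  // one key frame every 120 frames
+ *     config.frameIntervalP = 1;    // IPP... pattern, no B frames
+ * \endcode
+ */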
+
+/**
+ * Tuning information of NVENC encoding (TuningInfo is not applicable to H264 and HEVC MEOnly mode).
+ */
+typedef enum NV_ENC_TUNING_INFO
+{
+ NV_ENC_TUNING_INFO_UNDEFINED = 0, /**< Undefined tuningInfo. Invalid value for encoding. */
+ NV_ENC_TUNING_INFO_HIGH_QUALITY = 1, /**< Tune presets for latency tolerant encoding.*/
+ NV_ENC_TUNING_INFO_LOW_LATENCY = 2, /**< Tune presets for low latency streaming.*/
+ NV_ENC_TUNING_INFO_ULTRA_LOW_LATENCY = 3, /**< Tune presets for ultra low latency streaming.*/
+ NV_ENC_TUNING_INFO_LOSSLESS = 4, /**< Tune presets for lossless encoding.*/
+ NV_ENC_TUNING_INFO_COUNT /**< Count number of tuningInfos. Invalid value. */
+}NV_ENC_TUNING_INFO;
+
+/**
+ * \struct _NV_ENC_INITIALIZE_PARAMS
+ * Encode Session Initialization parameters.
+ */
+typedef struct _NV_ENC_INITIALIZE_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_INITIALIZE_PARAMS_VER. */
+ GUID encodeGUID; /**< [in]: Specifies the Encode GUID for which the encoder is being created. ::NvEncInitializeEncoder() API will fail if this is not set, or set to unsupported value. */
+    GUID presetGUID; /**< [in]: Specifies the preset for encoding. If the preset GUID is set, the preset configuration will be applied before any other parameter. */
+ uint32_t encodeWidth; /**< [in]: Specifies the encode width. If not set ::NvEncInitializeEncoder() API will fail. */
+ uint32_t encodeHeight; /**< [in]: Specifies the encode height. If not set ::NvEncInitializeEncoder() API will fail. */
+ uint32_t darWidth; /**< [in]: Specifies the display aspect ratio width (H264/HEVC) or the render width (AV1). */
+ uint32_t darHeight; /**< [in]: Specifies the display aspect ratio height (H264/HEVC) or the render height (AV1). */
+ uint32_t frameRateNum; /**< [in]: Specifies the numerator for frame rate used for encoding in frames per second ( Frame rate = frameRateNum / frameRateDen ). */
+ uint32_t frameRateDen; /**< [in]: Specifies the denominator for frame rate used for encoding in frames per second ( Frame rate = frameRateNum / frameRateDen ). */
+    uint32_t enableEncodeAsync; /**< [in]: Set this to 1 to enable asynchronous mode; the client is then expected to use events to get picture completion notifications. */
+    uint32_t enablePTD; /**< [in]: Set this to 1 to let the picture type decision be taken by the NvEncodeAPI interface. */
+ uint32_t reportSliceOffsets :1; /**< [in]: Set this to 1 to enable reporting slice offsets in ::_NV_ENC_LOCK_BITSTREAM. NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync must be set to 0 to use this feature. Client must set this to 0 if NV_ENC_CONFIG_H264::sliceMode is 1 on Kepler GPUs */
+ uint32_t enableSubFrameWrite :1; /**< [in]: Set this to 1 to write out available bitstream to memory at subframe intervals.
+ If enableSubFrameWrite = 1, then the hardware encoder returns data as soon as a slice (H264/HEVC) or tile (AV1) has completed encoding.
+ This results in better encoding latency, but the downside is that the application has to keep polling via a call to nvEncLockBitstream API continuously to see if any encoded slice/tile data is available.
+ Use this mode if you feel that the marginal reduction in latency from sub-frame encoding is worth the increase in complexity due to CPU-based polling. */
+    uint32_t enableExternalMEHints :1; /**< [in]: Set to 1 to enable external ME hints for the current frame. For NV_ENC_INITIALIZE_PARAMS::enablePTD=1 with B frames, programming L1 hints is optional for B frames since the client doesn't know the internal GOP structure.
+ NV_ENC_PIC_PARAMS::meHintRefPicDist should preferably be set with enablePTD=1. */
+    uint32_t enableMEOnlyMode :1; /**< [in]: Set to 1 to enable ME-only mode. */
+ uint32_t enableWeightedPrediction :1; /**< [in]: Set this to 1 to enable weighted prediction. Not supported if encode session is configured for B-Frames (i.e. NV_ENC_CONFIG::frameIntervalP > 1 or preset >=P3 when tuningInfo = ::NV_ENC_TUNING_INFO_HIGH_QUALITY or
+ tuningInfo = ::NV_ENC_TUNING_INFO_LOSSLESS. This is because preset >=p3 internally enables B frames when tuningInfo = ::NV_ENC_TUNING_INFO_HIGH_QUALITY or ::NV_ENC_TUNING_INFO_LOSSLESS). */
+ uint32_t enableOutputInVidmem :1; /**< [in]: Set this to 1 to enable output of NVENC in video memory buffer created by application. This feature is not supported for HEVC ME only mode. */
+ uint32_t reservedBitFields :26; /**< [in]: Reserved bitfields and must be set to 0 */
+ uint32_t privDataSize; /**< [in]: Reserved private data buffer size and must be set to 0 */
+ void* privData; /**< [in]: Reserved private data buffer and must be set to NULL */
+ NV_ENC_CONFIG* encodeConfig; /**< [in]: Specifies the advanced codec specific structure. If client has sent a valid codec config structure, it will override parameters set by the NV_ENC_INITIALIZE_PARAMS::presetGUID parameter. If set to NULL the NvEncodeAPI interface will use the NV_ENC_INITIALIZE_PARAMS::presetGUID to set the codec specific parameters.
+ Client can also optionally query the NvEncodeAPI interface to get codec specific parameters for a presetGUID using ::NvEncGetEncodePresetConfig() API. It can then modify (if required) some of the codec config parameters and send down a custom config structure as part of ::_NV_ENC_INITIALIZE_PARAMS.
+                                                                 Even in this case the client is recommended to pass the same preset GUID it used in the ::NvEncGetEncodePresetConfig() query as NV_ENC_INITIALIZE_PARAMS::presetGUID. This will not override the custom config structure but will be used to determine other encoder HW specific parameters not exposed in the API. */
+ uint32_t maxEncodeWidth; /**< [in]: Maximum encode width to be used for current Encode session.
+ Client should allocate output buffers according to this dimension for dynamic resolution change. If set to 0, Encoder will not allow dynamic resolution change. */
+ uint32_t maxEncodeHeight; /**< [in]: Maximum encode height to be allowed for current Encode session.
+                                                                 Client should allocate output buffers according to this dimension for dynamic resolution change. If set to 0, Encoder will not allow dynamic resolution change. */
+ NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE maxMEHintCountsPerBlock[2]; /**< [in]: If Client wants to pass external motion vectors in NV_ENC_PIC_PARAMS::meExternalHints buffer it must specify the maximum number of hint candidates per block per direction for the encode session.
+ The NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[0] is for L0 predictors and NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[1] is for L1 predictors.
+                                                                 The client must also set NV_ENC_INITIALIZE_PARAMS::enableExternalMEHints to 1. */
+    NV_ENC_TUNING_INFO tuningInfo; /**< [in]: Tuning info for NVENC encoding (TuningInfo is not applicable to H264 and HEVC ME-only mode). */
+ NV_ENC_BUFFER_FORMAT bufferFormat; /**< [in]: Input buffer format. Used only when DX12 interface type is used */
+ uint32_t reserved [287]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_INITIALIZE_PARAMS;
+
+/** macro for constructing the version field of ::_NV_ENC_INITIALIZE_PARAMS */
+#define NV_ENC_INITIALIZE_PARAMS_VER (NVENCAPI_STRUCT_VERSION(5) | ( 1u<<31 ))
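+
+/**
+ * Illustrative sketch (not part of the SDK documentation): initializing a
+ * 1080p60 HEVC session. The codec and preset GUIDs are defined earlier in this
+ * header; \p fnList is assumed to be a populated NV_ENCODE_API_FUNCTION_LIST
+ * and \p encoder an open encode session handle.
+ * \code
+ *     NV_ENC_INITIALIZE_PARAMS params = { 0 };
+ *     params.version      = NV_ENC_INITIALIZE_PARAMS_VER;
+ *     params.encodeGUID   = NV_ENC_CODEC_HEVC_GUID;
+ *     params.presetGUID   = NV_ENC_PRESET_P4_GUID;
+ *     params.tuningInfo   = NV_ENC_TUNING_INFO_LOW_LATENCY;
+ *     params.encodeWidth  = 1920;  params.darWidth  = 1920;
+ *     params.encodeHeight = 1080;  params.darHeight = 1080;
+ *     params.frameRateNum = 60;    params.frameRateDen = 1;
+ *     params.enablePTD    = 1;          // let NvEncodeAPI take picture type decisions
+ *     params.encodeConfig = &config;    // or NULL to use the preset defaults as-is
+ *     NVENCSTATUS status = fnList.nvEncInitializeEncoder(encoder, &params);
+ * \endcode
+ */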
+
+
+/**
+ * \struct _NV_ENC_RECONFIGURE_PARAMS
+ * Encode session reconfiguration parameters.
+ */
+typedef struct _NV_ENC_RECONFIGURE_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_RECONFIGURE_PARAMS_VER. */
+ NV_ENC_INITIALIZE_PARAMS reInitEncodeParams; /**< [in]: Encoder session re-initialization parameters.
+ If reInitEncodeParams.encodeConfig is NULL and
+ reInitEncodeParams.presetGUID is the same as the preset
+ GUID specified on the call to NvEncInitializeEncoder(),
+ EncodeAPI will continue to use the existing encode
+ configuration.
+ If reInitEncodeParams.encodeConfig is NULL and
+ reInitEncodeParams.presetGUID is different from the preset
+ GUID specified on the call to NvEncInitializeEncoder(),
+ EncodeAPI will try to use the default configuration for
+ the preset specified by reInitEncodeParams.presetGUID.
+ In this case, reconfiguration may fail if the new
+ configuration is incompatible with the existing
+ configuration (e.g. the new configuration results in
+ a change in the GOP structure). */
+ uint32_t resetEncoder :1; /**< [in]: This resets the rate control states and other internal encoder states. This should be used only with an IDR frame.
+ If NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1, encoder will force the frame type to IDR */
+ uint32_t forceIDR :1; /**< [in]: Encode the current picture as an IDR picture. This flag is only valid when Picture type decision is taken by the Encoder
+ [_NV_ENC_INITIALIZE_PARAMS::enablePTD == 1]. */
+ uint32_t reserved :30;
+
+}NV_ENC_RECONFIGURE_PARAMS;
+
+/** macro for constructing the version field of ::_NV_ENC_RECONFIGURE_PARAMS */
+#define NV_ENC_RECONFIGURE_PARAMS_VER (NVENCAPI_STRUCT_VERSION(1) | ( 1u<<31 ))
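+
+/**
+ * Illustrative sketch (not part of the SDK documentation): switching to 720p
+ * mid-session while keeping the rest of the configuration. It assumes
+ * \p initParams holds the parameters passed to ::NvEncInitializeEncoder() and
+ * that maxEncodeWidth/maxEncodeHeight were set large enough at init time.
+ * \code
+ *     NV_ENC_RECONFIGURE_PARAMS reconf = { 0 };
+ *     reconf.version            = NV_ENC_RECONFIGURE_PARAMS_VER;
+ *     reconf.reInitEncodeParams = initParams;  // same preset GUID and encodeConfig
+ *     reconf.reInitEncodeParams.encodeWidth  = 1280;
+ *     reconf.reInitEncodeParams.encodeHeight = 720;
+ *     reconf.forceIDR           = 1;           // start the reconfigured stream on an IDR
+ *     NVENCSTATUS status = fnList.nvEncReconfigureEncoder(encoder, &reconf);
+ * \endcode
+ */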
+
+/**
+ * \struct _NV_ENC_PRESET_CONFIG
+ * Encoder preset config
+ */
+typedef struct _NV_ENC_PRESET_CONFIG
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_PRESET_CONFIG_VER. */
+ NV_ENC_CONFIG presetCfg; /**< [out]: preset config returned by the Nvidia Video Encoder interface. */
+ uint32_t reserved1[255]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+}NV_ENC_PRESET_CONFIG;
+
+/** macro for constructing the version field of ::_NV_ENC_PRESET_CONFIG */
+#define NV_ENC_PRESET_CONFIG_VER (NVENCAPI_STRUCT_VERSION(4) | ( 1u<<31 ))
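+
+/**
+ * Illustrative sketch (not part of the SDK documentation): fetching a preset's
+ * default configuration so it can be tweaked and passed back through
+ * NV_ENC_INITIALIZE_PARAMS::encodeConfig, as recommended above. Note that both
+ * version fields must be filled in before the call.
+ * \code
+ *     NV_ENC_PRESET_CONFIG presetConfig = { 0 };
+ *     presetConfig.version           = NV_ENC_PRESET_CONFIG_VER;
+ *     presetConfig.presetCfg.version = NV_ENC_CONFIG_VER;
+ *     NVENCSTATUS status = fnList.nvEncGetEncodePresetConfig(
+ *         encoder, NV_ENC_CODEC_HEVC_GUID, NV_ENC_PRESET_P4_GUID, &presetConfig);
+ *     // adjust presetConfig.presetCfg, then use it as the encodeConfig at init
+ * \endcode
+ */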
+
+
+/**
+ * \struct _NV_ENC_PIC_PARAMS_MVC
+ * MVC-specific parameters to be sent on a per-frame basis.
+ */
+typedef struct _NV_ENC_PIC_PARAMS_MVC
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_PIC_PARAMS_MVC_VER. */
+ uint32_t viewID; /**< [in]: Specifies the view ID associated with the current input view. */
+ uint32_t temporalID; /**< [in]: Specifies the temporal ID associated with the current input view. */
+ uint32_t priorityID; /**< [in]: Specifies the priority ID associated with the current input view. Reserved and ignored by the NvEncodeAPI interface. */
+ uint32_t reserved1[12]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved2[8]; /**< [in]: Reserved and must be set to NULL. */
+}NV_ENC_PIC_PARAMS_MVC;
+
+/** macro for constructing the version field of ::_NV_ENC_PIC_PARAMS_MVC */
+#define NV_ENC_PIC_PARAMS_MVC_VER NVENCAPI_STRUCT_VERSION(1)
+
+
+/**
+ * \union _NV_ENC_PIC_PARAMS_H264_EXT
+ * H264 extension picture parameters
+ */
+typedef union _NV_ENC_PIC_PARAMS_H264_EXT
+{
+ NV_ENC_PIC_PARAMS_MVC mvcPicParams; /**< [in]: Specifies the MVC picture parameters. */
+ uint32_t reserved1[32]; /**< [in]: Reserved and must be set to 0. */
+}NV_ENC_PIC_PARAMS_H264_EXT;
+
+/**
+ * \struct _NV_ENC_SEI_PAYLOAD
+ * User SEI message
+ */
+typedef struct _NV_ENC_SEI_PAYLOAD
+{
+ uint32_t payloadSize; /**< [in] SEI payload size in bytes. SEI payload must be byte aligned, as described in Annex D */
+ uint32_t payloadType; /**< [in] SEI payload types and syntax can be found in Annex D of the H.264 Specification. */
+ uint8_t *payload; /**< [in] pointer to user data */
+} NV_ENC_SEI_PAYLOAD;
+
+#define NV_ENC_H264_SEI_PAYLOAD NV_ENC_SEI_PAYLOAD
+
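+/**
+ * Illustrative sketch (not part of the SDK documentation): attaching one user
+ * SEI message to a frame via the per-picture structures below. Payload type 5
+ * (user_data_unregistered, H.264 Annex D) is one possible choice; note that a
+ * conformant user_data_unregistered payload begins with a 16-byte UUID.
+ * \code
+ *     static uint8_t userData[] = "example user data";
+ *     NV_ENC_SEI_PAYLOAD sei = { 0 };
+ *     sei.payloadSize = sizeof(userData);
+ *     sei.payloadType = 5;               // user_data_unregistered
+ *     sei.payload     = userData;
+ *     // then: picParams.codecPicParams.h264PicParams.seiPayloadArray    = &sei;
+ *     //       picParams.codecPicParams.h264PicParams.seiPayloadArrayCnt = 1;
+ * \endcode
+ */
+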
+/**
+ * \struct _NV_ENC_PIC_PARAMS_H264
+ * H264-specific encode picture parameters, sent on a per-frame basis.
+ */
+typedef struct _NV_ENC_PIC_PARAMS_H264
+{
+    uint32_t displayPOCSyntax; /**< [in]: Specifies the display POC syntax. This is required to be set if the client is handling the picture type decision. */
+ uint32_t reserved3; /**< [in]: Reserved and must be set to 0 */
+ uint32_t refPicFlag; /**< [in]: Set to 1 for a reference picture. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+ uint32_t colourPlaneId; /**< [in]: Specifies the colour plane ID associated with the current input. */
+ uint32_t forceIntraRefreshWithFrameCnt; /**< [in]: Forces an intra refresh with duration equal to intraRefreshFrameCnt.
+                                                     When outputRecoveryPointSEI is set, this value is used for recovery_frame_cnt in the recovery point SEI message.
+ forceIntraRefreshWithFrameCnt cannot be used if B frames are used in the GOP structure specified */
+ uint32_t constrainedFrame :1; /**< [in]: Set to 1 if client wants to encode this frame with each slice completely independent of other slices in the frame.
+ NV_ENC_INITIALIZE_PARAMS::enableConstrainedEncoding should be set to 1 */
+    uint32_t sliceModeDataUpdate :1; /**< [in]: Set to 1 if the client wants to change the sliceModeData field to specify a new slice size parameter.
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting */
+ uint32_t ltrMarkFrame :1; /**< [in]: Set to 1 if client wants to mark this frame as LTR */
+ uint32_t ltrUseFrames :1; /**< [in]: Set to 1 if client allows encoding this frame using the LTR frames specified in ltrFrameBitmap */
+ uint32_t reservedBitFields :28; /**< [in]: Reserved bit fields and must be set to 0 */
+ uint8_t* sliceTypeData; /**< [in]: Deprecated. */
+ uint32_t sliceTypeArrayCnt; /**< [in]: Deprecated. */
+ uint32_t seiPayloadArrayCnt; /**< [in]: Specifies the number of elements allocated in seiPayloadArray array. */
+ NV_ENC_SEI_PAYLOAD* seiPayloadArray; /**< [in]: Array of SEI payloads which will be inserted for this frame. */
+ uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices
+ sliceMode = 0 MB based slices, sliceMode = 1 Byte based slices, sliceMode = 2 MB row based slices, sliceMode = 3, numSlices in Picture
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting
+ When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */
+ uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For:
+ sliceMode = 0, sliceModeData specifies # of MBs in each slice (except last slice)
+ sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice)
+ sliceMode = 2, sliceModeData specifies # of MB rows in each slice (except last slice)
+ sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
+    uint32_t ltrMarkFrameIdx; /**< [in]: Specifies the long term reference frame index to use for marking this frame as LTR.*/
+ uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the associated bitmap of LTR frame indices to use when encoding this frame. */
+ uint32_t ltrUsageMode; /**< [in]: Not supported. Reserved for future use and must be set to 0. */
+ uint32_t forceIntraSliceCount; /**< [in]: Specifies the number of slices to be forced to Intra in the current picture.
+ This option along with forceIntraSliceIdx[] array needs to be used with sliceMode = 3 only */
+ uint32_t *forceIntraSliceIdx; /**< [in]: Slice indices to be forced to intra in the current picture. Each slice index should be <= num_slices_in_picture -1. Index starts from 0 for first slice.
+ The number of entries in this array should be equal to forceIntraSliceCount */
+ NV_ENC_PIC_PARAMS_H264_EXT h264ExtPicParams; /**< [in]: Specifies the H264 extension config parameters using this config. */
+ NV_ENC_TIME_CODE timeCode; /**< [in]: Specifies the clock timestamp sets used in picture timing SEI. Applicable only when NV_ENC_CONFIG_H264::enableTimeCode is set to 1. */
+ uint32_t reserved [203]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved2[61]; /**< [in]: Reserved and must be set to NULL. */
+} NV_ENC_PIC_PARAMS_H264;
+
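+/**
+ * Illustrative sketch (not part of the SDK documentation): "LTR Per Picture"
+ * use of the fields above, assuming NV_ENC_CONFIG_H264::enableLTR was set with
+ * ltrTrustMode = 0. One frame is marked as long-term reference index 0; a
+ * later frame is allowed to predict from it.
+ * \code
+ *     // while encoding the frame to keep as a long-term reference:
+ *     h264PicParams.ltrMarkFrame    = 1;
+ *     h264PicParams.ltrMarkFrameIdx = 0;
+ *     // while encoding a later frame that may predict from LTR index 0:
+ *     h264PicParams.ltrUseFrames      = 1;
+ *     h264PicParams.ltrUseFrameBitmap = 1u << 0;
+ * \endcode
+ */
+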
+/**
+ * \struct _NV_ENC_PIC_PARAMS_HEVC
+ * HEVC-specific encode picture parameters, sent on a per-frame basis.
+ */
+typedef struct _NV_ENC_PIC_PARAMS_HEVC
+{
+    uint32_t displayPOCSyntax; /**< [in]: Specifies the display POC syntax. This is required to be set if the client is handling the picture type decision. */
+ uint32_t refPicFlag; /**< [in]: Set to 1 for a reference picture. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+ uint32_t temporalId; /**< [in]: Specifies the temporal id of the picture */
+ uint32_t forceIntraRefreshWithFrameCnt; /**< [in]: Forces an intra refresh with duration equal to intraRefreshFrameCnt.
+                                                     When outputRecoveryPointSEI is set, this value is used for recovery_frame_cnt in the recovery point SEI message.
+ forceIntraRefreshWithFrameCnt cannot be used if B frames are used in the GOP structure specified */
+ uint32_t constrainedFrame :1; /**< [in]: Set to 1 if client wants to encode this frame with each slice completely independent of other slices in the frame.
+ NV_ENC_INITIALIZE_PARAMS::enableConstrainedEncoding should be set to 1 */
+    uint32_t sliceModeDataUpdate :1; /**< [in]: Set to 1 if the client wants to change the sliceModeData field to specify a new slice size parameter.
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting */
+ uint32_t ltrMarkFrame :1; /**< [in]: Set to 1 if client wants to mark this frame as LTR */
+ uint32_t ltrUseFrames :1; /**< [in]: Set to 1 if client allows encoding this frame using the LTR frames specified in ltrFrameBitmap */
+ uint32_t reservedBitFields :28; /**< [in]: Reserved bit fields and must be set to 0 */
+ uint8_t* sliceTypeData; /**< [in]: Array which specifies the slice type used to force intra slice for a particular slice. Currently supported only for NV_ENC_CONFIG_H264::sliceMode == 3.
+                                                     Client should allocate an array of size sliceModeData, where sliceModeData is specified in the corresponding field of ::_NV_ENC_CONFIG_H264.
+ Array element with index n corresponds to nth slice. To force a particular slice to intra client should set corresponding array element to NV_ENC_SLICE_TYPE_I
+ all other array elements should be set to NV_ENC_SLICE_TYPE_DEFAULT */
+ uint32_t sliceTypeArrayCnt; /**< [in]: Client should set this to the number of elements allocated in sliceTypeData array. If sliceTypeData is NULL then this should be set to 0 */
+ uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices
+ sliceMode = 0 CTU based slices, sliceMode = 1 Byte based slices, sliceMode = 2 CTU row based slices, sliceMode = 3, numSlices in Picture
+ When forceIntraRefreshWithFrameCnt is set it will have priority over sliceMode setting
+ When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */
+ uint32_t sliceModeData; /**< [in]: Specifies the parameter needed for sliceMode. For:
+ sliceMode = 0, sliceModeData specifies # of CTUs in each slice (except last slice)
+ sliceMode = 1, sliceModeData specifies maximum # of bytes in each slice (except last slice)
+ sliceMode = 2, sliceModeData specifies # of CTU rows in each slice (except last slice)
+ sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
+ uint32_t ltrMarkFrameIdx; /**< [in]: Specifies the long term reference frame index to use for marking this frame as LTR.*/
+ uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the associated bitmap of LTR frame indices to use when encoding this frame. */
+ uint32_t ltrUsageMode; /**< [in]: Not supported. Reserved for future use and must be set to 0. */
+ uint32_t seiPayloadArrayCnt; /**< [in]: Specifies the number of elements allocated in seiPayloadArray array. */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0. */
+ NV_ENC_SEI_PAYLOAD* seiPayloadArray; /**< [in]: Array of SEI payloads which will be inserted for this frame. */
+ NV_ENC_TIME_CODE timeCode; /**< [in]: Specifies the clock timestamp sets used in time code SEI. Applicable only when NV_ENC_CONFIG_HEVC::enableTimeCodeSEI is set to 1. */
+ uint32_t reserved2 [237]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved3[61]; /**< [in]: Reserved and must be set to NULL. */
+} NV_ENC_PIC_PARAMS_HEVC;
+
+#define NV_ENC_AV1_OBU_PAYLOAD NV_ENC_SEI_PAYLOAD
+
+/**
+* \struct _NV_ENC_PIC_PARAMS_AV1
+* AV1-specific encode picture parameters, sent on a per-frame basis.
+*/
+typedef struct _NV_ENC_PIC_PARAMS_AV1
+{
+    uint32_t displayPOCSyntax; /**< [in]: Specifies the display POC syntax. This is required to be set if the client is handling the picture type decision. */
+ uint32_t refPicFlag; /**< [in]: Set to 1 for a reference picture. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+ uint32_t temporalId; /**< [in]: Specifies the temporal id of the picture */
+ uint32_t forceIntraRefreshWithFrameCnt; /**< [in]: Forces an intra refresh with duration equal to intraRefreshFrameCnt.
+ forceIntraRefreshWithFrameCnt cannot be used if B frames are used in the GOP structure specified */
+ uint32_t goldenFrameFlag : 1; /**< [in]: Encode frame as Golden Frame. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+ uint32_t arfFrameFlag : 1; /**< [in]: Encode frame as Alternate Reference Frame. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+ uint32_t arf2FrameFlag : 1; /**< [in]: Encode frame as Alternate Reference 2 Frame. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+ uint32_t bwdFrameFlag : 1; /**< [in]: Encode frame as Backward Reference Frame. This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+ uint32_t overlayFrameFlag : 1; /**< [in]: Encode frame as overlay frame. A previously encoded frame with the same displayPOCSyntax value should be present in reference frame buffer.
+ This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+    uint32_t showExistingFrameFlag : 1; /**< [in]: When overlayFrameFlag is set to 1, this flag controls the value of the show_existing_frame syntax element associated with the overlay frame.
+ This flag is added to the interface as a placeholder. Its value is ignored for now and always assumed to be set to 1.
+ This is ignored if NV_ENC_INITIALIZE_PARAMS::enablePTD is set to 1. */
+    uint32_t errorResilientModeFlag : 1; /**< [in]: Encode the frame independently of previously encoded frames. */
+
+ uint32_t tileConfigUpdate : 1; /**< [in]: Set to 1 if client wants to overwrite the default tile configuration with the tile parameters specified below
+ When forceIntraRefreshWithFrameCnt is set it will have priority over tileConfigUpdate setting */
+ uint32_t enableCustomTileConfig : 1; /**< [in]: Set 1 to enable custom tile configuration: numTileColumns and numTileRows must have non zero values and tileWidths and tileHeights must point to a valid address */
+ uint32_t filmGrainParamsUpdate : 1; /**< [in]: Set to 1 if client wants to update previous film grain parameters: filmGrainParams must point to a valid address and encoder must have been configured with film grain enabled */
+ uint32_t reservedBitFields : 22; /**< [in]: Reserved bitfields and must be set to 0 */
+ uint32_t numTileColumns; /**< [in]: This parameter in conjunction with the flag enableCustomTileConfig and the array tileWidths[] specifies the way in which the picture is divided into tile columns.
+ When enableCustomTileConfig == 0, the picture will be uniformly divided into numTileColumns tile columns. If numTileColumns is not a power of 2,
+                                                     it will be rounded down to the next power of 2 value. If numTileColumns == 0, the picture will be coded with the smallest number of vertical tiles as allowed by the standard.
+                                                     When enableCustomTileConfig == 1, numTileColumns must be > 0 and <= NV_MAX_TILE_COLS_AV1 and tileWidths must point to a valid array of numTileColumns entries.
+                                                     Entry i specifies the width in 64x64 CTU units of tile column i. The sum of all the entries should be equal to the picture width in 64x64 CTU units. */
+    uint32_t numTileRows;                            /**< [in]: This parameter in conjunction with the flag enableCustomTileConfig and the array tileHeights[] specifies the way in which the picture is divided into tile rows.
+                                                     When enableCustomTileConfig == 0, the picture will be uniformly divided into numTileRows tile rows. If numTileRows is not a power of 2,
+                                                     it will be rounded down to the next power of 2 value. If numTileRows == 0, the picture will be coded with the smallest number of horizontal tiles as allowed by the standard.
+                                                     When enableCustomTileConfig == 1, numTileRows must be > 0 and <= NV_MAX_TILE_ROWS_AV1 and tileHeights must point to a valid array of numTileRows entries.
+                                                     Entry i specifies the height in 64x64 CTU units of tile row i. The sum of all the entries should be equal to the picture height in 64x64 CTU units. */
+ uint32_t *tileWidths; /**< [in]: If enableCustomTileConfig == 1, tileWidths[i] specifies the width of tile column i in 64x64 CTU unit, with 0 <= i <= numTileColumns -1. */
+ uint32_t *tileHeights; /**< [in]: If enableCustomTileConfig == 1, tileHeights[i] specifies the height of tile row i in 64x64 CTU unit, with 0 <= i <= numTileRows -1. */
+ uint32_t obuPayloadArrayCnt; /**< [in]: Specifies the number of elements allocated in obuPayloadArray array. */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0. */
+ NV_ENC_AV1_OBU_PAYLOAD* obuPayloadArray; /**< [in]: Array of OBU payloads which will be inserted for this frame. */
+ NV_ENC_FILM_GRAIN_PARAMS_AV1 *filmGrainParams; /**< [in]: If filmGrainParamsUpdate == 1, filmGrainParams must point to a valid NV_ENC_FILM_GRAIN_PARAMS_AV1 structure */
+ uint32_t reserved2[247]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved3[61]; /**< [in]: Reserved and must be set to NULL. */
+} NV_ENC_PIC_PARAMS_AV1;
+
+/**
+ * Codec specific per-picture encoding parameters.
+ */
+typedef union _NV_ENC_CODEC_PIC_PARAMS
+{
+ NV_ENC_PIC_PARAMS_H264 h264PicParams; /**< [in]: H264 encode picture params. */
+ NV_ENC_PIC_PARAMS_HEVC hevcPicParams; /**< [in]: HEVC encode picture params. */
+ NV_ENC_PIC_PARAMS_AV1 av1PicParams; /**< [in]: AV1 encode picture params. */
+ uint32_t reserved[256]; /**< [in]: Reserved and must be set to 0. */
+} NV_ENC_CODEC_PIC_PARAMS;
+
+
+/**
+ * \struct _NV_ENC_PIC_PARAMS
+ * Encoding parameters that need to be sent on a per frame basis.
+ */
+typedef struct _NV_ENC_PIC_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_PIC_PARAMS_VER. */
+ uint32_t inputWidth; /**< [in]: Specifies the input frame width */
+ uint32_t inputHeight; /**< [in]: Specifies the input frame height */
+ uint32_t inputPitch; /**< [in]: Specifies the input buffer pitch. If pitch value is not known, set this to inputWidth. */
+ uint32_t encodePicFlags; /**< [in]: Specifies bit-wise OR of encode picture flags. See ::NV_ENC_PIC_FLAGS enum. */
+ uint32_t frameIdx; /**< [in]: Specifies the frame index associated with the input frame [optional]. */
+ uint64_t inputTimeStamp; /**< [in]: Specifies opaque data which is associated with the encoded frame, but not actually encoded in the output bitstream.
+ This opaque data can be used later to uniquely refer to the corresponding encoded frame. For example, it can be used
+ for identifying the frame to be invalidated in the reference picture buffer, if lost at the client. */
+ uint64_t inputDuration; /**< [in]: Specifies duration of the input picture */
+ NV_ENC_INPUT_PTR inputBuffer; /**< [in]: Specifies the input buffer pointer. Client must use a pointer obtained from ::NvEncCreateInputBuffer() or ::NvEncMapInputResource() APIs.*/
+ NV_ENC_OUTPUT_PTR outputBitstream; /**< [in]: Specifies the output buffer pointer.
+ If NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is set to 0, specifies the pointer to output buffer. Client should use a pointer obtained from ::NvEncCreateBitstreamBuffer() API.
+ If NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is set to 1, client should allocate buffer in video memory for NV_ENC_ENCODE_OUT_PARAMS struct and encoded bitstream data. Client
+ should use a pointer obtained from ::NvEncMapInputResource() API, when mapping this output buffer and assign it to NV_ENC_PIC_PARAMS::outputBitstream.
+ First 256 bytes of this buffer should be interpreted as NV_ENC_ENCODE_OUT_PARAMS struct followed by encoded bitstream data. Recommended size for output buffer is sum of size of
+                                                     NV_ENC_ENCODE_OUT_PARAMS struct and twice the input frame size for lower resolutions (e.g. CIF) and 1.5 times the input frame size for higher resolutions. If the encoded bitstream size is
+ greater than the allocated buffer size for encoded bitstream, then the output buffer will have encoded bitstream data equal to buffer size. All CUDA operations on this buffer must use
+ the default stream. */
+ void* completionEvent; /**< [in]: Specifies an event to be signaled on completion of encoding of this Frame [only if operating in Asynchronous mode]. Each output buffer should be associated with a distinct event pointer. */
+ NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Specifies the input buffer format. */
+ NV_ENC_PIC_STRUCT pictureStruct; /**< [in]: Specifies structure of the input picture. */
+    NV_ENC_PIC_TYPE pictureType; /**< [in]: Specifies the input picture type. Required to be set explicitly by the client if the client has not set NV_ENC_INITIALIZE_PARAMS::enablePTD to 1 while calling NvEncInitializeEncoder. */
+ NV_ENC_CODEC_PIC_PARAMS codecPicParams; /**< [in]: Specifies the codec specific per-picture encoding parameters. */
+    NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE meHintCountsPerBlock[2]; /**< [in]: For H264 and HEVC, specifies the number of hint candidates per block per direction for the current frame. meHintCountsPerBlock[0] is for L0 predictors and meHintCountsPerBlock[1] is for L1 predictors.
+ The candidate count in NV_ENC_PIC_PARAMS::meHintCountsPerBlock[lx] must never exceed NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[lx] provided during encoder initialization. */
+    NVENC_EXTERNAL_ME_HINT *meExternalHints; /**< [in]: For H264 and HEVC, specifies the pointer to ME external hints for the current frame. The size of ME hint buffer should be equal to number of macroblocks * the total number of candidates per macroblock.
+                                                        The total number of candidates per MB per direction = 1*meHintCountsPerBlock[Lx].numCandsPerBlk16x16 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk16x8 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk8x16
+                                                        + 4*meHintCountsPerBlock[Lx].numCandsPerBlk8x8. For frames using bidirectional ME, the total number of candidates for a single macroblock is the sum of the total number of candidates per MB for each direction (L0 and L1). */
+ uint32_t reserved1[6]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[2]; /**< [in]: Reserved and must be set to NULL */
+ int8_t *qpDeltaMap; /**< [in]: Specifies the pointer to signed byte array containing value per MB for H264, per CTB for HEVC and per SB for AV1 in raster scan order for the current picture, which will be interpreted depending on NV_ENC_RC_PARAMS::qpMapMode.
+ If NV_ENC_RC_PARAMS::qpMapMode is NV_ENC_QP_MAP_DELTA, qpDeltaMap specifies QP modifier per MB for H264, per CTB for HEVC and per SB for AV1. This QP modifier will be applied on top of the QP chosen by rate control.
+ If NV_ENC_RC_PARAMS::qpMapMode is NV_ENC_QP_MAP_EMPHASIS, qpDeltaMap specifies Emphasis Level Map per MB for H264. This level value along with QP chosen by rate control is used to
+ compute the QP modifier, which in turn is applied on top of QP chosen by rate control.
+ If NV_ENC_RC_PARAMS::qpMapMode is NV_ENC_QP_MAP_DISABLED, value in qpDeltaMap will be ignored.*/
+ uint32_t qpDeltaMapSize; /**< [in]: Specifies the size in bytes of qpDeltaMap surface allocated by client and pointed to by NV_ENC_PIC_PARAMS::qpDeltaMap. Surface (array) should be picWidthInMbs * picHeightInMbs for H264, picWidthInCtbs * picHeightInCtbs for HEVC and
+ picWidthInSbs * picHeightInSbs for AV1 */
+ uint32_t reservedBitFields; /**< [in]: Reserved bitfields and must be set to 0 */
+    uint16_t                                    meHintRefPicDist[2];                 /**< [in]: Specifies temporal distance for reference picture (NVENC_EXTERNAL_ME_HINT::refidx = 0) used during external ME with NV_ENC_INITIALIZE_PARAMS::enablePTD = 1. meHintRefPicDist[0] is for L0 hints and meHintRefPicDist[1] is for L1 hints.
+                                                                                                If not set, will internally infer distance of 1. Ignored for NV_ENC_INITIALIZE_PARAMS::enablePTD = 0 */
+ NV_ENC_INPUT_PTR alphaBuffer; /**< [in]: Specifies the input alpha buffer pointer. Client must use a pointer obtained from ::NvEncCreateInputBuffer() or ::NvEncMapInputResource() APIs.
+ Applicable only when encoding hevc with alpha layer is enabled. */
+    NVENC_EXTERNAL_ME_SB_HINT                   *meExternalSbHints;                  /**< [in]: For AV1, specifies the pointer to ME external SB hints for the current frame. The size of ME hint buffer should be equal to meSbHintsCount. */
+ uint32_t meSbHintsCount; /**< [in]: For AV1, specifies the total number of external ME SB hint candidates for the frame
+ NV_ENC_PIC_PARAMS::meSbHintsCount must never exceed the total number of SBs in frame * the max number of candidates per SB provided during encoder initialization.
+ The max number of candidates per SB is maxMeHintCountsPerBlock[0].numCandsPerSb + maxMeHintCountsPerBlock[1].numCandsPerSb */
+ uint32_t reserved3[285]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved4[58]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_PIC_PARAMS;
+
+/** Macro for constructing the version field of ::_NV_ENC_PIC_PARAMS */
+#define NV_ENC_PIC_PARAMS_VER (NVENCAPI_STRUCT_VERSION(6) | ( 1u<<31 ))
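+
+/**
+ * Editor's note: a minimal sketch (not part of the original NVIDIA header) of
+ * how a client might fill ::_NV_ENC_PIC_PARAMS for a synchronous encode of one
+ * frame. The variables \p encoder, \p mappedInput, \p outputBuffer, \p width,
+ * \p height and \p pts are assumed to exist from earlier setup steps.
+ *
+ * \code
+    NV_ENC_PIC_PARAMS picParams = { 0 };
+    picParams.version         = NV_ENC_PIC_PARAMS_VER;
+    picParams.inputWidth      = width;
+    picParams.inputHeight     = height;
+    picParams.inputTimeStamp  = pts;
+    picParams.inputBuffer     = mappedInput;   // from NvEncMapInputResource()
+    picParams.outputBitstream = outputBuffer;  // from NvEncCreateBitstreamBuffer()
+    picParams.bufferFmt       = NV_ENC_BUFFER_FORMAT_NV12;
+    picParams.pictureStruct   = NV_ENC_PIC_STRUCT_FRAME;
+    NVENCSTATUS status = NvEncEncodePicture(encoder, &picParams);
+ * \endcode
+ */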
+
+
+/**
+ * \struct _NV_ENC_MEONLY_PARAMS
+ * MEOnly parameters that need to be sent on a per motion estimation basis.
+ * NV_ENC_MEONLY_PARAMS::meExternalHints is supported for H264 only.
+ */
+typedef struct _NV_ENC_MEONLY_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to NV_ENC_MEONLY_PARAMS_VER.*/
+ uint32_t inputWidth; /**< [in]: Specifies the input frame width */
+ uint32_t inputHeight; /**< [in]: Specifies the input frame height */
+ NV_ENC_INPUT_PTR inputBuffer; /**< [in]: Specifies the input buffer pointer. Client must use a pointer obtained from NvEncCreateInputBuffer() or NvEncMapInputResource() APIs. */
+ NV_ENC_INPUT_PTR referenceFrame; /**< [in]: Specifies the reference frame pointer */
+ NV_ENC_OUTPUT_PTR mvBuffer; /**< [in]: Specifies the output buffer pointer.
+ If NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is set to 0, specifies the pointer to motion vector data buffer allocated by NvEncCreateMVBuffer.
+ Client must lock mvBuffer using ::NvEncLockBitstream() API to get the motion vector data.
+ If NV_ENC_INITIALIZE_PARAMS::enableOutputInVidmem is set to 1, client should allocate buffer in video memory for storing the motion vector data. The size of this buffer must
+ be equal to total number of macroblocks multiplied by size of NV_ENC_H264_MV_DATA struct. Client should use a pointer obtained from ::NvEncMapInputResource() API, when mapping this
+ output buffer and assign it to NV_ENC_MEONLY_PARAMS::mvBuffer. All CUDA operations on this buffer must use the default stream. */
+ NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Specifies the input buffer format. */
+ void* completionEvent; /**< [in]: Specifies an event to be signaled on completion of motion estimation
+ of this Frame [only if operating in Asynchronous mode].
+ Each output buffer should be associated with a distinct event pointer. */
+ uint32_t viewID; /**< [in]: Specifies left or right viewID if NV_ENC_CONFIG_H264_MEONLY::bStereoEnable is set.
+ viewID can be 0,1 if bStereoEnable is set, 0 otherwise. */
+ NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE
+ meHintCountsPerBlock[2]; /**< [in]: Specifies the number of hint candidates per block for the current frame. meHintCountsPerBlock[0] is for L0 predictors.
+ The candidate count in NV_ENC_PIC_PARAMS::meHintCountsPerBlock[lx] must never exceed NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[lx] provided during encoder initialization. */
+ NVENC_EXTERNAL_ME_HINT *meExternalHints; /**< [in]: Specifies the pointer to ME external hints for the current frame. The size of ME hint buffer should be equal to number of macroblocks * the total number of candidates per macroblock.
+                                                                                The total number of candidates per MB per direction = 1*meHintCountsPerBlock[Lx].numCandsPerBlk16x16 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk16x8 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk8x16
+                                                                                + 4*meHintCountsPerBlock[Lx].numCandsPerBlk8x8. For frames using bidirectional ME, the total number of candidates for a single macroblock is the sum of the total number of candidates per MB for each direction (L0 and L1). */
+ uint32_t reserved1[243]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[59]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_MEONLY_PARAMS;
+
+/** NV_ENC_MEONLY_PARAMS struct version*/
+#define NV_ENC_MEONLY_PARAMS_VER NVENCAPI_STRUCT_VERSION(3)
+
+
+/**
+ * \struct _NV_ENC_LOCK_BITSTREAM
+ * Bitstream buffer lock parameters.
+ */
+typedef struct _NV_ENC_LOCK_BITSTREAM
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_LOCK_BITSTREAM_VER. */
+ uint32_t doNotWait :1; /**< [in]: If this flag is set, the NvEncodeAPI interface will return buffer pointer even if operation is not completed. If not set, the call will block until operation completes. */
+ uint32_t ltrFrame :1; /**< [out]: Flag indicating this frame is marked as LTR frame */
+    uint32_t                getRCStats :1;           /**< [in]: If this flag is set, the lockBitstream call will additionally return intra/inter MB counts and average MVX, MVY */
+ uint32_t reservedBitFields :29; /**< [in]: Reserved bit fields and must be set to 0 */
+ void* outputBitstream; /**< [in]: Pointer to the bitstream buffer being locked. */
+ uint32_t* sliceOffsets; /**< [in, out]: Array which receives the slice (H264/HEVC) or tile (AV1) offsets. This is not supported if NV_ENC_CONFIG_H264::sliceMode is 1 on Kepler GPUs. Array size must be equal to size of frame in MBs. */
+ uint32_t frameIdx; /**< [out]: Frame no. for which the bitstream is being retrieved. */
+ uint32_t hwEncodeStatus; /**< [out]: The NvEncodeAPI interface status for the locked picture. */
+ uint32_t numSlices; /**< [out]: Number of slices (H264/HEVC) or tiles (AV1) in the encoded picture. Will be reported only if NV_ENC_INITIALIZE_PARAMS::reportSliceOffsets set to 1. */
+ uint32_t bitstreamSizeInBytes; /**< [out]: Actual number of bytes generated and copied to the memory pointed by bitstreamBufferPtr.
+ When HEVC alpha layer encoding is enabled, this field reports the total encoded size in bytes i.e it is the encoded size of the base plus the alpha layer.
+ For AV1 when enablePTD is set, this field reports the total encoded size in bytes of all the encoded frames packed into the current output surface i.e. show frame plus all preceding no-show frames */
+ uint64_t outputTimeStamp; /**< [out]: Presentation timestamp associated with the encoded output. */
+    uint64_t                outputDuration;          /**< [out]: Presentation duration associated with the encoded output. */
+ void* bitstreamBufferPtr; /**< [out]: Pointer to the generated output bitstream.
+ For MEOnly mode _NV_ENC_LOCK_BITSTREAM::bitstreamBufferPtr should be typecast to
+ NV_ENC_H264_MV_DATA/NV_ENC_HEVC_MV_DATA pointer respectively for H264/HEVC */
+ NV_ENC_PIC_TYPE pictureType; /**< [out]: Picture type of the encoded picture. */
+ NV_ENC_PIC_STRUCT pictureStruct; /**< [out]: Structure of the generated output picture. */
+ uint32_t frameAvgQP; /**< [out]: Average QP of the frame. */
+ uint32_t frameSatd; /**< [out]: Total SATD cost for whole frame. */
+ uint32_t ltrFrameIdx; /**< [out]: Frame index associated with this LTR frame. */
+ uint32_t ltrFrameBitmap; /**< [out]: Bitmap of LTR frames indices which were used for encoding this frame. Value of 0 if no LTR frames were used. */
+ uint32_t temporalId; /**< [out]: TemporalId value of the frame when using temporalSVC encoding */
+ uint32_t reserved[12]; /**< [in]: Reserved and must be set to 0 */
+ uint32_t intraMBCount; /**< [out]: For H264, Number of Intra MBs in the encoded frame. For HEVC, Number of Intra CTBs in the encoded frame. For AV1, Number of Intra SBs in the encoded show frame. Supported only if _NV_ENC_LOCK_BITSTREAM::getRCStats set to 1. */
+ uint32_t interMBCount; /**< [out]: For H264, Number of Inter MBs in the encoded frame, includes skip MBs. For HEVC, Number of Inter CTBs in the encoded frame. For AV1, Number of Inter SBs in the encoded show frame. Supported only if _NV_ENC_LOCK_BITSTREAM::getRCStats set to 1. */
+ int32_t averageMVX; /**< [out]: Average Motion Vector in X direction for the encoded frame. Supported only if _NV_ENC_LOCK_BITSTREAM::getRCStats set to 1. */
+ int32_t averageMVY; /**< [out]: Average Motion Vector in y direction for the encoded frame. Supported only if _NV_ENC_LOCK_BITSTREAM::getRCStats set to 1. */
+ uint32_t alphaLayerSizeInBytes; /**< [out]: Number of bytes generated for the alpha layer in the encoded output. Applicable only when HEVC with alpha encoding is enabled. */
+
+ uint32_t reserved1[218]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_LOCK_BITSTREAM;
+
+/** Macro for constructing the version field of ::_NV_ENC_LOCK_BITSTREAM */
+#define NV_ENC_LOCK_BITSTREAM_VER NVENCAPI_STRUCT_VERSION(2)
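+
+/**
+ * Editor's note: a minimal sketch (not part of the original NVIDIA header) of
+ * reading encoded data through ::_NV_ENC_LOCK_BITSTREAM. \p encoder,
+ * \p outputBuffer and the FILE* \p f are assumed to exist;
+ * NvEncUnlockBitstream() is declared further down in this header.
+ *
+ * \code
+    NV_ENC_LOCK_BITSTREAM lockParams = { 0 };
+    lockParams.version = NV_ENC_LOCK_BITSTREAM_VER;
+    lockParams.outputBitstream = outputBuffer;
+    if (NvEncLockBitstream(encoder, &lockParams) == NV_ENC_SUCCESS) {
+        // the encoded data is valid only while the buffer stays locked
+        fwrite(lockParams.bitstreamBufferPtr, 1, lockParams.bitstreamSizeInBytes, f);
+        NvEncUnlockBitstream(encoder, outputBuffer);
+    }
+ * \endcode
+ */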
+
+
+/**
+ * \struct _NV_ENC_LOCK_INPUT_BUFFER
+ * Uncompressed Input Buffer lock parameters.
+ */
+typedef struct _NV_ENC_LOCK_INPUT_BUFFER
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_LOCK_INPUT_BUFFER_VER. */
+    uint32_t                  doNotWait         :1;    /**< [in]: Set to 1 to make ::NvEncLockInputBuffer() a non-blocking call. If the encoding is not completed, the driver will return ::NV_ENC_ERR_ENCODER_BUSY error code. */
+ uint32_t reservedBitFields :31; /**< [in]: Reserved bitfields and must be set to 0 */
+ NV_ENC_INPUT_PTR inputBuffer; /**< [in]: Pointer to the input buffer to be locked, client should pass the pointer obtained from ::NvEncCreateInputBuffer() or ::NvEncMapInputResource API. */
+    void*                     bufferDataPtr;           /**< [out]: Pointer to the locked input buffer data. Client can only access input buffer using the \p bufferDataPtr. */
+ uint32_t pitch; /**< [out]: Pitch of the locked input buffer. */
+ uint32_t reserved1[251]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_LOCK_INPUT_BUFFER;
+
+/** Macro for constructing the version field of ::_NV_ENC_LOCK_INPUT_BUFFER */
+#define NV_ENC_LOCK_INPUT_BUFFER_VER NVENCAPI_STRUCT_VERSION(1)
+
+
+/**
+ * \struct _NV_ENC_MAP_INPUT_RESOURCE
+ * Map an input resource to a Nvidia Encoder Input Buffer
+ */
+typedef struct _NV_ENC_MAP_INPUT_RESOURCE
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_MAP_INPUT_RESOURCE_VER. */
+ uint32_t subResourceIndex; /**< [in]: Deprecated. Do not use. */
+ void* inputResource; /**< [in]: Deprecated. Do not use. */
+    NV_ENC_REGISTERED_PTR      registeredResource;     /**< [in]: The Registered resource handle obtained by calling NvEncRegisterResource. */
+ NV_ENC_INPUT_PTR mappedResource; /**< [out]: Mapped pointer corresponding to the registeredResource. This pointer must be used in NV_ENC_PIC_PARAMS::inputBuffer parameter in ::NvEncEncodePicture() API. */
+    NV_ENC_BUFFER_FORMAT       mappedBufferFmt;        /**< [out]: Buffer format of the mapped resource. This buffer format must be used in NV_ENC_PIC_PARAMS::bufferFmt if the client is using the above mapped resource pointer. */
+ uint32_t reserved1[251]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved2[63]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_MAP_INPUT_RESOURCE;
+
+/** Macro for constructing the version field of ::_NV_ENC_MAP_INPUT_RESOURCE */
+#define NV_ENC_MAP_INPUT_RESOURCE_VER NVENCAPI_STRUCT_VERSION(4)
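+
+/**
+ * Editor's note: a minimal sketch (not part of the original NVIDIA header) of
+ * mapping a registered resource before encoding. \p encoder and
+ * \p registeredResource (from NvEncRegisterResource()) are assumed to exist;
+ * NvEncMapInputResource()/NvEncUnmapInputResource() are declared further down
+ * in this header.
+ *
+ * \code
+    NV_ENC_MAP_INPUT_RESOURCE mapParams = { 0 };
+    mapParams.version = NV_ENC_MAP_INPUT_RESOURCE_VER;
+    mapParams.registeredResource = registeredResource;
+    if (NvEncMapInputResource(encoder, &mapParams) == NV_ENC_SUCCESS) {
+        // use mapParams.mappedResource as NV_ENC_PIC_PARAMS::inputBuffer and
+        // mapParams.mappedBufferFmt as NV_ENC_PIC_PARAMS::bufferFmt, then:
+        NvEncUnmapInputResource(encoder, mapParams.mappedResource);
+    }
+ * \endcode
+ */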
+
+/**
+ * \struct _NV_ENC_INPUT_RESOURCE_OPENGL_TEX
+ * NV_ENC_REGISTER_RESOURCE::resourceToRegister must be a pointer to a variable of this type,
+ * when NV_ENC_REGISTER_RESOURCE::resourceType is NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX
+ */
+typedef struct _NV_ENC_INPUT_RESOURCE_OPENGL_TEX
+{
+ uint32_t texture; /**< [in]: The name of the texture to be used. */
+ uint32_t target; /**< [in]: Accepted values are GL_TEXTURE_RECTANGLE and GL_TEXTURE_2D. */
+} NV_ENC_INPUT_RESOURCE_OPENGL_TEX;
+
+/** \struct NV_ENC_FENCE_POINT_D3D12
+* Fence and fence value for synchronization.
+*/
+typedef struct _NV_ENC_FENCE_POINT_D3D12
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_FENCE_POINT_D3D12_VER. */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0. */
+ void* pFence; /**< [in]: Pointer to ID3D12Fence. This fence object is used for synchronization. */
+ uint64_t waitValue; /**< [in]: Fence value to reach or exceed before the GPU operation. */
+ uint64_t signalValue; /**< [in]: Fence value to set the fence to, after the GPU operation. */
+ uint32_t bWait:1; /**< [in]: Wait on 'waitValue' if bWait is set to 1, before starting GPU operation. */
+ uint32_t bSignal:1; /**< [in]: Signal on 'signalValue' if bSignal is set to 1, after GPU operation is complete. */
+ uint32_t reservedBitField:30; /**< [in]: Reserved and must be set to 0. */
+ uint32_t reserved1[7]; /**< [in]: Reserved and must be set to 0. */
+} NV_ENC_FENCE_POINT_D3D12;
+
+#define NV_ENC_FENCE_POINT_D3D12_VER NVENCAPI_STRUCT_VERSION(1)
+
+/**
+ * \struct _NV_ENC_INPUT_RESOURCE_D3D12
+ * NV_ENC_PIC_PARAMS::inputBuffer and NV_ENC_PIC_PARAMS::alphaBuffer must be a pointer to a struct of this type,
+ * when D3D12 interface is used
+ */
+typedef struct _NV_ENC_INPUT_RESOURCE_D3D12
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_INPUT_RESOURCE_D3D12_VER. */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0. */
+ NV_ENC_INPUT_PTR pInputBuffer; /**< [in]: Specifies the input surface pointer. Client must use a pointer obtained from NvEncMapInputResource() in NV_ENC_MAP_INPUT_RESOURCE::mappedResource
+ when mapping the input surface. */
+ NV_ENC_FENCE_POINT_D3D12 inputFencePoint; /**< [in]: Specifies the fence and corresponding fence values to do GPU wait and signal. */
+ uint32_t reserved1[16]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved2[16]; /**< [in]: Reserved and must be set to NULL. */
+} NV_ENC_INPUT_RESOURCE_D3D12;
+
+#define NV_ENC_INPUT_RESOURCE_D3D12_VER NVENCAPI_STRUCT_VERSION(1)
+
+/**
+ * \struct _NV_ENC_OUTPUT_RESOURCE_D3D12
+ * NV_ENC_PIC_PARAMS::outputBitstream and NV_ENC_LOCK_BITSTREAM::outputBitstream must be a pointer to a struct of this type,
+ * when D3D12 interface is used
+ */
+typedef struct _NV_ENC_OUTPUT_RESOURCE_D3D12
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_OUTPUT_RESOURCE_D3D12_VER. */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0. */
+ NV_ENC_INPUT_PTR pOutputBuffer; /**< [in]: Specifies the output buffer pointer. Client must use a pointer obtained from NvEncMapInputResource() in NV_ENC_MAP_INPUT_RESOURCE::mappedResource
+ when mapping output bitstream buffer */
+ NV_ENC_FENCE_POINT_D3D12 outputFencePoint; /**< [in]: Specifies the fence and corresponding fence values to do GPU wait and signal.*/
+ uint32_t reserved1[16]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved2[16]; /**< [in]: Reserved and must be set to NULL. */
+} NV_ENC_OUTPUT_RESOURCE_D3D12;
+
+#define NV_ENC_OUTPUT_RESOURCE_D3D12_VER NVENCAPI_STRUCT_VERSION(1)
+
+/**
+ * \struct _NV_ENC_REGISTER_RESOURCE
+ * Register a resource for future use with the Nvidia Video Encoder Interface.
+ */
+typedef struct _NV_ENC_REGISTER_RESOURCE
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_REGISTER_RESOURCE_VER. */
+ NV_ENC_INPUT_RESOURCE_TYPE resourceType; /**< [in]: Specifies the type of resource to be registered.
+ Supported values are
+ ::NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX,
+ ::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR,
+ ::NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX */
+ uint32_t width; /**< [in]: Input frame width. */
+ uint32_t height; /**< [in]: Input frame height. */
+ uint32_t pitch; /**< [in]: Input buffer pitch.
+ For ::NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX resources, set this to 0.
+ For ::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR resources, set this to
+ the pitch as obtained from cuMemAllocPitch(), or to the width in
+ bytes (if this resource was created by using cuMemAlloc()). This
+ value must be a multiple of 4.
+ For ::NV_ENC_INPUT_RESOURCE_TYPE_CUDAARRAY resources, set this to the
+ width of the allocation in bytes (i.e.
+ CUDA_ARRAY3D_DESCRIPTOR::Width * CUDA_ARRAY3D_DESCRIPTOR::NumChannels).
+ For ::NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX resources, set this to the
+ texture width multiplied by the number of components in the texture
+ format. */
+ uint32_t subResourceIndex; /**< [in]: Subresource Index of the DirectX resource to be registered. Should be set to 0 for other interfaces. */
+ void* resourceToRegister; /**< [in]: Handle to the resource that is being registered. */
+ NV_ENC_REGISTERED_PTR registeredResource; /**< [out]: Registered resource handle. This should be used in future interactions with the Nvidia Video Encoder Interface. */
+ NV_ENC_BUFFER_FORMAT bufferFormat; /**< [in]: Buffer format of resource to be registered. */
+ NV_ENC_BUFFER_USAGE bufferUsage; /**< [in]: Usage of resource to be registered. */
+ NV_ENC_FENCE_POINT_D3D12* pInputFencePoint; /**< [in]: Specifies the input fence and corresponding fence values to do GPU wait and signal.
+ To be used only when NV_ENC_REGISTER_RESOURCE::resourceToRegister represents D3D12 surface and
+ NV_ENC_BUFFER_USAGE::bufferUsage is NV_ENC_INPUT_IMAGE.
+ The fence NV_ENC_FENCE_POINT_D3D12::pFence and NV_ENC_FENCE_POINT_D3D12::waitValue will be used to do GPU wait
+ before starting GPU operation, if NV_ENC_FENCE_POINT_D3D12::bWait is set.
+ The fence NV_ENC_FENCE_POINT_D3D12::pFence and NV_ENC_FENCE_POINT_D3D12::signalValue will be used to do GPU signal
+ when GPU operation finishes, if NV_ENC_FENCE_POINT_D3D12::bSignal is set. */
+ uint32_t reserved1[247]; /**< [in]: Reserved and must be set to 0. */
+ void* reserved2[61]; /**< [in]: Reserved and must be set to NULL. */
+} NV_ENC_REGISTER_RESOURCE;
+
+/** Macro for constructing the version field of ::_NV_ENC_REGISTER_RESOURCE */
+#define NV_ENC_REGISTER_RESOURCE_VER NVENCAPI_STRUCT_VERSION(4)
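+
+/**
+ * Editor's note: a minimal sketch (not part of the original NVIDIA header) of
+ * registering an OpenGL texture as an encoder input, the path most relevant to
+ * this project. \p encoder, \p textureId, \p width and \p height are assumed
+ * to exist, GL_TEXTURE_2D comes from the GL headers, and an OpenGL context
+ * must be current on the calling thread.
+ *
+ * \code
+    NV_ENC_INPUT_RESOURCE_OPENGL_TEX glTex = { 0 };
+    glTex.texture = textureId;
+    glTex.target  = GL_TEXTURE_2D;
+
+    NV_ENC_REGISTER_RESOURCE regParams = { 0 };
+    regParams.version            = NV_ENC_REGISTER_RESOURCE_VER;
+    regParams.resourceType       = NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX;
+    regParams.width              = width;
+    regParams.height             = height;
+    regParams.pitch              = width * 4;  // 4 components per texel for RGBA
+    regParams.resourceToRegister = &glTex;
+    regParams.bufferFormat       = NV_ENC_BUFFER_FORMAT_ABGR;
+    regParams.bufferUsage        = NV_ENC_INPUT_IMAGE;
+    NVENCSTATUS status = NvEncRegisterResource(encoder, &regParams);
+    // on success, regParams.registeredResource can be passed to NvEncMapInputResource()
+ * \endcode
+ */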
+
+/**
+ * \struct _NV_ENC_STAT
+ * Encode Stats structure.
+ */
+typedef struct _NV_ENC_STAT
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_STAT_VER. */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0 */
+ NV_ENC_OUTPUT_PTR outputBitStream; /**< [out]: Specifies the pointer to output bitstream. */
+ uint32_t bitStreamSize; /**< [out]: Size of generated bitstream in bytes. */
+ uint32_t picType; /**< [out]: Picture type of encoded picture. See ::NV_ENC_PIC_TYPE. */
+    uint32_t        lastValidByteOffset;                 /**< [out]: Offset of last valid byte of completed bitstream */
+ uint32_t sliceOffsets[16]; /**< [out]: Offsets of each slice */
+ uint32_t picIdx; /**< [out]: Picture number */
+ uint32_t frameAvgQP; /**< [out]: Average QP of the frame. */
+ uint32_t ltrFrame :1; /**< [out]: Flag indicating this frame is marked as LTR frame */
+ uint32_t reservedBitFields :31; /**< [in]: Reserved bit fields and must be set to 0 */
+ uint32_t ltrFrameIdx; /**< [out]: Frame index associated with this LTR frame. */
+ uint32_t intraMBCount; /**< [out]: For H264, Number of Intra MBs in the encoded frame. For HEVC, Number of Intra CTBs in the encoded frame. */
+ uint32_t interMBCount; /**< [out]: For H264, Number of Inter MBs in the encoded frame, includes skip MBs. For HEVC, Number of Inter CTBs in the encoded frame. */
+ int32_t averageMVX; /**< [out]: Average Motion Vector in X direction for the encoded frame. */
+ int32_t averageMVY; /**< [out]: Average Motion Vector in y direction for the encoded frame. */
+ uint32_t reserved1[226]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_STAT;
+
+/** Macro for constructing the version field of ::_NV_ENC_STAT */
+#define NV_ENC_STAT_VER NVENCAPI_STRUCT_VERSION(1)
+
+
+/**
+ * \struct _NV_ENC_SEQUENCE_PARAM_PAYLOAD
+ * Sequence and picture parameters payload.
+ */
+typedef struct _NV_ENC_SEQUENCE_PARAM_PAYLOAD
+{
+    uint32_t            version;                 /**< [in]: Struct version. Must be set to ::NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER. */
+ uint32_t inBufferSize; /**< [in]: Specifies the size of the spsppsBuffer provided by the client */
+ uint32_t spsId; /**< [in]: Specifies the SPS id to be used in sequence header. Default value is 0. */
+ uint32_t ppsId; /**< [in]: Specifies the PPS id to be used in picture header. Default value is 0. */
+ void* spsppsBuffer; /**< [in]: Specifies bitstream header pointer of size NV_ENC_SEQUENCE_PARAM_PAYLOAD::inBufferSize.
+ It is the client's responsibility to manage this memory. */
+ uint32_t* outSPSPPSPayloadSize; /**< [out]: Size of the sequence and picture header in bytes. */
+ uint32_t reserved [250]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_SEQUENCE_PARAM_PAYLOAD;
+
+/** Macro for constructing the version field of ::_NV_ENC_SEQUENCE_PARAM_PAYLOAD */
+#define NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER NVENCAPI_STRUCT_VERSION(1)
+
+
+/**
+ * Event registration/unregistration parameters.
+ */
+typedef struct _NV_ENC_EVENT_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_EVENT_PARAMS_VER. */
+ uint32_t reserved; /**< [in]: Reserved and must be set to 0 */
+ void* completionEvent; /**< [in]: Handle to event to be registered/unregistered with the NvEncodeAPI interface. */
+ uint32_t reserved1[253]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_EVENT_PARAMS;
+
+/** Macro for constructing the version field of ::_NV_ENC_EVENT_PARAMS */
+#define NV_ENC_EVENT_PARAMS_VER NVENCAPI_STRUCT_VERSION(1)
+
+/**
+ * Encoder Session Creation parameters
+ */
+typedef struct _NV_ENC_OPEN_ENCODE_SESSIONEX_PARAMS
+{
+ uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER. */
+    NV_ENC_DEVICE_TYPE deviceType;     /**< [in]: Specifies the device type. */
+ void* device; /**< [in]: Pointer to client device. */
+ void* reserved; /**< [in]: Reserved and must be set to 0. */
+ uint32_t apiVersion; /**< [in]: API version. Should be set to NVENCAPI_VERSION. */
+ uint32_t reserved1[253]; /**< [in]: Reserved and must be set to 0 */
+ void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS;
+/** Macro for constructing the version field of ::_NV_ENC_OPEN_ENCODE_SESSIONEX_PARAMS */
+#define NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER NVENCAPI_STRUCT_VERSION(1)
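+
+/**
+ * Editor's note: a minimal sketch (not part of the original NVIDIA header) of
+ * opening an encode session on a CUDA device. \p cuContext is an existing
+ * CUcontext; NvEncOpenEncodeSessionEx() is declared further down in this
+ * header (real clients usually reach it through the function list returned by
+ * NvEncodeAPICreateInstance()).
+ *
+ * \code
+    NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS sessionParams = { 0 };
+    sessionParams.version    = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
+    sessionParams.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
+    sessionParams.device     = cuContext;
+    sessionParams.apiVersion = NVENCAPI_VERSION;
+    void *encoder = NULL;
+    NVENCSTATUS status = NvEncOpenEncodeSessionEx(&sessionParams, &encoder);
+ * \endcode
+ */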
+
+/** @} */ /* END ENCODER_STRUCTURE */
+
+
+/**
+ * \addtogroup ENCODE_FUNC NvEncodeAPI Functions
+ * @{
+ */
+
+// NvEncOpenEncodeSession
+/**
+ * \brief Opens an encoding session.
+ *
+ * Deprecated.
+ *
+ * \return
+ * ::NV_ENC_ERR_INVALID_CALL\n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncOpenEncodeSession (void* device, uint32_t deviceType, void** encoder);
+
+// NvEncGetEncodeGuidCount
+/**
+ * \brief Retrieves the number of supported encode GUIDs.
+ *
+ * The function returns the number of codec GUIDs supported by the NvEncodeAPI
+ * interface.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [out] encodeGUIDCount
+ * Number of supported encode GUIDs.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeGUIDCount (void* encoder, uint32_t* encodeGUIDCount);
+
+
+// NvEncGetEncodeGUIDs
+/**
+ * \brief Retrieves an array of supported encoder codec GUIDs.
+ *
+ * The function returns an array of codec GUIDs supported by the NvEncodeAPI interface.
+ * The client must allocate an array where the NvEncodeAPI interface can
+ * fill the supported GUIDs and pass the pointer in \p *GUIDs parameter.
+ * The size of the array can be determined by using ::NvEncGetEncodeGUIDCount() API.
+ * The Nvidia Encoding interface returns the number of codec GUIDs it has actually
+ * filled in the GUID array in the \p GUIDCount parameter.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] guidArraySize
+ *   Number of GUIDs to be retrieved. Should be set to the number retrieved using
+ * ::NvEncGetEncodeGUIDCount.
+ * \param [out] GUIDs
+ * Array of supported Encode GUIDs.
+ * \param [out] GUIDCount
+ * Number of supported Encode GUIDs.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeGUIDs (void* encoder, GUID* GUIDs, uint32_t guidArraySize, uint32_t* GUIDCount);
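+
+/**
+ * Editor's note: a minimal sketch (not part of the original NVIDIA header) of
+ * the two-call enumeration pattern used throughout this API, here checking for
+ * HEVC support. \p encoder is assumed to exist; malloc/free come from
+ * <stdlib.h> and memcmp from <string.h>.
+ *
+ * \code
+    uint32_t guidCount = 0;
+    if (NvEncGetEncodeGUIDCount(encoder, &guidCount) == NV_ENC_SUCCESS && guidCount > 0) {
+        GUID *guids = (GUID*)malloc(guidCount * sizeof(GUID));
+        uint32_t returned = 0;
+        if (NvEncGetEncodeGUIDs(encoder, guids, guidCount, &returned) == NV_ENC_SUCCESS) {
+            for (uint32_t i = 0; i < returned; ++i) {
+                if (memcmp(&guids[i], &NV_ENC_CODEC_HEVC_GUID, sizeof(GUID)) == 0) {
+                    // HEVC encoding is supported on this device
+                }
+            }
+        }
+        free(guids);
+    }
+ * \endcode
+ */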
+
+
+// NvEncGetEncodeProfileGuidCount
+/**
+ * \brief Retrieves the number of supported profile GUIDs.
+ *
+ * The function returns the number of profile GUIDs supported for a given codec.
+ * The client must first enumerate the codec GUIDs supported by the NvEncodeAPI
+ * interface. After determining the codec GUID, it can query the NvEncodeAPI
+ * interface to determine the number of profile GUIDs supported for a particular
+ * codec GUID.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ * The codec GUID for which the profile GUIDs are being enumerated.
+ * \param [out] encodeProfileGUIDCount
+ * Number of encode profiles supported for the given encodeGUID.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeProfileGUIDCount (void* encoder, GUID encodeGUID, uint32_t* encodeProfileGUIDCount);
+
+
+// NvEncGetEncodeProfileGUIDs
+/**
+ * \brief Retrieves an array of supported encode profile GUIDs.
+ *
+ * The function returns an array of supported profile GUIDs for a particular
+ * codec GUID. The client must allocate an array where the NvEncodeAPI interface
+ * can populate the profile GUIDs. The client can determine the array size using
+ * ::NvEncGetEncodeProfileGUIDCount() API. The client must also validate that the
+ * NvEncodeAPI interface supports the GUID the client wants to pass as \p encodeGUID
+ * parameter.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ * The encode GUID whose profile GUIDs are being enumerated.
+ * \param [in] guidArraySize
+ * Number of GUIDs to be retrieved. Should be set to the number retrieved using
+ * ::NvEncGetEncodeProfileGUIDCount.
+ * \param [out] profileGUIDs
+ * Array of supported Encode Profile GUIDs
+ * \param [out] GUIDCount
+ * Number of valid encode profile GUIDs in \p profileGUIDs array.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeProfileGUIDs (void* encoder, GUID encodeGUID, GUID* profileGUIDs, uint32_t guidArraySize, uint32_t* GUIDCount);
+
+// NvEncGetInputFormatCount
+/**
+ * \brief Retrieve the number of supported Input formats.
+ *
+ * The function returns the number of supported input formats. The client must
+ * query the NvEncodeAPI interface to determine the supported input formats
+ * before creating the input surfaces.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ * Encode GUID, corresponding to which the number of supported input formats
+ * is to be retrieved.
+ * \param [out] inputFmtCount
+ * Number of input formats supported for specified Encode GUID.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ */
+NVENCSTATUS NVENCAPI NvEncGetInputFormatCount (void* encoder, GUID encodeGUID, uint32_t* inputFmtCount);
+
+
+// NvEncGetInputFormats
+/**
+ * \brief Retrieves an array of supported Input formats
+ *
+ * Returns an array of supported input formats. The client must use the input
+ * format to create input surface using ::NvEncCreateInputBuffer() API.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ * Encode GUID, corresponding to which the number of supported input formats
+ * is to be retrieved.
+ *\param [in] inputFmtArraySize
+ *   Size of the input format array passed in \p inputFmts.
+ *\param [out] inputFmts
+ * Array of input formats supported for this Encode GUID.
+ *\param [out] inputFmtCount
+ * The number of valid input format types returned by the NvEncodeAPI
+ * interface in \p inputFmts array.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetInputFormats (void* encoder, GUID encodeGUID, NV_ENC_BUFFER_FORMAT* inputFmts, uint32_t inputFmtArraySize, uint32_t* inputFmtCount);
+
+
+// NvEncGetEncodeCaps
+/**
+ * \brief Retrieves the capability value for a specified encoder attribute.
+ *
+ * The function returns the capability value for a given encoder attribute. The
+ * client must validate the encodeGUID using ::NvEncGetEncodeGUIDs() API before
+ * calling this function. The encoder attributes that can be queried are
+ * enumerated in the ::NV_ENC_CAPS enum.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ * Encode GUID, corresponding to which the capability attribute is to be retrieved.
+ * \param [in] capsParam
+ * Used to specify attribute being queried. Refer ::NV_ENC_CAPS_PARAM for more
+ * details.
+ * \param [out] capsVal
+ * The value corresponding to the capability attribute being queried.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeCaps (void* encoder, GUID encodeGUID, NV_ENC_CAPS_PARAM* capsParam, int* capsVal);
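+
+/**
+ * Editor's note: a minimal sketch (not part of the original NVIDIA header) of
+ * querying one capability, here 10-bit encode support for HEVC (useful for
+ * HDR recording). \p encoder is assumed to exist.
+ *
+ * \code
+    NV_ENC_CAPS_PARAM capsParam = { 0 };
+    capsParam.version     = NV_ENC_CAPS_PARAM_VER;
+    capsParam.capsToQuery = NV_ENC_CAPS_SUPPORT_10BIT_ENCODE;
+    int capsVal = 0;
+    if (NvEncGetEncodeCaps(encoder, NV_ENC_CODEC_HEVC_GUID, &capsParam, &capsVal) == NV_ENC_SUCCESS
+            && capsVal == 1) {
+        // 10-bit HEVC encoding is available
+    }
+ * \endcode
+ */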
+
+
+// NvEncGetEncodePresetCount
+/**
+ * \brief Retrieves the number of supported preset GUIDs.
+ *
+ * The function returns the number of preset GUIDs available for a given codec.
+ * The client must validate the codec GUID using ::NvEncGetEncodeGUIDs() API
+ * before calling this function.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ * Encode GUID, corresponding to which the number of supported presets is to
+ * be retrieved.
+ * \param [out] encodePresetGUIDCount
+ * Receives the number of supported preset GUIDs.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodePresetCount (void* encoder, GUID encodeGUID, uint32_t* encodePresetGUIDCount);
+
+
+// NvEncGetEncodePresetGUIDs
+/**
+ * \brief Receives an array of supported encoder preset GUIDs.
+ *
+ * The function returns an array of encode preset GUIDs available for a given codec.
+ * The client can directly use one of the preset GUIDs based upon the use case
+ * or target device. The preset GUID chosen can be directly used in
+ * NV_ENC_INITIALIZE_PARAMS::presetGUID parameter to ::NvEncEncodePicture() API.
+ * Alternatively, the client can also use the preset GUID to retrieve the encoding config
+ * parameters being used by NvEncodeAPI interface for that given preset, using
+ * ::NvEncGetEncodePresetConfig() API. It can then modify preset config parameters
+ * as per its use case and send it to NvEncodeAPI interface as part of
+ * NV_ENC_INITIALIZE_PARAMS::encodeConfig parameter for NvEncInitializeEncoder()
+ * API.
+ *
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ * Encode GUID, corresponding to which the list of supported presets is to be
+ * retrieved.
+ * \param [in] guidArraySize
+ *   Size of array of preset GUIDs passed in \p presetGUIDs
+ * \param [out] presetGUIDs
+ * Array of supported Encode preset GUIDs from the NvEncodeAPI interface
+ * to client.
+ * \param [out] encodePresetGUIDCount
+ * Receives the number of preset GUIDs returned by the NvEncodeAPI
+ * interface.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodePresetGUIDs (void* encoder, GUID encodeGUID, GUID* presetGUIDs, uint32_t guidArraySize, uint32_t* encodePresetGUIDCount);
+
+
+// NvEncGetEncodePresetConfig
+/**
+ * \brief Returns a preset config structure supported for given preset GUID.
+ *
+ * The function returns a preset config structure for a given preset GUID.
+ * NvEncGetEncodePresetConfig() API is not applicable to AV1.
+ * Before using this function the client must enumerate the preset GUIDs available for
+ * a given codec. The preset config structure can be modified by the client depending
+ * upon its use case and can be then used to initialize the encoder using
+ * ::NvEncInitializeEncoder() API. The client can use this function only if it
+ * wants to modify the NvEncodeAPI preset configuration, otherwise it can
+ * directly use the preset GUID.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ * Encode GUID, corresponding to which the list of supported presets is to be
+ * retrieved.
+ * \param [in] presetGUID
+ *   Preset GUID, corresponding to which the encoding configuration is to be
+ * retrieved.
+ * \param [out] presetConfig
+ *   The requested Preset Encoder Attribute set. Refer ::_NV_ENC_CONFIG for
+ *   more details.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodePresetConfig (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_PRESET_CONFIG* presetConfig);
+
+// NvEncGetEncodePresetConfigEx
+/**
+ * \brief Returns a preset config structure supported for given preset GUID.
+ *
+ * The function returns a preset config structure for a given preset GUID and tuning info.
+ * NvEncGetEncodePresetConfigEx() API is not applicable to H264 and HEVC ME-only mode.
+ * Before using this function the client must enumerate the preset GUIDs available for
+ * a given codec. The preset config structure can be modified by the client depending
+ * upon its use case and can be then used to initialize the encoder using
+ * ::NvEncInitializeEncoder() API. The client can use this function only if it
+ * wants to modify the NvEncodeAPI preset configuration, otherwise it can
+ * directly use the preset GUID.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encodeGUID
+ * Encode GUID, corresponding to which the list of supported presets is to be
+ * retrieved.
+ * \param [in] presetGUID
+ *   Preset GUID, corresponding to which the encoding configuration is to be
+ * retrieved.
+ * \param [in] tuningInfo
+ *   Tuning info, corresponding to which the encoding configuration is to be
+ * retrieved.
+ * \param [out] presetConfig
+ * The requested Preset Encoder Attribute set. Refer ::_NV_ENC_CONFIG for
+ * more details.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodePresetConfigEx (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_TUNING_INFO tuningInfo, NV_ENC_PRESET_CONFIG* presetConfig);
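+
+/**
+ * Editor's note: a minimal sketch (not part of the original NVIDIA header) of
+ * fetching a preset configuration and tweaking one field before encoder
+ * initialization. \p encoder is assumed to exist; the P4 preset and
+ * low-latency tuning are arbitrary example choices.
+ *
+ * \code
+    NV_ENC_PRESET_CONFIG presetConfig = { 0 };
+    presetConfig.version           = NV_ENC_PRESET_CONFIG_VER;
+    presetConfig.presetCfg.version = NV_ENC_CONFIG_VER;
+    if (NvEncGetEncodePresetConfigEx(encoder, NV_ENC_CODEC_HEVC_GUID,
+            NV_ENC_PRESET_P4_GUID, NV_ENC_TUNING_INFO_LOW_LATENCY,
+            &presetConfig) == NV_ENC_SUCCESS) {
+        presetConfig.presetCfg.gopLength = 60;  // customize before NvEncInitializeEncoder()
+    }
+ * \endcode
+ */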
+
+// NvEncInitializeEncoder
+/**
+ * \brief Initialize the encoder.
+ *
+ * This API must be used to initialize the encoder. The initialization parameter
+ * is passed using \p createEncodeParams. The client must send the following
+ * fields of the _NV_ENC_INITIALIZE_PARAMS structure with a valid value.
+ * - NV_ENC_INITIALIZE_PARAMS::encodeGUID
+ * - NV_ENC_INITIALIZE_PARAMS::encodeWidth
+ * - NV_ENC_INITIALIZE_PARAMS::encodeHeight
+ *
+ * The client can pass a preset GUID directly to the NvEncodeAPI interface using
+ * NV_ENC_INITIALIZE_PARAMS::presetGUID field. If the client doesn't pass
+ * NV_ENC_INITIALIZE_PARAMS::encodeConfig structure, the codec specific parameters
+ * will be selected based on the preset GUID. The preset GUID must have been
+ * validated by the client using ::NvEncGetEncodePresetGUIDs() API.
+ * If the client passes a custom ::_NV_ENC_CONFIG structure through
+ * NV_ENC_INITIALIZE_PARAMS::encodeConfig, it will override the codec specific parameters
+ * based on the preset GUID. It is recommended that even if the client passes a custom config,
+ * it should also send a preset GUID. In this case, the preset GUID passed by the client
+ * will not override any of the custom config parameters programmed by the client,
+ * it is only used as a hint by the NvEncodeAPI interface to determine certain encoder parameters
+ * which are not exposed to the client.
+ *
+ * There are two modes of operation for the encoder namely:
+ * - Asynchronous mode
+ * - Synchronous mode
+ *
+ * The client can select asynchronous or synchronous mode by setting the \p
+ * enableEncodeAsync field in ::_NV_ENC_INITIALIZE_PARAMS to 1 or 0 respectively.
+ *\par Asynchronous mode of operation:
+ * The Asynchronous mode can be enabled by setting NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 1.
+ * The client operating in asynchronous mode must allocate completion event object
+ * for each output buffer and pass the completion event object in the
+ * ::NvEncEncodePicture() API. The client can create another thread and wait on
+ * the event object to be signaled by NvEncodeAPI interface on completion of the
+ * encoding process for the output frame. This should unblock the main thread from
+ * submitting work to the encoder. When the event is signaled the client can call
+ * NvEncodeAPI interfaces to copy the bitstream data using ::NvEncLockBitstream()
+ * API. This is the preferred mode of operation.
+ *
+ * NOTE: Asynchronous mode is not supported on Linux.
+ *
+ *\par Synchronous mode of operation:
+ * The client can select synchronous mode by setting NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 0.
+ * The client working in synchronous mode can work in a single-threaded or
+ * multi-threaded mode. The client need not allocate any event objects. The client can
+ * only lock the bitstream data after NvEncodeAPI interface has returned
+ * ::NV_ENC_SUCCESS from encode picture. The NvEncodeAPI interface can return
+ * ::NV_ENC_ERR_NEED_MORE_INPUT error code from ::NvEncEncodePicture() API. The
+ * client must not lock the output buffer in such case but should send the next
+ * frame for encoding. The client must keep on calling ::NvEncEncodePicture() API
+ * until it returns ::NV_ENC_SUCCESS. \n
+ * The client must always lock the bitstream data in the order in which it was submitted.
+ * This is true for both asynchronous and synchronous mode.
+ *
+ *\par Picture type decision:
+ * If the client is taking the picture type decision, it must disable the picture
+ * type decision module in NvEncodeAPI by setting NV_ENC_INITIALIZE_PARAMS::enablePTD
+ * to 0. In this case the client is required to send the picture in encoding
+ * order to NvEncodeAPI by doing the re-ordering for B frames. \n
+ * If the client doesn't want to take the picture type decision it can enable
+ * picture type decision module in the NvEncodeAPI interface by setting
+ * NV_ENC_INITIALIZE_PARAMS::enablePTD to 1 and send the input pictures in display
+ * order.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] createEncodeParams
+ * Refer ::_NV_ENC_INITIALIZE_PARAMS for details.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncInitializeEncoder (void* encoder, NV_ENC_INITIALIZE_PARAMS* createEncodeParams);
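+
+/**
+ * Editor's note: a minimal sketch (not part of the original NVIDIA header) of
+ * initializing the encoder for 1080p60 H264 with picture type decision left to
+ * the API. \p encoder and \p presetConfig (from NvEncGetEncodePresetConfigEx())
+ * are assumed to exist; passing encodeConfig is optional.
+ *
+ * \code
+    NV_ENC_INITIALIZE_PARAMS initParams = { 0 };
+    initParams.version      = NV_ENC_INITIALIZE_PARAMS_VER;
+    initParams.encodeGUID   = NV_ENC_CODEC_H264_GUID;
+    initParams.presetGUID   = NV_ENC_PRESET_P4_GUID;
+    initParams.tuningInfo   = NV_ENC_TUNING_INFO_HIGH_QUALITY;
+    initParams.encodeWidth  = 1920;
+    initParams.encodeHeight = 1080;
+    initParams.darWidth     = 1920;
+    initParams.darHeight    = 1080;
+    initParams.frameRateNum = 60;
+    initParams.frameRateDen = 1;
+    initParams.enablePTD    = 1;                        // let NvEncodeAPI pick picture types
+    initParams.encodeConfig = &presetConfig.presetCfg;  // optional custom config
+    NVENCSTATUS status = NvEncInitializeEncoder(encoder, &initParams);
+ * \endcode
+ */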
+
+
+// NvEncCreateInputBuffer
+/**
+ * \brief Allocates Input buffer.
+ *
+ * This function is used to allocate an input buffer. The client must enumerate
+ * the input buffer format before allocating the input buffer resources. The
+ * NV_ENC_INPUT_PTR returned by the NvEncodeAPI interface in the
+ * NV_ENC_CREATE_INPUT_BUFFER::inputBuffer field can be directly used in
+ * ::NvEncEncodePicture() API. The number of input buffers to be allocated by the
+ * client must be at least 4 more than the number of B frames being used for encoding.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] createInputBufferParams
+ * Pointer to the ::NV_ENC_CREATE_INPUT_BUFFER structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncCreateInputBuffer (void* encoder, NV_ENC_CREATE_INPUT_BUFFER* createInputBufferParams);
+
+
+// NvEncDestroyInputBuffer
+/**
+ * \brief Release an input buffer.
+ *
+ * This function is used to free an input buffer. If the client has allocated
+ * any input buffer using ::NvEncCreateInputBuffer() API, it must free those
+ * input buffers by calling this function. The client must release the input
+ * buffers before destroying the encoder using ::NvEncDestroyEncoder() API.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] inputBuffer
+ * Pointer to the input buffer to be released.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncDestroyInputBuffer (void* encoder, NV_ENC_INPUT_PTR inputBuffer);
+
+// NvEncSetIOCudaStreams
+/**
+ * \brief Set input and output CUDA stream for specified encoder attribute.
+ *
+ * Encoding may involve CUDA pre-processing on the input and post-processing on encoded output.
+ * This function is used to set input and output CUDA streams to pipeline the CUDA pre-processing
+ * and post-processing tasks. Clients should call this function before the call to
+ * NvEncUnlockInputBuffer(). If this function is not called, the default CUDA stream is used for
+ * input and output processing. After a successful call to this function, the streams specified
+ * in that call will replace the previously-used streams.
+ * This API is supported for NVCUVID interface only.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] inputStream
+ * Pointer to CUstream which is used to process ::NV_ENC_PIC_PARAMS::inputFrame for encode.
+ * In case of ME-only mode, inputStream is used to process ::NV_ENC_MEONLY_PARAMS::inputBuffer and
+ * ::NV_ENC_MEONLY_PARAMS::referenceFrame
+ * \param [in] outputStream
+ * Pointer to CUstream which is used to process ::NV_ENC_PIC_PARAMS::outputBuffer for encode.
+ * In case of ME-only mode, outputStream is used to process ::NV_ENC_MEONLY_PARAMS::mvBuffer
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ */
+NVENCSTATUS NVENCAPI NvEncSetIOCudaStreams (void* encoder, NV_ENC_CUSTREAM_PTR inputStream, NV_ENC_CUSTREAM_PTR outputStream);
+
+
+// NvEncCreateBitstreamBuffer
+/**
+ * \brief Allocates an output bitstream buffer
+ *
+ * This function is used to allocate an output bitstream buffer and returns a
+ * NV_ENC_OUTPUT_PTR to bitstream buffer to the client in the
+ * NV_ENC_CREATE_BITSTREAM_BUFFER::bitstreamBuffer field.
+ * The client can only call this function after the encoder session has been
+ * initialized using ::NvEncInitializeEncoder() API. The minimum number of output
+ * buffers allocated by the client must be at least 4 more than the number of
+ * B frames being used for encoding. The client can only access the output
+ * bitstream data by locking the \p bitstreamBuffer using the ::NvEncLockBitstream()
+ * function.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] createBitstreamBufferParams
+ *   Pointer to the ::NV_ENC_CREATE_BITSTREAM_BUFFER structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncCreateBitstreamBuffer (void* encoder, NV_ENC_CREATE_BITSTREAM_BUFFER* createBitstreamBufferParams);
+
+
+// NvEncDestroyBitstreamBuffer
+/**
+ * \brief Release a bitstream buffer.
+ *
+ * This function is used to release the output bitstream buffer allocated using
+ * the ::NvEncCreateBitstreamBuffer() function. The client must release the output
+ * bitstreamBuffer using this function before destroying the encoder session.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] bitstreamBuffer
+ * Pointer to the bitstream buffer being released.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncDestroyBitstreamBuffer (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer);
+
+// NvEncEncodePicture
+/**
+ * \brief Submit an input picture for encoding.
+ *
+ * This function is used to submit an input picture buffer for encoding. The
+ * encoding parameters are passed using \p *encodePicParams which is a pointer
+ * to the ::_NV_ENC_PIC_PARAMS structure.
+ *
+ * If the client has set NV_ENC_INITIALIZE_PARAMS::enablePTD to 0, then it must
+ * send a valid value for the following fields.
+ * - NV_ENC_PIC_PARAMS::pictureType
+ * - NV_ENC_PIC_PARAMS_H264::displayPOCSyntax (H264 only)
+ * - NV_ENC_PIC_PARAMS_H264::frameNumSyntax(H264 only)
+ * - NV_ENC_PIC_PARAMS_H264::refPicFlag(H264 only)
+ *
+ *\par MVC Encoding:
+ * For MVC encoding the client must call encode picture API for each view separately
+ * and must pass valid view id in NV_ENC_PIC_PARAMS_MVC::viewID field. Currently
+ * NvEncodeAPI only support stereo MVC so client must send viewID as 0 for base
+ * view and view ID as 1 for dependent view.
+ *
+ *\par Asynchronous Encoding
+ * If the client has enabled asynchronous mode of encoding by setting
+ * NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 1 in the ::NvEncInitializeEncoder()
+ * API, then the client must send a valid NV_ENC_PIC_PARAMS::completionEvent.
+ * In case of asynchronous mode of operation, the client can queue the ::NvEncEncodePicture()
+ * API commands from the main thread and then queue output buffers to be processed
+ * to a secondary worker thread. Before locking the output buffers in the
+ * secondary thread, the client must wait on NV_ENC_PIC_PARAMS::completionEvent
+ * it has queued in ::NvEncEncodePicture() API call. The client must always process
+ * completion event and the output buffer in the same order in which they have been
+ * submitted for encoding. The NvEncodeAPI interface is responsible for any
+ * re-ordering required for B frames and will always ensure that encoded bitstream
+ * data is written in the same order in which output buffer is submitted.
+ * The NvEncodeAPI interface may return ::NV_ENC_ERR_NEED_MORE_INPUT error code for
+ * some ::NvEncEncodePicture() API calls but the client must not treat it as a fatal error.
+ * The NvEncodeAPI interface might not be able to submit an input picture buffer for encoding
+ * immediately due to re-ordering for B frames.
+ *\code
+ The below example shows how asynchronous encoding works in the case of 1 B frame
+ ------------------------------------------------------------------------
+ Suppose the client allocated 4 input buffers(I1,I2..), 4 output buffers(O1,O2..)
+ and 4 completion events(E1, E2, ...). The NvEncodeAPI interface will need to
+ keep a copy of the input buffers for re-ordering and it allocates following
+ internal buffers (NvI1, NvI2...). These internal buffers are managed by NvEncodeAPI
+ and the client is not responsible for allocating or freeing the memory of
+ the internal buffers.
+
+ a) The client main thread will queue the following encode frame calls.
+ Note the picture type is unknown to the client, the decision is being taken by
+ NvEncodeAPI interface. The client should pass ::_NV_ENC_PIC_PARAMS parameter
+ consisting of allocated input buffer, output buffer and output events in successive
+ ::NvEncEncodePicture() API calls along with other required encode picture params.
+ For example:
+ 1st EncodePicture parameters - (I1, O1, E1)
+ 2nd EncodePicture parameters - (I2, O2, E2)
+ 3rd EncodePicture parameters - (I3, O3, E3)
+
+ b) NvEncodeAPI SW will receive the following encode Commands from the client.
+ The left side shows input from client in the form (Input buffer, Output Buffer,
+ Output Event). The right hand side shows a possible picture type decision taken by
+ the NvEncodeAPI interface.
+ (I1, O1, E1) ---P1 Frame
+ (I2, O2, E2) ---B2 Frame
+ (I3, O3, E3) ---P3 Frame
+
+ c) NvEncodeAPI interface will make a copy of the input buffers to its internal
+ buffers for re-ordering. These copies are done as part of the NvEncEncodePicture
+ function call from the client and NvEncodeAPI interface is responsible for
+ synchronization of copy operation with the actual encoding operation.
+ I1 --> NvI1
+ I2 --> NvI2
+ I3 --> NvI3
+
+ d) The NvEncodeAPI encodes I1 as P frame and submits I1 to encoder HW and returns ::NV_ENC_SUCCESS.
+ The NvEncodeAPI tries to encode I2 as B frame and fails with ::NV_ENC_ERR_NEED_MORE_INPUT error code.
+ The error is not fatal and it notifies client that I2 is not submitted to encoder immediately.
+ The NvEncodeAPI encodes I3 as P frame and submits I3 for encoding which will be used as backward
+ reference frame for I2. The NvEncodeAPI then submits I2 for encoding and returns ::NV_ENC_SUCCESS.
+ Both submissions are part of the same ::NvEncEncodePicture() function call.
+
+ e) After returning from ::NvEncEncodePicture() call, the client must queue the output
+ bitstream processing work to the secondary thread. The output bitstream processing
+ for asynchronous mode consists of first waiting on completion event(E1, E2..)
+ and then locking the output bitstream buffer(O1, O2..) for reading the encoded
+ data. The work queued to the secondary thread by the client is in the following order
+ (I1, O1, E1)
+ (I2, O2, E2)
+ (I3, O3, E3)
+ Note they are in the same order in which client calls ::NvEncEncodePicture() API
+ in \p step a).
+
+ f) NvEncodeAPI interface will do the re-ordering such that Encoder HW will receive
+ the following encode commands:
+ (NvI1, O1, E1) ---P1 Frame
+ (NvI3, O2, E2) ---P3 Frame
+ (NvI2, O3, E3) ---B2 frame
+
+ g) After the encoding operations are completed, the events will be signaled
+ by NvEncodeAPI interface in the following order :
+ (O1, E1) ---P1 Frame ,output bitstream copied to O1 and event E1 signaled.
+ (O2, E2) ---P3 Frame ,output bitstream copied to O2 and event E2 signaled.
+ (O3, E3) ---B2 Frame ,output bitstream copied to O3 and event E3 signaled.
+
+ h) The client must lock the bitstream data using ::NvEncLockBitstream() API in
+ the order O1,O2,O3 to read the encoded data, after waiting for the events
+ to be signaled in the same order i.e. E1, E2 and E3. The output processing is
+ done in the secondary thread in the following order:
+ Waits on E1, copies encoded bitstream from O1
+ Waits on E2, copies encoded bitstream from O2
+ Waits on E3, copies encoded bitstream from O3
+
+ -Note the client will receive the events signaling and output buffer in the
+ same order in which they were submitted for encoding.
+ -Note the LockBitstream will have picture type field which will notify the
+ output picture type to the clients.
+ -Note the input, output buffer and the output completion event are free to be
+ reused once NvEncodeAPI interface has signaled the event and the client has
+ copied the data from the output buffer.
+
+ * \endcode
+ *
+ *\par Synchronous Encoding
+ * The client can enable synchronous mode of encoding by setting
+ * NV_ENC_INITIALIZE_PARAMS::enableEncodeAsync to 0 in ::NvEncInitializeEncoder() API.
+ * The NvEncodeAPI interface may return the ::NV_ENC_ERR_NEED_MORE_INPUT error code for
+ * some ::NvEncEncodePicture() API calls when NV_ENC_INITIALIZE_PARAMS::enablePTD
+ * is set to 1, but the client must not treat it as a fatal error. The NvEncodeAPI
+ * interface might not be able to submit an input picture buffer for encoding
+ * immediately due to re-ordering for B frames. The NvEncodeAPI interface cannot
+ * submit an input picture which is decided to be encoded as a B frame as it waits
+ * for a backward reference from temporally subsequent frames. This input picture
+ * is buffered internally and waits for more input pictures to arrive. The client
+ * must not call the ::NvEncLockBitstream() API on the output buffers whose
+ * ::NvEncEncodePicture() API call returned ::NV_ENC_ERR_NEED_MORE_INPUT. The client must
+ * wait for the NvEncodeAPI interface to return ::NV_ENC_SUCCESS before locking the
+ * output bitstreams to read the encoded bitstream data. The following example
+ * explains the scenario with synchronous encoding with 1 B frame.
+ *\code
+ The below example shows how synchronous encoding works in the case of 1 B frame
+ -----------------------------------------------------------------------------
+ Suppose the client allocated 4 input buffers (I1, I2, ...), 4 output buffers (O1, O2, ...)
+ and 4 completion events (E1, E2, ...). The NvEncodeAPI interface will need to
+ keep a copy of the input buffers for re-ordering and it allocates the following
+ internal buffers (NvI1, NvI2, ...). These internal buffers are managed by NvEncodeAPI
+ and the client is not responsible for allocating or freeing the memory of
+ the internal buffers.
+
+ The client calls the ::NvEncEncodePicture() API with input buffer I1 and output buffer O1.
+ The NvEncodeAPI decides to encode I1 as a P frame and submits it to the encoder
+ HW and returns ::NV_ENC_SUCCESS.
+ The client can now read the encoded data by locking the output O1 with the
+ ::NvEncLockBitstream() API.
+
+ The client calls the ::NvEncEncodePicture() API with input buffer I2 and output buffer O2.
+ The NvEncodeAPI decides to encode I2 as a B frame and buffers I2 by copying it
+ to an internal buffer and returns ::NV_ENC_ERR_NEED_MORE_INPUT.
+ The error is not fatal and it notifies the client that it cannot read the encoded
+ data by locking the output O2 with the ::NvEncLockBitstream() API without submitting
+ more work to the NvEncodeAPI interface.
+
+ The client calls ::NvEncEncodePicture() with input buffer I3 and output buffer O3.
+ The NvEncodeAPI decides to encode I3 as a P frame and it first submits I3 for
+ encoding, which will be used as the backward reference frame for I2.
+ The NvEncodeAPI then submits I2 for encoding and returns ::NV_ENC_SUCCESS. Both
+ submissions are part of the same ::NvEncEncodePicture() function call.
+ The client can now read the encoded data for both frames by locking the output
+ O2 followed by O3, by calling the ::NvEncLockBitstream() API.
+
+ The client must always lock the outputs in the same order in which they were
+ submitted, to receive the encoded bitstream in correct encoding order.
+
+ * \endcode
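+ *
+ * A minimal illustrative sketch of the loop described above, assuming |nvenc| is a
+ * populated ::NV_ENCODE_API_FUNCTION_LIST, |encoder| is an open session, and the
+ * input/output buffers and |width|/|height| have already been set up:
+ *\code
+ NV_ENC_PIC_PARAMS pic = { 0 };
+ pic.version = NV_ENC_PIC_PARAMS_VER;
+ pic.inputBuffer = inputBuffer;        // e.g. from NvEncCreateInputBuffer()
+ pic.outputBitstream = outputBuffer;   // e.g. from NvEncCreateBitstreamBuffer()
+ pic.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12;
+ pic.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
+ pic.inputWidth = width;
+ pic.inputHeight = height;
+ NVENCSTATUS status = nvenc.nvEncEncodePicture(encoder, &pic);
+ if (status == NV_ENC_SUCCESS) {
+     // All pending outputs (including re-ordered B frames) can now be locked,
+     // in the same order in which they were submitted.
+ } else if (status == NV_ENC_ERR_NEED_MORE_INPUT) {
+     // Not fatal: the picture was buffered for B frame re-ordering.
+     // Do not lock this output buffer yet; submit more input first.
+ }
+ * \endcode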
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] encodePicParams
+ * Pointer to the ::_NV_ENC_PIC_PARAMS structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_ENCODER_BUSY \n
+ * ::NV_ENC_ERR_NEED_MORE_INPUT \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncEncodePicture (void* encoder, NV_ENC_PIC_PARAMS* encodePicParams);
+
+
+// NvEncLockBitstream
+/**
+ * \brief Lock output bitstream buffer
+ *
+ * This function is used to lock the bitstream buffer to read the encoded data.
+ * The client can only access the encoded data by calling this function.
+ * The pointer to the client accessible encoded data is returned in the
+ * NV_ENC_LOCK_BITSTREAM::bitstreamBufferPtr field. The size of the encoded data
+ * in the output buffer is returned in the NV_ENC_LOCK_BITSTREAM::bitstreamSizeInBytes field.
+ * The NvEncodeAPI interface also returns the output picture type and picture structure
+ * of the encoded frame in the NV_ENC_LOCK_BITSTREAM::pictureType and
+ * NV_ENC_LOCK_BITSTREAM::pictureStruct fields respectively. If the client has
+ * set NV_ENC_LOCK_BITSTREAM::doNotWait to 1, the function might return
+ * ::NV_ENC_ERR_LOCK_BUSY if the client is operating in synchronous mode. This is not
+ * a fatal failure if NV_ENC_LOCK_BITSTREAM::doNotWait is set to 1. In that case the
+ * client can retry the function after a few milliseconds.
+ *
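+ * A minimal illustrative sketch, assuming |nvenc| is a populated
+ * ::NV_ENCODE_API_FUNCTION_LIST and |outputBuffer| has finished encoding:
+ *\code
+ NV_ENC_LOCK_BITSTREAM lock = { 0 };
+ lock.version = NV_ENC_LOCK_BITSTREAM_VER;
+ lock.outputBitstream = outputBuffer;
+ if (nvenc.nvEncLockBitstream(encoder, &lock) == NV_ENC_SUCCESS) {
+     // Consume lock.bitstreamBufferPtr (lock.bitstreamSizeInBytes bytes),
+     // e.g. write it to a file or mux it into a container.
+     nvenc.nvEncUnlockBitstream(encoder, lock.outputBitstream);
+ }
+ * \endcode
+ *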
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] lockBitstreamBufferParams
+ * Pointer to the ::_NV_ENC_LOCK_BITSTREAM structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_LOCK_BUSY \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncLockBitstream (void* encoder, NV_ENC_LOCK_BITSTREAM* lockBitstreamBufferParams);
+
+
+// NvEncUnlockBitstream
+/**
+ * \brief Unlock the output bitstream buffer
+ *
+ * This function is used to unlock the output bitstream buffer after the client
+ * has read the encoded data from the output buffer. The client must call this function
+ * to unlock the output buffer which it has previously locked using the ::NvEncLockBitstream()
+ * function. Using a locked bitstream buffer in the ::NvEncEncodePicture() API will cause
+ * the function to fail.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] bitstreamBuffer
+ * bitstream buffer pointer being unlocked
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncUnlockBitstream (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer);
+
+
+// NvLockInputBuffer
+/**
+ * \brief Locks an input buffer
+ *
+ * This function is used to lock the input buffer to load the uncompressed YUV
+ * pixel data into input buffer memory. The client must pass the NV_ENC_INPUT_PTR
+ * it had previously allocated using ::NvEncCreateInputBuffer() in the
+ * NV_ENC_LOCK_INPUT_BUFFER::inputBuffer field.
+ * The NvEncodeAPI interface returns a pointer to the client accessible input buffer
+ * memory in the NV_ENC_LOCK_INPUT_BUFFER::bufferDataPtr field.
+ *
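+ * A minimal illustrative sketch, assuming |inputBuffer| was created with
+ * ::NvEncCreateInputBuffer() and |nvenc| is the populated function list:
+ *\code
+ NV_ENC_LOCK_INPUT_BUFFER lock = { 0 };
+ lock.version = NV_ENC_LOCK_INPUT_BUFFER_VER;
+ lock.inputBuffer = inputBuffer;
+ if (nvenc.nvEncLockInputBuffer(encoder, &lock) == NV_ENC_SUCCESS) {
+     // Copy YUV pixel data into lock.bufferDataPtr row by row, honoring
+     // the returned lock.pitch between consecutive rows.
+     nvenc.nvEncUnlockInputBuffer(encoder, inputBuffer);
+ }
+ * \endcode
+ *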
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] lockInputBufferParams
+ * Pointer to the ::_NV_ENC_LOCK_INPUT_BUFFER structure
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_LOCK_BUSY \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncLockInputBuffer (void* encoder, NV_ENC_LOCK_INPUT_BUFFER* lockInputBufferParams);
+
+
+// NvUnlockInputBuffer
+/**
+ * \brief Unlocks the input buffer
+ *
+ * This function is used to unlock the input buffer memory previously locked for
+ * uploading YUV pixel data. The input buffer must be unlocked before being used
+ * again for encoding, otherwise NvEncodeAPI will fail the ::NvEncEncodePicture() call.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] inputBuffer
+ * Pointer to the input buffer that is being unlocked.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncUnlockInputBuffer (void* encoder, NV_ENC_INPUT_PTR inputBuffer);
+
+
+// NvEncGetEncodeStats
+/**
+ * \brief Get encoding statistics.
+ *
+ * This function is used to retrieve the encoding statistics.
+ * This API is not supported when the encode device type is CUDA.
+ * Note that this API will be removed in a future Video Codec SDK release.
+ * Clients should use the ::NvEncLockBitstream() API to retrieve the encoding statistics.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] encodeStats
+ * Pointer to the ::_NV_ENC_STAT structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetEncodeStats (void* encoder, NV_ENC_STAT* encodeStats);
+
+
+// NvEncGetSequenceParams
+/**
+ * \brief Get encoded sequence and picture header.
+ *
+ * This function can be used to retrieve the sequence and picture header out of
+ * band. The client must call this function only after the encoder has been
+ * initialized using the ::NvEncInitializeEncoder() function. The client must
+ * allocate the memory where the NvEncodeAPI interface can copy the bitstream
+ * header and pass the pointer to the memory in NV_ENC_SEQUENCE_PARAM_PAYLOAD::spsppsBuffer.
+ * The size of the buffer is passed in the field NV_ENC_SEQUENCE_PARAM_PAYLOAD::inBufferSize.
+ * The NvEncodeAPI interface will copy the bitstream header payload and return
+ * the actual size of the bitstream header in the field
+ * NV_ENC_SEQUENCE_PARAM_PAYLOAD::outSPSPPSPayloadSize.
+ * The client must call the ::NvEncGetSequenceParams() function from the same thread that is
+ * used to call the ::NvEncEncodePicture() function.
+ *
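+ * A minimal illustrative sketch retrieving the bitstream headers out of band,
+ * assuming an initialized session |encoder| and populated function list |nvenc|:
+ *\code
+ unsigned char header[256];
+ uint32_t headerSize = 0;
+ NV_ENC_SEQUENCE_PARAM_PAYLOAD payload = { 0 };
+ payload.version = NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER;
+ payload.spsppsBuffer = header;
+ payload.inBufferSize = sizeof(header);
+ payload.outSPSPPSPayloadSize = &headerSize;
+ if (nvenc.nvEncGetSequenceParams(encoder, &payload) == NV_ENC_SUCCESS) {
+     // header[0..headerSize) now contains the sequence/picture headers.
+ }
+ * \endcode
+ *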
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] sequenceParamPayload
+ * Pointer to the ::_NV_ENC_SEQUENCE_PARAM_PAYLOAD structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetSequenceParams (void* encoder, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload);
+
+// NvEncGetSequenceParamEx
+/**
+ * \brief Get sequence and picture header.
+ *
+ * This function can be used to retrieve the sequence and picture header out of band, even when
+ * the encoder has not been initialized using the ::NvEncInitializeEncoder() function.
+ * The client must allocate the memory where the NvEncodeAPI interface can copy the bitstream
+ * header and pass the pointer to the memory in NV_ENC_SEQUENCE_PARAM_PAYLOAD::spsppsBuffer.
+ * The size of the buffer is passed in the field NV_ENC_SEQUENCE_PARAM_PAYLOAD::inBufferSize.
+ * If the encoder has not been initialized using the ::NvEncInitializeEncoder() function, the client
+ * must send NV_ENC_INITIALIZE_PARAMS as input. The NV_ENC_INITIALIZE_PARAMS passed must be the same
+ * as the one which will be used for initializing the encoder using the ::NvEncInitializeEncoder()
+ * function later.
+ * If the encoder is already initialized using the ::NvEncInitializeEncoder() function, the provided
+ * NV_ENC_INITIALIZE_PARAMS structure is ignored. The NvEncodeAPI interface will copy the bitstream
+ * header payload and return the actual size of the bitstream header in the field
+ * NV_ENC_SEQUENCE_PARAM_PAYLOAD::outSPSPPSPayloadSize. The client must call the ::NvEncGetSequenceParamEx()
+ * function from the same thread that is used to call the ::NvEncEncodePicture() function.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] encInitParams
+ * Pointer to the _NV_ENC_INITIALIZE_PARAMS structure.
+ * \param [in,out] sequenceParamPayload
+ * Pointer to the ::_NV_ENC_SEQUENCE_PARAM_PAYLOAD structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncGetSequenceParamEx (void* encoder, NV_ENC_INITIALIZE_PARAMS* encInitParams, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload);
+
+// NvEncRegisterAsyncEvent
+/**
+ * \brief Register event for notification to encoding completion.
+ *
+ * This function is used to register the completion event with the NvEncodeAPI
+ * interface. The event is required when the client has configured the encoder to
+ * work in asynchronous mode. In this mode the client needs to send a completion
+ * event with every output buffer. The NvEncodeAPI interface will signal the
+ * completion of the encoding process using this event. Only after the event is
+ * signaled can the client get the encoded data using the ::NvEncLockBitstream() function.
+ *
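+ * A minimal illustrative sketch (asynchronous mode is typically only available on
+ * Windows, where |completionEvent| would be a Win32 event handle):
+ *\code
+ NV_ENC_EVENT_PARAMS eventParams = { 0 };
+ eventParams.version = NV_ENC_EVENT_PARAMS_VER;
+ eventParams.completionEvent = completionEvent; // e.g. created with CreateEvent()
+ nvenc.nvEncRegisterAsyncEvent(encoder, &eventParams);
+ // ... pass the same handle in NV_ENC_PIC_PARAMS::completionEvent when encoding,
+ // and wait on it before locking the corresponding output bitstream ...
+ nvenc.nvEncUnregisterAsyncEvent(encoder, &eventParams);
+ * \endcode
+ *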
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] eventParams
+ * Pointer to the ::_NV_ENC_EVENT_PARAMS structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncRegisterAsyncEvent (void* encoder, NV_ENC_EVENT_PARAMS* eventParams);
+
+
+// NvEncUnregisterAsyncEvent
+/**
+ * \brief Unregister completion event.
+ *
+ * This function is used to unregister a completion event which has been previously
+ * registered using the ::NvEncRegisterAsyncEvent() function. The client must unregister
+ * all events before destroying the encoder using the ::NvEncDestroyEncoder() function.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] eventParams
+ * Pointer to the ::_NV_ENC_EVENT_PARAMS structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncUnregisterAsyncEvent (void* encoder, NV_ENC_EVENT_PARAMS* eventParams);
+
+
+// NvEncMapInputResource
+/**
+ * \brief Map an externally created input resource pointer for encoding.
+ *
+ * Maps an externally allocated input resource [previously registered using the
+ * ::NvEncRegisterResource() API] and returns a NV_ENC_INPUT_PTR
+ * which can be used for encoding in the ::NvEncEncodePicture() function. The
+ * mapped resource is returned in the field NV_ENC_MAP_INPUT_RESOURCE::outputResourcePtr.
+ * The NvEncodeAPI interface also returns the buffer format of the mapped resource
+ * in the field NV_ENC_MAP_INPUT_RESOURCE::outbufferFmt.
+ * This function provides a synchronization guarantee that any graphics work submitted
+ * on the input buffer is completed before the buffer is used for encoding. This is
+ * also true for compute (i.e. CUDA) work, provided that the previous workload using
+ * the input resource was submitted to the default stream.
+ * The client should not access any input buffer while it is mapped by the encoder.
+ * For the D3D12 interface type, this function does not provide a synchronization guarantee.
+ *
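+ * A minimal illustrative sketch of the register/map/unmap lifecycle, assuming
+ * |registered| was returned by ::NvEncRegisterResource():
+ *\code
+ NV_ENC_MAP_INPUT_RESOURCE map = { 0 };
+ map.version = NV_ENC_MAP_INPUT_RESOURCE_VER;
+ map.registeredResource = registered;
+ if (nvenc.nvEncMapInputResource(encoder, &map) == NV_ENC_SUCCESS) {
+     // Use map.mappedResource as NV_ENC_PIC_PARAMS::inputBuffer and
+     // map.mappedBufferFmt as NV_ENC_PIC_PARAMS::bufferFmt.
+     // After NvEncLockBitstream() succeeds for this frame:
+     nvenc.nvEncUnmapInputResource(encoder, map.mappedResource);
+ }
+ * \endcode
+ *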
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] mapInputResParams
+ * Pointer to the ::_NV_ENC_MAP_INPUT_RESOURCE structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_RESOURCE_NOT_REGISTERED \n
+ * ::NV_ENC_ERR_MAP_FAILED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncMapInputResource (void* encoder, NV_ENC_MAP_INPUT_RESOURCE* mapInputResParams);
+
+
+// NvEncUnmapInputResource
+/**
+ * \brief Unmaps an NV_ENC_INPUT_PTR which was mapped for encoding
+ *
+ * Unmaps an input buffer which was previously mapped using the ::NvEncMapInputResource()
+ * API. The mapping created using ::NvEncMapInputResource() should be invalidated
+ * using this API before the external resource is destroyed by the client. The client
+ * must unmap the buffer after the ::NvEncLockBitstream() API returns successfully for the
+ * encode work submitted using the mapped input buffer.
+ *
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] mappedInputBuffer
+ * Pointer to the NV_ENC_INPUT_PTR
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_RESOURCE_NOT_REGISTERED \n
+ * ::NV_ENC_ERR_RESOURCE_NOT_MAPPED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncUnmapInputResource (void* encoder, NV_ENC_INPUT_PTR mappedInputBuffer);
+
+// NvEncDestroyEncoder
+/**
+ * \brief Destroy Encoding Session
+ *
+ * Destroys the encoder session previously created using ::NvEncOpenEncodeSession()
+ * function. The client must flush the encoder before freeing any resources. In order
+ * to flush the encoder the client must pass a NULL encode picture packet and either
+ * wait for the ::NvEncEncodePicture() function to return in synchronous mode or wait
+ * for the flush event to be signaled by the encoder in asynchronous mode.
+ * The client must free all the input and output resources created using the
+ * NvEncodeAPI interface before destroying the encoder. If the client is operating
+ * in asynchronous mode, it must also unregister the completion events previously
+ * registered.
+ *
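+ * A minimal illustrative sketch of flushing in synchronous mode by submitting an
+ * end-of-stream picture (the approach used by the NVIDIA sample applications)
+ * before destroying the session:
+ *\code
+ NV_ENC_PIC_PARAMS eos = { 0 };
+ eos.version = NV_ENC_PIC_PARAMS_VER;
+ eos.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
+ nvenc.nvEncEncodePicture(encoder, &eos); // flush: no input buffer attached
+ // ... drain remaining outputs, free buffers, unregister events ...
+ nvenc.nvEncDestroyEncoder(encoder);
+ * \endcode
+ *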
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncDestroyEncoder (void* encoder);
+
+// NvEncInvalidateRefFrames
+/**
+ * \brief Invalidate reference frames
+ *
+ * Invalidates a reference frame based on the timestamp provided by the client.
+ * The encoder marks any reference frames or any frames which have been reconstructed
+ * using the corrupt frame as invalid for motion estimation and uses older reference
+ * frames for motion estimation. The encoder forces the current frame to be encoded
+ * as an intra frame if no reference frames are left after the invalidation process.
+ * This is useful for low latency applications for error resiliency. The client
+ * is recommended to set NV_ENC_CONFIG_H264::maxNumRefFrames to a large value so
+ * that the encoder can keep a backup of older reference frames in the DPB and can use them
+ * for motion estimation when the newer reference frames have been invalidated.
+ * This API can be called multiple times.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] invalidRefFrameTimeStamp
+ * Timestamp of the reference frame which needs to be invalidated.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncInvalidateRefFrames(void* encoder, uint64_t invalidRefFrameTimeStamp);
+
+// NvEncOpenEncodeSessionEx
+/**
+ * \brief Opens an encoding session.
+ *
+ * Opens an encoding session and returns a pointer to the encoder interface in
+ * the \p **encoder parameter. The client should start the encoding process by calling
+ * this API first.
+ * The client must pass a pointer to an IDirect3DDevice9 device or CUDA context in the \p *device parameter.
+ * For the OpenGL interface, \p device must be NULL. An OpenGL context must be current when
+ * calling all NvEncodeAPI functions.
+ * If the creation of the encoder session fails, the client must call the ::NvEncDestroyEncoder API
+ * before exiting.
+ *
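+ * A minimal illustrative sketch, assuming |cuCtx| is an existing CUDA context and
+ * |nvenc| is a populated ::NV_ENCODE_API_FUNCTION_LIST:
+ *\code
+ void *encoder = NULL;
+ NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params = { 0 };
+ params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
+ params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
+ params.device = cuCtx;
+ params.apiVersion = NVENCAPI_VERSION;
+ if (nvenc.nvEncOpenEncodeSessionEx(&params, &encoder) != NV_ENC_SUCCESS) {
+     // Per the documentation above, destroy the session before exiting on failure.
+     nvenc.nvEncDestroyEncoder(encoder);
+ }
+ * \endcode
+ *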
+ * \param [in] openSessionExParams
+ * Pointer to a ::NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS structure.
+ * \param [out] encoder
+ * Encode Session pointer to the NvEncodeAPI interface.
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_NO_ENCODE_DEVICE \n
+ * ::NV_ENC_ERR_UNSUPPORTED_DEVICE \n
+ * ::NV_ENC_ERR_INVALID_DEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncOpenEncodeSessionEx (NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS *openSessionExParams, void** encoder);
+
+// NvEncRegisterResource
+/**
+ * \brief Registers a resource with the Nvidia Video Encoder Interface.
+ *
+ * Registers a resource with the Nvidia Video Encoder Interface for bookkeeping.
+ * The client is expected to pass the registered resource handle while calling the ::NvEncMapInputResource() API.
+ *
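+ * A minimal illustrative sketch registering a CUDA device pointer (|cuDevicePtr|,
+ * |width|, |height| and |pitch| are assumed to describe an existing allocation):
+ *\code
+ NV_ENC_REGISTER_RESOURCE reg = { 0 };
+ reg.version = NV_ENC_REGISTER_RESOURCE_VER;
+ reg.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR;
+ reg.resourceToRegister = (void*)cuDevicePtr;
+ reg.width = width;
+ reg.height = height;
+ reg.pitch = pitch;
+ reg.bufferFormat = NV_ENC_BUFFER_FORMAT_NV12;
+ if (nvenc.nvEncRegisterResource(encoder, &reg) == NV_ENC_SUCCESS) {
+     // reg.registeredResource can now be passed to NvEncMapInputResource().
+ }
+ * \endcode
+ *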
+ * \param [in] encoder
+ * Pointer to the NVEncodeAPI interface.
+ *
+ * \param [in] registerResParams
+ * Pointer to a ::_NV_ENC_REGISTER_RESOURCE structure
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_RESOURCE_REGISTER_FAILED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ * ::NV_ENC_ERR_UNIMPLEMENTED \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncRegisterResource (void* encoder, NV_ENC_REGISTER_RESOURCE* registerResParams);
+
+// NvEncUnregisterResource
+/**
+ * \brief Unregisters a resource previously registered with the Nvidia Video Encoder Interface.
+ *
+ * Unregisters a resource previously registered with the Nvidia Video Encoder Interface.
+ * The client is expected to unregister any resource that it has registered with the
+ * Nvidia Video Encoder Interface before destroying the resource.
+ *
+ * \param [in] encoder
+ * Pointer to the NVEncodeAPI interface.
+ *
+ * \param [in] registeredResource
+ * The registered resource pointer that was returned in ::NvEncRegisterResource.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_RESOURCE_NOT_REGISTERED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ * ::NV_ENC_ERR_UNIMPLEMENTED \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncUnregisterResource (void* encoder, NV_ENC_REGISTERED_PTR registeredResource);
+
+// NvEncReconfigureEncoder
+/**
+ * \brief Reconfigure an existing encoding session.
+ *
+ * Reconfigure an existing encoding session.
+ * The client should call this API to change/reconfigure the parameters passed during the
+ * NvEncInitializeEncoder API call.
+ * Currently, reconfiguration of the following is not supported:
+ * Change in GOP structure.
+ * Change in synchronous/asynchronous mode.
+ * Change in MaxWidth & MaxHeight.
+ * Change in PTD mode.
+ *
+ * A resolution change is possible only if maxEncodeWidth & maxEncodeHeight of NV_ENC_INITIALIZE_PARAMS
+ * are set while creating the encoder session.
+ *
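+ * A minimal illustrative sketch of changing the output resolution, assuming
+ * |initParams| holds the session's current NV_ENC_INITIALIZE_PARAMS:
+ *\code
+ NV_ENC_RECONFIGURE_PARAMS reconf = { 0 };
+ reconf.version = NV_ENC_RECONFIGURE_PARAMS_VER;
+ reconf.reInitEncodeParams = initParams;       // copy of the current parameters
+ reconf.reInitEncodeParams.encodeWidth = 1280; // new resolution, must not exceed
+ reconf.reInitEncodeParams.encodeHeight = 720; // maxEncodeWidth/maxEncodeHeight
+ reconf.forceIDR = 1;                          // start the new stream on an IDR frame
+ nvenc.nvEncReconfigureEncoder(encoder, &reconf);
+ * \endcode
+ *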
+ * \param [in] encoder
+ * Pointer to the NVEncodeAPI interface.
+ *
+ * \param [in] reInitEncodeParams
+ * Pointer to a ::NV_ENC_RECONFIGURE_PARAMS structure.
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_NO_ENCODE_DEVICE \n
+ * ::NV_ENC_ERR_UNSUPPORTED_DEVICE \n
+ * ::NV_ENC_ERR_INVALID_DEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_GENERIC \n
+ *
+ */
+NVENCSTATUS NVENCAPI NvEncReconfigureEncoder (void *encoder, NV_ENC_RECONFIGURE_PARAMS* reInitEncodeParams);
+
+
+
+// NvEncCreateMVBuffer
+/**
+ * \brief Allocates output MV buffer for ME only mode.
+ *
+ * This function is used to allocate an output MV buffer. The size of the mvBuffer is
+ * dependent on the frame height and width of the last ::NvEncCreateInputBuffer() call.
+ * The NV_ENC_OUTPUT_PTR returned by the NvEncodeAPI interface in the
+ * ::NV_ENC_CREATE_MV_BUFFER::mvBuffer field should be used in
+ * ::NvEncRunMotionEstimationOnly() API.
+ * The client must lock ::NV_ENC_CREATE_MV_BUFFER::mvBuffer using the ::NvEncLockBitstream() API to get the motion vector data.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in,out] createMVBufferParams
+ * Pointer to the ::NV_ENC_CREATE_MV_BUFFER structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_GENERIC \n
+ */
+NVENCSTATUS NVENCAPI NvEncCreateMVBuffer (void* encoder, NV_ENC_CREATE_MV_BUFFER* createMVBufferParams);
+
+
+// NvEncDestroyMVBuffer
+/**
+ * \brief Release an output MV buffer for ME only mode.
+ *
+ * This function is used to release the output MV buffer allocated using
+ * the ::NvEncCreateMVBuffer() function. The client must release the output
+ * mvBuffer using this function before destroying the encoder session.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] mvBuffer
+ * Pointer to the mvBuffer being released.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ */
+NVENCSTATUS NVENCAPI NvEncDestroyMVBuffer (void* encoder, NV_ENC_OUTPUT_PTR mvBuffer);
+
+
+// NvEncRunMotionEstimationOnly
+/**
+ * \brief Submit an input picture and reference frame for motion estimation in ME only mode.
+ *
+ * This function is used to submit the input frame and reference frame for motion
+ * estimation. The ME parameters are passed using *meOnlyParams which is a pointer
+ * to the ::_NV_ENC_MEONLY_PARAMS structure.
+ * The client must lock ::NV_ENC_CREATE_MV_BUFFER::mvBuffer using the ::NvEncLockBitstream() API
+ * to get the motion vector data.
+ *
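+ * A minimal illustrative sketch (the NV_ENC_MEONLY_PARAMS field names used here are
+ * assumptions based on common SDK usage; |currentFrame| and |referenceFrame| are
+ * mapped input buffers and |mvBuffer| was allocated with ::NvEncCreateMVBuffer()):
+ *\code
+ NV_ENC_MEONLY_PARAMS me = { 0 };
+ me.version = NV_ENC_MEONLY_PARAMS_VER;
+ me.inputBuffer = currentFrame;
+ me.referenceFrame = referenceFrame;
+ me.mvBuffer = mvBuffer;
+ me.inputWidth = width;
+ me.inputHeight = height;
+ if (nvenc.nvEncRunMotionEstimationOnly(encoder, &me) == NV_ENC_SUCCESS) {
+     // Lock |mvBuffer| with NvEncLockBitstream() to read the motion vectors.
+ }
+ * \endcode
+ *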
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ * \param [in] meOnlyParams
+ * Pointer to the ::_NV_ENC_MEONLY_PARAMS structure.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ * ::NV_ENC_ERR_INVALID_ENCODERDEVICE \n
+ * ::NV_ENC_ERR_DEVICE_NOT_EXIST \n
+ * ::NV_ENC_ERR_UNSUPPORTED_PARAM \n
+ * ::NV_ENC_ERR_OUT_OF_MEMORY \n
+ * ::NV_ENC_ERR_INVALID_PARAM \n
+ * ::NV_ENC_ERR_INVALID_VERSION \n
+ * ::NV_ENC_ERR_NEED_MORE_INPUT \n
+ * ::NV_ENC_ERR_ENCODER_NOT_INITIALIZED \n
+ * ::NV_ENC_ERR_GENERIC \n
+ */
+NVENCSTATUS NVENCAPI NvEncRunMotionEstimationOnly (void* encoder, NV_ENC_MEONLY_PARAMS* meOnlyParams);
+
+// NvEncodeAPIGetMaxSupportedVersion
+/**
+ * \brief Get the largest NvEncodeAPI version supported by the driver.
+ *
+ * This function can be used by clients to determine if the driver supports
+ * the NvEncodeAPI header the application was compiled with.
+ *
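+ * A minimal illustrative sketch of the version check this function enables
+ * (the packing follows the \p version parameter description below):
+ *\code
+ uint32_t maxVersion = 0;
+ uint32_t headerVersion = (NVENCAPI_MAJOR_VERSION << 4) | NVENCAPI_MINOR_VERSION;
+ NvEncodeAPIGetMaxSupportedVersion(&maxVersion);
+ if (headerVersion > maxVersion) {
+     // The installed driver is older than this header; do not proceed.
+ }
+ * \endcode
+ *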
+ * \param [out] version
+ * Pointer to the requested value. The 4 least significant bits in the returned
+ * value indicate the minor version and the rest of the bits indicate the major
+ * version of the largest supported version.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS \n
+ * ::NV_ENC_ERR_INVALID_PTR \n
+ */
+NVENCSTATUS NVENCAPI NvEncodeAPIGetMaxSupportedVersion (uint32_t* version);
+
+
+// NvEncGetLastErrorString
+/**
+ * \brief Get the description of the last error reported by the API.
+ *
+ * This function returns a null-terminated string that can be used by clients to better understand the reason
+ * for failure of a previous API call.
+ *
+ * \param [in] encoder
+ * Pointer to the NvEncodeAPI interface.
+ *
+ * \return
+ * Pointer to buffer containing the details of the last error encountered by the API.
+ */
+const char * NVENCAPI NvEncGetLastErrorString (void* encoder);
+
+
+/// \cond API PFN
+/*
+ * Defines API function pointers
+ */
+typedef NVENCSTATUS (NVENCAPI* PNVENCOPENENCODESESSION) (void* device, uint32_t deviceType, void** encoder);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEGUIDCOUNT) (void* encoder, uint32_t* encodeGUIDCount);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEGUIDS) (void* encoder, GUID* GUIDs, uint32_t guidArraySize, uint32_t* GUIDCount);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPROFILEGUIDCOUNT) (void* encoder, GUID encodeGUID, uint32_t* encodeProfileGUIDCount);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPROFILEGUIDS) (void* encoder, GUID encodeGUID, GUID* profileGUIDs, uint32_t guidArraySize, uint32_t* GUIDCount);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETINPUTFORMATCOUNT) (void* encoder, GUID encodeGUID, uint32_t* inputFmtCount);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETINPUTFORMATS) (void* encoder, GUID encodeGUID, NV_ENC_BUFFER_FORMAT* inputFmts, uint32_t inputFmtArraySize, uint32_t* inputFmtCount);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODECAPS) (void* encoder, GUID encodeGUID, NV_ENC_CAPS_PARAM* capsParam, int* capsVal);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETCOUNT) (void* encoder, GUID encodeGUID, uint32_t* encodePresetGUIDCount);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETGUIDS) (void* encoder, GUID encodeGUID, GUID* presetGUIDs, uint32_t guidArraySize, uint32_t* encodePresetGUIDCount);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETCONFIG) (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_PRESET_CONFIG* presetConfig);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODEPRESETCONFIGEX) (void* encoder, GUID encodeGUID, GUID presetGUID, NV_ENC_TUNING_INFO tuningInfo, NV_ENC_PRESET_CONFIG* presetConfig);
+typedef NVENCSTATUS (NVENCAPI* PNVENCINITIALIZEENCODER) (void* encoder, NV_ENC_INITIALIZE_PARAMS* createEncodeParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCCREATEINPUTBUFFER) (void* encoder, NV_ENC_CREATE_INPUT_BUFFER* createInputBufferParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYINPUTBUFFER) (void* encoder, NV_ENC_INPUT_PTR inputBuffer);
+typedef NVENCSTATUS (NVENCAPI* PNVENCCREATEBITSTREAMBUFFER) (void* encoder, NV_ENC_CREATE_BITSTREAM_BUFFER* createBitstreamBufferParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYBITSTREAMBUFFER) (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer);
+typedef NVENCSTATUS (NVENCAPI* PNVENCENCODEPICTURE) (void* encoder, NV_ENC_PIC_PARAMS* encodePicParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCLOCKBITSTREAM) (void* encoder, NV_ENC_LOCK_BITSTREAM* lockBitstreamBufferParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCUNLOCKBITSTREAM) (void* encoder, NV_ENC_OUTPUT_PTR bitstreamBuffer);
+typedef NVENCSTATUS (NVENCAPI* PNVENCLOCKINPUTBUFFER) (void* encoder, NV_ENC_LOCK_INPUT_BUFFER* lockInputBufferParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCUNLOCKINPUTBUFFER) (void* encoder, NV_ENC_INPUT_PTR inputBuffer);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETENCODESTATS) (void* encoder, NV_ENC_STAT* encodeStats);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETSEQUENCEPARAMS) (void* encoder, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload);
+typedef NVENCSTATUS (NVENCAPI* PNVENCREGISTERASYNCEVENT) (void* encoder, NV_ENC_EVENT_PARAMS* eventParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCUNREGISTERASYNCEVENT) (void* encoder, NV_ENC_EVENT_PARAMS* eventParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCMAPINPUTRESOURCE) (void* encoder, NV_ENC_MAP_INPUT_RESOURCE* mapInputResParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCUNMAPINPUTRESOURCE) (void* encoder, NV_ENC_INPUT_PTR mappedInputBuffer);
+typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYENCODER) (void* encoder);
+typedef NVENCSTATUS (NVENCAPI* PNVENCINVALIDATEREFFRAMES) (void* encoder, uint64_t invalidRefFrameTimeStamp);
+typedef NVENCSTATUS (NVENCAPI* PNVENCOPENENCODESESSIONEX) (NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS *openSessionExParams, void** encoder);
+typedef NVENCSTATUS (NVENCAPI* PNVENCREGISTERRESOURCE) (void* encoder, NV_ENC_REGISTER_RESOURCE* registerResParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCUNREGISTERRESOURCE) (void* encoder, NV_ENC_REGISTERED_PTR registeredRes);
+typedef NVENCSTATUS (NVENCAPI* PNVENCRECONFIGUREENCODER) (void* encoder, NV_ENC_RECONFIGURE_PARAMS* reInitEncodeParams);
+
+typedef NVENCSTATUS (NVENCAPI* PNVENCCREATEMVBUFFER) (void* encoder, NV_ENC_CREATE_MV_BUFFER* createMVBufferParams);
+typedef NVENCSTATUS (NVENCAPI* PNVENCDESTROYMVBUFFER) (void* encoder, NV_ENC_OUTPUT_PTR mvBuffer);
+typedef NVENCSTATUS (NVENCAPI* PNVENCRUNMOTIONESTIMATIONONLY) (void* encoder, NV_ENC_MEONLY_PARAMS* meOnlyParams);
+typedef const char * (NVENCAPI* PNVENCGETLASTERROR) (void* encoder);
+typedef NVENCSTATUS (NVENCAPI* PNVENCSETIOCUDASTREAMS) (void* encoder, NV_ENC_CUSTREAM_PTR inputStream, NV_ENC_CUSTREAM_PTR outputStream);
+typedef NVENCSTATUS (NVENCAPI* PNVENCGETSEQUENCEPARAMEX) (void* encoder, NV_ENC_INITIALIZE_PARAMS* encInitParams, NV_ENC_SEQUENCE_PARAM_PAYLOAD* sequenceParamPayload);
+
+
+/// \endcond
+
+
+/** @} */ /* END ENCODE_FUNC */
+
+/**
+ * \ingroup ENCODER_STRUCTURE
+ * NV_ENCODE_API_FUNCTION_LIST
+ */
+typedef struct _NV_ENCODE_API_FUNCTION_LIST
+{
+ uint32_t version; /**< [in]: Client should pass NV_ENCODE_API_FUNCTION_LIST_VER. */
+ uint32_t reserved; /**< [in]: Reserved and should be set to 0. */
+ PNVENCOPENENCODESESSION nvEncOpenEncodeSession; /**< [out]: Client should access ::NvEncOpenEncodeSession() API through this pointer. */
+ PNVENCGETENCODEGUIDCOUNT nvEncGetEncodeGUIDCount; /**< [out]: Client should access ::NvEncGetEncodeGUIDCount() API through this pointer. */
+ PNVENCGETENCODEPRESETCOUNT nvEncGetEncodeProfileGUIDCount; /**< [out]: Client should access ::NvEncGetEncodeProfileGUIDCount() API through this pointer.*/
+ PNVENCGETENCODEPRESETGUIDS nvEncGetEncodeProfileGUIDs; /**< [out]: Client should access ::NvEncGetEncodeProfileGUIDs() API through this pointer. */
+ PNVENCGETENCODEGUIDS nvEncGetEncodeGUIDs; /**< [out]: Client should access ::NvEncGetEncodeGUIDs() API through this pointer. */
+ PNVENCGETINPUTFORMATCOUNT nvEncGetInputFormatCount; /**< [out]: Client should access ::NvEncGetInputFormatCount() API through this pointer. */
+ PNVENCGETINPUTFORMATS nvEncGetInputFormats; /**< [out]: Client should access ::NvEncGetInputFormats() API through this pointer. */
+ PNVENCGETENCODECAPS nvEncGetEncodeCaps; /**< [out]: Client should access ::NvEncGetEncodeCaps() API through this pointer. */
+ PNVENCGETENCODEPRESETCOUNT nvEncGetEncodePresetCount; /**< [out]: Client should access ::NvEncGetEncodePresetCount() API through this pointer. */
+ PNVENCGETENCODEPRESETGUIDS nvEncGetEncodePresetGUIDs; /**< [out]: Client should access ::NvEncGetEncodePresetGUIDs() API through this pointer. */
+ PNVENCGETENCODEPRESETCONFIG nvEncGetEncodePresetConfig; /**< [out]: Client should access ::NvEncGetEncodePresetConfig() API through this pointer. */
+ PNVENCINITIALIZEENCODER nvEncInitializeEncoder; /**< [out]: Client should access ::NvEncInitializeEncoder() API through this pointer. */
+ PNVENCCREATEINPUTBUFFER nvEncCreateInputBuffer; /**< [out]: Client should access ::NvEncCreateInputBuffer() API through this pointer. */
+ PNVENCDESTROYINPUTBUFFER nvEncDestroyInputBuffer; /**< [out]: Client should access ::NvEncDestroyInputBuffer() API through this pointer. */
+ PNVENCCREATEBITSTREAMBUFFER nvEncCreateBitstreamBuffer; /**< [out]: Client should access ::NvEncCreateBitstreamBuffer() API through this pointer. */
+ PNVENCDESTROYBITSTREAMBUFFER nvEncDestroyBitstreamBuffer; /**< [out]: Client should access ::NvEncDestroyBitstreamBuffer() API through this pointer. */
+ PNVENCENCODEPICTURE nvEncEncodePicture; /**< [out]: Client should access ::NvEncEncodePicture() API through this pointer. */
+ PNVENCLOCKBITSTREAM nvEncLockBitstream; /**< [out]: Client should access ::NvEncLockBitstream() API through this pointer. */
+ PNVENCUNLOCKBITSTREAM nvEncUnlockBitstream; /**< [out]: Client should access ::NvEncUnlockBitstream() API through this pointer. */
+ PNVENCLOCKINPUTBUFFER nvEncLockInputBuffer; /**< [out]: Client should access ::NvEncLockInputBuffer() API through this pointer. */
+ PNVENCUNLOCKINPUTBUFFER nvEncUnlockInputBuffer; /**< [out]: Client should access ::NvEncUnlockInputBuffer() API through this pointer. */
+ PNVENCGETENCODESTATS nvEncGetEncodeStats; /**< [out]: Client should access ::NvEncGetEncodeStats() API through this pointer. */
+ PNVENCGETSEQUENCEPARAMS nvEncGetSequenceParams; /**< [out]: Client should access ::NvEncGetSequenceParams() API through this pointer. */
+ PNVENCREGISTERASYNCEVENT nvEncRegisterAsyncEvent; /**< [out]: Client should access ::NvEncRegisterAsyncEvent() API through this pointer. */
+ PNVENCUNREGISTERASYNCEVENT nvEncUnregisterAsyncEvent; /**< [out]: Client should access ::NvEncUnregisterAsyncEvent() API through this pointer. */
+ PNVENCMAPINPUTRESOURCE nvEncMapInputResource; /**< [out]: Client should access ::NvEncMapInputResource() API through this pointer. */
+ PNVENCUNMAPINPUTRESOURCE nvEncUnmapInputResource; /**< [out]: Client should access ::NvEncUnmapInputResource() API through this pointer. */
+ PNVENCDESTROYENCODER nvEncDestroyEncoder; /**< [out]: Client should access ::NvEncDestroyEncoder() API through this pointer. */
+ PNVENCINVALIDATEREFFRAMES nvEncInvalidateRefFrames; /**< [out]: Client should access ::NvEncInvalidateRefFrames() API through this pointer. */
+ PNVENCOPENENCODESESSIONEX nvEncOpenEncodeSessionEx; /**< [out]: Client should access ::NvEncOpenEncodeSessionEx() API through this pointer. */
+ PNVENCREGISTERRESOURCE nvEncRegisterResource; /**< [out]: Client should access ::NvEncRegisterResource() API through this pointer. */
+ PNVENCUNREGISTERRESOURCE nvEncUnregisterResource; /**< [out]: Client should access ::NvEncUnregisterResource() API through this pointer. */
+ PNVENCRECONFIGUREENCODER nvEncReconfigureEncoder; /**< [out]: Client should access ::NvEncReconfigureEncoder() API through this pointer. */
+ void* reserved1;
+ PNVENCCREATEMVBUFFER nvEncCreateMVBuffer; /**< [out]: Client should access ::NvEncCreateMVBuffer API through this pointer. */
+ PNVENCDESTROYMVBUFFER nvEncDestroyMVBuffer; /**< [out]: Client should access ::NvEncDestroyMVBuffer API through this pointer. */
+ PNVENCRUNMOTIONESTIMATIONONLY nvEncRunMotionEstimationOnly; /**< [out]: Client should access ::NvEncRunMotionEstimationOnly API through this pointer. */
+ PNVENCGETLASTERROR nvEncGetLastErrorString; /**< [out]: Client should access ::NvEncGetLastErrorString() API through this pointer. */
+ PNVENCSETIOCUDASTREAMS nvEncSetIOCudaStreams; /**< [out]: Client should access ::NvEncSetIOCudaStreams() API through this pointer. */
+ PNVENCGETENCODEPRESETCONFIGEX nvEncGetEncodePresetConfigEx; /**< [out]: Client should access ::NvEncGetEncodePresetConfigEx() API through this pointer. */
+ PNVENCGETSEQUENCEPARAMEX nvEncGetSequenceParamEx; /**< [out]: Client should access ::NvEncGetSequenceParamEx() API through this pointer. */
+ void* reserved2[277]; /**< [in]: Reserved and must be set to NULL */
+} NV_ENCODE_API_FUNCTION_LIST;
+
+/** Macro for constructing the version field of ::_NV_ENCODE_API_FUNCTION_LIST. */
+#define NV_ENCODE_API_FUNCTION_LIST_VER NVENCAPI_STRUCT_VERSION(2)
+
+// NvEncodeAPICreateInstance
+/**
+ * \ingroup ENCODE_FUNC
+ * Entry Point to the NvEncodeAPI interface.
+ *
+ * Creates an instance of the NvEncodeAPI interface, and populates the
+ * \p functionList with function pointers to the API routines implemented by the
+ * NvEncodeAPI interface.
+ *
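+ * A minimal illustrative sketch:
+ *\code
+ NV_ENCODE_API_FUNCTION_LIST nvenc = { 0 };
+ nvenc.version = NV_ENCODE_API_FUNCTION_LIST_VER;
+ if (NvEncodeAPICreateInstance(&nvenc) == NV_ENC_SUCCESS) {
+     // All NvEnc* entry points are now reachable through the |nvenc| members,
+     // e.g. nvenc.nvEncOpenEncodeSessionEx(...).
+ }
+ * \endcode
+ *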
+ * \param [out] functionList
+ * Pointer to the ::_NV_ENCODE_API_FUNCTION_LIST structure to be populated with
+ * function pointers to the API routines.
+ *
+ * \return
+ * ::NV_ENC_SUCCESS
+ * ::NV_ENC_ERR_INVALID_PTR
+ */
+NVENCSTATUS NVENCAPI NvEncodeAPICreateInstance(NV_ENCODE_API_FUNCTION_LIST *functionList);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
+
diff --git a/extra/gpu-screen-recorder.env b/extra/gpu-screen-recorder.env
index ce9f223..8ce1edb 100644
--- a/extra/gpu-screen-recorder.env
+++ b/extra/gpu-screen-recorder.env
@@ -3,9 +3,11 @@ CONTAINER=mp4
QUALITY=very_high
CODEC=h264
AUDIO_CODEC=opus
-AUDIO_DEVICE=alsa_output.pci-0000_0a_00.4.iec958-stereo.monitor
-SECONDARY_AUDIO_DEVICE=alsa_input.some-mic.mono-fallback
+AUDIO_DEVICE=default_output
+SECONDARY_AUDIO_DEVICE=default_input
FRAMERATE=60
REPLAYDURATION=60
OUTPUTDIR=/run/media/dec05eba/SSD1TB/Videos/aaaa
KEYINT=2
+ENCODER=gpu
+RESTORE_PORTAL_SESSION=yes
\ No newline at end of file
diff --git a/extra/gpu-screen-recorder.service b/extra/gpu-screen-recorder.service
index 6933f66..d0912ec 100644
--- a/extra/gpu-screen-recorder.service
+++ b/extra/gpu-screen-recorder.service
@@ -8,7 +8,7 @@ Environment=CONTAINER=mp4
Environment=QUALITY=very_high
Environment=CODEC=auto
Environment=AUDIO_CODEC=opus
-Environment=AUDIO_DEVICE=
+Environment=AUDIO_DEVICE=default_output
Environment=SECONDARY_AUDIO_DEVICE=
Environment=FRAMERATE=60
Environment=REPLAYDURATION=30
@@ -16,7 +16,10 @@ Environment=OUTPUTDIR=%h/Videos
Environment=MAKEFOLDERS=no
Environment=COLOR_RANGE=limited
Environment=KEYINT=2
-ExecStart=/bin/sh -c 'AUDIO="${AUDIO_DEVICE:-$(pactl get-default-sink).monitor}"; gpu-screen-recorder -v no -w $WINDOW -c $CONTAINER -q $QUALITY -k $CODEC -ac $AUDIO_CODEC -a "$AUDIO" -a "$SECONDARY_AUDIO_DEVICE" -f $FRAMERATE -r $REPLAYDURATION -o "$OUTPUTDIR" -mf $MAKEFOLDERS $ADDITIONAL_ARGS -cr $COLOR_RANGE -keyint $KEYINT'
+Environment=ENCODER=gpu
+Environment=RESTORE_PORTAL_SESSION=yes
+Environment=ADDITIONAL_ARGS=
+ExecStart=gpu-screen-recorder -v no -w "${WINDOW}" -c "${CONTAINER}" -q "${QUALITY}" -k "${CODEC}" -ac "${AUDIO_CODEC}" -a "${AUDIO_DEVICE}" -a "${SECONDARY_AUDIO_DEVICE}" -f "${FRAMERATE}" -r "${REPLAYDURATION}" -o "${OUTPUTDIR}" -df "${MAKEFOLDERS}" $ADDITIONAL_ARGS -cr "${COLOR_RANGE}" -keyint "${KEYINT}" -restore-portal-session "${RESTORE_PORTAL_SESSION}" -encoder "${ENCODER}"
KillSignal=SIGINT
Restart=on-failure
RestartSec=5s
diff --git a/extra/install_preserve_video_memory.sh b/extra/install_preserve_video_memory.sh
deleted file mode 100755
index c5cf658..0000000
--- a/extra/install_preserve_video_memory.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-script_dir=$(dirname "$0")
-cd "$script_dir"
-
-[ $(id -u) -ne 0 ] && echo "You need root privileges to run the install script" && exit 1
-
-install -Dm644 gsr-nvidia.conf /etc/modprobe.d/gsr-nvidia.conf
diff --git a/extra/meson_post_install.sh b/extra/meson_post_install.sh
index f1f6a5a..7bf0d96 100755
--- a/extra/meson_post_install.sh
+++ b/extra/meson_post_install.sh
@@ -1,5 +1,17 @@
#!/bin/sh
-setcap cap_sys_admin+ep ${MESON_INSTALL_DESTDIR_PREFIX}/bin/gsr-kms-server \
+# Needed to remove password prompt when recording a monitor (without desktop portal option) on amd/intel or nvidia wayland
+/usr/sbin/setcap cap_sys_admin+ep ${MESON_INSTALL_DESTDIR_PREFIX}/bin/gsr-kms-server \
|| echo "\n!!! Please re-run install as root\n"
-setcap cap_sys_nice+ep ${MESON_INSTALL_DESTDIR_PREFIX}/bin/gpu-screen-recorder
+
+# Can't do this because it breaks the desktop portal (create session)!!!
+# For some reason the desktop portal tries to access /proc/gpu-screen-recorder-pid/root from the portal process,
+# which doesn't work because for some reason CAP_SYS_NICE on a program makes /proc/self/root not readable by other processes.
+# The reason the portal reads that file might be because the portal seems to have a security feature where it's able to identify the
+# process, and if the session token is stolen by another application then it will ignore the session token as it wasn't that
+# application that created the session token.
+# ---
+# This is needed (for EGL_CONTEXT_PRIORITY_HIGH_IMG) to allow gpu screen recorder to run faster than the heaviest application on AMD.
+# For example when trying to record a game at 60 fps and the game drops to 45 fps in some place that would also make gpu screen recorder
+# drop to 45 fps unless this setcap is used.
+#/usr/sbin/setcap cap_sys_nice+ep ${MESON_INSTALL_DESTDIR_PREFIX}/bin/gpu-screen-recorder
diff --git a/include/capture/capture.h b/include/capture/capture.h
index 026a955..7c8887d 100644
--- a/include/capture/capture.h
+++ b/include/capture/capture.h
@@ -3,22 +3,29 @@
#include "../color_conversion.h"
#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
typedef struct AVCodecContext AVCodecContext;
+typedef struct AVStream AVStream;
typedef struct AVFrame AVFrame;
typedef struct gsr_capture gsr_capture;
+typedef struct AVMasteringDisplayMetadata AVMasteringDisplayMetadata;
+typedef struct AVContentLightMetadata AVContentLightMetadata;
struct gsr_capture {
/* These methods should not be called manually. Call gsr_capture_* instead */
int (*start)(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame);
- void (*tick)(gsr_capture *cap, AVCodecContext *video_codec_context); /* can be NULL */
- bool (*is_damaged)(gsr_capture *cap); /* can be NULL */
- void (*clear_damage)(gsr_capture *cap); /* can be NULL */
+ void (*on_event)(gsr_capture *cap, gsr_egl *egl); /* can be NULL */
+ void (*tick)(gsr_capture *cap); /* can be NULL. If there is an event then |on_event| is called before this */
bool (*should_stop)(gsr_capture *cap, bool *err); /* can be NULL. If NULL, return false */
int (*capture)(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion);
- void (*capture_end)(gsr_capture *cap, AVFrame *frame); /* can be NULL */
gsr_source_color (*get_source_color)(gsr_capture *cap);
bool (*uses_external_image)(gsr_capture *cap); /* can be NULL. If NULL, return false */
+ bool (*set_hdr_metadata)(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata); /* can be NULL. If NULL, return false */
+ uint64_t (*get_window_id)(gsr_capture *cap); /* can be NULL. Returns 0 if unknown */
+ bool (*is_damaged)(gsr_capture *cap); /* can be NULL */
+ void (*clear_damage)(gsr_capture *cap); /* can be NULL */
void (*destroy)(gsr_capture *cap, AVCodecContext *video_codec_context);
void *priv; /* can be NULL */
@@ -26,12 +33,13 @@ struct gsr_capture {
};
int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame);
-void gsr_capture_tick(gsr_capture *cap, AVCodecContext *video_codec_context);
+void gsr_capture_on_event(gsr_capture *cap, gsr_egl *egl);
+void gsr_capture_tick(gsr_capture *cap);
bool gsr_capture_should_stop(gsr_capture *cap, bool *err);
int gsr_capture_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion);
-void gsr_capture_capture_end(gsr_capture *cap, AVFrame *frame);
gsr_source_color gsr_capture_get_source_color(gsr_capture *cap);
bool gsr_capture_uses_external_image(gsr_capture *cap);
+bool gsr_capture_set_hdr_metadata(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata);
void gsr_capture_destroy(gsr_capture *cap, AVCodecContext *video_codec_context);
#endif /* GSR_CAPTURE_CAPTURE_H */
diff --git a/include/capture/kms.h b/include/capture/kms.h
index 278a391..646928e 100644
--- a/include/capture/kms.h
+++ b/include/capture/kms.h
@@ -6,9 +6,11 @@
typedef struct {
gsr_egl *egl;
const char *display_to_capture; /* if this is "screen", then the first monitor is captured. A copy is made of this */
+ gsr_color_depth color_depth;
gsr_color_range color_range;
bool hdr;
bool record_cursor;
+ int fps;
} gsr_capture_kms_params;
gsr_capture* gsr_capture_kms_create(const gsr_capture_kms_params *params);
diff --git a/include/capture/nvfbc.h b/include/capture/nvfbc.h
index e24b712..95ca88d 100644
--- a/include/capture/nvfbc.h
+++ b/include/capture/nvfbc.h
@@ -11,8 +11,7 @@ typedef struct {
vec2i pos;
vec2i size;
bool direct_capture;
- bool overclock;
- bool hdr;
+ gsr_color_depth color_depth;
gsr_color_range color_range;
bool record_cursor;
bool use_software_video_encoder;
diff --git a/include/capture/portal.h b/include/capture/portal.h
new file mode 100644
index 0000000..2e2c6f2
--- /dev/null
+++ b/include/capture/portal.h
@@ -0,0 +1,18 @@
+#ifndef GSR_CAPTURE_PORTAL_H
+#define GSR_CAPTURE_PORTAL_H
+
+#include "capture.h"
+
+typedef struct {
+ gsr_egl *egl;
+ gsr_color_depth color_depth;
+ gsr_color_range color_range;
+ bool record_cursor;
+ bool restore_portal_session;
+ /* If this is set to NULL then this defaults to $XDG_CONFIG_HOME/gpu-screen-recorder/restore_token ($XDG_CONFIG_HOME defaults to $HOME/.config) */
+ const char *portal_session_token_filepath;
+} gsr_capture_portal_params;
+
+gsr_capture* gsr_capture_portal_create(const gsr_capture_portal_params *params);
+
+#endif /* GSR_CAPTURE_PORTAL_H */
diff --git a/include/capture/xcomposite.h b/include/capture/xcomposite.h
index 707421f..8c87404 100644
--- a/include/capture/xcomposite.h
+++ b/include/capture/xcomposite.h
@@ -11,7 +11,7 @@ typedef struct {
vec2i region_size; /* This is currently only used with |follow_focused| */
gsr_color_range color_range;
bool record_cursor;
- bool track_damage;
+ gsr_color_depth color_depth;
} gsr_capture_xcomposite_params;
gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *params);
diff --git a/include/codec_query/codec_query.h b/include/codec_query/codec_query.h
new file mode 100644
index 0000000..316217d
--- /dev/null
+++ b/include/codec_query/codec_query.h
@@ -0,0 +1,23 @@
+#ifndef GSR_CODEC_QUERY_H
+#define GSR_CODEC_QUERY_H
+
+#include <stdbool.h>
+
+typedef struct {
+ bool supported;
+ bool low_power;
+} gsr_supported_video_codec;
+
+typedef struct {
+ gsr_supported_video_codec h264;
+ gsr_supported_video_codec hevc;
+ gsr_supported_video_codec hevc_hdr;
+ gsr_supported_video_codec hevc_10bit;
+ gsr_supported_video_codec av1;
+ gsr_supported_video_codec av1_hdr;
+ gsr_supported_video_codec av1_10bit;
+ gsr_supported_video_codec vp8;
+ gsr_supported_video_codec vp9;
+} gsr_supported_video_codecs;
+
+#endif /* GSR_CODEC_QUERY_H */
diff --git a/include/codec_query/nvenc.h b/include/codec_query/nvenc.h
new file mode 100644
index 0000000..c01acf6
--- /dev/null
+++ b/include/codec_query/nvenc.h
@@ -0,0 +1,8 @@
+#ifndef GSR_CODEC_QUERY_NVENC_H
+#define GSR_CODEC_QUERY_NVENC_H
+
+#include "codec_query.h"
+
+bool gsr_get_supported_video_codecs_nvenc(gsr_supported_video_codecs *video_codecs, bool cleanup);
+
+#endif /* GSR_CODEC_QUERY_NVENC_H */
diff --git a/include/codec_query/vaapi.h b/include/codec_query/vaapi.h
new file mode 100644
index 0000000..60bdeca
--- /dev/null
+++ b/include/codec_query/vaapi.h
@@ -0,0 +1,8 @@
+#ifndef GSR_CODEC_QUERY_VAAPI_H
+#define GSR_CODEC_QUERY_VAAPI_H
+
+#include "codec_query.h"
+
+bool gsr_get_supported_video_codecs_vaapi(gsr_supported_video_codecs *video_codecs, const char *card_path, bool cleanup);
+
+#endif /* GSR_CODEC_QUERY_VAAPI_H */
diff --git a/include/codec_query/vulkan.h b/include/codec_query/vulkan.h
new file mode 100644
index 0000000..bb06c6b
--- /dev/null
+++ b/include/codec_query/vulkan.h
@@ -0,0 +1,8 @@
+#ifndef GSR_CODEC_QUERY_VULKAN_H
+#define GSR_CODEC_QUERY_VULKAN_H
+
+#include "codec_query.h"
+
+bool gsr_get_supported_video_codecs_vulkan(gsr_supported_video_codecs *video_codecs, const char *card_path, bool cleanup);
+
+#endif /* GSR_CODEC_QUERY_VULKAN_H */
diff --git a/include/color_conversion.h b/include/color_conversion.h
index d05df6a..236bfbd 100644
--- a/include/color_conversion.h
+++ b/include/color_conversion.h
@@ -11,6 +11,11 @@ typedef enum {
} gsr_color_range;
typedef enum {
+ GSR_COLOR_DEPTH_8_BITS,
+ GSR_COLOR_DEPTH_10_BITS
+} gsr_color_depth;
+
+typedef enum {
GSR_SOURCE_COLOR_RGB,
GSR_SOURCE_COLOR_BGR
} gsr_source_color;
diff --git a/include/cursor.h b/include/cursor.h
index 2f26dfd..1564714 100644
--- a/include/cursor.h
+++ b/include/cursor.h
@@ -8,7 +8,6 @@ typedef struct {
gsr_egl *egl;
Display *display;
int x_fixes_event_base;
- int xi_opcode;
unsigned int texture_id;
vec2i size;
@@ -17,14 +16,13 @@ typedef struct {
bool cursor_image_set;
bool visible;
- bool cursor_moved;
} gsr_cursor;
int gsr_cursor_init(gsr_cursor *self, gsr_egl *egl, Display *display);
void gsr_cursor_deinit(gsr_cursor *self);
/* Returns true if the cursor image has updated or if the cursor has moved */
-bool gsr_cursor_update(gsr_cursor *self, XEvent *xev);
+bool gsr_cursor_on_event(gsr_cursor *self, XEvent *xev);
void gsr_cursor_tick(gsr_cursor *self, Window relative_to);
#endif /* GSR_CURSOR_H */
diff --git a/include/damage.h b/include/damage.h
new file mode 100644
index 0000000..7229418
--- /dev/null
+++ b/include/damage.h
@@ -0,0 +1,51 @@
+#ifndef GSR_DAMAGE_H
+#define GSR_DAMAGE_H
+
+#include "cursor.h"
+#include "utils.h"
+#include <stdbool.h>
+#include <stdint.h>
+
+typedef struct _XDisplay Display;
+typedef union _XEvent XEvent;
+
+typedef enum {
+ GSR_DAMAGE_TRACK_NONE,
+ GSR_DAMAGE_TRACK_WINDOW,
+ GSR_DAMAGE_TRACK_MONITOR
+} gsr_damage_track_type;
+
+typedef struct {
+ gsr_egl *egl;
+ bool track_cursor;
+ gsr_damage_track_type track_type;
+
+ int damage_event;
+ int damage_error;
+ uint64_t damage;
+ bool damaged;
+
+ int randr_event;
+ int randr_error;
+
+ uint64_t window;
+ //vec2i window_pos;
+ vec2i window_size;
+
+ gsr_cursor cursor; /* Relative to |window| */
+ gsr_monitor monitor;
+ char monitor_name[32];
+} gsr_damage;
+
+bool gsr_damage_init(gsr_damage *self, gsr_egl *egl, bool track_cursor);
+void gsr_damage_deinit(gsr_damage *self);
+
+bool gsr_damage_set_target_window(gsr_damage *self, uint64_t window);
+bool gsr_damage_set_target_monitor(gsr_damage *self, const char *monitor_name);
+void gsr_damage_on_event(gsr_damage *self, XEvent *xev);
+void gsr_damage_tick(gsr_damage *self);
+/* Also returns true if damage tracking is not available */
+bool gsr_damage_is_damaged(gsr_damage *self);
+void gsr_damage_clear(gsr_damage *self);
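+
+/* Minimal illustrative sketch of the expected call order ("DP-1" is an arbitrary
+   example monitor name; error handling omitted):
+
+    gsr_damage damage;
+    gsr_damage_init(&damage, egl, true);
+    gsr_damage_set_target_monitor(&damage, "DP-1");
+    // Per frame: feed X events, tick, then test and clear.
+    gsr_damage_on_event(&damage, &xev);
+    gsr_damage_tick(&damage);
+    if (gsr_damage_is_damaged(&damage)) {
+        // capture/encode the frame, then:
+        gsr_damage_clear(&damage);
+    }
+    gsr_damage_deinit(&damage);
+*/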
+
+#endif /* GSR_DAMAGE_H */
diff --git a/include/dbus.h b/include/dbus.h
new file mode 100644
index 0000000..6978634
--- /dev/null
+++ b/include/dbus.h
@@ -0,0 +1,45 @@
+#ifndef GSR_DBUS_H
+#define GSR_DBUS_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <dbus/dbus.h>
+
+#define DBUS_RANDOM_STR_SIZE 16
+
+typedef struct {
+ DBusConnection *con;
+ DBusError err;
+ char random_str[DBUS_RANDOM_STR_SIZE + 1];
+ unsigned int handle_counter;
+ bool desktop_portal_rule_added;
+ uint32_t screencast_version;
+ char *screencast_restore_token;
+} gsr_dbus;
+
+typedef enum {
+ GSR_PORTAL_CAPTURE_TYPE_MONITOR = 1 << 0,
+ GSR_PORTAL_CAPTURE_TYPE_WINDOW = 1 << 1,
+ GSR_PORTAL_CAPTURE_TYPE_VIRTUAL = 1 << 2,
+ GSR_PORTAL_CAPTURE_TYPE_ALL = GSR_PORTAL_CAPTURE_TYPE_MONITOR | GSR_PORTAL_CAPTURE_TYPE_WINDOW | GSR_PORTAL_CAPTURE_TYPE_VIRTUAL
+} gsr_portal_capture_type;
+
+typedef enum {
+ GSR_PORTAL_CURSOR_MODE_HIDDEN = 1 << 0,
+ GSR_PORTAL_CURSOR_MODE_EMBEDDED = 1 << 1,
+ GSR_PORTAL_CURSOR_MODE_METADATA = 1 << 2
+} gsr_portal_cursor_mode;
+
+/* Blocking. TODO: Make non-blocking */
+bool gsr_dbus_init(gsr_dbus *self, const char *screencast_restore_token);
+void gsr_dbus_deinit(gsr_dbus *self);
+
+/* The following functions should be called in order to set up the ScreenCast session properly */
+/* The functions that return an int return the response status code */
+int gsr_dbus_screencast_create_session(gsr_dbus *self, char **session_handle);
+int gsr_dbus_screencast_select_sources(gsr_dbus *self, const char *session_handle, gsr_portal_capture_type capture_type, gsr_portal_cursor_mode cursor_mode);
+int gsr_dbus_screencast_start(gsr_dbus *self, const char *session_handle, uint32_t *pipewire_node);
+bool gsr_dbus_screencast_open_pipewire_remote(gsr_dbus *self, const char *session_handle, int *pipewire_fd);
+const char* gsr_dbus_screencast_get_restore_token(gsr_dbus *self);
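+
+/* Minimal illustrative sketch of the expected call order (error handling omitted;
+   the capture type and cursor mode values are arbitrary examples):
+
+    gsr_dbus dbus;
+    char *session_handle = NULL;
+    uint32_t pipewire_node = 0;
+    int pipewire_fd = -1;
+
+    gsr_dbus_init(&dbus, NULL);
+    gsr_dbus_screencast_create_session(&dbus, &session_handle);
+    gsr_dbus_screencast_select_sources(&dbus, session_handle, GSR_PORTAL_CAPTURE_TYPE_ALL, GSR_PORTAL_CURSOR_MODE_EMBEDDED);
+    gsr_dbus_screencast_start(&dbus, session_handle, &pipewire_node);
+    gsr_dbus_screencast_open_pipewire_remote(&dbus, session_handle, &pipewire_fd);
+*/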
+
+#endif /* GSR_DBUS_H */
diff --git a/include/defs.h b/include/defs.h
index 473583c..8fd2ddc 100644
--- a/include/defs.h
+++ b/include/defs.h
@@ -1,6 +1,8 @@
#ifndef GSR_DEFS_H
#define GSR_DEFS_H
+#include <stdbool.h>
+
typedef enum {
GSR_GPU_VENDOR_AMD,
GSR_GPU_VENDOR_INTEL,
@@ -10,6 +12,7 @@ typedef enum {
typedef struct {
gsr_gpu_vendor vendor;
int gpu_version; /* 0 if unknown */
+ bool is_steam_deck;
} gsr_gpu_info;
typedef enum {
diff --git a/include/egl.h b/include/egl.h
index 899c0d0..82014b9 100644
--- a/include/egl.h
+++ b/include/egl.h
@@ -43,7 +43,9 @@ typedef void(*__GLXextFuncPtr)(void);
#define EGL_BUFFER_SIZE 0x3020
#define EGL_RENDERABLE_TYPE 0x3040
#define EGL_OPENGL_API 0x30A2
+#define EGL_OPENGL_ES_API 0x30A0
#define EGL_OPENGL_BIT 0x0008
+#define EGL_OPENGL_ES_BIT 0x0001
#define EGL_NONE 0x3038
#define EGL_CONTEXT_CLIENT_VERSION 0x3098
#define EGL_BACK_BUFFER 0x3084
@@ -57,8 +59,23 @@ typedef void(*__GLXextFuncPtr)(void);
#define EGL_DMA_BUF_PLANE0_FD_EXT 0x3272
#define EGL_DMA_BUF_PLANE0_OFFSET_EXT 0x3273
#define EGL_DMA_BUF_PLANE0_PITCH_EXT 0x3274
+#define EGL_DMA_BUF_PLANE1_FD_EXT 0x3275
+#define EGL_DMA_BUF_PLANE1_OFFSET_EXT 0x3276
+#define EGL_DMA_BUF_PLANE1_PITCH_EXT 0x3277
+#define EGL_DMA_BUF_PLANE2_FD_EXT 0x3278
+#define EGL_DMA_BUF_PLANE2_OFFSET_EXT 0x3279
+#define EGL_DMA_BUF_PLANE2_PITCH_EXT 0x327A
+#define EGL_DMA_BUF_PLANE3_FD_EXT 0x3440
+#define EGL_DMA_BUF_PLANE3_OFFSET_EXT 0x3441
+#define EGL_DMA_BUF_PLANE3_PITCH_EXT 0x3442
#define EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT 0x3443
#define EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT 0x3444
+#define EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT 0x3445
+#define EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT 0x3446
+#define EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT 0x3447
+#define EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT 0x3448
+#define EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT 0x3449
+#define EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT 0x344A
#define EGL_LINUX_DMA_BUF_EXT 0x3270
#define EGL_RED_SIZE 0x3024
#define EGL_ALPHA_SIZE 0x3021
@@ -78,9 +95,9 @@ typedef void(*__GLXextFuncPtr)(void);
#define GL_TEXTURE_2D 0x0DE1
#define GL_TEXTURE_EXTERNAL_OES 0x8D65
#define GL_RED 0x1903
-#define GL_GREEN 0x1904
-#define GL_BLUE 0x1905
-#define GL_ALPHA 0x1906
+#define GL_GREEN 0x1904
+#define GL_BLUE 0x1905
+#define GL_ALPHA 0x1906
#define GL_TEXTURE_SWIZZLE_RGBA 0x8E46
#define GL_RG 0x8227
#define GL_RGB 0x1907
@@ -132,12 +149,25 @@ typedef int (*FUNC_glXSwapIntervalSGI)(int interval);
typedef void (*GLDEBUGPROC)(unsigned int source, unsigned int type, unsigned int id, unsigned int severity, int length, const char *message, const void *userParam);
typedef int (*FUNC_eglQueryDisplayAttribEXT)(EGLDisplay dpy, int32_t attribute, intptr_t *value);
typedef const char* (*FUNC_eglQueryDeviceStringEXT)(void *device, int32_t name);
+typedef int (*FUNC_eglQueryDmaBufModifiersEXT)(EGLDisplay dpy, int32_t format, int32_t max_modifiers, uint64_t *modifiers, int *external_only, int32_t *num_modifiers);
#define GSR_MAX_OUTPUTS 32
typedef struct {
+ char *name;
+ vec2i pos;
+ vec2i size;
+ uint32_t connector_id;
+ gsr_monitor_rotation rotation;
+ uint32_t monitor_identifier; /* crtc id */
+} gsr_x11_output;
+
+typedef struct {
Display *dpy;
Window window;
+ gsr_x11_output outputs[GSR_MAX_OUTPUTS];
+ int num_outputs;
+ XEvent xev;
} gsr_x11;
typedef struct {
@@ -164,6 +194,11 @@ typedef enum {
GSR_GL_CONTEXT_TYPE_GLX
} gsr_gl_context_type;
+typedef enum {
+ GSR_DISPLAY_SERVER_X11,
+ GSR_DISPLAY_SERVER_WAYLAND
+} gsr_display_server;
+
typedef struct gsr_egl gsr_egl;
struct gsr_egl {
void *egl_library;
@@ -208,6 +243,7 @@ struct gsr_egl {
FUNC_glEGLImageTargetTexture2DOES glEGLImageTargetTexture2DOES;
FUNC_eglQueryDisplayAttribEXT eglQueryDisplayAttribEXT;
FUNC_eglQueryDeviceStringEXT eglQueryDeviceStringEXT;
+ FUNC_eglQueryDmaBufModifiersEXT eglQueryDmaBufModifiersEXT;
__GLXextFuncPtr (*glXGetProcAddress)(const unsigned char *procName);
GLXFBConfig* (*glXChooseFBConfig)(Display *dpy, int screen, const int *attribList, int *nitems);
@@ -236,8 +272,6 @@ struct gsr_egl {
void (*glTexParameteriv)(unsigned int target, unsigned int pname, const int *params);
void (*glGetTexLevelParameteriv)(unsigned int target, int level, unsigned int pname, int *params);
void (*glTexImage2D)(unsigned int target, int level, int internalFormat, int width, int height, int border, unsigned int format, unsigned int type, const void *pixels);
- void (*glCopyImageSubData)(unsigned int srcName, unsigned int srcTarget, int srcLevel, int srcX, int srcY, int srcZ, unsigned int dstName, unsigned int dstTarget, int dstLevel, int dstX, int dstY, int dstZ, int srcWidth, int srcHeight, int srcDepth);
- void (*glClearTexImage)(unsigned int texture, unsigned int level, unsigned int format, unsigned int type, const void *data);
void (*glGetTexImage)(unsigned int target, int level, unsigned int format, unsigned int type, void *pixels);
void (*glGenFramebuffers)(int n, unsigned int *framebuffers);
void (*glBindFramebuffer)(unsigned int target, unsigned int framebuffer);
@@ -279,11 +313,20 @@ struct gsr_egl {
void (*glUniform2f)(int location, float v0, float v1);
void (*glDebugMessageCallback)(GLDEBUGPROC callback, const void *userParam);
void (*glScissor)(int x, int y, int width, int height);
+ void (*glReadPixels)(int x, int y, int width, int height, unsigned int format, unsigned int type, void *pixels);
+ void* (*glMapBuffer)(unsigned int target, unsigned int access);
+ unsigned char (*glUnmapBuffer)(unsigned int target);
};
bool gsr_egl_load(gsr_egl *self, Display *dpy, bool wayland, bool is_monitor_capture);
void gsr_egl_unload(gsr_egl *self);
-void gsr_egl_update(gsr_egl *self);
+/* Returns true if an event is available */
+bool gsr_egl_process_event(gsr_egl *self);
+/* Does an opengl buffer swap with egl or glx, depending on which one is active */
+void gsr_egl_swap_buffers(gsr_egl *self);
+
+gsr_display_server gsr_egl_get_display_server(const gsr_egl *self);
+XEvent* gsr_egl_get_event_data(gsr_egl *self);
#endif /* GSR_EGL_H */
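
The newly added eglQueryDmaBufModifiersEXT pointer and the per-plane EGL_DMA_BUF_PLANE* attributes enable multi-planar dma-buf import. A small sketch of enumerating modifiers for one format, where DRM_FORMAT_ARGB8888 (from libdrm's drm_fourcc.h) is just an example fourcc:

    /* Sketch: list the modifiers the driver supports for one dma-buf format. */
    uint64_t modifiers[64];
    int external_only[64];
    int32_t num_modifiers = 0;
    if(egl->eglQueryDmaBufModifiersEXT &&
       egl->eglQueryDmaBufModifiersEXT(egl->egl_display, DRM_FORMAT_ARGB8888,
           64, modifiers, external_only, &num_modifiers)) {
        for(int32_t i = 0; i < num_modifiers; ++i) {
            /* external_only[i] != 0 means this format+modifier must be sampled
               through GL_TEXTURE_EXTERNAL_OES rather than GL_TEXTURE_2D */
        }
    }
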
diff --git a/include/encoder/video/cuda.h b/include/encoder/video/cuda.h
index 802e72e..6d32e09 100644
--- a/include/encoder/video/cuda.h
+++ b/include/encoder/video/cuda.h
@@ -8,7 +8,7 @@ typedef struct gsr_egl gsr_egl;
typedef struct {
gsr_egl *egl;
bool overclock;
- bool hdr;
+ gsr_color_depth color_depth;
} gsr_video_encoder_cuda_params;
gsr_video_encoder* gsr_video_encoder_cuda_create(const gsr_video_encoder_cuda_params *params);
diff --git a/include/encoder/video/software.h b/include/encoder/video/software.h
index e39d5f8..fd2dc6b 100644
--- a/include/encoder/video/software.h
+++ b/include/encoder/video/software.h
@@ -7,7 +7,7 @@ typedef struct gsr_egl gsr_egl;
typedef struct {
gsr_egl *egl;
- bool hdr;
+ gsr_color_depth color_depth;
} gsr_video_encoder_software_params;
gsr_video_encoder* gsr_video_encoder_software_create(const gsr_video_encoder_software_params *params);
diff --git a/include/encoder/video/vaapi.h b/include/encoder/video/vaapi.h
index 2981a01..b509f17 100644
--- a/include/encoder/video/vaapi.h
+++ b/include/encoder/video/vaapi.h
@@ -7,7 +7,7 @@ typedef struct gsr_egl gsr_egl;
typedef struct {
gsr_egl *egl;
- bool hdr;
+ gsr_color_depth color_depth;
} gsr_video_encoder_vaapi_params;
gsr_video_encoder* gsr_video_encoder_vaapi_create(const gsr_video_encoder_vaapi_params *params);
diff --git a/include/encoder/video/video.h b/include/encoder/video/video.h
index 21338d6..49f48bd 100644
--- a/include/encoder/video/video.h
+++ b/include/encoder/video/video.h
@@ -10,7 +10,7 @@ typedef struct AVFrame AVFrame;
struct gsr_video_encoder {
bool (*start)(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame);
- void (*copy_textures_to_frame)(gsr_video_encoder *encoder, AVFrame *frame); /* Can be NULL */
+ void (*copy_textures_to_frame)(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion); /* Can be NULL */
/* |textures| should be able to fit 2 elements */
void (*get_textures)(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color);
void (*destroy)(gsr_video_encoder *encoder, AVCodecContext *video_codec_context);
@@ -20,7 +20,7 @@ struct gsr_video_encoder {
};
bool gsr_video_encoder_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame);
-void gsr_video_encoder_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame);
+void gsr_video_encoder_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion);
void gsr_video_encoder_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color);
void gsr_video_encoder_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context);
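
A per-frame sketch of the updated interface, with the new gsr_color_conversion argument threaded through; |encoder|, |frame| and |color_conversion| are assumed to be set up elsewhere:

    /* Sketch: render into the encoder's textures, then copy them into the AVFrame. */
    unsigned int textures[2];
    int num_textures = 0;
    gsr_destination_color destination_color;
    gsr_video_encoder_get_textures(encoder, textures, &num_textures, &destination_color);
    /* ... draw the captured image into |textures| using |color_conversion| ... */
    gsr_video_encoder_copy_textures_to_frame(encoder, frame, &color_conversion);
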
diff --git a/include/encoder/video/vulkan.h b/include/encoder/video/vulkan.h
new file mode 100644
index 0000000..383fc4f
--- /dev/null
+++ b/include/encoder/video/vulkan.h
@@ -0,0 +1,15 @@
+#ifndef GSR_ENCODER_VIDEO_VULKAN_H
+#define GSR_ENCODER_VIDEO_VULKAN_H
+
+#include "video.h"
+
+typedef struct gsr_egl gsr_egl;
+
+typedef struct {
+ gsr_egl *egl;
+ gsr_color_depth color_depth;
+} gsr_video_encoder_vulkan_params;
+
+gsr_video_encoder* gsr_video_encoder_vulkan_create(const gsr_video_encoder_vulkan_params *params);
+
+#endif /* GSR_ENCODER_VIDEO_VULKAN_H */
diff --git a/include/pipewire.h b/include/pipewire.h
new file mode 100644
index 0000000..1908e2d
--- /dev/null
+++ b/include/pipewire.h
@@ -0,0 +1,112 @@
+#ifndef GSR_PIPEWIRE_H
+#define GSR_PIPEWIRE_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <pthread.h>
+
+#include <spa/utils/hook.h>
+#include <spa/param/video/format.h>
+
+#define GSR_PIPEWIRE_MAX_MODIFIERS 1024
+#define GSR_PIPEWIRE_NUM_VIDEO_FORMATS 6
+#define GSR_PIPEWIRE_DMABUF_MAX_PLANES 4
+
+typedef struct gsr_egl gsr_egl;
+
+typedef struct {
+ int major;
+ int minor;
+ int micro;
+} gsr_pipewire_data_version;
+
+typedef struct {
+ uint32_t fps_num;
+ uint32_t fps_den;
+} gsr_pipewire_video_info;
+
+typedef struct {
+ int fd;
+ uint32_t offset;
+ int32_t stride;
+} gsr_pipewire_dmabuf_data;
+
+typedef struct {
+ int x, y;
+ int width, height;
+} gsr_pipewire_region;
+
+typedef struct {
+ enum spa_video_format format;
+ size_t modifiers_index;
+ size_t modifiers_size;
+} gsr_video_format;
+
+typedef struct {
+ unsigned int texture_id;
+ unsigned int external_texture_id;
+ unsigned int cursor_texture_id;
+} gsr_texture_map;
+
+typedef struct {
+ gsr_egl *egl;
+ int fd;
+ uint32_t node;
+ pthread_mutex_t mutex;
+ bool mutex_initialized;
+
+ struct pw_thread_loop *thread_loop;
+ struct pw_context *context;
+ struct pw_core *core;
+ struct spa_hook core_listener;
+ struct pw_stream *stream;
+ struct spa_hook stream_listener;
+ struct spa_source *reneg;
+ struct spa_video_info format;
+ int server_version_sync;
+ bool negotiated;
+ bool damaged;
+
+ struct {
+ bool visible;
+ bool valid;
+ uint8_t *data;
+ int x, y;
+ int hotspot_x, hotspot_y;
+ int width, height;
+ } cursor;
+
+ struct {
+ bool valid;
+ int x, y;
+ uint32_t width, height;
+ } crop;
+
+ gsr_video_format supported_video_formats[GSR_PIPEWIRE_NUM_VIDEO_FORMATS];
+
+ gsr_pipewire_data_version server_version;
+ gsr_pipewire_video_info video_info;
+ gsr_pipewire_dmabuf_data dmabuf_data[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ size_t dmabuf_num_planes;
+
+ bool no_modifiers_fallback;
+ bool external_texture_fallback;
+
+ uint64_t modifiers[GSR_PIPEWIRE_MAX_MODIFIERS];
+ size_t num_modifiers;
+} gsr_pipewire;
+
+/*
+   |capture_cursor| only applies when capturing a window or region.
+   In other cases |pipewire_node|'s setup determines if the cursor is included.
+   Note that the cursor is not guaranteed to be shown even if set to true; it depends on the wayland compositor.
+*/
+bool gsr_pipewire_init(gsr_pipewire *self, int pipewire_fd, uint32_t pipewire_node, int fps, bool capture_cursor, gsr_egl *egl);
+void gsr_pipewire_deinit(gsr_pipewire *self);
+
+/* |dmabuf_data| should be at least GSR_PIPEWIRE_DMABUF_MAX_PLANES in size */
+bool gsr_pipewire_map_texture(gsr_pipewire *self, gsr_texture_map texture_map, gsr_pipewire_region *region, gsr_pipewire_region *cursor_region, gsr_pipewire_dmabuf_data *dmabuf_data, int *num_dmabuf_data, uint32_t *fourcc, uint64_t *modifiers, bool *using_external_image);
+bool gsr_pipewire_is_damaged(gsr_pipewire *self);
+void gsr_pipewire_clear_damage(gsr_pipewire *self);
+
+#endif /* GSR_PIPEWIRE_H */
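
A hedged sketch of consuming this API: initialize with the fd/node obtained from the desktop portal, then map the latest buffer into GL textures each frame. The texture ids are placeholders created elsewhere:

    /* Sketch: import the most recent PipeWire buffer into GL textures. */
    gsr_pipewire pw;
    if(gsr_pipewire_init(&pw, pipewire_fd, pipewire_node, 60, true, egl)) {
        const gsr_texture_map tex_map = { texture_id, external_texture_id, cursor_texture_id };
        gsr_pipewire_region region, cursor_region;
        gsr_pipewire_dmabuf_data dmabuf_data[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
        int num_dmabuf_data = 0;
        uint32_t fourcc = 0;
        uint64_t modifiers = 0;
        bool using_external_image = false;
        if(gsr_pipewire_map_texture(&pw, tex_map, &region, &cursor_region, dmabuf_data,
               &num_dmabuf_data, &fourcc, &modifiers, &using_external_image)) {
            /* sample tex_map.external_texture_id when |using_external_image| is set,
               otherwise tex_map.texture_id; |region| gives the crop rectangle */
        }
        gsr_pipewire_deinit(&pw);
    }
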
diff --git a/include/sound.hpp b/include/sound.hpp
index 77bec99..7bcc120 100644
--- a/include/sound.hpp
+++ b/include/sound.hpp
@@ -31,6 +31,12 @@ struct AudioInput {
std::string description;
};
+struct AudioDevices {
+ std::string default_output;
+ std::string default_input;
+ std::vector<AudioInput> audio_inputs;
+};
+
struct MergedAudioInputs {
std::vector<AudioInput> audio_inputs;
};
@@ -57,6 +63,6 @@ void sound_device_close(SoundDevice *device);
*/
int sound_device_read_next_chunk(SoundDevice *device, void **buffer, double timeout_sec, double *latency_seconds);
-std::vector<AudioInput> get_pulseaudio_inputs();
+AudioDevices get_pulseaudio_inputs();
#endif /* GPU_SCREEN_RECORDER_H */
diff --git a/include/utils.h b/include/utils.h
index c5d659a..92eb851 100644
--- a/include/utils.h
+++ b/include/utils.h
@@ -6,17 +6,18 @@
#include "../include/defs.h"
#include <stdbool.h>
#include <stdint.h>
-#include <X11/extensions/Xrandr.h>
+
+typedef struct AVCodecContext AVCodecContext;
+typedef struct AVFrame AVFrame;
typedef struct {
const char *name;
int name_len;
vec2i pos;
vec2i size;
- XRRCrtcInfo *crt_info; /* Only on x11 */
uint32_t connector_id; /* Only on x11 and drm */
gsr_monitor_rotation rotation; /* Only on x11 and wayland */
- uint32_t monitor_identifier; /* Only on drm and wayland */
+ uint32_t monitor_identifier; /* On x11 this is the crtc id */
} gsr_monitor;
typedef struct {
@@ -29,7 +30,8 @@ typedef struct {
double clock_get_monotonic_seconds(void);
typedef void (*active_monitor_callback)(const gsr_monitor *monitor, void *userdata);
-void for_each_active_monitor_output_x11(Display *display, active_monitor_callback callback, void *userdata);
+void for_each_active_monitor_output_x11_not_cached(Display *display, active_monitor_callback callback, void *userdata);
+void for_each_active_monitor_output_x11(const gsr_egl *egl, active_monitor_callback callback, void *userdata);
void for_each_active_monitor_output(const gsr_egl *egl, gsr_connection_type connection_type, active_monitor_callback callback, void *userdata);
bool get_monitor_by_name(const gsr_egl *egl, gsr_connection_type connection_type, const char *name, gsr_monitor *monitor);
gsr_monitor_rotation drm_monitor_get_display_server_rotation(const gsr_egl *egl, const gsr_monitor *monitor);
@@ -41,4 +43,12 @@ bool gsr_get_valid_card_path(gsr_egl *egl, char *output, bool is_monitor_capture
/* |render_path| should be at least 128 bytes in size */
bool gsr_card_path_get_render_path(const char *card_path, char *render_path);
+int create_directory_recursive(char *path);
+
+/* |img_attr| needs to be at least 44 elements in size */
+void setup_dma_buf_attrs(intptr_t *img_attr, uint32_t format, uint32_t width, uint32_t height, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, int num_planes, bool use_modifier);
+bool video_codec_context_is_vaapi(AVCodecContext *video_codec_context);
+bool vaapi_copy_drm_planes_to_video_surface(AVCodecContext *video_codec_context, AVFrame *video_frame, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, uint32_t format, vec2i size, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, int num_planes);
+bool vaapi_copy_egl_image_to_video_surface(gsr_egl *egl, EGLImage image, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, AVCodecContext *video_codec_context, AVFrame *video_frame);
+
#endif /* GSR_UTILS_H */
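
A hypothetical single-plane use of setup_dma_buf_attrs; the fd/stride/modifier values are placeholders, and the resulting attribute list is what EGL dma-buf image creation expects:

    /* Sketch: build EGL dma-buf attributes for a single-plane buffer (placeholder values). */
    intptr_t img_attr[44];
    const int fds[1] = { dmabuf_fd };
    const uint32_t offsets[1] = { 0 };
    const uint32_t pitches[1] = { stride };
    const uint64_t modifiers[1] = { modifier };
    setup_dma_buf_attrs(img_attr, DRM_FORMAT_ARGB8888, width, height,
        fds, offsets, pitches, modifiers, 1, true /* use_modifier */);
    /* |img_attr| can then be passed to EGL image creation with target EGL_LINUX_DMA_BUF_EXT */
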
diff --git a/include/window_texture.h b/include/window_texture.h
index 75bb2a7..6ee5df4 100644
--- a/include/window_texture.h
+++ b/include/window_texture.h
@@ -7,6 +7,7 @@ typedef struct {
Display *display;
Window window;
Pixmap pixmap;
+ EGLImage image;
unsigned int texture_id;
int redirected;
gsr_egl *egl;
diff --git a/install.sh b/install.sh
index ab921fa..2a1abf8 100755
--- a/install.sh
+++ b/install.sh
@@ -7,7 +7,8 @@ cd "$script_dir"
echo "Warning: this install.sh script is deprecated. Use meson directly instead if possible"
-test -d build || meson setup build
+rm -rf build
+meson setup build
meson configure --prefix=/usr --buildtype=release -Dsystemd=true -Dstrip=true build
ninja -C build install
diff --git a/kms/client/kms_client.c b/kms/client/kms_client.c
index 869bf81..468e3a6 100644
--- a/kms/client/kms_client.c
+++ b/kms/client/kms_client.c
@@ -12,6 +12,7 @@
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/capability.h>
+#include <sys/random.h>
#define GSR_SOCKET_PAIR_LOCAL 0
#define GSR_SOCKET_PAIR_REMOTE 1
@@ -20,15 +21,9 @@ static void cleanup_socket(gsr_kms_client *self, bool kill_server);
static int gsr_kms_client_replace_connection(gsr_kms_client *self);
static bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
- int fd = open("/dev/urandom", O_RDONLY);
- if(fd == -1) {
- perror("/dev/urandom");
- return false;
- }
-
- if(read(fd, buffer, buffer_size) < buffer_size) {
- fprintf(stderr, "Failed to read %d bytes from /dev/urandom\n", buffer_size);
- close(fd);
+    /* TODO: Use other functions on platforms other than linux */
+ if(getrandom(buffer, buffer_size, 0) < buffer_size) {
+ fprintf(stderr, "Failed to get random bytes, error: %s\n", strerror(errno));
return false;
}
@@ -37,16 +32,21 @@ static bool generate_random_characters(char *buffer, int buffer_size, const char
buffer[i] = alphabet[c % alphabet_size];
}
- close(fd);
return true;
}
static void close_fds(gsr_kms_response *response) {
- for(int i = 0; i < response->num_fds; ++i) {
- if(response->fds[i].fd > 0)
- close(response->fds[i].fd);
- response->fds[i].fd = 0;
+ for(int i = 0; i < response->num_items; ++i) {
+ for(int j = 0; j < response->items[i].num_dma_bufs; ++j) {
+ gsr_kms_response_dma_buf *dma_buf = &response->items[i].dma_buf[j];
+ if(dma_buf->fd > 0) {
+ close(dma_buf->fd);
+ dma_buf->fd = -1;
+ }
+ }
+ response->items[i].num_dma_bufs = 0;
}
+ response->num_items = 0;
}
static int send_msg_to_server(int server_fd, gsr_kms_request *request) {
@@ -88,7 +88,7 @@ static int recv_msg_from_server(int server_pid, int server_fd, gsr_kms_response
response_message.msg_iov = &iov;
response_message.msg_iovlen = 1;
- char cmsgbuf[CMSG_SPACE(sizeof(int) * GSR_KMS_MAX_PLANES)];
+ char cmsgbuf[CMSG_SPACE(sizeof(int) * GSR_KMS_MAX_ITEMS * GSR_KMS_MAX_DMA_BUFS)];
memset(cmsgbuf, 0, sizeof(cmsgbuf));
response_message.msg_control = cmsgbuf;
response_message.msg_controllen = sizeof(cmsgbuf);
@@ -112,12 +112,16 @@ static int recv_msg_from_server(int server_pid, int server_fd, gsr_kms_response
}
}
- if(res > 0 && response->num_fds > 0) {
+ if(res > 0 && response->num_items > 0) {
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&response_message);
if(cmsg) {
int *fds = (int*)CMSG_DATA(cmsg);
- for(int i = 0; i < response->num_fds; ++i) {
- response->fds[i].fd = fds[i];
+ int fd_index = 0;
+ for(int i = 0; i < response->num_items; ++i) {
+ for(int j = 0; j < response->items[i].num_dma_bufs; ++j) {
+ gsr_kms_response_dma_buf *dma_buf = &response->items[i].dma_buf[j];
+ dma_buf->fd = fds[fd_index++];
+ }
}
} else {
close_fds(response);
@@ -335,12 +339,12 @@ int gsr_kms_client_init(gsr_kms_client *self, const char *card_path) {
}
void cleanup_socket(gsr_kms_client *self, bool kill_server) {
- if(self->initial_client_fd != -1) {
+ if(self->initial_client_fd > 0) {
close(self->initial_client_fd);
self->initial_client_fd = -1;
}
- if(self->initial_socket_fd != -1) {
+ if(self->initial_socket_fd > 0) {
close(self->initial_socket_fd);
self->initial_socket_fd = -1;
}
@@ -354,7 +358,7 @@ void cleanup_socket(gsr_kms_client *self, bool kill_server) {
}
}
- if(kill_server && self->kms_server_pid != -1) {
+ if(kill_server && self->kms_server_pid > 0) {
kill(self->kms_server_pid, SIGKILL);
//int status;
//waitpid(self->kms_server_pid, &status, 0);
@@ -396,7 +400,7 @@ int gsr_kms_client_replace_connection(gsr_kms_client *self) {
}
if(response.version != GSR_KMS_PROTOCOL_VERSION) {
- fprintf(stderr, "gsr error: gsr_kms_client_replace_connection: expected gsr-kms-server protocol version to be %u, but it's %u\n", GSR_KMS_PROTOCOL_VERSION, response.version);
+ fprintf(stderr, "gsr error: gsr_kms_client_replace_connection: expected gsr-kms-server protocol version to be %u, but it's %u. please reinstall gpu screen recorder\n", GSR_KMS_PROTOCOL_VERSION, response.version);
/*close_fds(response);*/
return -1;
}
@@ -431,7 +435,7 @@ int gsr_kms_client_get_kms(gsr_kms_client *self, gsr_kms_response *response) {
}
if(response->version != GSR_KMS_PROTOCOL_VERSION) {
- fprintf(stderr, "gsr error: gsr_kms_client_get_kms: expected gsr-kms-server protocol version to be %u, but it's %u\n", GSR_KMS_PROTOCOL_VERSION, response->version);
+ fprintf(stderr, "gsr error: gsr_kms_client_get_kms: expected gsr-kms-server protocol version to be %u, but it's %u. please reinstall gpu screen recorder\n", GSR_KMS_PROTOCOL_VERSION, response->version);
/*close_fds(response);*/
strcpy(response->err_msg, "mismatching protocol version");
return -1;
diff --git a/kms/kms_shared.h b/kms/kms_shared.h
index 4fa9c38..2dbb655 100644
--- a/kms/kms_shared.h
+++ b/kms/kms_shared.h
@@ -5,10 +5,13 @@
#include <stdbool.h>
#include <drm_mode.h>
-#define GSR_KMS_PROTOCOL_VERSION 2
-#define GSR_KMS_MAX_PLANES 10
+#define GSR_KMS_PROTOCOL_VERSION 4
-typedef struct gsr_kms_response_fd gsr_kms_response_fd;
+#define GSR_KMS_MAX_ITEMS 8
+#define GSR_KMS_MAX_DMA_BUFS 4
+
+typedef struct gsr_kms_response_dma_buf gsr_kms_response_dma_buf;
+typedef struct gsr_kms_response_item gsr_kms_response_item;
typedef struct gsr_kms_response gsr_kms_response;
typedef enum {
@@ -30,16 +33,20 @@ typedef struct {
int new_connection_fd;
} gsr_kms_request;
-struct gsr_kms_response_fd {
+struct gsr_kms_response_dma_buf {
int fd;
- uint32_t width;
- uint32_t height;
uint32_t pitch;
uint32_t offset;
+};
+
+struct gsr_kms_response_item {
+ gsr_kms_response_dma_buf dma_buf[GSR_KMS_MAX_DMA_BUFS];
+ int num_dma_bufs;
+ uint32_t width;
+ uint32_t height;
uint32_t pixel_format;
uint64_t modifier;
uint32_t connector_id; /* 0 if unknown */
- bool is_combined_plane;
bool is_cursor;
bool has_hdr_metadata;
int x;
@@ -53,8 +60,8 @@ struct gsr_kms_response {
uint32_t version; /* GSR_KMS_PROTOCOL_VERSION */
int result; /* gsr_kms_result */
char err_msg[128];
- gsr_kms_response_fd fds[GSR_KMS_MAX_PLANES];
- int num_fds;
+ gsr_kms_response_item items[GSR_KMS_MAX_ITEMS];
+ int num_items;
};
#endif /* #define GSR_KMS_SHARED_H */
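
Protocol version 4 replaces the flat fd array with items that each own up to GSR_KMS_MAX_DMA_BUFS planes, so consumers now iterate two levels. A small sketch of walking a response, mirroring the loops in the client and server below (|response| is assumed to be a filled-in gsr_kms_response):

    /* Sketch: walk every dma-buf in a v4 response. */
    for(int i = 0; i < response->num_items; ++i) {
        const gsr_kms_response_item *item = &response->items[i];
        for(int j = 0; j < item->num_dma_bufs; ++j) {
            const gsr_kms_response_dma_buf *dma_buf = &item->dma_buf[j];
            /* width/height/pixel_format/modifier live on |item|,
               fd/pitch/offset on each |dma_buf| plane */
            (void)dma_buf;
        }
    }
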
diff --git a/kms/server/kms_server.c b/kms/server/kms_server.c
index 2eaa1ed..c6460ad 100644
--- a/kms/server/kms_server.c
+++ b/kms/server/kms_server.c
@@ -37,6 +37,14 @@ static int max_int(int a, int b) {
return a > b ? a : b;
}
+static int count_num_fds(const gsr_kms_response *response) {
+ int num_fds = 0;
+ for(int i = 0; i < response->num_items; ++i) {
+ num_fds += response->items[i].num_dma_bufs;
+ }
+ return num_fds;
+}
+
static int send_msg_to_client(int client_fd, gsr_kms_response *response) {
struct iovec iov;
iov.iov_base = response;
@@ -46,21 +54,25 @@ static int send_msg_to_client(int client_fd, gsr_kms_response *response) {
response_message.msg_iov = &iov;
response_message.msg_iovlen = 1;
- char cmsgbuf[CMSG_SPACE(sizeof(int) * max_int(1, response->num_fds))];
+ const int num_fds = count_num_fds(response);
+ char cmsgbuf[CMSG_SPACE(sizeof(int) * max_int(1, num_fds))];
memset(cmsgbuf, 0, sizeof(cmsgbuf));
- if(response->num_fds > 0) {
+ if(num_fds > 0) {
response_message.msg_control = cmsgbuf;
response_message.msg_controllen = sizeof(cmsgbuf);
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&response_message);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
- cmsg->cmsg_len = CMSG_LEN(sizeof(int) * response->num_fds);
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int) * num_fds);
int *fds = (int*)CMSG_DATA(cmsg);
- for(int i = 0; i < response->num_fds; ++i) {
- fds[i] = response->fds[i].fd;
+ int fd_index = 0;
+ for(int i = 0; i < response->num_items; ++i) {
+ for(int j = 0; j < response->items[i].num_dma_bufs; ++j) {
+ fds[fd_index++] = response->items[i].dma_buf[j].fd;
+ }
}
response_message.msg_controllen = cmsg->cmsg_len;
@@ -258,14 +270,27 @@ static bool get_hdr_metadata(int drm_fd, uint64_t hdr_metadata_blob_id, struct h
return true;
}
+/* Returns the number of drm handles that we managed to get */
+static int drm_prime_handles_to_fds(gsr_drm *drm, drmModeFB2Ptr drmfb, int *fb_fds) {
+ for(int i = 0; i < GSR_KMS_MAX_DMA_BUFS; ++i) {
+ if(!drmfb->handles[i])
+ return i;
+
+ const int ret = drmPrimeHandleToFD(drm->drmfd, drmfb->handles[i], O_RDONLY, &fb_fds[i]);
+ if(ret != 0 || fb_fds[i] == -1)
+ return i;
+ }
+ return GSR_KMS_MAX_DMA_BUFS;
+}
+
static int kms_get_fb(gsr_drm *drm, gsr_kms_response *response, connector_to_crtc_map *c2crtc_map) {
int result = -1;
response->result = KMS_RESULT_OK;
response->err_msg[0] = '\0';
- response->num_fds = 0;
+ response->num_items = 0;
- for(uint32_t i = 0; i < drm->planes->count_planes && response->num_fds < GSR_KMS_MAX_PLANES; ++i) {
+ for(uint32_t i = 0; i < drm->planes->count_planes && response->num_items < GSR_KMS_MAX_ITEMS; ++i) {
drmModePlanePtr plane = NULL;
drmModeFB2Ptr drmfb = NULL;
@@ -299,52 +324,54 @@ static int kms_get_fb(gsr_drm *drm, gsr_kms_response *response, connector_to_crt
// TODO: Check if dimensions have changed by comparing width and height to previous time this was called.
// TODO: Support other plane formats than rgb (with multiple planes, such as direct YUV420 on wayland).
- int fb_fd = -1;
- const int ret = drmPrimeHandleToFD(drm->drmfd, drmfb->handles[0], O_RDONLY, &fb_fd);
- if(ret != 0 || fb_fd == -1) {
+ int x = 0, y = 0, src_x = 0, src_y = 0, src_w = 0, src_h = 0;
+ plane_property_mask property_mask = plane_get_properties(drm->drmfd, plane->plane_id, &x, &y, &src_x, &src_y, &src_w, &src_h);
+ if(!(property_mask & PLANE_PROPERTY_IS_PRIMARY) && !(property_mask & PLANE_PROPERTY_IS_CURSOR))
+ continue;
+
+ int fb_fds[GSR_KMS_MAX_DMA_BUFS];
+ const int num_fb_fds = drm_prime_handles_to_fds(drm, drmfb, fb_fds);
+ if(num_fb_fds == 0) {
response->result = KMS_RESULT_FAILED_TO_GET_PLANE;
snprintf(response->err_msg, sizeof(response->err_msg), "failed to get fd from drm handle, error: %s", strerror(errno));
fprintf(stderr, "kms server error: %s\n", response->err_msg);
goto cleanup_handles;
}
- const int fd_index = response->num_fds;
+ const int item_index = response->num_items;
- int x = 0, y = 0, src_x = 0, src_y = 0, src_w = 0, src_h = 0;
- plane_property_mask property_mask = plane_get_properties(drm->drmfd, plane->plane_id, &x, &y, &src_x, &src_y, &src_w, &src_h);
- if((property_mask & PLANE_PROPERTY_IS_PRIMARY) || (property_mask & PLANE_PROPERTY_IS_CURSOR)) {
- const connector_crtc_pair *crtc_pair = get_connector_pair_by_crtc_id(c2crtc_map, plane->crtc_id);
- if(crtc_pair && crtc_pair->hdr_metadata_blob_id) {
- response->fds[fd_index].has_hdr_metadata = get_hdr_metadata(drm->drmfd, crtc_pair->hdr_metadata_blob_id, &response->fds[fd_index].hdr_metadata);
- } else {
- response->fds[fd_index].has_hdr_metadata = false;
- }
+ const connector_crtc_pair *crtc_pair = get_connector_pair_by_crtc_id(c2crtc_map, plane->crtc_id);
+ if(crtc_pair && crtc_pair->hdr_metadata_blob_id) {
+ response->items[item_index].has_hdr_metadata = get_hdr_metadata(drm->drmfd, crtc_pair->hdr_metadata_blob_id, &response->items[item_index].hdr_metadata);
+ } else {
+ response->items[item_index].has_hdr_metadata = false;
+ }
- response->fds[fd_index].fd = fb_fd;
- response->fds[fd_index].width = drmfb->width;
- response->fds[fd_index].height = drmfb->height;
- response->fds[fd_index].pitch = drmfb->pitches[0];
- response->fds[fd_index].offset = drmfb->offsets[0];
- response->fds[fd_index].pixel_format = drmfb->pixel_format;
- response->fds[fd_index].modifier = drmfb->modifier;
- response->fds[fd_index].connector_id = crtc_pair ? crtc_pair->connector_id : 0;
- response->fds[fd_index].is_cursor = property_mask & PLANE_PROPERTY_IS_CURSOR;
- response->fds[fd_index].is_combined_plane = false;
- if(property_mask & PLANE_PROPERTY_IS_CURSOR) {
- response->fds[fd_index].x = x;
- response->fds[fd_index].y = y;
- response->fds[fd_index].src_w = 0;
- response->fds[fd_index].src_h = 0;
- } else {
- response->fds[fd_index].x = src_x;
- response->fds[fd_index].y = src_y;
- response->fds[fd_index].src_w = src_w;
- response->fds[fd_index].src_h = src_h;
- }
- ++response->num_fds;
+ for(int j = 0; j < num_fb_fds; ++j) {
+ response->items[item_index].dma_buf[j].fd = fb_fds[j];
+ response->items[item_index].dma_buf[j].pitch = drmfb->pitches[j];
+ response->items[item_index].dma_buf[j].offset = drmfb->offsets[j];
+ }
+ response->items[item_index].num_dma_bufs = num_fb_fds;
+
+ response->items[item_index].width = drmfb->width;
+ response->items[item_index].height = drmfb->height;
+ response->items[item_index].pixel_format = drmfb->pixel_format;
+ response->items[item_index].modifier = drmfb->modifier;
+ response->items[item_index].connector_id = crtc_pair ? crtc_pair->connector_id : 0;
+ response->items[item_index].is_cursor = property_mask & PLANE_PROPERTY_IS_CURSOR;
+ if(property_mask & PLANE_PROPERTY_IS_CURSOR) {
+ response->items[item_index].x = x;
+ response->items[item_index].y = y;
+ response->items[item_index].src_w = 0;
+ response->items[item_index].src_h = 0;
} else {
- close(fb_fd);
+ response->items[item_index].x = src_x;
+ response->items[item_index].y = src_y;
+ response->items[item_index].src_w = src_w;
+ response->items[item_index].src_h = src_h;
}
+ ++response->num_items;
cleanup_handles:
drm_mode_cleanup_handles(drm->drmfd, drmfb);
@@ -356,16 +383,23 @@ static int kms_get_fb(gsr_drm *drm, gsr_kms_response *response, connector_to_crt
drmModeFreePlane(plane);
}
- if(response->num_fds > 0)
+ if(response->num_items > 0)
response->result = KMS_RESULT_OK;
if(response->result == KMS_RESULT_OK) {
result = 0;
} else {
- for(int i = 0; i < response->num_fds; ++i) {
- close(response->fds[i].fd);
+ for(int i = 0; i < response->num_items; ++i) {
+ for(int j = 0; j < response->items[i].num_dma_bufs; ++j) {
+ gsr_kms_response_dma_buf *dma_buf = &response->items[i].dma_buf[j];
+ if(dma_buf->fd > 0) {
+ close(dma_buf->fd);
+ dma_buf->fd = -1;
+ }
+ }
+ response->items[i].num_dma_bufs = 0;
}
- response->num_fds = 0;
+ response->num_items = 0;
}
return result;
@@ -494,7 +528,7 @@ int main(int argc, char **argv) {
}
if(request.version != GSR_KMS_PROTOCOL_VERSION) {
- fprintf(stderr, "kms server error: expected gpu screen recorder protocol version to be %u, but it's %u\n", GSR_KMS_PROTOCOL_VERSION, request.version);
+ fprintf(stderr, "kms server error: expected gpu screen recorder protocol version to be %u, but it's %u. please reinstall gpu screen recorder\n", GSR_KMS_PROTOCOL_VERSION, request.version);
/*
if(request.new_connection_fd > 0)
close(request.new_connection_fd);
@@ -506,7 +540,7 @@ int main(int argc, char **argv) {
case KMS_REQUEST_TYPE_REPLACE_CONNECTION: {
gsr_kms_response response;
response.version = GSR_KMS_PROTOCOL_VERSION;
- response.num_fds = 0;
+ response.num_items = 0;
if(request.new_connection_fd > 0) {
if(socket_fd > 0)
@@ -529,7 +563,7 @@ int main(int argc, char **argv) {
case KMS_REQUEST_TYPE_GET_KMS: {
gsr_kms_response response;
response.version = GSR_KMS_PROTOCOL_VERSION;
- response.num_fds = 0;
+ response.num_items = 0;
if(kms_get_fb(&drm, &response, &c2crtc_map) == 0) {
if(send_msg_to_client(socket_fd, &response) == -1)
@@ -539,9 +573,17 @@ int main(int argc, char **argv) {
fprintf(stderr, "kms server error: failed to respond to client KMS_REQUEST_TYPE_GET_KMS request\n");
}
- for(int i = 0; i < response.num_fds; ++i) {
- close(response.fds[i].fd);
+ for(int i = 0; i < response.num_items; ++i) {
+ for(int j = 0; j < response.items[i].num_dma_bufs; ++j) {
+ gsr_kms_response_dma_buf *dma_buf = &response.items[i].dma_buf[j];
+ if(dma_buf->fd > 0) {
+ close(dma_buf->fd);
+ dma_buf->fd = -1;
+ }
+ }
+ response.items[i].num_dma_bufs = 0;
}
+ response.num_items = 0;
break;
}
@@ -549,7 +591,7 @@ int main(int argc, char **argv) {
gsr_kms_response response;
response.version = GSR_KMS_PROTOCOL_VERSION;
response.result = KMS_RESULT_INVALID_REQUEST;
- response.num_fds = 0;
+ response.num_items = 0;
snprintf(response.err_msg, sizeof(response.err_msg), "invalid request type %d, expected %d (%s)", request.type, KMS_REQUEST_TYPE_GET_KMS, "KMS_REQUEST_TYPE_GET_KMS");
fprintf(stderr, "kms server error: %s\n", response.err_msg);
diff --git a/meson.build b/meson.build
index ae19a17..74ac845 100644
--- a/meson.build
+++ b/meson.build
@@ -1,4 +1,4 @@
-project('gpu-screen-recorder', ['c', 'cpp'], version : '3.8.0', default_options : ['warning_level=2'])
+project('gpu-screen-recorder', ['c', 'cpp'], version : '4.1.11', default_options : ['warning_level=2'])
add_project_arguments('-Wshadow', language : ['c', 'cpp'])
if get_option('buildtype') == 'debug'
@@ -16,7 +16,11 @@ src = [
'src/encoder/video/video.c',
'src/encoder/video/cuda.c',
'src/encoder/video/vaapi.c',
+ 'src/encoder/video/vulkan.c',
'src/encoder/video/software.c',
+ 'src/codec_query/nvenc.c',
+ 'src/codec_query/vaapi.c',
+ 'src/codec_query/vulkan.c',
'src/egl.c',
'src/cuda.c',
'src/xnvctrl.c',
@@ -27,11 +31,13 @@ src = [
'src/utils.c',
'src/library_loader.c',
'src/cursor.c',
+ 'src/damage.c',
'src/sound.cpp',
'src/main.cpp',
]
dep = [
+ dependency('threads'),
dependency('libavcodec'),
dependency('libavformat'),
dependency('libavutil'),
@@ -40,24 +46,46 @@ dep = [
dependency('xrandr'),
dependency('xfixes'),
dependency('xdamage'),
- dependency('xi'),
dependency('libpulse'),
dependency('libswresample'),
dependency('libavfilter'),
dependency('libva'),
+ dependency('libva-drm'),
dependency('libcap'),
dependency('libdrm'),
dependency('wayland-egl'),
dependency('wayland-client'),
]
+if get_option('portal') == true
+ src += [
+ 'src/capture/portal.c',
+ 'src/dbus.c',
+ 'src/pipewire.c',
+ ]
+
+ dep += [
+ dependency('dbus-1'),
+ dependency('libpipewire-0.3'),
+ dependency('libspa-0.2'),
+ ]
+
+ add_project_arguments('-DGSR_PORTAL', language : ['c', 'cpp'])
+endif
+
+add_project_arguments('-DGSR_VERSION="' + meson.project_version() + '"', language: ['c', 'cpp'])
+
executable('gsr-kms-server', 'kms/server/kms_server.c', dependencies : dependency('libdrm'), c_args : '-fstack-protector-all', install : true)
executable('gpu-screen-recorder', src, dependencies : dep, install : true)
if get_option('systemd') == true
- install_data(files('extra/gpu-screen-recorder.service'), install_dir : '/usr/lib/systemd/user')
+ install_data(files('extra/gpu-screen-recorder.service'), install_dir : 'lib/systemd/user')
endif
if get_option('capabilities') == true
meson.add_install_script('extra/meson_post_install.sh')
endif
+
+if get_option('nvidia_suspend_fix') == true
+ install_data(files('extra/gsr-nvidia.conf'), install_dir : 'lib/modprobe.d')
+endif
diff --git a/meson_options.txt b/meson_options.txt
index cbd553c..61972c6 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -1,2 +1,4 @@
-option('systemd', type : 'boolean', value : false, description : 'Install systemd service file')
-option('capabilities', type : 'boolean', value : true, description : 'Set binary admin capability to remove password prompt and nice capability to allow gpu screen recorder to run at a higher framerate than the game you are recording')
+option('systemd', type : 'boolean', value : true, description : 'Install systemd service file')
+option('capabilities', type : 'boolean', value : true, description : 'Set binary admin capability to remove password prompt when recording monitor (without desktop portal option) on amd/intel or nvidia wayland')
+option('nvidia_suspend_fix', type : 'boolean', value : true, description : 'Install nvidia modprobe config file to tell nvidia driver to preserve video memory on suspend. This is a workaround for an nvidia driver bug that breaks cuda (and gpu screen recorder) on suspend')
+option('portal', type : 'boolean', value : true, description : 'If GPU Screen Recorder should be built with support for xdg desktop portal ScreenCast capture (wayland only)')
diff --git a/project.conf b/project.conf
index a7e2757..6b107b0 100644
--- a/project.conf
+++ b/project.conf
@@ -1,12 +1,15 @@
[package]
name = "gpu-screen-recorder"
type = "executable"
-version = "3.8.0"
+version = "4.1.11"
platforms = ["posix"]
[config]
ignore_dirs = ["kms/server", "build"]
-error_on_warning = "true"
+#error_on_warning = "true"
+
+[define]
+GSR_PORTAL = "1"
[dependencies]
libavcodec = ">=58"
@@ -17,12 +20,16 @@ xcomposite = ">=0.2"
xrandr = ">=1"
xfixes = ">=2"
xdamage = ">=1"
-xi = ">=1"
libpulse = ">=13"
libswresample = ">=3"
libavfilter = ">=5"
libva = ">=1"
+libva-drm = ">=1"
libcap = ">=2"
libdrm = ">=2"
wayland-egl = ">=15"
-wayland-client = ">=1"
\ No newline at end of file
+wayland-client = ">=1"
+dbus-1 = ">=1"
+libpipewire-0.3 = ">=1"
+libspa-0.2 = ">=0"
+#vulkan = ">=1"
\ No newline at end of file
diff --git a/scripts/interactive.sh b/scripts/interactive.sh
index 63b0eae..bfaaae0 100755
--- a/scripts/interactive.sh
+++ b/scripts/interactive.sh
@@ -1,7 +1,5 @@
#!/bin/sh -e
-selected_audio_input="$(pactl get-default-sink).monitor"
-
echo "Select a window to record"
window_id=$(xdotool selectwindow)
@@ -14,4 +12,4 @@ read output_file_name
output_dir=$(dirname "$output_file_name")
mkdir -p "$output_dir"
-gpu-screen-recorder -w "$window_id" -c mp4 -f "$fps" -a "$selected_audio_input" -o "$output_file_name"
+gpu-screen-recorder -w "$window_id" -c mp4 -f "$fps" -a default_output -o "$output_file_name"
diff --git a/scripts/record-application-name.sh b/scripts/record-application-name.sh
index cc29255..4139c9c 100755
--- a/scripts/record-application-name.sh
+++ b/scripts/record-application-name.sh
@@ -3,4 +3,4 @@
window=$(xdotool selectwindow)
window_name=$(xdotool getwindowclassname "$window" || xdotool getwindowname "$window" || echo "Game")
window_name="$(echo "$window_name" | tr '/\\' '_')"
-gpu-screen-recorder -w "$window" -f 60 -a "$(pactl get-default-sink).monitor" -o "$HOME/Videos/recording/$window_name/$(date +"Video_%Y-%m-%d_%H-%M-%S.mp4")"
+gpu-screen-recorder -w "$window" -f 60 -a default_output -o "$HOME/Videos/recording/$window_name/$(date +"Video_%Y-%m-%d_%H-%M-%S.mp4")"
diff --git a/scripts/record-save-application-name.sh b/scripts/record-save-application-name.sh
index 46c51f0..b814809 100755
--- a/scripts/record-save-application-name.sh
+++ b/scripts/record-save-application-name.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# This script should be passed to gpu-screen-recorder with the -sc option, for example:
-# gpu-screen-recorder -w screen -f 60 -a "$(pactl get-default-sink).monitor" -r 60 -sc scripts/record-save-application-name.sh -c mp4 -o "$HOME/Videos"
+# gpu-screen-recorder -w screen -f 60 -a default_output -r 60 -sc scripts/record-save-application-name.sh -c mp4 -o "$HOME/Videos"
window=$(xdotool getwindowfocus)
window_name=$(xdotool getwindowclassname "$window" || xdotool getwindowname "$window" || echo "Game")
diff --git a/scripts/replay-application-name.sh b/scripts/replay-application-name.sh
index 18df61a..9f363f7 100755
--- a/scripts/replay-application-name.sh
+++ b/scripts/replay-application-name.sh
@@ -3,4 +3,4 @@
window=$(xdotool selectwindow)
window_name=$(xdotool getwindowclassname "$window" || xdotool getwindowname "$window" || echo "Game")
window_name="$(echo "$window_name" | tr '/\\' '_')"
-gpu-screen-recorder -w "$window" -f 60 -c mkv -a "$(pactl get-default-sink).monitor" -r 60 -o "$HOME/Videos/Replays/$window_name"
+gpu-screen-recorder -w "$window" -f 60 -c mkv -a default_output -r 60 -o "$HOME/Videos/Replays/$window_name"
diff --git a/scripts/replay.sh b/scripts/replay.sh
deleted file mode 100755
index 2781e1e..0000000
--- a/scripts/replay.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh -e
-
-[ "$#" -ne 4 ] && echo "usage: replay.sh <window_id> <fps> <replay_time_sec> <output_directory>" && exit 1
-active_sink="$(pactl get-default-sink).monitor"
-mkdir -p "$4"
-gpu-screen-recorder -w "$1" -c mkv -f "$2" -a "$active_sink" -r "$3" -o "$4"
diff --git a/scripts/start-recording.sh b/scripts/start-recording.sh
new file mode 100755
index 0000000..03fda73
--- /dev/null
+++ b/scripts/start-recording.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+pidof -q gpu-screen-recorder && exit 0
+video="$HOME/Videos/$(date +"Video_%Y-%m-%d_%H-%M-%S.mp4")"
+gpu-screen-recorder -w screen -f 60 -a default_output -o "$video"
diff --git a/scripts/start-replay.sh b/scripts/start-replay.sh
index e36d59d..860f2ce 100755
--- a/scripts/start-replay.sh
+++ b/scripts/start-replay.sh
@@ -1,5 +1,6 @@
#!/bin/sh
+pidof -q gpu-screen-recorder && exit 0
video_path="$HOME/Videos"
mkdir -p "$video_path"
-gpu-screen-recorder -w screen -f 60 -a "$(pactl get-default-sink).monitor" -c mkv -r 30 -o "$video_path"
+gpu-screen-recorder -w screen -f 60 -a default_output -c mkv -r 30 -o "$video_path"
diff --git a/scripts/start-stop-recording.sh b/scripts/start-stop-recording.sh
new file mode 100755
index 0000000..775a829
--- /dev/null
+++ b/scripts/start-stop-recording.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+# Simple script that starts recording if it's not already recording and stops
+# recording otherwise, so it can be bound to a single hotkey to toggle
+# recording.
+
+killall -SIGINT -q gpu-screen-recorder && exit 0
+video="$HOME/Videos/$(date +"Video_%Y-%m-%d_%H-%M-%S.mp4")"
+gpu-screen-recorder -w screen -f 60 -a default_output -o "$video"
+notify-send -t 2000 -u low "GPU Screen Recorder" "Video saved to $video"
diff --git a/scripts/toggle-recording-selected.sh b/scripts/toggle-recording-selected.sh
index 309e4d1..d4c1b38 100755
--- a/scripts/toggle-recording-selected.sh
+++ b/scripts/toggle-recording-selected.sh
@@ -2,7 +2,7 @@
killall -SIGINT gpu-screen-recorder && sleep 0.5 && notify-send -t 1500 -u low 'GPU Screen Recorder' 'Stopped recording' && exit 0;
window=$(xdotool selectwindow)
-active_sink="$(pactl get-default-sink).monitor"
+active_sink=default_output
mkdir -p "$HOME/Videos"
video="$HOME/Videos/$(date +"Video_%Y-%m-%d_%H-%M-%S.mp4")"
notify-send -t 1500 -u low 'GPU Screen Recorder' "Started recording video to $video"
diff --git a/scripts/twitch-stream-local-copy.sh b/scripts/twitch-stream-local-copy.sh
index 4a678e8..fa23cf6 100755
--- a/scripts/twitch-stream-local-copy.sh
+++ b/scripts/twitch-stream-local-copy.sh
@@ -3,5 +3,5 @@
# Stream on twitch while also saving the video to disk locally
[ "$#" -ne 4 ] && echo "usage: twitch-stream-local-copy.sh <window_id> <fps> <livestream_key> <local_file>" && exit 1
-active_sink="$(pactl get-default-sink).monitor"
+active_sink=default_output
gpu-screen-recorder -w "$1" -c flv -f "$2" -q high -a "$active_sink" | tee -- "$4" | ffmpeg -i pipe:0 -c copy -f flv -- "rtmp://live.twitch.tv/app/$3"
diff --git a/scripts/twitch-stream.sh b/scripts/twitch-stream.sh
index aaa5828..99dade8 100755
--- a/scripts/twitch-stream.sh
+++ b/scripts/twitch-stream.sh
@@ -1,5 +1,5 @@
#!/bin/sh
[ "$#" -ne 3 ] && echo "usage: twitch-stream.sh <window_id> <fps> <livestream_key>" && exit 1
-active_sink="$(pactl get-default-sink).monitor"
+active_sink=default_output
gpu-screen-recorder -w "$1" -c flv -f "$2" -q high -a "$active_sink" -o "rtmp://live.twitch.tv/app/$3"
diff --git a/scripts/youtube-hls-stream.sh b/scripts/youtube-hls-stream.sh
index 2f1659e..10fa6b2 100755
--- a/scripts/youtube-hls-stream.sh
+++ b/scripts/youtube-hls-stream.sh
@@ -1,5 +1,5 @@
#!/bin/sh
[ "$#" -ne 3 ] && echo "usage: youtube-hls-stream.sh <window_id> <fps> <livestream_key>" && exit 1
-active_sink="$(pactl get-default-sink).monitor"
+active_sink=default_output
gpu-screen-recorder -w "$1" -c hls -f "$2" -q high -a "$active_sink" -ac aac -o "https://a.upload.youtube.com/http_upload_hls?cid=$3&copy=0&file=stream.m3u8"
\ No newline at end of file
diff --git a/src/capture/capture.c b/src/capture/capture.c
index 40507bf..ec10854 100644
--- a/src/capture/capture.c
+++ b/src/capture/capture.c
@@ -10,10 +10,15 @@ int gsr_capture_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVF
return res;
}
-void gsr_capture_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
+void gsr_capture_tick(gsr_capture *cap) {
assert(cap->started);
if(cap->tick)
- cap->tick(cap, video_codec_context);
+ cap->tick(cap);
+}
+
+void gsr_capture_on_event(gsr_capture *cap, gsr_egl *egl) {
+ if(cap->on_event)
+ cap->on_event(cap, egl);
}
bool gsr_capture_should_stop(gsr_capture *cap, bool *err) {
@@ -29,12 +34,6 @@ int gsr_capture_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *
return cap->capture(cap, frame, color_conversion);
}
-void gsr_capture_capture_end(gsr_capture *cap, AVFrame *frame) {
- assert(cap->started);
- if(cap->capture_end)
- cap->capture_end(cap, frame);
-}
-
gsr_source_color gsr_capture_get_source_color(gsr_capture *cap) {
return cap->get_source_color(cap);
}
@@ -46,6 +45,13 @@ bool gsr_capture_uses_external_image(gsr_capture *cap) {
return false;
}
+bool gsr_capture_set_hdr_metadata(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata) {
+ if(cap->set_hdr_metadata)
+ return cap->set_hdr_metadata(cap, mastering_display_metadata, light_metadata);
+ else
+ return false;
+}
+
void gsr_capture_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
cap->destroy(cap, video_codec_context);
}
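
With this refactor, event handling is split out of the tick path. A sketch of the expected per-frame driving order under that assumption; the surrounding loop and variables are illustrative:

    /* Sketch: per-frame capture driving order after this refactor. */
    while(gsr_egl_process_event(egl))
        gsr_capture_on_event(cap, egl);
    gsr_capture_tick(cap);
    bool err = false;
    if(!gsr_capture_should_stop(cap, &err))
        gsr_capture_capture(cap, frame, &color_conversion);
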
diff --git a/src/capture/kms.c b/src/capture/kms.c
index a9ce73c..8b16ec9 100644
--- a/src/capture/kms.c
+++ b/src/capture/kms.c
@@ -1,14 +1,21 @@
#include "../../include/capture/kms.h"
#include "../../include/utils.h"
#include "../../include/color_conversion.h"
+#include "../../include/cursor.h"
#include "../../kms/client/kms_client.h"
+#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
+#include <fcntl.h>
+
+#include <xf86drm.h>
+#include <libdrm/drm_fourcc.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mastering_display_metadata.h>
+#include <libavformat/avformat.h>
#define HDMI_STATIC_METADATA_TYPE1 0
#define HDMI_EOTF_SMPTE_ST2084 2
@@ -22,9 +29,6 @@ typedef struct {
typedef struct {
gsr_capture_kms_params params;
-
- bool should_stop;
- bool stop_is_error;
gsr_kms_client kms_client;
gsr_kms_response kms_response;
@@ -33,62 +37,104 @@ typedef struct {
vec2i capture_size;
MonitorId monitor_id;
- AVMasteringDisplayMetadata *mastering_display_metadata;
- AVContentLightMetadata *light_metadata;
-
gsr_monitor_rotation monitor_rotation;
- unsigned int input_texture;
- unsigned int cursor_texture;
+ unsigned int input_texture_id;
+ unsigned int external_input_texture_id;
+ unsigned int cursor_texture_id;
+
+ bool no_modifiers_fallback;
+ bool external_texture_fallback;
+
+ struct hdr_output_metadata hdr_metadata;
+ bool hdr_metadata_set;
+
+ bool is_x11;
+ gsr_cursor x11_cursor;
+
+ AVCodecContext *video_codec_context;
+ bool performance_error_shown;
+ bool fast_path_failed;
+
+ //int drm_fd;
+ //uint64_t prev_sequence;
+ //bool damaged;
+
+ vec2i prev_target_pos;
+ vec2i prev_plane_size;
} gsr_capture_kms;
static void gsr_capture_kms_cleanup_kms_fds(gsr_capture_kms *self) {
- for(int i = 0; i < self->kms_response.num_fds; ++i) {
- if(self->kms_response.fds[i].fd > 0)
- close(self->kms_response.fds[i].fd);
- self->kms_response.fds[i].fd = 0;
+ for(int i = 0; i < self->kms_response.num_items; ++i) {
+ for(int j = 0; j < self->kms_response.items[i].num_dma_bufs; ++j) {
+ gsr_kms_response_dma_buf *dma_buf = &self->kms_response.items[i].dma_buf[j];
+ if(dma_buf->fd > 0) {
+ close(dma_buf->fd);
+ dma_buf->fd = -1;
+ }
+ }
+ self->kms_response.items[i].num_dma_bufs = 0;
}
- self->kms_response.num_fds = 0;
+ self->kms_response.num_items = 0;
}
static void gsr_capture_kms_stop(gsr_capture_kms *self) {
- if(self->input_texture) {
- self->params.egl->glDeleteTextures(1, &self->input_texture);
- self->input_texture = 0;
+ if(self->input_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->input_texture_id);
+ self->input_texture_id = 0;
}
- if(self->cursor_texture) {
- self->params.egl->glDeleteTextures(1, &self->cursor_texture);
- self->cursor_texture = 0;
+ if(self->external_input_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->external_input_texture_id);
+ self->external_input_texture_id = 0;
}
+ if(self->cursor_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->cursor_texture_id);
+ self->cursor_texture_id = 0;
+ }
+
+ // if(self->drm_fd > 0) {
+ // close(self->drm_fd);
+ // self->drm_fd = -1;
+ // }
+
gsr_capture_kms_cleanup_kms_fds(self);
gsr_kms_client_deinit(&self->kms_client);
+ gsr_cursor_deinit(&self->x11_cursor);
}
static int max_int(int a, int b) {
return a > b ? a : b;
}
-static void gsr_capture_kms_create_input_textures(gsr_capture_kms *self) {
- self->params.egl->glGenTextures(1, &self->input_texture);
- self->params.egl->glBindTexture(GL_TEXTURE_2D, self->input_texture);
+static void gsr_capture_kms_create_input_texture_ids(gsr_capture_kms *self) {
+ self->params.egl->glGenTextures(1, &self->input_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->input_texture_id);
self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
- const bool cursor_texture_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
- const int cursor_texture_target = cursor_texture_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
-
- self->params.egl->glGenTextures(1, &self->cursor_texture);
- self->params.egl->glBindTexture(cursor_texture_target, self->cursor_texture);
- self->params.egl->glTexParameteri(cursor_texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- self->params.egl->glTexParameteri(cursor_texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- self->params.egl->glTexParameteri(cursor_texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- self->params.egl->glTexParameteri(cursor_texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- self->params.egl->glBindTexture(cursor_texture_target, 0);
+ self->params.egl->glGenTextures(1, &self->external_input_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->external_input_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
+
+ const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+ const int cursor_texture_id_target = cursor_texture_id_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+
+ self->params.egl->glGenTextures(1, &self->cursor_texture_id);
+ self->params.egl->glBindTexture(cursor_texture_id_target, self->cursor_texture_id);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(cursor_texture_id_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(cursor_texture_id_target, 0);
}
/* TODO: On monitor reconfiguration, find monitor x, y, width and height again. Do the same for nvfbc. */
@@ -116,10 +162,19 @@ static void monitor_callback(const gsr_monitor *monitor, void *userdata) {
fprintf(stderr, "gsr warning: reached max connector ids\n");
}
+static vec2i rotate_capture_size_if_rotated(gsr_capture_kms *self, vec2i capture_size) {
+ if(self->monitor_rotation == GSR_MONITOR_ROT_90 || self->monitor_rotation == GSR_MONITOR_ROT_270) {
+ int tmp_x = capture_size.x;
+ capture_size.x = capture_size.y;
+ capture_size.y = tmp_x;
+ }
+ return capture_size;
+}
+
static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
gsr_capture_kms *self = cap->priv;
- gsr_capture_kms_create_input_textures(self);
+ gsr_capture_kms_create_input_texture_ids(self);
gsr_monitor monitor;
self->monitor_id.num_connector_ids = 0;
@@ -128,14 +183,19 @@ static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_c
if(kms_init_res != 0)
return kms_init_res;
+ self->is_x11 = gsr_egl_get_display_server(self->params.egl) == GSR_DISPLAY_SERVER_X11;
+ const gsr_connection_type connection_type = self->is_x11 ? GSR_CONNECTION_X11 : GSR_CONNECTION_DRM;
+ if(self->is_x11)
+ gsr_cursor_init(&self->x11_cursor, self->params.egl, self->params.egl->x11.dpy);
+
MonitorCallbackUserdata monitor_callback_userdata = {
&self->monitor_id,
self->params.display_to_capture, strlen(self->params.display_to_capture),
0,
};
- for_each_active_monitor_output(self->params.egl, GSR_CONNECTION_DRM, monitor_callback, &monitor_callback_userdata);
+ for_each_active_monitor_output(self->params.egl, connection_type, monitor_callback, &monitor_callback_userdata);
- if(!get_monitor_by_name(self->params.egl, GSR_CONNECTION_DRM, self->params.display_to_capture, &monitor)) {
+ if(!get_monitor_by_name(self->params.egl, connection_type, self->params.display_to_capture, &monitor)) {
fprintf(stderr, "gsr error: gsr_capture_kms_start: failed to find monitor by name \"%s\"\n", self->params.display_to_capture);
gsr_capture_kms_stop(self);
return -1;
@@ -145,12 +205,11 @@ static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_c
self->monitor_rotation = drm_monitor_get_display_server_rotation(self->params.egl, &monitor);
self->capture_pos = monitor.pos;
- if(self->monitor_rotation == GSR_MONITOR_ROT_90 || self->monitor_rotation == GSR_MONITOR_ROT_270) {
- self->capture_size.x = monitor.size.y;
- self->capture_size.y = monitor.size.x;
- } else {
+    /* Monitor size is already rotated on x11 when the monitor is rotated; no need to apply the rotation ourselves */
+ if(self->is_x11)
self->capture_size = monitor.size;
- }
+ else
+ self->capture_size = rotate_capture_size_if_rotated(self, monitor.size);
/* Disable vsync */
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
@@ -160,9 +219,41 @@ static int gsr_capture_kms_start(gsr_capture *cap, AVCodecContext *video_codec_c
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
+
+ self->video_codec_context = video_codec_context;
return 0;
}
+static void gsr_capture_kms_on_event(gsr_capture *cap, gsr_egl *egl) {
+ gsr_capture_kms *self = cap->priv;
+ if(!self->is_x11)
+ return;
+
+ XEvent *xev = gsr_egl_get_event_data(egl);
+ gsr_cursor_on_event(&self->x11_cursor, xev);
+}
+
+// TODO: This is disabled for now because we want to be able to record at a framerate higher than the monitor framerate
+// static void gsr_capture_kms_tick(gsr_capture *cap) {
+// gsr_capture_kms *self = cap->priv;
+
+// if(self->drm_fd <= 0)
+// self->drm_fd = open(self->params.egl->card_path, O_RDONLY);
+
+// if(self->drm_fd <= 0)
+// return;
+
+// uint64_t sequence = 0;
+// uint64_t ns = 0;
+// if(drmCrtcGetSequence(self->drm_fd, 79, &sequence, &ns) != 0)
+// return;
+
+// if(sequence != self->prev_sequence) {
+// self->prev_sequence = sequence;
+// self->damaged = true;
+// }
+// }
+
static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
switch(rot) {
case GSR_MONITOR_ROT_0: return 0.0f;
@@ -173,54 +264,40 @@ static float monitor_rotation_to_radians(gsr_monitor_rotation rot) {
return 0.0f;
}
-/* Prefer non combined planes */
-static gsr_kms_response_fd* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
- int index_combined = -1;
- for(int i = 0; i < kms_response->num_fds; ++i) {
- if(kms_response->fds[i].connector_id == connector_id && !kms_response->fds[i].is_cursor) {
- if(kms_response->fds[i].is_combined_plane)
- index_combined = i;
- else
- return &kms_response->fds[i];
- }
- }
-
- if(index_combined != -1)
- return &kms_response->fds[index_combined];
- else
- return NULL;
-}
-
-static gsr_kms_response_fd* find_first_combined_drm(gsr_kms_response *kms_response) {
- for(int i = 0; i < kms_response->num_fds; ++i) {
- if(kms_response->fds[i].is_combined_plane && !kms_response->fds[i].is_cursor)
- return &kms_response->fds[i];
+static gsr_kms_response_item* find_drm_by_connector_id(gsr_kms_response *kms_response, uint32_t connector_id) {
+ for(int i = 0; i < kms_response->num_items; ++i) {
+ if(kms_response->items[i].connector_id == connector_id && !kms_response->items[i].is_cursor)
+ return &kms_response->items[i];
}
return NULL;
}
-static gsr_kms_response_fd* find_largest_drm(gsr_kms_response *kms_response) {
- if(kms_response->num_fds == 0)
+static gsr_kms_response_item* find_largest_drm(gsr_kms_response *kms_response) {
+ if(kms_response->num_items == 0)
return NULL;
int64_t largest_size = 0;
- gsr_kms_response_fd *largest_drm = &kms_response->fds[0];
- for(int i = 0; i < kms_response->num_fds; ++i) {
- const int64_t size = (int64_t)kms_response->fds[i].width * (int64_t)kms_response->fds[i].height;
- if(size > largest_size && !kms_response->fds[i].is_cursor) {
+ gsr_kms_response_item *largest_drm = &kms_response->items[0];
+ for(int i = 0; i < kms_response->num_items; ++i) {
+ const int64_t size = (int64_t)kms_response->items[i].width * (int64_t)kms_response->items[i].height;
+ if(size > largest_size && !kms_response->items[i].is_cursor) {
largest_size = size;
- largest_drm = &kms_response->fds[i];
+ largest_drm = &kms_response->items[i];
}
}
return largest_drm;
}
-static gsr_kms_response_fd* find_cursor_drm(gsr_kms_response *kms_response) {
- for(int i = 0; i < kms_response->num_fds; ++i) {
- if(kms_response->fds[i].is_cursor)
- return &kms_response->fds[i];
+static gsr_kms_response_item* find_cursor_drm(gsr_kms_response *kms_response, uint32_t connector_id) {
+ gsr_kms_response_item *cursor_drm = NULL;
+ for(int i = 0; i < kms_response->num_items; ++i) {
+ if(kms_response->items[i].is_cursor) {
+ cursor_drm = &kms_response->items[i];
+ if(kms_response->items[i].connector_id == connector_id)
+ break;
+ }
}
- return NULL;
+ return cursor_drm;
}
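+// find_cursor_drm prefers the cursor plane whose connector matches the captured
+// monitor but falls back to the last cursor plane seen; the fallback result is
+// only used when capturing a combined plane (see find_cursor_drm_if_on_monitor below).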
static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *hdr_metadata) {
@@ -229,33 +306,13 @@ static bool hdr_metadata_is_supported_format(const struct hdr_output_metadata *h
hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
}
-static void gsr_kms_set_hdr_metadata(gsr_capture_kms *self, AVFrame *frame, gsr_kms_response_fd *drm_fd) {
- if(!self->mastering_display_metadata)
- self->mastering_display_metadata = av_mastering_display_metadata_create_side_data(frame);
-
- if(!self->light_metadata)
- self->light_metadata = av_content_light_metadata_create_side_data(frame);
-
- if(self->mastering_display_metadata) {
- for(int i = 0; i < 3; ++i) {
- self->mastering_display_metadata->display_primaries[i][0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
- self->mastering_display_metadata->display_primaries[i][1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
- }
-
- self->mastering_display_metadata->white_point[0] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
- self->mastering_display_metadata->white_point[1] = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);
-
- self->mastering_display_metadata->min_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
- self->mastering_display_metadata->max_luminance = av_make_q(drm_fd->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);
-
- self->mastering_display_metadata->has_primaries = self->mastering_display_metadata->display_primaries[0][0].num > 0;
- self->mastering_display_metadata->has_luminance = self->mastering_display_metadata->max_luminance.num > 0;
- }
+// TODO: Check if this hdr data can be changed after the call to av_packet_side_data_add
+static void gsr_kms_set_hdr_metadata(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd) {
+ if(self->hdr_metadata_set)
+ return;
- if(self->light_metadata) {
- self->light_metadata->MaxCLL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_cll;
- self->light_metadata->MaxFALL = drm_fd->hdr_metadata.hdmi_metadata_type1.max_fall;
- }
+ self->hdr_metadata_set = true;
+ self->hdr_metadata = drm_fd->hdr_metadata;
}
static vec2i swap_vec2i(vec2i value) {
@@ -265,33 +322,90 @@ static vec2i swap_vec2i(vec2i value) {
return value;
}
-static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
- gsr_capture_kms *self = cap->priv;
- const bool screen_plane_use_modifiers = self->params.egl->gpu_info.vendor != GSR_GPU_VENDOR_AMD;
- const bool cursor_texture_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+static EGLImage gsr_capture_kms_create_egl_image(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, bool use_modifiers) {
+ intptr_t img_attr[44];
+ setup_dma_buf_attrs(img_attr, drm_fd->pixel_format, drm_fd->width, drm_fd->height, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs, use_modifiers);
+ while(self->params.egl->eglGetError() != EGL_SUCCESS){}
+ EGLImage image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
+ if(!image || self->params.egl->eglGetError() != EGL_SUCCESS) {
+ if(image)
+ self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
+ return NULL;
+ }
+ return image;
+}
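+// For reference, the attribute list that setup_dma_buf_attrs is assumed to build
+// for a single-plane buffer looks roughly like this (a sketch, matching the
+// attributes the old inline code used):
+//   const intptr_t attrs[] = {
+//       EGL_LINUX_DRM_FOURCC_EXT,           pixel_format,
+//       EGL_WIDTH,                          width,
+//       EGL_HEIGHT,                         height,
+//       EGL_DMA_BUF_PLANE0_FD_EXT,          fds[0],
+//       EGL_DMA_BUF_PLANE0_OFFSET_EXT,      offsets[0],
+//       EGL_DMA_BUF_PLANE0_PITCH_EXT,       pitches[0],
+//       EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, (intptr_t)(modifiers[0] & 0xFFFFFFFFULL), /* only when use_modifiers */
+//       EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, (intptr_t)(modifiers[0] >> 32),           /* only when use_modifiers */
+//       EGL_NONE
+//   };
+// with the PLANE1..PLANE3 attribute variants repeated for each extra dma-buf plane.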
- //egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
- self->params.egl->glClear(0);
+static EGLImage gsr_capture_kms_create_egl_image_with_fallback(gsr_capture_kms *self, const gsr_kms_response_item *drm_fd) {
+    // TODO: This sometimes crashes on the Steam Deck. Why? Is it a driver bug? A pure VAAPI version doesn't crash.
+ // Even ffmpeg kmsgrab causes this crash. The error is:
+ // amdgpu: Failed to allocate a buffer:
+ // amdgpu: size : 28508160 bytes
+ // amdgpu: alignment : 2097152 bytes
+ // amdgpu: domains : 4
+ // amdgpu: flags : 4
+ // amdgpu: Failed to allocate a buffer:
+ // amdgpu: size : 28508160 bytes
+ // amdgpu: alignment : 2097152 bytes
+ // amdgpu: domains : 4
+ // amdgpu: flags : 4
+ // EE ../jupiter-mesa/src/gallium/drivers/radeonsi/radeon_vcn_enc.c:516 radeon_create_encoder UVD - Can't create CPB buffer.
+ // [hevc_vaapi @ 0x55ea72b09840] Failed to upload encode parameters: 2 (resource allocation failed).
+ // [hevc_vaapi @ 0x55ea72b09840] Encode failed: -5.
+ // Error: avcodec_send_frame failed, error: Input/output error
+ // Assertion pic->display_order == pic->encode_order failed at libavcodec/vaapi_encode_h265.c:765
+ // kms server info: kms client shutdown, shutting down the server
- gsr_capture_kms_cleanup_kms_fds(self);
+ int fds[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t offsets[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t pitches[GSR_KMS_MAX_DMA_BUFS];
+ uint64_t modifiers[GSR_KMS_MAX_DMA_BUFS];
- gsr_kms_response_fd *drm_fd = NULL;
- gsr_kms_response_fd *cursor_drm_fd = NULL;
- bool capture_is_combined_plane = false;
+ for(int i = 0; i < drm_fd->num_dma_bufs; ++i) {
+ fds[i] = drm_fd->dma_buf[i].fd;
+ offsets[i] = drm_fd->dma_buf[i].offset;
+ pitches[i] = drm_fd->dma_buf[i].pitch;
+ modifiers[i] = drm_fd->modifier;
+ }
- if(gsr_kms_client_get_kms(&self->kms_client, &self->kms_response) != 0) {
- fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to get kms, error: %d (%s)\n", self->kms_response.result, self->kms_response.err_msg);
- return -1;
+ EGLImage image = NULL;
+ if(self->no_modifiers_fallback) {
+ image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, false);
+ } else {
+ image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, true);
+ if(!image) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_create_egl_image_with_fallback: failed to create egl image with modifiers, trying without modifiers\n");
+ self->no_modifiers_fallback = true;
+ image = gsr_capture_kms_create_egl_image(self, drm_fd, fds, offsets, pitches, modifiers, false);
+ }
}
+ return image;
+}
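+// The modifier attributes require the EGL_EXT_image_dma_buf_import_modifiers
+// extension, so if the first import fails (driver lacking the extension or
+// rejecting the buffer's modifier) the import is retried with implicit layout,
+// and no_modifiers_fallback makes all later frames skip the failing path.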
- if(self->kms_response.num_fds == 0) {
- static bool error_shown = false;
- if(!error_shown) {
- error_shown = true;
- fprintf(stderr, "gsr error: no drm found, capture will fail\n");
+static bool gsr_capture_kms_bind_image_to_texture(gsr_capture_kms *self, EGLImage image, unsigned int texture_id, bool external_texture) {
+ const int texture_target = external_texture ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+ while(self->params.egl->glGetError() != 0){}
+ self->params.egl->glBindTexture(texture_target, texture_id);
+ self->params.egl->glEGLImageTargetTexture2DOES(texture_target, image);
+ const bool success = self->params.egl->glGetError() == 0;
+ self->params.egl->glBindTexture(texture_target, 0);
+ return success;
+}
+
+static void gsr_capture_kms_bind_image_to_input_texture_with_fallback(gsr_capture_kms *self, EGLImage image) {
+ if(self->external_texture_fallback) {
+ gsr_capture_kms_bind_image_to_texture(self, image, self->external_input_texture_id, true);
+ } else {
+ if(!gsr_capture_kms_bind_image_to_texture(self, image, self->input_texture_id, false)) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to bind image to texture, trying with external texture\n");
+ self->external_texture_fallback = true;
+ gsr_capture_kms_bind_image_to_texture(self, image, self->external_input_texture_id, true);
}
- return -1;
}
+}
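+// GL_TEXTURE_EXTERNAL_OES (OES_EGL_image_external) can accept EGLImages that
+// GL_TEXTURE_2D rejects, for example some vendors' tiled or multi-planar scanout
+// buffers, but it must be sampled with samplerExternalOES in the shader, which is
+// why the external flag is also passed along to gsr_color_conversion_draw.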
+
+static gsr_kms_response_item* find_monitor_drm(gsr_capture_kms *self, bool *capture_is_combined_plane) {
+ *capture_is_combined_plane = false;
+ gsr_kms_response_item *drm_fd = NULL;
for(int i = 0; i < self->monitor_id.num_connector_ids; ++i) {
drm_fd = find_drm_by_connector_id(&self->kms_response, self->monitor_id.connector_ids[i]);
@@ -301,189 +415,278 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c
// Will never happen on wayland unless the target monitor has been disconnected
if(!drm_fd) {
- drm_fd = find_first_combined_drm(&self->kms_response);
- if(!drm_fd)
- drm_fd = find_largest_drm(&self->kms_response);
- capture_is_combined_plane = true;
+ drm_fd = find_largest_drm(&self->kms_response);
+ *capture_is_combined_plane = true;
}
- cursor_drm_fd = find_cursor_drm(&self->kms_response);
-
- if(!drm_fd)
- return -1;
+ return drm_fd;
+}
- if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != drm_fd->connector_id)
+static gsr_kms_response_item* find_cursor_drm_if_on_monitor(gsr_capture_kms *self, uint32_t monitor_connector_id, bool capture_is_combined_plane) {
+ gsr_kms_response_item *cursor_drm_fd = find_cursor_drm(&self->kms_response, monitor_connector_id);
+ if(!capture_is_combined_plane && cursor_drm_fd && cursor_drm_fd->connector_id != monitor_connector_id)
cursor_drm_fd = NULL;
+ return cursor_drm_fd;
+}
- if(drm_fd->has_hdr_metadata && self->params.hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
- gsr_kms_set_hdr_metadata(self, frame, drm_fd);
+static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, const gsr_kms_response_item *cursor_drm_fd, vec2i target_pos, float texture_rotation) {
+ const bool cursor_texture_id_is_external = self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+ const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
- // TODO: This causes a crash sometimes on steam deck, why? is it a driver bug? a vaapi pure version doesn't cause a crash.
- // Even ffmpeg kmsgrab causes this crash. The error is:
- // amdgpu: Failed to allocate a buffer:
- // amdgpu: size : 28508160 bytes
- // amdgpu: alignment : 2097152 bytes
- // amdgpu: domains : 4
- // amdgpu: flags : 4
- // amdgpu: Failed to allocate a buffer:
- // amdgpu: size : 28508160 bytes
- // amdgpu: alignment : 2097152 bytes
- // amdgpu: domains : 4
- // amdgpu: flags : 4
- // EE ../jupiter-mesa/src/gallium/drivers/radeonsi/radeon_vcn_enc.c:516 radeon_create_encoder UVD - Can't create CPB buffer.
- // [hevc_vaapi @ 0x55ea72b09840] Failed to upload encode parameters: 2 (resource allocation failed).
- // [hevc_vaapi @ 0x55ea72b09840] Encode failed: -5.
- // Error: avcodec_send_frame failed, error: Input/output error
- // Assertion pic->display_order == pic->encode_order failed at libavcodec/vaapi_encode_h265.c:765
- // kms server info: kms client shutdown, shutting down the server
- intptr_t img_attr[18] = {
- EGL_LINUX_DRM_FOURCC_EXT, drm_fd->pixel_format,
- EGL_WIDTH, drm_fd->width,
- EGL_HEIGHT, drm_fd->height,
- EGL_DMA_BUF_PLANE0_FD_EXT, drm_fd->fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, drm_fd->offset,
- EGL_DMA_BUF_PLANE0_PITCH_EXT, drm_fd->pitch,
- };
+ vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
+ switch(self->monitor_rotation) {
+ case GSR_MONITOR_ROT_0:
+ break;
+ case GSR_MONITOR_ROT_90:
+ cursor_pos = swap_vec2i(cursor_pos);
+ cursor_pos.x = self->capture_size.x - cursor_pos.x;
+ // TODO: Remove this horrible hack
+ cursor_pos.x -= cursor_size.x;
+ break;
+ case GSR_MONITOR_ROT_180:
+ cursor_pos.x = self->capture_size.x - cursor_pos.x;
+ cursor_pos.y = self->capture_size.y - cursor_pos.y;
+ // TODO: Remove this horrible hack
+ cursor_pos.x -= cursor_size.x;
+ cursor_pos.y -= cursor_size.y;
+ break;
+ case GSR_MONITOR_ROT_270:
+ cursor_pos = swap_vec2i(cursor_pos);
+ cursor_pos.y = self->capture_size.y - cursor_pos.y;
+ // TODO: Remove this horrible hack
+ cursor_pos.y -= cursor_size.y;
+ break;
+ }
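+    // Worked example with hypothetical numbers: a 1920x1080 monitor rotated 90
+    // degrees gives a capture_size of 1080x1920; a 24x24 cursor reported at
+    // (100, 200) is swapped to (200, 100), mirrored to (1080 - 200, 100) and then
+    // shifted by the cursor width to (856, 100) before target_pos is added below.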
- if(screen_plane_use_modifiers) {
- img_attr[12] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
- img_attr[13] = drm_fd->modifier & 0xFFFFFFFFULL;
+ cursor_pos.x += target_pos.x;
+ cursor_pos.y += target_pos.y;
- img_attr[14] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
- img_attr[15] = drm_fd->modifier >> 32ULL;
+ int fds[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t offsets[GSR_KMS_MAX_DMA_BUFS];
+ uint32_t pitches[GSR_KMS_MAX_DMA_BUFS];
+ uint64_t modifiers[GSR_KMS_MAX_DMA_BUFS];
- img_attr[16] = EGL_NONE;
- img_attr[17] = EGL_NONE;
- } else {
- img_attr[12] = EGL_NONE;
- img_attr[13] = EGL_NONE;
+ for(int i = 0; i < cursor_drm_fd->num_dma_bufs; ++i) {
+ fds[i] = cursor_drm_fd->dma_buf[i].fd;
+ offsets[i] = cursor_drm_fd->dma_buf[i].offset;
+ pitches[i] = cursor_drm_fd->dma_buf[i].pitch;
+ modifiers[i] = cursor_drm_fd->modifier;
}
- EGLImage image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
- self->params.egl->glBindTexture(GL_TEXTURE_2D, self->input_texture);
- self->params.egl->glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
- self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
- self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+ intptr_t img_attr_cursor[44];
+ setup_dma_buf_attrs(img_attr_cursor, cursor_drm_fd->pixel_format, cursor_drm_fd->width, cursor_drm_fd->height,
+ fds, offsets, pitches, modifiers, cursor_drm_fd->num_dma_bufs, true);
- vec2i capture_pos = self->capture_pos;
- if(!capture_is_combined_plane)
- capture_pos = (vec2i){drm_fd->x, drm_fd->y};
+ EGLImage cursor_image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
+ const int target = cursor_texture_id_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+ self->params.egl->glBindTexture(target, self->cursor_texture_id);
+ self->params.egl->glEGLImageTargetTexture2DOES(target, cursor_image);
+ self->params.egl->glBindTexture(target, 0);
- const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);
+ if(cursor_image)
+ self->params.egl->eglDestroyImage(self->params.egl->egl_display, cursor_image);
- const int target_x = max_int(0, frame->width / 2 - self->capture_size.x / 2);
- const int target_y = max_int(0, frame->height / 2 - self->capture_size.y / 2);
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
- gsr_color_conversion_draw(color_conversion, self->input_texture,
- (vec2i){target_x, target_y}, self->capture_size,
- capture_pos, self->capture_size,
- texture_rotation, false);
+ gsr_color_conversion_draw(color_conversion, self->cursor_texture_id,
+ cursor_pos, cursor_size,
+ (vec2i){0, 0}, cursor_size,
+ texture_rotation, cursor_texture_id_is_external);
- if(self->params.record_cursor && cursor_drm_fd) {
- const vec2i cursor_size = {cursor_drm_fd->width, cursor_drm_fd->height};
- vec2i cursor_pos = {cursor_drm_fd->x, cursor_drm_fd->y};
- switch(self->monitor_rotation) {
- case GSR_MONITOR_ROT_0:
- break;
- case GSR_MONITOR_ROT_90:
- cursor_pos = swap_vec2i(cursor_pos);
- cursor_pos.x = self->capture_size.x - cursor_pos.x;
- // TODO: Remove this horrible hack
- cursor_pos.x -= cursor_size.x;
- break;
- case GSR_MONITOR_ROT_180:
- cursor_pos.x = self->capture_size.x - cursor_pos.x;
- cursor_pos.y = self->capture_size.y - cursor_pos.y;
- // TODO: Remove this horrible hack
- cursor_pos.x -= cursor_size.x;
- cursor_pos.y -= cursor_size.y;
- break;
- case GSR_MONITOR_ROT_270:
- cursor_pos = swap_vec2i(cursor_pos);
- cursor_pos.y = self->capture_size.y - cursor_pos.y;
- // TODO: Remove this horrible hack
- cursor_pos.y -= cursor_size.y;
- break;
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
+}
+
+static void render_x11_cursor(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i capture_pos, vec2i target_pos) {
+ if(!self->x11_cursor.visible)
+ return;
+
+ gsr_cursor_tick(&self->x11_cursor, DefaultRootWindow(self->params.egl->x11.dpy));
+
+ const vec2i cursor_pos = {
+ target_pos.x + self->x11_cursor.position.x - self->x11_cursor.hotspot.x - capture_pos.x,
+ target_pos.y + self->x11_cursor.position.y - self->x11_cursor.hotspot.y - capture_pos.y
+ };
+
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
+
+ gsr_color_conversion_draw(color_conversion, self->x11_cursor.texture_id,
+ cursor_pos, self->x11_cursor.size,
+ (vec2i){0, 0}, self->x11_cursor.size,
+ 0.0f, false);
+
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
+}
+
+static void gsr_capture_kms_update_capture_size_change(gsr_capture_kms *self, gsr_color_conversion *color_conversion, vec2i target_pos, const gsr_kms_response_item *drm_fd) {
+ if(target_pos.x != self->prev_target_pos.x || target_pos.y != self->prev_target_pos.y || drm_fd->src_w != self->prev_plane_size.x || drm_fd->src_h != self->prev_plane_size.y) {
+ self->prev_target_pos = target_pos;
+        self->prev_plane_size = (vec2i){drm_fd->src_w, drm_fd->src_h}; // Store the unrotated plane size that the check above compares against; storing the rotated capture_size would retrigger the clear every frame on rotated monitors
+ gsr_color_conversion_clear(color_conversion);
+ }
+}
+
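+// Per-frame flow: fetch fresh dma-buf fds from the kms server, pick the plane for
+// the target monitor, then either copy the planes directly to the VAAPI video
+// surface (fast path: VAAPI on AMD with an unrotated monitor) or import them as an
+// EGLImage and draw through the color conversion shaders, draw the cursor on top,
+// and finally close the dma-buf fds.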
+static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_capture_kms *self = cap->priv;
+
+ gsr_capture_kms_cleanup_kms_fds(self);
+
+ if(gsr_kms_client_get_kms(&self->kms_client, &self->kms_response) != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_capture: failed to get kms, error: %d (%s)\n", self->kms_response.result, self->kms_response.err_msg);
+ return -1;
+ }
+
+ if(self->kms_response.num_items == 0) {
+ static bool error_shown = false;
+ if(!error_shown) {
+ error_shown = true;
+ fprintf(stderr, "gsr error: no drm found, capture will fail\n");
}
+ return -1;
+ }
- cursor_pos.x += target_x;
- cursor_pos.y += target_y;
-
- const intptr_t img_attr_cursor[] = {
- EGL_LINUX_DRM_FOURCC_EXT, cursor_drm_fd->pixel_format,
- EGL_WIDTH, cursor_drm_fd->width,
- EGL_HEIGHT, cursor_drm_fd->height,
- EGL_DMA_BUF_PLANE0_FD_EXT, cursor_drm_fd->fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, cursor_drm_fd->offset,
- EGL_DMA_BUF_PLANE0_PITCH_EXT, cursor_drm_fd->pitch,
- EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, cursor_drm_fd->modifier & 0xFFFFFFFFULL,
- EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, cursor_drm_fd->modifier >> 32ULL,
- EGL_NONE
- };
-
- EGLImage cursor_image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr_cursor);
- const int target = cursor_texture_is_external ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
- self->params.egl->glBindTexture(target, self->cursor_texture);
- self->params.egl->glEGLImageTargetTexture2DOES(target, cursor_image);
- self->params.egl->eglDestroyImage(self->params.egl->egl_display, cursor_image);
- self->params.egl->glBindTexture(target, 0);
+ bool capture_is_combined_plane = false;
+ const gsr_kms_response_item *drm_fd = find_monitor_drm(self, &capture_is_combined_plane);
+ if(!drm_fd) {
+ gsr_capture_kms_cleanup_kms_fds(self);
+ return -1;
+ }
- self->params.egl->glEnable(GL_SCISSOR_TEST);
- self->params.egl->glScissor(target_x, target_y, self->capture_size.x, self->capture_size.y);
+ if(drm_fd->has_hdr_metadata && self->params.hdr && hdr_metadata_is_supported_format(&drm_fd->hdr_metadata))
+ gsr_kms_set_hdr_metadata(self, drm_fd);
- gsr_color_conversion_draw(color_conversion, self->cursor_texture,
- cursor_pos, cursor_size,
- (vec2i){0, 0}, cursor_size,
- texture_rotation, cursor_texture_is_external);
+ if(!self->performance_error_shown && self->monitor_rotation != GSR_MONITOR_ROT_0 && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ self->performance_error_shown = true;
+ fprintf(stderr,"gsr warning: gsr_capture_kms_capture: the monitor you are recording is rotated, composition will have to be used."
+ " If you are experience performance problems in the video then record a single window on X11 or use portal capture option instead\n");
+ }
- self->params.egl->glDisable(GL_SCISSOR_TEST);
+ const float texture_rotation = monitor_rotation_to_radians(self->monitor_rotation);
+ const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) };
+ self->capture_size = rotate_capture_size_if_rotated(self, (vec2i){ drm_fd->src_w, drm_fd->src_h });
+ gsr_capture_kms_update_capture_size_change(self, color_conversion, target_pos, drm_fd);
+
+ vec2i capture_pos = self->capture_pos;
+ if(!capture_is_combined_plane)
+ capture_pos = (vec2i){drm_fd->x, drm_fd->y};
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ /* Fast opengl free path */
+ if(!self->fast_path_failed && self->monitor_rotation == GSR_MONITOR_ROT_0 && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ int fds[4];
+ uint32_t offsets[4];
+ uint32_t pitches[4];
+ uint64_t modifiers[4];
+ for(int i = 0; i < drm_fd->num_dma_bufs; ++i) {
+ fds[i] = drm_fd->dma_buf[i].fd;
+ offsets[i] = drm_fd->dma_buf[i].offset;
+ pitches[i] = drm_fd->dma_buf[i].pitch;
+ modifiers[i] = drm_fd->modifier;
+ }
+ if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){capture_pos.x, capture_pos.y}, self->capture_size, target_pos, self->capture_size, drm_fd->pixel_format, (vec2i){drm_fd->width, drm_fd->height}, fds, offsets, pitches, modifiers, drm_fd->num_dma_bufs)) {
+ fprintf(stderr, "gsr error: gsr_capture_kms_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
+ self->fast_path_failed = true;
+ }
+ } else {
+ self->fast_path_failed = true;
}
- self->params.egl->eglSwapBuffers(self->params.egl->egl_display, self->params.egl->egl_surface);
-
- // TODO: Do software specific video encoder conversion here
+ if(self->fast_path_failed) {
+ EGLImage image = gsr_capture_kms_create_egl_image_with_fallback(self, drm_fd);
+ if(image) {
+ gsr_capture_kms_bind_image_to_input_texture_with_fallback(self, image);
+ self->params.egl->eglDestroyImage(self->params.egl->egl_display, image);
+ }
- //self->params.egl->glFlush();
- //self->params.egl->glFinish();
+ gsr_color_conversion_draw(color_conversion, self->external_texture_fallback ? self->external_input_texture_id : self->input_texture_id,
+ target_pos, self->capture_size,
+ capture_pos, self->capture_size,
+ texture_rotation, self->external_texture_fallback);
+ }
+
+ if(self->params.record_cursor) {
+ gsr_kms_response_item *cursor_drm_fd = find_cursor_drm_if_on_monitor(self, drm_fd->connector_id, capture_is_combined_plane);
+        // On X11 the cursor is captured through X11 instead of the cursor drm plane because on PRIME systems with a dedicated NVIDIA GPU
+        // the cursor plane is not available when the cursor is on a monitor controlled by the NVIDIA device.
+ if(self->is_x11) {
+ const vec2i cursor_monitor_offset = self->capture_pos;
+ render_x11_cursor(self, color_conversion, cursor_monitor_offset, target_pos);
+ } else if(cursor_drm_fd) {
+ render_drm_cursor(self, color_conversion, cursor_drm_fd, target_pos, texture_rotation);
+ }
+ }
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ gsr_capture_kms_cleanup_kms_fds(self);
return 0;
}
static bool gsr_capture_kms_should_stop(gsr_capture *cap, bool *err) {
- gsr_capture_kms *cap_kms = cap->priv;
- if(cap_kms->should_stop) {
- if(err)
- *err = cap_kms->stop_is_error;
- return true;
- }
-
+ (void)cap;
if(err)
*err = false;
return false;
}
-static void gsr_capture_kms_capture_end(gsr_capture *cap, AVFrame *frame) {
- (void)frame;
- gsr_capture_kms_cleanup_kms_fds(cap->priv);
-}
-
static gsr_source_color gsr_capture_kms_get_source_color(gsr_capture *cap) {
(void)cap;
return GSR_SOURCE_COLOR_RGB;
}
static bool gsr_capture_kms_uses_external_image(gsr_capture *cap) {
- gsr_capture_kms *cap_kms = cap->priv;
- return cap_kms->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA;
+ (void)cap;
+ return true;
+}
+
+static bool gsr_capture_kms_set_hdr_metadata(gsr_capture *cap, AVMasteringDisplayMetadata *mastering_display_metadata, AVContentLightMetadata *light_metadata) {
+ gsr_capture_kms *self = cap->priv;
+
+ if(!self->hdr_metadata_set)
+ return false;
+
+ light_metadata->MaxCLL = self->hdr_metadata.hdmi_metadata_type1.max_cll;
+ light_metadata->MaxFALL = self->hdr_metadata.hdmi_metadata_type1.max_fall;
+
+ for(int i = 0; i < 3; ++i) {
+ mastering_display_metadata->display_primaries[i][0] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.display_primaries[i].x, 50000);
+ mastering_display_metadata->display_primaries[i][1] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.display_primaries[i].y, 50000);
+ }
+
+ mastering_display_metadata->white_point[0] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.white_point.x, 50000);
+ mastering_display_metadata->white_point[1] = av_make_q(self->hdr_metadata.hdmi_metadata_type1.white_point.y, 50000);
+
+ mastering_display_metadata->min_luminance = av_make_q(self->hdr_metadata.hdmi_metadata_type1.min_display_mastering_luminance, 10000);
+ mastering_display_metadata->max_luminance = av_make_q(self->hdr_metadata.hdmi_metadata_type1.max_display_mastering_luminance, 1);
+
+ mastering_display_metadata->has_primaries = mastering_display_metadata->display_primaries[0][0].num > 0;
+ mastering_display_metadata->has_luminance = mastering_display_metadata->max_luminance.num > 0;
+
+ return true;
}
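+// The denominators follow the HDR static metadata conventions from CTA-861.3:
+// display primaries and the white point are in units of 0.00002 (hence 50000),
+// min display mastering luminance is in units of 0.0001 cd/m2 (hence 10000), and
+// max display mastering luminance, MaxCLL and MaxFALL are in whole cd/m2.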
+// static bool gsr_capture_kms_is_damaged(gsr_capture *cap) {
+// gsr_capture_kms *self = cap->priv;
+// return self->damaged;
+// }
+
+// static void gsr_capture_kms_clear_damage(gsr_capture *cap) {
+// gsr_capture_kms *self = cap->priv;
+// self->damaged = false;
+// }
+
static void gsr_capture_kms_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
(void)video_codec_context;
- gsr_capture_kms *cap_kms = cap->priv;
+ gsr_capture_kms *self = cap->priv;
if(cap->priv) {
- gsr_capture_kms_stop(cap_kms);
- free((void*)cap_kms->params.display_to_capture);
- cap_kms->params.display_to_capture = NULL;
+ gsr_capture_kms_stop(self);
+ free((void*)self->params.display_to_capture);
+ self->params.display_to_capture = NULL;
free(cap->priv);
cap->priv = NULL;
}
@@ -518,12 +721,15 @@ gsr_capture* gsr_capture_kms_create(const gsr_capture_kms_params *params) {
*cap = (gsr_capture) {
.start = gsr_capture_kms_start,
- .tick = NULL,
+ .on_event = gsr_capture_kms_on_event,
+ //.tick = gsr_capture_kms_tick,
.should_stop = gsr_capture_kms_should_stop,
.capture = gsr_capture_kms_capture,
- .capture_end = gsr_capture_kms_capture_end,
.get_source_color = gsr_capture_kms_get_source_color,
.uses_external_image = gsr_capture_kms_uses_external_image,
+ .set_hdr_metadata = gsr_capture_kms_set_hdr_metadata,
+ //.is_damaged = gsr_capture_kms_is_damaged,
+ //.clear_damage = gsr_capture_kms_clear_damage,
.destroy = gsr_capture_kms_destroy,
.priv = cap_kms
};
diff --git a/src/capture/nvfbc.c b/src/capture/nvfbc.c
index 9c7a041..ee77a20 100644
--- a/src/capture/nvfbc.c
+++ b/src/capture/nvfbc.c
@@ -1,6 +1,5 @@
#include "../../include/capture/nvfbc.h"
#include "../../external/NvFBC.h"
-#include "../../include/cuda.h"
#include "../../include/egl.h"
#include "../../include/utils.h"
#include "../../include/color_conversion.h"
@@ -24,13 +23,8 @@ typedef struct {
bool fbc_handle_created;
bool capture_session_created;
- gsr_cuda cuda;
- CUgraphicsResource cuda_graphics_resources[2];
- CUarray mapped_arrays[2];
- CUstream cuda_stream; // TODO: asdasdsa
NVFBC_TOGL_SETUP_PARAMS setup_params;
- bool direct_capture;
bool supports_direct_cursor;
bool capture_region;
uint32_t x, y, width, height;
@@ -108,7 +102,7 @@ static void set_func_ptr(void **dst, void *src) {
}
static bool gsr_capture_nvfbc_load_library(gsr_capture *cap) {
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+ gsr_capture_nvfbc *self = cap->priv;
dlerror(); /* clear */
void *lib = dlopen("libnvidia-fbc.so.1", RTLD_LAZY);
@@ -117,23 +111,23 @@ static bool gsr_capture_nvfbc_load_library(gsr_capture *cap) {
return false;
}
- set_func_ptr((void**)&cap_nvfbc->nv_fbc_create_instance, dlsym(lib, "NvFBCCreateInstance"));
- if(!cap_nvfbc->nv_fbc_create_instance) {
+ set_func_ptr((void**)&self->nv_fbc_create_instance, dlsym(lib, "NvFBCCreateInstance"));
+ if(!self->nv_fbc_create_instance) {
fprintf(stderr, "gsr error: unable to resolve symbol 'NvFBCCreateInstance'\n");
dlclose(lib);
return false;
}
- memset(&cap_nvfbc->nv_fbc_function_list, 0, sizeof(cap_nvfbc->nv_fbc_function_list));
- cap_nvfbc->nv_fbc_function_list.dwVersion = NVFBC_VERSION;
- NVFBCSTATUS status = cap_nvfbc->nv_fbc_create_instance(&cap_nvfbc->nv_fbc_function_list);
+ memset(&self->nv_fbc_function_list, 0, sizeof(self->nv_fbc_function_list));
+ self->nv_fbc_function_list.dwVersion = NVFBC_VERSION;
+ NVFBCSTATUS status = self->nv_fbc_create_instance(&self->nv_fbc_function_list);
if(status != NVFBC_SUCCESS) {
fprintf(stderr, "gsr error: failed to create NvFBC instance (status: %d)\n", status);
dlclose(lib);
return false;
}
- cap_nvfbc->library = lib;
+ self->library = lib;
return true;
}
@@ -159,64 +153,64 @@ static void set_vertical_sync_enabled(gsr_egl *egl, int enabled) {
fprintf(stderr, "gsr warning: setting vertical sync failed\n");
}
-static void gsr_capture_nvfbc_destroy_session(gsr_capture_nvfbc *cap_nvfbc) {
- if(cap_nvfbc->fbc_handle_created && cap_nvfbc->capture_session_created) {
+static void gsr_capture_nvfbc_destroy_session(gsr_capture_nvfbc *self) {
+ if(self->fbc_handle_created && self->capture_session_created) {
NVFBC_DESTROY_CAPTURE_SESSION_PARAMS destroy_capture_params;
memset(&destroy_capture_params, 0, sizeof(destroy_capture_params));
destroy_capture_params.dwVersion = NVFBC_DESTROY_CAPTURE_SESSION_PARAMS_VER;
- cap_nvfbc->nv_fbc_function_list.nvFBCDestroyCaptureSession(cap_nvfbc->nv_fbc_handle, &destroy_capture_params);
- cap_nvfbc->capture_session_created = false;
+ self->nv_fbc_function_list.nvFBCDestroyCaptureSession(self->nv_fbc_handle, &destroy_capture_params);
+ self->capture_session_created = false;
}
}
-static void gsr_capture_nvfbc_destroy_handle(gsr_capture_nvfbc *cap_nvfbc) {
- if(cap_nvfbc->fbc_handle_created) {
+static void gsr_capture_nvfbc_destroy_handle(gsr_capture_nvfbc *self) {
+ if(self->fbc_handle_created) {
NVFBC_DESTROY_HANDLE_PARAMS destroy_params;
memset(&destroy_params, 0, sizeof(destroy_params));
destroy_params.dwVersion = NVFBC_DESTROY_HANDLE_PARAMS_VER;
- cap_nvfbc->nv_fbc_function_list.nvFBCDestroyHandle(cap_nvfbc->nv_fbc_handle, &destroy_params);
- cap_nvfbc->fbc_handle_created = false;
- cap_nvfbc->nv_fbc_handle = 0;
+ self->nv_fbc_function_list.nvFBCDestroyHandle(self->nv_fbc_handle, &destroy_params);
+ self->fbc_handle_created = false;
+ self->nv_fbc_handle = 0;
}
}
-static void gsr_capture_nvfbc_destroy_session_and_handle(gsr_capture_nvfbc *cap_nvfbc) {
- gsr_capture_nvfbc_destroy_session(cap_nvfbc);
- gsr_capture_nvfbc_destroy_handle(cap_nvfbc);
+static void gsr_capture_nvfbc_destroy_session_and_handle(gsr_capture_nvfbc *self) {
+ gsr_capture_nvfbc_destroy_session(self);
+ gsr_capture_nvfbc_destroy_handle(self);
}
-static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
+static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *self) {
NVFBCSTATUS status;
NVFBC_CREATE_HANDLE_PARAMS create_params;
memset(&create_params, 0, sizeof(create_params));
create_params.dwVersion = NVFBC_CREATE_HANDLE_PARAMS_VER;
create_params.bExternallyManagedContext = NVFBC_TRUE;
- create_params.glxCtx = cap_nvfbc->params.egl->glx_context;
- create_params.glxFBConfig = cap_nvfbc->params.egl->glx_fb_config;
+ create_params.glxCtx = self->params.egl->glx_context;
+ create_params.glxFBConfig = self->params.egl->glx_fb_config;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateHandle(&cap_nvfbc->nv_fbc_handle, &create_params);
+ status = self->nv_fbc_function_list.nvFBCCreateHandle(&self->nv_fbc_handle, &create_params);
if(status != NVFBC_SUCCESS) {
// Reverse engineering for interoperability
const uint8_t enable_key[] = { 0xac, 0x10, 0xc9, 0x2e, 0xa5, 0xe6, 0x87, 0x4f, 0x8f, 0x4b, 0xf4, 0x61, 0xf8, 0x56, 0x27, 0xe9 };
create_params.privateData = enable_key;
create_params.privateDataSize = 16;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateHandle(&cap_nvfbc->nv_fbc_handle, &create_params);
+ status = self->nv_fbc_function_list.nvFBCCreateHandle(&self->nv_fbc_handle, &create_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
goto error_cleanup;
}
}
- cap_nvfbc->fbc_handle_created = true;
+ self->fbc_handle_created = true;
NVFBC_GET_STATUS_PARAMS status_params;
memset(&status_params, 0, sizeof(status_params));
status_params.dwVersion = NVFBC_GET_STATUS_PARAMS_VER;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCGetStatus(cap_nvfbc->nv_fbc_handle, &status_params);
+ status = self->nv_fbc_function_list.nvFBCGetStatus(self->nv_fbc_handle, &status_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
goto error_cleanup;
}
@@ -225,10 +219,10 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
goto error_cleanup;
}
- cap_nvfbc->tracking_width = XWidthOfScreen(DefaultScreenOfDisplay(cap_nvfbc->params.egl->x11.dpy));
- cap_nvfbc->tracking_height = XHeightOfScreen(DefaultScreenOfDisplay(cap_nvfbc->params.egl->x11.dpy));
- cap_nvfbc->tracking_type = strcmp(cap_nvfbc->params.display_to_capture, "screen") == 0 ? NVFBC_TRACKING_SCREEN : NVFBC_TRACKING_OUTPUT;
- if(cap_nvfbc->tracking_type == NVFBC_TRACKING_OUTPUT) {
+ self->tracking_width = XWidthOfScreen(DefaultScreenOfDisplay(self->params.egl->x11.dpy));
+ self->tracking_height = XHeightOfScreen(DefaultScreenOfDisplay(self->params.egl->x11.dpy));
+ self->tracking_type = strcmp(self->params.display_to_capture, "screen") == 0 ? NVFBC_TRACKING_SCREEN : NVFBC_TRACKING_OUTPUT;
+ if(self->tracking_type == NVFBC_TRACKING_OUTPUT) {
if(!status_params.bXRandRAvailable) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: the xrandr extension is not available\n");
goto error_cleanup;
@@ -239,9 +233,9 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
goto error_cleanup;
}
- cap_nvfbc->output_id = get_output_id_from_display_name(status_params.outputs, status_params.dwOutputNum, cap_nvfbc->params.display_to_capture, &cap_nvfbc->tracking_width, &cap_nvfbc->tracking_height);
- if(cap_nvfbc->output_id == 0) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: display '%s' not found\n", cap_nvfbc->params.display_to_capture);
+ self->output_id = get_output_id_from_display_name(status_params.outputs, status_params.dwOutputNum, self->params.display_to_capture, &self->tracking_width, &self->tracking_height);
+ if(self->output_id == 0) {
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: display '%s' not found\n", self->params.display_to_capture);
goto error_cleanup;
}
}
@@ -249,92 +243,83 @@ static int gsr_capture_nvfbc_setup_handle(gsr_capture_nvfbc *cap_nvfbc) {
return 0;
error_cleanup:
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+ gsr_capture_nvfbc_destroy_session_and_handle(self);
return -1;
}
-static int gsr_capture_nvfbc_setup_session(gsr_capture_nvfbc *cap_nvfbc) {
+static int gsr_capture_nvfbc_setup_session(gsr_capture_nvfbc *self) {
NVFBC_CREATE_CAPTURE_SESSION_PARAMS create_capture_params;
memset(&create_capture_params, 0, sizeof(create_capture_params));
create_capture_params.dwVersion = NVFBC_CREATE_CAPTURE_SESSION_PARAMS_VER;
create_capture_params.eCaptureType = NVFBC_CAPTURE_TO_GL;
- create_capture_params.bWithCursor = (!cap_nvfbc->direct_capture || cap_nvfbc->supports_direct_cursor) ? NVFBC_TRUE : NVFBC_FALSE;
- if(!cap_nvfbc->params.record_cursor)
+ create_capture_params.bWithCursor = (!self->params.direct_capture || self->supports_direct_cursor) ? NVFBC_TRUE : NVFBC_FALSE;
+ if(!self->params.record_cursor)
create_capture_params.bWithCursor = false;
- if(cap_nvfbc->capture_region)
- create_capture_params.captureBox = (NVFBC_BOX){ cap_nvfbc->x, cap_nvfbc->y, cap_nvfbc->width, cap_nvfbc->height };
- create_capture_params.eTrackingType = cap_nvfbc->tracking_type;
- create_capture_params.dwSamplingRateMs = (uint32_t)ceilf(1000.0f / (float)cap_nvfbc->params.fps);
- create_capture_params.bAllowDirectCapture = cap_nvfbc->direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
- create_capture_params.bPushModel = cap_nvfbc->direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
+ if(self->capture_region)
+ create_capture_params.captureBox = (NVFBC_BOX){ self->x, self->y, self->width, self->height };
+ create_capture_params.eTrackingType = self->tracking_type;
+ create_capture_params.dwSamplingRateMs = (uint32_t)ceilf(1000.0f / (float)self->params.fps);
+ create_capture_params.bAllowDirectCapture = self->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
+ create_capture_params.bPushModel = self->params.direct_capture ? NVFBC_TRUE : NVFBC_FALSE;
create_capture_params.bDisableAutoModesetRecovery = true;
- if(cap_nvfbc->tracking_type == NVFBC_TRACKING_OUTPUT)
- create_capture_params.dwOutputId = cap_nvfbc->output_id;
+ if(self->tracking_type == NVFBC_TRACKING_OUTPUT)
+ create_capture_params.dwOutputId = self->output_id;
- NVFBCSTATUS status = cap_nvfbc->nv_fbc_function_list.nvFBCCreateCaptureSession(cap_nvfbc->nv_fbc_handle, &create_capture_params);
+ NVFBCSTATUS status = self->nv_fbc_function_list.nvFBCCreateCaptureSession(self->nv_fbc_handle, &create_capture_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
return -1;
}
- cap_nvfbc->capture_session_created = true;
+ self->capture_session_created = true;
- memset(&cap_nvfbc->setup_params, 0, sizeof(cap_nvfbc->setup_params));
- cap_nvfbc->setup_params.dwVersion = NVFBC_TOGL_SETUP_PARAMS_VER;
- cap_nvfbc->setup_params.eBufferFormat = NVFBC_BUFFER_FORMAT_BGRA;
+ memset(&self->setup_params, 0, sizeof(self->setup_params));
+ self->setup_params.dwVersion = NVFBC_TOGL_SETUP_PARAMS_VER;
+ self->setup_params.eBufferFormat = NVFBC_BUFFER_FORMAT_BGRA;
- status = cap_nvfbc->nv_fbc_function_list.nvFBCToGLSetUp(cap_nvfbc->nv_fbc_handle, &cap_nvfbc->setup_params);
+ status = self->nv_fbc_function_list.nvFBCToGLSetUp(self->nv_fbc_handle, &self->setup_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle));
- gsr_capture_nvfbc_destroy_session(cap_nvfbc);
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_start failed: %s\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle));
+ gsr_capture_nvfbc_destroy_session(self);
return -1;
}
return 0;
}
-static void gsr_capture_nvfbc_stop(gsr_capture_nvfbc *cap_nvfbc) {
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
- gsr_cuda_unload(&cap_nvfbc->cuda);
- if(cap_nvfbc->library) {
- dlclose(cap_nvfbc->library);
- cap_nvfbc->library = NULL;
+static void gsr_capture_nvfbc_stop(gsr_capture_nvfbc *self) {
+ gsr_capture_nvfbc_destroy_session_and_handle(self);
+ if(self->library) {
+ dlclose(self->library);
+ self->library = NULL;
}
- if(cap_nvfbc->params.display_to_capture) {
- free((void*)cap_nvfbc->params.display_to_capture);
- cap_nvfbc->params.display_to_capture = NULL;
+ if(self->params.display_to_capture) {
+ free((void*)self->params.display_to_capture);
+ self->params.display_to_capture = NULL;
}
}
static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+ gsr_capture_nvfbc *self = cap->priv;
- if(!cap_nvfbc->params.use_software_video_encoder) {
- if(!gsr_cuda_load(&cap_nvfbc->cuda, cap_nvfbc->params.egl->x11.dpy, cap_nvfbc->params.overclock))
- return -1;
- }
-
- if(!gsr_capture_nvfbc_load_library(cap)) {
- gsr_cuda_unload(&cap_nvfbc->cuda);
+ if(!gsr_capture_nvfbc_load_library(cap))
return -1;
- }
- cap_nvfbc->x = max_int(cap_nvfbc->params.pos.x, 0);
- cap_nvfbc->y = max_int(cap_nvfbc->params.pos.y, 0);
- cap_nvfbc->width = max_int(cap_nvfbc->params.size.x, 0);
- cap_nvfbc->height = max_int(cap_nvfbc->params.size.y, 0);
+ self->x = max_int(self->params.pos.x, 0);
+ self->y = max_int(self->params.pos.y, 0);
+ self->width = max_int(self->params.size.x, 0);
+ self->height = max_int(self->params.size.y, 0);
- cap_nvfbc->capture_region = (cap_nvfbc->x > 0 || cap_nvfbc->y > 0 || cap_nvfbc->width > 0 || cap_nvfbc->height > 0);
+ self->capture_region = (self->x > 0 || self->y > 0 || self->width > 0 || self->height > 0);
- cap_nvfbc->supports_direct_cursor = false;
- bool direct_capture = cap_nvfbc->params.direct_capture;
+ self->supports_direct_cursor = false;
int driver_major_version = 0;
int driver_minor_version = 0;
- if(direct_capture && get_driver_version(&driver_major_version, &driver_minor_version)) {
+ if(self->params.direct_capture && get_driver_version(&driver_major_version, &driver_minor_version)) {
fprintf(stderr, "Info: detected nvidia version: %d.%d\n", driver_major_version, driver_minor_version);
// TODO:
if(version_at_least(driver_major_version, driver_minor_version, 515, 57) && version_less_than(driver_major_version, driver_minor_version, 520, 56)) {
- direct_capture = false;
+ self->params.direct_capture = false;
fprintf(stderr, "Warning: \"screen-direct\" has temporary been disabled as it causes stuttering with driver versions >= 515.57 and < 520.56. Please update your driver if possible. Capturing \"screen\" instead.\n");
}
@@ -343,63 +328,63 @@ static int gsr_capture_nvfbc_start(gsr_capture *cap, AVCodecContext *video_codec
/*
if(direct_capture) {
if(version_at_least(driver_major_version, driver_minor_version, 515, 57))
- supports_direct_cursor = true;
+ self->supports_direct_cursor = true;
else
fprintf(stderr, "Info: capturing \"screen-direct\" but driver version appears to be less than 515.57. Disabling capture of cursor. Please update your driver if you want to capture your cursor or record \"screen\" instead.\n");
}
*/
}
- if(gsr_capture_nvfbc_setup_handle(cap_nvfbc) != 0) {
+ if(gsr_capture_nvfbc_setup_handle(self) != 0) {
goto error_cleanup;
}
- if(gsr_capture_nvfbc_setup_session(cap_nvfbc) != 0) {
+ if(gsr_capture_nvfbc_setup_session(self) != 0) {
goto error_cleanup;
}
- if(cap_nvfbc->capture_region) {
- video_codec_context->width = FFALIGN(cap_nvfbc->width, 2);
- video_codec_context->height = FFALIGN(cap_nvfbc->height, 2);
+ if(self->capture_region) {
+ video_codec_context->width = FFALIGN(self->width, 2);
+ video_codec_context->height = FFALIGN(self->height, 2);
} else {
- video_codec_context->width = FFALIGN(cap_nvfbc->tracking_width, 2);
- video_codec_context->height = FFALIGN(cap_nvfbc->tracking_height, 2);
+ video_codec_context->width = FFALIGN(self->tracking_width, 2);
+ video_codec_context->height = FFALIGN(self->tracking_height, 2);
}
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
/* Disable vsync */
- set_vertical_sync_enabled(cap_nvfbc->params.egl, 0);
+ set_vertical_sync_enabled(self->params.egl, 0);
return 0;
error_cleanup:
- gsr_capture_nvfbc_stop(cap_nvfbc);
+ gsr_capture_nvfbc_stop(self);
return -1;
}
static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
+ gsr_capture_nvfbc *self = cap->priv;
const double nvfbc_recreate_retry_time_seconds = 1.0;
- if(cap_nvfbc->nvfbc_needs_recreate) {
+ if(self->nvfbc_needs_recreate) {
const double now = clock_get_monotonic_seconds();
- if(now - cap_nvfbc->nvfbc_dead_start >= nvfbc_recreate_retry_time_seconds) {
- cap_nvfbc->nvfbc_dead_start = now;
- gsr_capture_nvfbc_destroy_session_and_handle(cap_nvfbc);
+ if(now - self->nvfbc_dead_start >= nvfbc_recreate_retry_time_seconds) {
+ self->nvfbc_dead_start = now;
+ gsr_capture_nvfbc_destroy_session_and_handle(self);
- if(gsr_capture_nvfbc_setup_handle(cap_nvfbc) != 0) {
+ if(gsr_capture_nvfbc_setup_handle(self) != 0) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc handle, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
return -1;
}
-
- if(gsr_capture_nvfbc_setup_session(cap_nvfbc) != 0) {
+
+ if(gsr_capture_nvfbc_setup_session(self) != 0) {
fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed to recreate nvfbc session, trying again in %f second(s)\n", nvfbc_recreate_retry_time_seconds);
return -1;
}
- cap_nvfbc->nvfbc_needs_recreate = false;
+ self->nvfbc_needs_recreate = false;
} else {
return 0;
}
@@ -415,23 +400,24 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color
grab_params.pFrameGrabInfo = &frame_info;
grab_params.dwTimeoutMs = 0;
- NVFBCSTATUS status = cap_nvfbc->nv_fbc_function_list.nvFBCToGLGrabFrame(cap_nvfbc->nv_fbc_handle, &grab_params);
+ NVFBCSTATUS status = self->nv_fbc_function_list.nvFBCToGLGrabFrame(self->nv_fbc_handle, &grab_params);
if(status != NVFBC_SUCCESS) {
- fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed: %s (%d), recreating session after %f second(s)\n", cap_nvfbc->nv_fbc_function_list.nvFBCGetLastErrorStr(cap_nvfbc->nv_fbc_handle), status, nvfbc_recreate_retry_time_seconds);
- cap_nvfbc->nvfbc_needs_recreate = true;
- cap_nvfbc->nvfbc_dead_start = clock_get_monotonic_seconds();
+ fprintf(stderr, "gsr error: gsr_capture_nvfbc_capture failed: %s (%d), recreating session after %f second(s)\n", self->nv_fbc_function_list.nvFBCGetLastErrorStr(self->nv_fbc_handle), status, nvfbc_recreate_retry_time_seconds);
+ self->nvfbc_needs_recreate = true;
+ self->nvfbc_dead_start = clock_get_monotonic_seconds();
return 0;
}
- //cap_nvfbc->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
- cap_nvfbc->params.egl->glClear(0);
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
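+    // Assumption: the flush + finish here make sure the frame NvFBC wrote into its
+    // GL texture is complete before the color conversion shader samples it.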
- gsr_color_conversion_draw(color_conversion, cap_nvfbc->setup_params.dwTextures[grab_params.dwTextureIndex],
+ gsr_color_conversion_draw(color_conversion, self->setup_params.dwTextures[grab_params.dwTextureIndex],
(vec2i){0, 0}, (vec2i){frame->width, frame->height},
(vec2i){0, 0}, (vec2i){frame->width, frame->height},
0.0f, false);
- cap_nvfbc->params.egl->glXSwapBuffers(cap_nvfbc->params.egl->x11.dpy, cap_nvfbc->params.egl->x11.window);
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
return 0;
}
@@ -443,8 +429,9 @@ static gsr_source_color gsr_capture_nvfbc_get_source_color(gsr_capture *cap) {
static void gsr_capture_nvfbc_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
(void)video_codec_context;
- gsr_capture_nvfbc *cap_nvfbc = cap->priv;
- gsr_capture_nvfbc_stop(cap_nvfbc);
+ gsr_capture_nvfbc *self = cap->priv;
+ gsr_capture_nvfbc_stop(self);
+ free(cap->priv);
free(cap);
}
@@ -479,13 +466,12 @@ gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params) {
cap_nvfbc->params = *params;
cap_nvfbc->params.display_to_capture = display_to_capture;
cap_nvfbc->params.fps = max_int(cap_nvfbc->params.fps, 1);
-
+
*cap = (gsr_capture) {
.start = gsr_capture_nvfbc_start,
.tick = NULL,
.should_stop = NULL,
.capture = gsr_capture_nvfbc_capture,
- .capture_end = NULL,
.get_source_color = gsr_capture_nvfbc_get_source_color,
.uses_external_image = NULL,
.destroy = gsr_capture_nvfbc_destroy,
diff --git a/src/capture/portal.c b/src/capture/portal.c
new file mode 100644
index 0000000..9ab7e8b
--- /dev/null
+++ b/src/capture/portal.c
@@ -0,0 +1,458 @@
+#include "../../include/capture/portal.h"
+#include "../../include/color_conversion.h"
+#include "../../include/egl.h"
+#include "../../include/utils.h"
+#include "../../include/dbus.h"
+#include "../../include/pipewire.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+
+#include <libavcodec/avcodec.h>
+
+typedef struct {
+ gsr_capture_portal_params params;
+
+ gsr_texture_map texture_map;
+
+ gsr_dbus dbus;
+ char *session_handle;
+
+ gsr_pipewire pipewire;
+ vec2i capture_size;
+ gsr_pipewire_dmabuf_data dmabuf_data[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ int num_dmabuf_data;
+
+ AVCodecContext *video_codec_context;
+ bool fast_path_failed;
+} gsr_capture_portal;
+
+static void gsr_capture_portal_cleanup_plane_fds(gsr_capture_portal *self) {
+ for(int i = 0; i < self->num_dmabuf_data; ++i) {
+ if(self->dmabuf_data[i].fd > 0) {
+ close(self->dmabuf_data[i].fd);
+ self->dmabuf_data[i].fd = 0;
+ }
+ }
+ self->num_dmabuf_data = 0;
+}
+
+static void gsr_capture_portal_stop(gsr_capture_portal *self) {
+ if(self->texture_map.texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->texture_map.texture_id);
+ self->texture_map.texture_id = 0;
+ }
+
+ if(self->texture_map.external_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->texture_map.external_texture_id);
+ self->texture_map.external_texture_id = 0;
+ }
+
+ if(self->texture_map.cursor_texture_id) {
+ self->params.egl->glDeleteTextures(1, &self->texture_map.cursor_texture_id);
+ self->texture_map.cursor_texture_id = 0;
+ }
+
+ gsr_capture_portal_cleanup_plane_fds(self);
+
+ gsr_pipewire_deinit(&self->pipewire);
+
+ if(self->session_handle) {
+ free(self->session_handle);
+ self->session_handle = NULL;
+ }
+
+ gsr_dbus_deinit(&self->dbus);
+}
+
+static void gsr_capture_portal_create_input_textures(gsr_capture_portal *self) {
+ self->params.egl->glGenTextures(1, &self->texture_map.texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->texture_map.texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ self->params.egl->glGenTextures(1, &self->texture_map.external_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, self->texture_map.external_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
+
+ self->params.egl->glGenTextures(1, &self->texture_map.cursor_texture_id);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->texture_map.cursor_texture_id);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+}
+
+static void get_default_gpu_screen_recorder_restore_token_path(char *buffer, size_t buffer_size) {
+ const char *xdg_config_home = getenv("XDG_CONFIG_HOME");
+ if(xdg_config_home) {
+ snprintf(buffer, buffer_size, "%s/gpu-screen-recorder/restore_token", xdg_config_home);
+ } else {
+ const char *home = getenv("HOME");
+ if(!home)
+ home = "/tmp";
+ snprintf(buffer, buffer_size, "%s/.config/gpu-screen-recorder/restore_token", home);
+ }
+}
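+// Example: with XDG_CONFIG_HOME=/home/user/.config this resolves to
+// /home/user/.config/gpu-screen-recorder/restore_token; when unset it falls back
+// to $HOME/.config/gpu-screen-recorder/restore_token, using /tmp as the home
+// directory of last resort.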
+
+static bool create_directory_to_file(const char *filepath) {
+ char dir[PATH_MAX];
+ dir[0] = '\0';
+
+ const char *split = strrchr(filepath, '/');
+ if(!split) /* Assuming it's the current directory (for example if filepath is "restore_token"), which doesn't need to be created */
+ return true;
+
+ snprintf(dir, sizeof(dir), "%.*s", (int)(split - filepath), filepath);
+ if(create_directory_recursive(dir) != 0) {
+ fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to create directory (%s) for restore token\n", dir);
+ return false;
+ }
+ return true;
+}
+
+static void gsr_capture_portal_save_restore_token(const char *restore_token, const char *portal_session_token_filepath) {
+ char restore_token_path[PATH_MAX];
+ restore_token_path[0] = '\0';
+ if(portal_session_token_filepath)
+ snprintf(restore_token_path, sizeof(restore_token_path), "%s", portal_session_token_filepath);
+ else
+ get_default_gpu_screen_recorder_restore_token_path(restore_token_path, sizeof(restore_token_path));
+
+ if(!create_directory_to_file(restore_token_path))
+ return;
+
+ FILE *f = fopen(restore_token_path, "wb");
+ if(!f) {
+ fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to create restore token file (%s)\n", restore_token_path);
+ return;
+ }
+
+ const int restore_token_len = strlen(restore_token);
+ if((long)fwrite(restore_token, 1, restore_token_len, f) != restore_token_len) {
+ fprintf(stderr, "gsr warning: gsr_capture_portal_save_restore_token: failed to write restore token to file (%s)\n", restore_token_path);
+ fclose(f);
+ return;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_save_restore_token: saved restore token to cache (%s)\n", restore_token);
+ fclose(f);
+}
+
+static void gsr_capture_portal_get_restore_token_from_cache(char *buffer, size_t buffer_size, const char *portal_session_token_filepath) {
+ assert(buffer_size > 0);
+ buffer[0] = '\0';
+
+ char restore_token_path[PATH_MAX];
+ restore_token_path[0] = '\0';
+ if(portal_session_token_filepath)
+ snprintf(restore_token_path, sizeof(restore_token_path), "%s", portal_session_token_filepath);
+ else
+ get_default_gpu_screen_recorder_restore_token_path(restore_token_path, sizeof(restore_token_path));
+
+ FILE *f = fopen(restore_token_path, "rb");
+ if(!f) {
+ fprintf(stderr, "gsr info: gsr_capture_portal_get_restore_token_from_cache: no restore token found in cache or failed to load (%s)\n", restore_token_path);
+ return;
+ }
+
+ fseek(f, 0, SEEK_END);
+ long file_size = ftell(f);
+ fseek(f, 0, SEEK_SET);
+
+ if(file_size > 0 && file_size < 1024 && file_size < (long)buffer_size && (long)fread(buffer, 1, file_size, f) != file_size) {
+ buffer[0] = '\0';
+ fprintf(stderr, "gsr warning: gsr_capture_portal_get_restore_token_from_cache: failed to read restore token (%s)\n", restore_token_path);
+ fclose(f);
+ return;
+ }
+
+ if(file_size > 0 && file_size < (long)buffer_size)
+ buffer[file_size] = '\0';
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_get_restore_token_from_cache: read cached restore token (%s)\n", buffer);
+ fclose(f);
+}
+
+static int gsr_capture_portal_setup_dbus(gsr_capture_portal *self, int *pipewire_fd, uint32_t *pipewire_node) {
+ *pipewire_fd = 0;
+ *pipewire_node = 0;
+ int response_status = 0;
+
+ char restore_token[1024];
+ restore_token[0] = '\0';
+ if(self->params.restore_portal_session)
+ gsr_capture_portal_get_restore_token_from_cache(restore_token, sizeof(restore_token), self->params.portal_session_token_filepath);
+
+ if(!gsr_dbus_init(&self->dbus, restore_token))
+ return -1;
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: CreateSession\n");
+ response_status = gsr_dbus_screencast_create_session(&self->dbus, &self->session_handle);
+ if(response_status != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: CreateSession failed\n");
+ return response_status;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: SelectSources\n");
+ response_status = gsr_dbus_screencast_select_sources(&self->dbus, self->session_handle, GSR_PORTAL_CAPTURE_TYPE_ALL, self->params.record_cursor ? GSR_PORTAL_CURSOR_MODE_EMBEDDED : GSR_PORTAL_CURSOR_MODE_HIDDEN);
+ if(response_status != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: SelectSources failed\n");
+ return response_status;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: Start\n");
+ response_status = gsr_dbus_screencast_start(&self->dbus, self->session_handle, pipewire_node);
+ if(response_status != 0) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: Start failed\n");
+ return response_status;
+ }
+
+ const char *screencast_restore_token = gsr_dbus_screencast_get_restore_token(&self->dbus);
+ if(screencast_restore_token)
+ gsr_capture_portal_save_restore_token(screencast_restore_token, self->params.portal_session_token_filepath);
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: OpenPipeWireRemote\n");
+ if(!gsr_dbus_screencast_open_pipewire_remote(&self->dbus, self->session_handle, pipewire_fd)) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: OpenPipeWireRemote failed\n");
+ return -1;
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_setup_dbus: desktop portal setup finished\n");
+ return 0;
+}
+
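+/* Polls gsr_pipewire_map_texture every 30ms, for up to 5 seconds, until
+   PipeWire format negotiation finishes and the first buffer arrives; only then
+   is the capture size known. */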
+static bool gsr_capture_portal_get_frame_dimensions(gsr_capture_portal *self) {
+ gsr_pipewire_region region = {0, 0, 0, 0};
+ gsr_pipewire_region cursor_region = {0, 0, 0, 0};
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: waiting for pipewire negotiation\n");
+
+ const double start_time = clock_get_monotonic_seconds();
+ while(clock_get_monotonic_seconds() - start_time < 5.0) {
+ bool uses_external_image = false;
+ uint32_t fourcc = 0;
+ uint64_t modifiers = 0;
+ if(gsr_pipewire_map_texture(&self->pipewire, self->texture_map, &region, &cursor_region, self->dmabuf_data, &self->num_dmabuf_data, &fourcc, &modifiers, &uses_external_image)) {
+ gsr_capture_portal_cleanup_plane_fds(self);
+ self->capture_size.x = region.width;
+ self->capture_size.y = region.height;
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: pipewire negotiation finished\n");
+ return true;
+ }
+ usleep(30 * 1000); /* 30 milliseconds */
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: timed out waiting for pipewire negotiation (5 seconds)\n");
+ return false;
+}
+
+static int gsr_capture_portal_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
+ gsr_capture_portal *self = cap->priv;
+
+ gsr_capture_portal_create_input_textures(self);
+
+ int pipewire_fd = 0;
+ uint32_t pipewire_node = 0;
+ const int response_status = gsr_capture_portal_setup_dbus(self, &pipewire_fd, &pipewire_node);
+ if(response_status != 0) {
+ gsr_capture_portal_stop(self);
+ // Response status values:
+ // 0: Success, the request is carried out
+ // 1: The user cancelled the interaction
+ // 2: The user interaction was ended in some other way
+ // Response status value 2 happens usually if there was some kind of error in the desktop portal on the system
+ if(response_status == 2) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_start: desktop portal capture failed. Either you Wayland compositor doesn't support desktop portal capture or it's incorrectly setup on your system\n");
+ return 50;
+ } else if(response_status == 1) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_start: desktop portal capture failed. It seems like desktop portal capture was canceled by the user.\n");
+ return 60;
+ } else {
+ return -1;
+ }
+ }
+
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: setting up pipewire\n");
+ /* TODO: support hdr when pipewire supports it */
+ /* gsr_pipewire closes the pipewire fd, even on failure */
+ if(!gsr_pipewire_init(&self->pipewire, pipewire_fd, pipewire_node, video_codec_context->framerate.num, self->params.record_cursor, self->params.egl)) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_start: failed to setup pipewire with fd: %d, node: %" PRIu32 "\n", pipewire_fd, pipewire_node);
+ gsr_capture_portal_stop(self);
+ return -1;
+ }
+ fprintf(stderr, "gsr info: gsr_capture_portal_start: pipewire setup finished\n");
+
+ if(!gsr_capture_portal_get_frame_dimensions(self)) {
+ gsr_capture_portal_stop(self);
+ return -1;
+ }
+
+ /* Disable vsync */
+ self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
+
+ video_codec_context->width = FFALIGN(self->capture_size.x, 2);
+ video_codec_context->height = FFALIGN(self->capture_size.y, 2);
+
+ frame->width = video_codec_context->width;
+ frame->height = video_codec_context->height;
+
+ self->video_codec_context = video_codec_context;
+ return 0;
+}
+
+static int max_int(int a, int b) {
+ return a > b ? a : b;
+}
+
+static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_capture_portal *self = cap->priv;
+
+ /* TODO: Handle formats other than RGB(a) */
+ gsr_pipewire_region region = {0, 0, 0, 0};
+ gsr_pipewire_region cursor_region = {0, 0, 0, 0};
+ uint32_t pipewire_fourcc = 0;
+ uint64_t pipewire_modifiers = 0;
+ bool using_external_image = false;
+ if(gsr_pipewire_map_texture(&self->pipewire, self->texture_map, &region, &cursor_region, self->dmabuf_data, &self->num_dmabuf_data, &pipewire_fourcc, &pipewire_modifiers, &using_external_image)) {
+ if(region.width != self->capture_size.x || region.height != self->capture_size.y) {
+ self->capture_size.x = region.width;
+ self->capture_size.y = region.height;
+ gsr_color_conversion_clear(color_conversion);
+ }
+ } else {
+ return 0;
+ }
+
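+    /* Center the captured image inside the output frame: e.g. a 1280x720
+       capture in a 1920x1080 frame lands at (320, 180). */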
+ const vec2i target_pos = { max_int(0, frame->width / 2 - self->capture_size.x / 2), max_int(0, frame->height / 2 - self->capture_size.y / 2) };
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ // TODO: Handle region crop
+
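+    /* With a VAAPI encoder on AMD the PipeWire dmabuf planes can be imported
+       and copied directly to the encoder's VA surface, skipping the OpenGL
+       draw below. If that copy ever fails, fast_path_failed stays set and
+       every following frame takes the OpenGL path instead. */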
+    /* Fast OpenGL-free path */
+ if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ int fds[4];
+ uint32_t offsets[4];
+ uint32_t pitches[4];
+ uint64_t modifiers[4];
+ for(int i = 0; i < self->num_dmabuf_data; ++i) {
+ fds[i] = self->dmabuf_data[i].fd;
+ offsets[i] = self->dmabuf_data[i].offset;
+ pitches[i] = self->dmabuf_data[i].stride;
+ modifiers[i] = pipewire_modifiers;
+ }
+ if(!vaapi_copy_drm_planes_to_video_surface(self->video_codec_context, frame, (vec2i){region.x, region.y}, self->capture_size, target_pos, self->capture_size, pipewire_fourcc, self->capture_size, fds, offsets, pitches, modifiers, self->num_dmabuf_data)) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_capture: vaapi_copy_drm_planes_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
+ self->fast_path_failed = true;
+ }
+ } else {
+ self->fast_path_failed = true;
+ }
+
+ if(self->fast_path_failed) {
+ gsr_color_conversion_draw(color_conversion, using_external_image ? self->texture_map.external_texture_id : self->texture_map.texture_id,
+ target_pos, self->capture_size,
+ (vec2i){region.x, region.y}, self->capture_size,
+ 0.0f, using_external_image);
+ }
+
+ if(self->params.record_cursor) {
+ const vec2i cursor_pos = {
+ target_pos.x + cursor_region.x,
+ target_pos.y + cursor_region.y
+ };
+
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->capture_size.x, self->capture_size.y);
+ gsr_color_conversion_draw(color_conversion, self->texture_map.cursor_texture_id,
+ (vec2i){cursor_pos.x, cursor_pos.y}, (vec2i){cursor_region.width, cursor_region.height},
+ (vec2i){0, 0}, (vec2i){cursor_region.width, cursor_region.height},
+ 0.0f, false);
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
+ }
+
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+ gsr_capture_portal_cleanup_plane_fds(self);
+
+ return 0;
+}
+
+static gsr_source_color gsr_capture_portal_get_source_color(gsr_capture *cap) {
+ (void)cap;
+ return GSR_SOURCE_COLOR_RGB;
+}
+
+static bool gsr_capture_portal_uses_external_image(gsr_capture *cap) {
+ (void)cap;
+ return true;
+}
+
+static bool gsr_capture_portal_is_damaged(gsr_capture *cap) {
+ gsr_capture_portal *self = cap->priv;
+ return gsr_pipewire_is_damaged(&self->pipewire);
+}
+
+static void gsr_capture_portal_clear_damage(gsr_capture *cap) {
+ gsr_capture_portal *self = cap->priv;
+ gsr_pipewire_clear_damage(&self->pipewire);
+}
+
+static void gsr_capture_portal_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
+ (void)video_codec_context;
+ gsr_capture_portal *self = cap->priv;
+ if(cap->priv) {
+ gsr_capture_portal_stop(self);
+ free(cap->priv);
+ cap->priv = NULL;
+ }
+ free(cap);
+}
+
+gsr_capture* gsr_capture_portal_create(const gsr_capture_portal_params *params) {
+ if(!params) {
+ fprintf(stderr, "gsr error: gsr_capture_portal_create params is NULL\n");
+ return NULL;
+ }
+
+ gsr_capture *cap = calloc(1, sizeof(gsr_capture));
+ if(!cap)
+ return NULL;
+
+ gsr_capture_portal *cap_portal = calloc(1, sizeof(gsr_capture_portal));
+ if(!cap_portal) {
+ free(cap);
+ return NULL;
+ }
+
+ cap_portal->params = *params;
+
+ *cap = (gsr_capture) {
+ .start = gsr_capture_portal_start,
+ .tick = NULL,
+ .should_stop = NULL,
+ .capture = gsr_capture_portal_capture,
+ .get_source_color = gsr_capture_portal_get_source_color,
+ .uses_external_image = gsr_capture_portal_uses_external_image,
+ .is_damaged = gsr_capture_portal_is_damaged,
+ .clear_damage = gsr_capture_portal_clear_damage,
+ .destroy = gsr_capture_portal_destroy,
+ .priv = cap_portal
+ };
+
+ return cap;
+}
diff --git a/src/capture/xcomposite.c b/src/capture/xcomposite.c
index f5d2b2f..9e208d6 100644
--- a/src/capture/xcomposite.c
+++ b/src/capture/xcomposite.c
@@ -10,19 +10,18 @@
#include <assert.h>
#include <X11/Xlib.h>
-#include <X11/extensions/Xdamage.h>
#include <libavutil/frame.h>
#include <libavcodec/avcodec.h>
typedef struct {
gsr_capture_xcomposite_params params;
- XEvent xev;
bool should_stop;
bool stop_is_error;
bool window_resized;
bool follow_focused_initialized;
+ bool init_new_window;
Window window;
vec2i window_size;
@@ -30,25 +29,17 @@ typedef struct {
double window_resize_timer;
WindowTexture window_texture;
+ AVCodecContext *video_codec_context;
Atom net_active_window_atom;
gsr_cursor cursor;
- int damage_event;
- int damage_error;
- XID damage;
- bool damaged;
-
bool clear_background;
+ bool fast_path_failed;
} gsr_capture_xcomposite;
static void gsr_capture_xcomposite_stop(gsr_capture_xcomposite *self) {
- if(self->damage) {
- XDamageDestroy(self->params.egl->x11.dpy, self->damage);
- self->damage = None;
- }
-
window_texture_deinit(&self->window_texture);
gsr_cursor_deinit(&self->cursor);
}
@@ -71,23 +62,6 @@ static Window get_focused_window(Display *display, Atom net_active_window_atom)
return None;
}
-static void gsr_capture_xcomposite_setup_damage(gsr_capture_xcomposite *self, Window window) {
- if(self->damage_event == 0)
- return;
-
- if(self->damage) {
- XDamageDestroy(self->params.egl->x11.dpy, self->damage);
- self->damage = None;
- }
-
- self->damage = XDamageCreate(self->params.egl->x11.dpy, window, XDamageReportNonEmpty);
- if(self->damage) {
- XDamageSubtract(self->params.egl->x11.dpy, self->damage, None, None);
- } else {
- fprintf(stderr, "gsr warning: gsr_capture_xcomposite_setup_damage: XDamageCreate failed\n");
- }
-}
-
static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_codec_context, AVFrame *frame) {
gsr_capture_xcomposite *self = cap->priv;
@@ -102,20 +76,6 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
self->window = self->params.window;
}
- if(self->params.track_damage) {
- if(!XDamageQueryExtension(self->params.egl->x11.dpy, &self->damage_event, &self->damage_error)) {
- fprintf(stderr, "gsr warning: gsr_capture_xcomposite_start: XDamage is not supported by your X11 server\n");
- self->damage_event = 0;
- self->damage_error = 0;
- }
- } else {
- self->damage_event = 0;
- self->damage_error = 0;
- }
-
- self->damaged = true;
- gsr_capture_xcomposite_setup_damage(self, self->window);
-
/* TODO: Do these in tick, and allow error if follow_focused */
XWindowAttributes attr;
@@ -133,16 +93,6 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
// TODO: Get select and add these on top of it and then restore at the end. Also do the same in other xcomposite
XSelectInput(self->params.egl->x11.dpy, self->window, StructureNotifyMask | ExposureMask);
- if(!self->params.egl->eglExportDMABUFImageQueryMESA) {
- fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: could not find eglExportDMABUFImageQueryMESA\n");
- return -1;
- }
-
- if(!self->params.egl->eglExportDMABUFImageMESA) {
- fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: could not find eglExportDMABUFImageMESA\n");
- return -1;
- }
-
/* Disable vsync */
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
if(window_texture_init(&self->window_texture, self->params.egl->x11.dpy, self->window, self->params.egl) != 0 && !self->params.follow_focused) {
@@ -174,75 +124,20 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
+ self->video_codec_context = video_codec_context;
self->window_resize_timer = clock_get_monotonic_seconds();
return 0;
}
-static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_codec_context) {
- (void)video_codec_context;
+static void gsr_capture_xcomposite_tick(gsr_capture *cap) {
gsr_capture_xcomposite *self = cap->priv;
- bool init_new_window = false;
- while(XPending(self->params.egl->x11.dpy)) {
- XNextEvent(self->params.egl->x11.dpy, &self->xev);
-
- switch(self->xev.type) {
- case DestroyNotify: {
- /* Window died (when not following focused window), so we stop recording */
- if(!self->params.follow_focused && self->xev.xdestroywindow.window == self->window) {
- self->should_stop = true;
- self->stop_is_error = false;
- }
- break;
- }
- case Expose: {
- /* Requires window texture recreate */
- if(self->xev.xexpose.count == 0 && self->xev.xexpose.window == self->window) {
- self->window_resize_timer = clock_get_monotonic_seconds();
- self->window_resized = true;
- }
- break;
- }
- case ConfigureNotify: {
- /* Window resized */
- if(self->xev.xconfigure.window == self->window && (self->xev.xconfigure.width != self->window_size.x || self->xev.xconfigure.height != self->window_size.y)) {
- self->window_size.x = max_int(self->xev.xconfigure.width, 0);
- self->window_size.y = max_int(self->xev.xconfigure.height, 0);
- self->window_resize_timer = clock_get_monotonic_seconds();
- self->window_resized = true;
- }
- break;
- }
- case PropertyNotify: {
- /* Focused window changed */
- if(self->params.follow_focused && self->xev.xproperty.atom == self->net_active_window_atom) {
- init_new_window = true;
- }
- break;
- }
- }
-
- if(self->damage_event && self->xev.type == self->damage_event + XDamageNotify) {
- XDamageNotifyEvent *de = (XDamageNotifyEvent*)&self->xev;
- XserverRegion region = XFixesCreateRegion(self->params.egl->x11.dpy, NULL, 0);
- // Subtract all the damage, repairing the window
- XDamageSubtract(self->params.egl->x11.dpy, de->damage, None, region);
- XFixesDestroyRegion(self->params.egl->x11.dpy, region);
- self->damaged = true;
- }
-
- if(gsr_cursor_update(&self->cursor, &self->xev)) {
- if(self->params.record_cursor && self->cursor.visible) {
- self->damaged = true;
- }
- }
- }
-
if(self->params.follow_focused && !self->follow_focused_initialized) {
- init_new_window = true;
+ self->init_new_window = true;
}
- if(init_new_window) {
+ if(self->init_new_window) {
+ self->init_new_window = false;
Window focused_window = get_focused_window(self->params.egl->x11.dpy, self->net_active_window_atom);
if(focused_window != self->window || !self->follow_focused_initialized) {
self->follow_focused_initialized = true;
@@ -272,7 +167,6 @@ static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_
self->window_resized = false;
self->clear_background = true;
- gsr_capture_xcomposite_setup_damage(self, self->window);
}
}
@@ -296,18 +190,49 @@ static void gsr_capture_xcomposite_tick(gsr_capture *cap, AVCodecContext *video_
self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
self->clear_background = true;
- gsr_capture_xcomposite_setup_damage(self, self->window);
}
}
-static bool gsr_capture_xcomposite_is_damaged(gsr_capture *cap) {
+static void gsr_capture_xcomposite_on_event(gsr_capture *cap, gsr_egl *egl) {
gsr_capture_xcomposite *self = cap->priv;
- return self->damage_event ? self->damaged : true;
-}
+ XEvent *xev = gsr_egl_get_event_data(egl);
+ switch(xev->type) {
+ case DestroyNotify: {
+ /* Window died (when not following focused window), so we stop recording */
+ if(!self->params.follow_focused && xev->xdestroywindow.window == self->window) {
+ self->should_stop = true;
+ self->stop_is_error = false;
+ }
+ break;
+ }
+ case Expose: {
+ /* Requires window texture recreate */
+ if(xev->xexpose.count == 0 && xev->xexpose.window == self->window) {
+ self->window_resize_timer = clock_get_monotonic_seconds();
+ self->window_resized = true;
+ }
+ break;
+ }
+ case ConfigureNotify: {
+ /* Window resized */
+ if(xev->xconfigure.window == self->window && (xev->xconfigure.width != self->window_size.x || xev->xconfigure.height != self->window_size.y)) {
+ self->window_size.x = max_int(xev->xconfigure.width, 0);
+ self->window_size.y = max_int(xev->xconfigure.height, 0);
+ self->window_resize_timer = clock_get_monotonic_seconds();
+ self->window_resized = true;
+ }
+ break;
+ }
+ case PropertyNotify: {
+ /* Focused window changed */
+ if(self->params.follow_focused && xev->xproperty.atom == self->net_active_window_atom) {
+ self->init_new_window = true;
+ }
+ break;
+ }
+ }
-static void gsr_capture_xcomposite_clear_damage(gsr_capture *cap) {
- gsr_capture_xcomposite *self = cap->priv;
- self->damaged = false;
+ gsr_cursor_on_event(&self->cursor, xev);
}
static bool gsr_capture_xcomposite_should_stop(gsr_capture *cap, bool *err) {
@@ -327,55 +252,54 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_
gsr_capture_xcomposite *self = cap->priv;
(void)frame;
- //self->params.egl->glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
- self->params.egl->glClear(0);
-
if(self->clear_background) {
self->clear_background = false;
gsr_color_conversion_clear(color_conversion);
}
- const int target_x = max_int(0, frame->width / 2 - self->texture_size.x / 2);
- const int target_y = max_int(0, frame->height / 2 - self->texture_size.y / 2);
+ const vec2i target_pos = { max_int(0, frame->width / 2 - self->texture_size.x / 2), max_int(0, frame->height / 2 - self->texture_size.y / 2) };
- const vec2i cursor_pos = {
- target_x + self->cursor.position.x - self->cursor.hotspot.x,
- target_y + self->cursor.position.y - self->cursor.hotspot.y
- };
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
+
+    /* Fast OpenGL-free path */
+ if(!self->fast_path_failed && video_codec_context_is_vaapi(self->video_codec_context) && self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD) {
+ if(!vaapi_copy_egl_image_to_video_surface(self->params.egl, self->window_texture.image, (vec2i){0, 0}, self->texture_size, target_pos, self->texture_size, self->video_codec_context, frame)) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_capture: vaapi_copy_egl_image_to_video_surface failed, falling back to opengl copy. Please report this as an issue at https://github.com/dec05eba/gpu-screen-recorder-issues\n");
+ self->fast_path_failed = true;
+ }
+ } else {
+ self->fast_path_failed = true;
+ }
- gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
- (vec2i){target_x, target_y}, self->texture_size,
- (vec2i){0, 0}, self->texture_size,
- 0.0f, false);
+ if(self->fast_path_failed) {
+ gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
+ target_pos, self->texture_size,
+ (vec2i){0, 0}, self->texture_size,
+ 0.0f, false);
+ }
if(self->params.record_cursor && self->cursor.visible) {
gsr_cursor_tick(&self->cursor, self->window);
- const bool cursor_inside_window =
- cursor_pos.x + self->cursor.size.x >= target_x &&
- cursor_pos.x <= target_x + self->texture_size.x &&
- cursor_pos.y + self->cursor.size.y >= target_y &&
- cursor_pos.y <= target_y + self->texture_size.y;
+ const vec2i cursor_pos = {
+ target_pos.x + self->cursor.position.x - self->cursor.hotspot.x,
+ target_pos.y + self->cursor.position.y - self->cursor.hotspot.y
+ };
- if(cursor_inside_window) {
- self->params.egl->glEnable(GL_SCISSOR_TEST);
- self->params.egl->glScissor(target_x, target_y, self->texture_size.x, self->texture_size.y);
+ self->params.egl->glEnable(GL_SCISSOR_TEST);
+ self->params.egl->glScissor(target_pos.x, target_pos.y, self->texture_size.x, self->texture_size.y);
- gsr_color_conversion_draw(color_conversion, self->cursor.texture_id,
- cursor_pos, self->cursor.size,
- (vec2i){0, 0}, self->cursor.size,
- 0.0f, false);
+ gsr_color_conversion_draw(color_conversion, self->cursor.texture_id,
+ cursor_pos, self->cursor.size,
+ (vec2i){0, 0}, self->cursor.size,
+ 0.0f, false);
- self->params.egl->glDisable(GL_SCISSOR_TEST);
- }
+ self->params.egl->glDisable(GL_SCISSOR_TEST);
}
- self->params.egl->eglSwapBuffers(self->params.egl->egl_display, self->params.egl->egl_surface);
-
- // TODO: Do video encoder specific conversion here
-
- //self->params.egl->glFlush();
- //self->params.egl->glFinish();
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
return 0;
}
@@ -385,6 +309,11 @@ static gsr_source_color gsr_capture_xcomposite_get_source_color(gsr_capture *cap
return GSR_SOURCE_COLOR_RGB;
}
+static uint64_t gsr_capture_xcomposite_get_window_id(gsr_capture *cap) {
+ gsr_capture_xcomposite *self = cap->priv;
+ return self->window;
+}
+
static void gsr_capture_xcomposite_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
(void)video_codec_context;
if(cap->priv) {
@@ -415,14 +344,13 @@ gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *
*cap = (gsr_capture) {
.start = gsr_capture_xcomposite_start,
+ .on_event = gsr_capture_xcomposite_on_event,
.tick = gsr_capture_xcomposite_tick,
- .is_damaged = gsr_capture_xcomposite_is_damaged,
- .clear_damage = gsr_capture_xcomposite_clear_damage,
.should_stop = gsr_capture_xcomposite_should_stop,
.capture = gsr_capture_xcomposite_capture,
- .capture_end = NULL,
.get_source_color = gsr_capture_xcomposite_get_source_color,
.uses_external_image = NULL,
+ .get_window_id = gsr_capture_xcomposite_get_window_id,
.destroy = gsr_capture_xcomposite_destroy,
.priv = cap_xcomp
};
diff --git a/src/codec_query/nvenc.c b/src/codec_query/nvenc.c
new file mode 100644
index 0000000..0501851
--- /dev/null
+++ b/src/codec_query/nvenc.c
@@ -0,0 +1,235 @@
+#include "../../include/codec_query/nvenc.h"
+#include "../../include/cuda.h"
+#include "../../external/nvEncodeAPI.h"
+
+#include <dlfcn.h>
+#include <stdio.h>
+#include <string.h>
+
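+/* Tries the versioned library name first since the unversioned .so symlink is
+   typically only installed by driver development packages. */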
+static void* open_nvenc_library(void) {
+ dlerror(); /* clear */
+ void *lib = dlopen("libnvidia-encode.so.1", RTLD_LAZY);
+ if(!lib) {
+ lib = dlopen("libnvidia-encode.so", RTLD_LAZY);
+ if(!lib) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc failed: failed to load libnvidia-encode.so/libnvidia-encode.so.1, error: %s\n", dlerror());
+ return NULL;
+ }
+ }
+ return lib;
+}
+
+static bool profile_is_h264(const GUID *profile_guid) {
+ const GUID *h264_guids[] = {
+ &NV_ENC_H264_PROFILE_BASELINE_GUID,
+ &NV_ENC_H264_PROFILE_MAIN_GUID,
+ &NV_ENC_H264_PROFILE_HIGH_GUID,
+ &NV_ENC_H264_PROFILE_PROGRESSIVE_HIGH_GUID,
+ &NV_ENC_H264_PROFILE_CONSTRAINED_HIGH_GUID
+ };
+
+ for(int i = 0; i < 5; ++i) {
+ if(memcmp(profile_guid, h264_guids[i], sizeof(GUID)) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static bool profile_is_hevc(const GUID *profile_guid) {
+    const GUID *hevc_guids[] = {
+        &NV_ENC_HEVC_PROFILE_MAIN_GUID,
+    };
+
+    for(int i = 0; i < 1; ++i) {
+        if(memcmp(profile_guid, hevc_guids[i], sizeof(GUID)) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static bool profile_is_hevc_10bit(const GUID *profile_guid) {
+    const GUID *hevc_10bit_guids[] = {
+        &NV_ENC_HEVC_PROFILE_MAIN10_GUID,
+    };
+
+    for(int i = 0; i < 1; ++i) {
+        if(memcmp(profile_guid, hevc_10bit_guids[i], sizeof(GUID)) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static bool profile_is_av1(const GUID *profile_guid) {
+    const GUID *av1_guids[] = {
+        &NV_ENC_AV1_PROFILE_MAIN_GUID,
+    };
+
+    for(int i = 0; i < 1; ++i) {
+        if(memcmp(profile_guid, av1_guids[i], sizeof(GUID)) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static bool encoder_get_supported_profiles(const NV_ENCODE_API_FUNCTION_LIST *function_list, void *nvenc_encoder, const GUID *encoder_guid, gsr_supported_video_codecs *supported_video_codecs) {
+ bool success = false;
+ GUID *profile_guids = NULL;
+
+ uint32_t profile_guid_count = 0;
+ if(function_list->nvEncGetEncodeProfileGUIDCount(nvenc_encoder, *encoder_guid, &profile_guid_count) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeProfileGUIDCount failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
+ goto fail;
+ }
+
+ if(profile_guid_count == 0)
+ goto fail;
+
+ profile_guids = calloc(profile_guid_count, sizeof(GUID));
+ if(!profile_guids) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to allocate %d guids\n", (int)profile_guid_count);
+ goto fail;
+ }
+
+ if(function_list->nvEncGetEncodeProfileGUIDs(nvenc_encoder, *encoder_guid, profile_guids, profile_guid_count, &profile_guid_count) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeProfileGUIDs failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
+ goto fail;
+ }
+
+ for(uint32_t i = 0; i < profile_guid_count; ++i) {
+ if(profile_is_h264(&profile_guids[i])) {
+ supported_video_codecs->h264 = (gsr_supported_video_codec){ true, false };
+ } else if(profile_is_hevc(&profile_guids[i])) {
+ supported_video_codecs->hevc = (gsr_supported_video_codec){ true, false };
+ } else if(profile_is_hevc_10bit(&profile_guids[i])) {
+ supported_video_codecs->hevc_hdr = (gsr_supported_video_codec){ true, false };
+ supported_video_codecs->hevc_10bit = (gsr_supported_video_codec){ true, false };
+ } else if(profile_is_av1(&profile_guids[i])) {
+ supported_video_codecs->av1 = (gsr_supported_video_codec){ true, false };
+ supported_video_codecs->av1_hdr = (gsr_supported_video_codec){ true, false };
+ supported_video_codecs->av1_10bit = (gsr_supported_video_codec){ true, false };
+ }
+ }
+
+ success = true;
+ fail:
+
+ if(profile_guids)
+ free(profile_guids);
+
+ return success;
+}
+
+static bool get_supported_video_codecs(const NV_ENCODE_API_FUNCTION_LIST *function_list, void *nvenc_encoder, gsr_supported_video_codecs *supported_video_codecs) {
+ bool success = false;
+ GUID *encoder_guids = NULL;
+ *supported_video_codecs = (gsr_supported_video_codecs){0};
+
+ uint32_t encode_guid_count = 0;
+ if(function_list->nvEncGetEncodeGUIDCount(nvenc_encoder, &encode_guid_count) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeGUIDCount failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
+ goto fail;
+ }
+
+ if(encode_guid_count == 0)
+ goto fail;
+
+ encoder_guids = calloc(encode_guid_count, sizeof(GUID));
+ if(!encoder_guids) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to allocate %d guids\n", (int)encode_guid_count);
+ goto fail;
+ }
+
+ if(function_list->nvEncGetEncodeGUIDs(nvenc_encoder, encoder_guids, encode_guid_count, &encode_guid_count) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncGetEncodeGUIDs failed, error: %s\n", function_list->nvEncGetLastErrorString(nvenc_encoder));
+ goto fail;
+ }
+
+ for(uint32_t i = 0; i < encode_guid_count; ++i) {
+ encoder_get_supported_profiles(function_list, nvenc_encoder, &encoder_guids[i], supported_video_codecs);
+ }
+
+ success = true;
+ fail:
+
+ if(encoder_guids)
+ free(encoder_guids);
+
+ return success;
+}
+
+#define NVENCAPI_VERSION_470 (11 | (1 << 24))
+#define NVENCAPI_STRUCT_VERSION_470(ver) ((uint32_t)NVENCAPI_VERSION_470 | ((ver)<<16) | (0x7 << 28))
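+/* Mirrors NVENCAPI_STRUCT_VERSION from nvEncodeAPI.h, but pins the API version
+   to 11.1 (11 | (1 << 24)), which is what the 470 driver series appears to
+   ship, so that struct versions match what those older drivers expect. */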
+
+bool gsr_get_supported_video_codecs_nvenc(gsr_supported_video_codecs *video_codecs, bool cleanup) {
+ memset(video_codecs, 0, sizeof(*video_codecs));
+
+ bool success = false;
+ void *nvenc_lib = NULL;
+ void *nvenc_encoder = NULL;
+ gsr_cuda cuda;
+ memset(&cuda, 0, sizeof(cuda));
+
+ if(!gsr_cuda_load(&cuda, NULL, false)) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to load cuda\n");
+ goto done;
+ }
+
+ nvenc_lib = open_nvenc_library();
+ if(!nvenc_lib)
+ goto done;
+
+ typedef NVENCSTATUS NVENCAPI (*FUNC_NvEncodeAPICreateInstance)(NV_ENCODE_API_FUNCTION_LIST *functionList);
+ FUNC_NvEncodeAPICreateInstance nvEncodeAPICreateInstance = (FUNC_NvEncodeAPICreateInstance)dlsym(nvenc_lib, "NvEncodeAPICreateInstance");
+ if(!nvEncodeAPICreateInstance) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: failed to find NvEncodeAPICreateInstance in libnvidia-encode.so\n");
+ goto done;
+ }
+
+ NV_ENCODE_API_FUNCTION_LIST function_list;
+ memset(&function_list, 0, sizeof(function_list));
+ function_list.version = NVENCAPI_STRUCT_VERSION(2);
+ if(nvEncodeAPICreateInstance(&function_list) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncodeAPICreateInstance failed\n");
+ goto done;
+ }
+
+ NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params;
+ memset(&params, 0, sizeof(params));
+ params.version = NVENCAPI_STRUCT_VERSION(1);
+ params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
+ params.device = cuda.cu_ctx;
+ params.apiVersion = NVENCAPI_VERSION;
+ if(function_list.nvEncOpenEncodeSessionEx(&params, &nvenc_encoder) != NV_ENC_SUCCESS) {
+        // Old NVIDIA GPUs don't support the new NVENC API (which is required for AV1).
+        // In that case fall back to the old API version, if possible, and try again.
+ function_list.version = NVENCAPI_STRUCT_VERSION_470(2);
+ if(nvEncodeAPICreateInstance(&function_list) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncodeAPICreateInstance (retry) failed\n");
+ goto done;
+ }
+
+ params.version = NVENCAPI_STRUCT_VERSION_470(1);
+ params.apiVersion = NVENCAPI_VERSION_470;
+ if(function_list.nvEncOpenEncodeSessionEx(&params, &nvenc_encoder) != NV_ENC_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_nvenc: nvEncOpenEncodeSessionEx (retry) failed\n");
+ goto done;
+ }
+ }
+
+ success = get_supported_video_codecs(&function_list, nvenc_encoder, video_codecs);
+
+ done:
+ if(cleanup) {
+ if(nvenc_encoder)
+ function_list.nvEncDestroyEncoder(nvenc_encoder);
+ if(nvenc_lib)
+ dlclose(nvenc_lib);
+ gsr_cuda_unload(&cuda);
+ }
+
+ return success;
+}
diff --git a/src/codec_query/vaapi.c b/src/codec_query/vaapi.c
new file mode 100644
index 0000000..2c74d96
--- /dev/null
+++ b/src/codec_query/vaapi.c
@@ -0,0 +1,203 @@
+#include "../../include/codec_query/vaapi.h"
+#include "../../include/utils.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <va/va.h>
+#include <va/va_drm.h>
+
+static bool profile_is_h264(VAProfile profile) {
+ switch(profile) {
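+        // 5 is VAProfileH264Baseline, written as a raw value (presumably to avoid the deprecated libva enum constant)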
+ case 5: // VAProfileH264Baseline
+ case VAProfileH264Main:
+ case VAProfileH264High:
+ case VAProfileH264ConstrainedBaseline:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_hevc_8bit(VAProfile profile) {
+ switch(profile) {
+ case VAProfileHEVCMain:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_hevc_10bit(VAProfile profile) {
+ switch(profile) {
+ case VAProfileHEVCMain10:
+ //case VAProfileHEVCMain12:
+ //case VAProfileHEVCMain422_10:
+ //case VAProfileHEVCMain422_12:
+ //case VAProfileHEVCMain444:
+ //case VAProfileHEVCMain444_10:
+ //case VAProfileHEVCMain444_12:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_av1(VAProfile profile) {
+ switch(profile) {
+ case VAProfileAV1Profile0:
+ case VAProfileAV1Profile1:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_vp8(VAProfile profile) {
+ switch(profile) {
+ case VAProfileVP8Version0_3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool profile_is_vp9(VAProfile profile) {
+ switch(profile) {
+ case VAProfileVP9Profile0:
+ case VAProfileVP9Profile1:
+ case VAProfileVP9Profile2:
+ case VAProfileVP9Profile3:
+ return true;
+ default:
+ return false;
+ }
+}
+
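+/* Returns true if |profile| exposes an encode entrypoint. VAEntrypointEncSlice
+   is the full-featured encoder and VAEntrypointEncSliceLP the low-power one
+   (e.g. Intel VDENC); *low_power is only set when low-power encoding is the
+   sole option, so the full encoder is preferred whenever both exist. */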
+static bool profile_supports_video_encoding(VADisplay va_dpy, VAProfile profile, bool *low_power) {
+ *low_power = false;
+ int num_entrypoints = vaMaxNumEntrypoints(va_dpy);
+ if(num_entrypoints <= 0)
+ return false;
+
+ VAEntrypoint *entrypoint_list = calloc(num_entrypoints, sizeof(VAEntrypoint));
+ if(!entrypoint_list)
+ return false;
+
+ bool supports_encoding = false;
+ bool supports_low_power_encoding = false;
+ if(vaQueryConfigEntrypoints(va_dpy, profile, entrypoint_list, &num_entrypoints) == VA_STATUS_SUCCESS) {
+ for(int i = 0; i < num_entrypoints; ++i) {
+ if(entrypoint_list[i] == VAEntrypointEncSlice)
+ supports_encoding = true;
+ else if(entrypoint_list[i] == VAEntrypointEncSliceLP)
+ supports_low_power_encoding = true;
+ }
+ }
+
+ if(!supports_encoding && supports_low_power_encoding)
+ *low_power = true;
+
+ free(entrypoint_list);
+ return supports_encoding || supports_low_power_encoding;
+}
+
+static bool get_supported_video_codecs(VADisplay va_dpy, gsr_supported_video_codecs *video_codecs, bool cleanup) {
+ *video_codecs = (gsr_supported_video_codecs){0};
+ bool success = false;
+ VAProfile *profile_list = NULL;
+
+ vaSetInfoCallback(va_dpy, NULL, NULL);
+
+ int va_major = 0;
+ int va_minor = 0;
+ if(vaInitialize(va_dpy, &va_major, &va_minor) != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: vaInitialize failed\n");
+ goto fail;
+ }
+
+ int num_profiles = vaMaxNumProfiles(va_dpy);
+ if(num_profiles <= 0)
+ goto fail;
+
+ profile_list = calloc(num_profiles, sizeof(VAProfile));
+ if(!profile_list || vaQueryConfigProfiles(va_dpy, profile_list, &num_profiles) != VA_STATUS_SUCCESS)
+ goto fail;
+
+ for(int i = 0; i < num_profiles; ++i) {
+ bool low_power = false;
+ if(profile_is_h264(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power)) {
+ video_codecs->h264 = (gsr_supported_video_codec){ true, low_power };
+ }
+ } else if(profile_is_hevc_8bit(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power))
+ video_codecs->hevc = (gsr_supported_video_codec){ true, low_power };
+ } else if(profile_is_hevc_10bit(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power)) {
+ video_codecs->hevc_hdr = (gsr_supported_video_codec){ true, low_power };
+ video_codecs->hevc_10bit = (gsr_supported_video_codec){ true, low_power };
+ }
+ } else if(profile_is_av1(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power)) {
+ video_codecs->av1 = (gsr_supported_video_codec){ true, low_power };
+ video_codecs->av1_hdr = (gsr_supported_video_codec){ true, low_power };
+ video_codecs->av1_10bit = (gsr_supported_video_codec){ true, low_power };
+ }
+ } else if(profile_is_vp8(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power))
+ video_codecs->vp8 = (gsr_supported_video_codec){ true, low_power };
+ } else if(profile_is_vp9(profile_list[i])) {
+ if(profile_supports_video_encoding(va_dpy, profile_list[i], &low_power))
+ video_codecs->vp9 = (gsr_supported_video_codec){ true, low_power };
+ }
+ }
+
+ success = true;
+ fail:
+ if(profile_list)
+ free(profile_list);
+
+ if(cleanup)
+ vaTerminate(va_dpy);
+
+ return success;
+}
+
+bool gsr_get_supported_video_codecs_vaapi(gsr_supported_video_codecs *video_codecs, const char *card_path, bool cleanup) {
+ memset(video_codecs, 0, sizeof(*video_codecs));
+ bool success = false;
+ int drm_fd = -1;
+
+ char render_path[128];
+ if(!gsr_card_path_get_render_path(card_path, render_path)) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: failed to get /dev/dri/renderDXXX file from %s\n", card_path);
+ goto done;
+ }
+
+ drm_fd = open(render_path, O_RDWR);
+ if(drm_fd == -1) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: failed to open device %s\n", render_path);
+ goto done;
+ }
+
+ VADisplay va_dpy = vaGetDisplayDRM(drm_fd);
+ if(va_dpy) {
+ if(!get_supported_video_codecs(va_dpy, video_codecs, cleanup)) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vaapi: failed to query supported video codecs for device %s\n", render_path);
+ goto done;
+ }
+ success = true;
+ }
+
+ done:
+ if(cleanup) {
+        if(drm_fd != -1)
+ close(drm_fd);
+ }
+
+ return success;
+}
diff --git a/src/codec_query/vulkan.c b/src/codec_query/vulkan.c
new file mode 100644
index 0000000..15dd98b
--- /dev/null
+++ b/src/codec_query/vulkan.c
@@ -0,0 +1,156 @@
+#include "../../include/codec_query/vulkan.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <xf86drm.h>
+#define VK_NO_PROTOTYPES
+//#include <vulkan/vulkan.h>
+
+#define MAX_PHYSICAL_DEVICES 32
+
+static const char *required_device_extensions[] = {
+ "VK_KHR_external_memory_fd",
+ "VK_KHR_external_semaphore_fd",
+ "VK_KHR_video_encode_queue",
+ "VK_KHR_video_queue",
+ "VK_KHR_video_maintenance1",
+ "VK_EXT_external_memory_dma_buf",
+ "VK_EXT_external_memory_host",
+ "VK_EXT_image_drm_format_modifier"
+};
+static const int num_required_device_extensions = sizeof(required_device_extensions) / sizeof(required_device_extensions[0]);
+
+bool gsr_get_supported_video_codecs_vulkan(gsr_supported_video_codecs *video_codecs, const char *card_path, bool cleanup) {
+ memset(video_codecs, 0, sizeof(*video_codecs));
+#if 0
+ bool success = false;
+ VkInstance instance = NULL;
+ VkPhysicalDevice physical_devices[MAX_PHYSICAL_DEVICES];
+ VkDevice device = NULL;
+ VkExtensionProperties *device_extensions = NULL;
+
+ const VkApplicationInfo app_info = {
+ .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ .pApplicationName = "GPU Screen Recorder",
+ .applicationVersion = VK_MAKE_VERSION(1, 0, 0),
+ .pEngineName = "GPU Screen Recorder",
+ .engineVersion = VK_MAKE_VERSION(1, 0, 0),
+ .apiVersion = VK_API_VERSION_1_3,
+ };
+
+ const VkInstanceCreateInfo instance_create_info = {
+ .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ .pApplicationInfo = &app_info
+ };
+
+ if(vkCreateInstance(&instance_create_info, NULL, &instance) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkCreateInstance failed\n");
+ goto done;
+ }
+
+ uint32_t num_devices = 0;
+ if(vkEnumeratePhysicalDevices(instance, &num_devices, NULL) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumeratePhysicalDevices (query num devices) failed\n");
+ goto done;
+ }
+
+ if(num_devices == 0) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: no vulkan capable device found\n");
+ goto done;
+ }
+
+ if(num_devices > MAX_PHYSICAL_DEVICES)
+ num_devices = MAX_PHYSICAL_DEVICES;
+
+ if(vkEnumeratePhysicalDevices(instance, &num_devices, physical_devices) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumeratePhysicalDevices (get data) failed\n");
+ goto done;
+ }
+
+ VkPhysicalDevice physical_device = NULL;
+ char device_card_path[128];
+ for(uint32_t i = 0; i < num_devices; ++i) {
+ VkPhysicalDeviceDrmPropertiesEXT device_drm_properties = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT
+ };
+
+ VkPhysicalDeviceProperties2 device_properties = {
+ .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
+ .pNext = &device_drm_properties
+ };
+ vkGetPhysicalDeviceProperties2(physical_devices[i], &device_properties);
+
+ if(!device_drm_properties.hasPrimary)
+ continue;
+
+ snprintf(device_card_path, sizeof(device_card_path), DRM_DEV_NAME, DRM_DIR_NAME, (int)device_drm_properties.primaryMinor);
+ if(strcmp(device_card_path, card_path) == 0) {
+ physical_device = physical_devices[i];
+ break;
+ }
+ }
+
+ if(!physical_device) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: failed to find a vulkan device that matches opengl device %s\n", card_path);
+ goto done;
+ }
+
+ const VkDeviceCreateInfo device_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ .enabledExtensionCount = num_required_device_extensions,
+ .ppEnabledExtensionNames = required_device_extensions
+ };
+
+ if(vkCreateDevice(physical_device, &device_create_info, NULL, &device) != VK_SUCCESS) {
+ //fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkCreateDevice failed. Device %s likely doesn't support vulkan video encoding\n", card_path);
+ goto done;
+ }
+
+ uint32_t num_device_extensions = 0;
+ if(vkEnumerateDeviceExtensionProperties(physical_device, NULL, &num_device_extensions, NULL) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumerateDeviceExtensionProperties (query num device extensions) failed\n");
+ goto done;
+ }
+
+ device_extensions = calloc(num_device_extensions, sizeof(VkExtensionProperties));
+ if(!device_extensions) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: failed to allocate %d device extensions\n", num_device_extensions);
+ goto done;
+ }
+
+ if(vkEnumerateDeviceExtensionProperties(physical_device, NULL, &num_device_extensions, device_extensions) != VK_SUCCESS) {
+ fprintf(stderr, "gsr error: gsr_get_supported_video_codecs_vulkan: vkEnumerateDeviceExtensionProperties (get data) failed\n");
+ goto done;
+ }
+
+ for(uint32_t i = 0; i < num_device_extensions; ++i) {
+ if(strcmp(device_extensions[i].extensionName, "VK_KHR_video_encode_h264") == 0) {
+            video_codecs->h264 = (gsr_supported_video_codec){ true, false };
+ } else if(strcmp(device_extensions[i].extensionName, "VK_KHR_video_encode_h265") == 0) {
+ // TODO: Verify if 10bit and hdr are actually supported
+            video_codecs->hevc = (gsr_supported_video_codec){ true, false };
+            video_codecs->hevc_10bit = (gsr_supported_video_codec){ true, false };
+            video_codecs->hevc_hdr = (gsr_supported_video_codec){ true, false };
+ }
+ }
+
+ success = true;
+
+ done:
+ if(cleanup) {
+ if(device)
+ vkDestroyDevice(device, NULL);
+ if(instance)
+ vkDestroyInstance(instance, NULL);
+ }
+ if(device_extensions)
+ free(device_extensions);
+ return success;
+#else
+ // TODO: Low power query
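+    // The Vulkan query above is compiled out for now, so H264 and HEVC encode
+    // support is assumed here instead of probed from the driver.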
+ video_codecs->h264 = (gsr_supported_video_codec){ true, false };
+ video_codecs->hevc = (gsr_supported_video_codec){ true, false };
+ return true;
+#endif
+}
diff --git a/src/cursor.c b/src/cursor.c
index 9825ad2..3dca0c6 100644
--- a/src/cursor.c
+++ b/src/cursor.c
@@ -6,8 +6,6 @@
#include <assert.h>
#include <X11/extensions/Xfixes.h>
-#include <X11/extensions/XI2.h>
-#include <X11/extensions/XInput2.h>
// TODO: Test cursor visibility with XFixesHideCursor
@@ -52,6 +50,7 @@ static bool gsr_cursor_set_from_x11_cursor_image(gsr_cursor *self, XFixesCursorI
}
}
+ // TODO: glTextureSubImage2D if same size
self->egl->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self->size.x, self->size.y, 0, GL_RGBA, GL_UNSIGNED_BYTE, cursor_data);
free(cursor_data);
@@ -71,26 +70,6 @@ static bool gsr_cursor_set_from_x11_cursor_image(gsr_cursor *self, XFixesCursorI
return false;
}
-static bool xinput_is_supported(Display *dpy, int *xi_opcode) {
- *xi_opcode = 0;
- int query_event = 0;
- int query_error = 0;
- if(!XQueryExtension(dpy, "XInputExtension", xi_opcode, &query_event, &query_error)) {
- fprintf(stderr, "gsr error: gsr_cursor_init: X Input extension not available\n");
- return false;
- }
-
- int major = 2;
- int minor = 1;
- int retval = XIQueryVersion(dpy, &major, &minor);
- if (retval != Success) {
- fprintf(stderr, "gsr error: gsr_cursor_init: XInput 2.1 is not supported\n");
- return false;
- }
-
- return true;
-}
-
int gsr_cursor_init(gsr_cursor *self, gsr_egl *egl, Display *display) {
int x_fixes_error_base = 0;
@@ -107,31 +86,11 @@ int gsr_cursor_init(gsr_cursor *self, gsr_egl *egl, Display *display) {
return -1;
}
- if(!xinput_is_supported(self->display, &self->xi_opcode)) {
- gsr_cursor_deinit(self);
- return -1;
- }
-
- unsigned char mask[XIMaskLen(XI_LASTEVENT)];
- memset(mask, 0, sizeof(mask));
- XISetMask(mask, XI_RawMotion);
-
- XIEventMask xi_masks;
- xi_masks.deviceid = XIAllMasterDevices;
- xi_masks.mask_len = sizeof(mask);
- xi_masks.mask = mask;
- if(XISelectEvents(self->display, DefaultRootWindow(self->display), &xi_masks, 1) != Success) {
- fprintf(stderr, "gsr error: gsr_cursor_init: XISelectEvents failed\n");
- gsr_cursor_deinit(self);
- return -1;
- }
-
self->egl->glGenTextures(1, &self->texture_id);
XFixesSelectCursorInput(self->display, DefaultRootWindow(self->display), XFixesDisplayCursorNotifyMask);
gsr_cursor_set_from_x11_cursor_image(self, XFixesGetCursorImage(self->display), &self->visible);
self->cursor_image_set = true;
- self->cursor_moved = true;
return 0;
}
@@ -145,23 +104,15 @@ void gsr_cursor_deinit(gsr_cursor *self) {
self->texture_id = 0;
}
- XISelectEvents(self->display, DefaultRootWindow(self->display), NULL, 0);
- XFixesSelectCursorInput(self->display, DefaultRootWindow(self->display), 0);
+ if(self->display)
+ XFixesSelectCursorInput(self->display, DefaultRootWindow(self->display), 0);
self->display = NULL;
self->egl = NULL;
}
-bool gsr_cursor_update(gsr_cursor *self, XEvent *xev) {
+bool gsr_cursor_on_event(gsr_cursor *self, XEvent *xev) {
bool updated = false;
- XGenericEventCookie *cookie = (XGenericEventCookie*)&xev->xcookie;
- const Bool got_event_data = XGetEventData(self->display, cookie);
- if(got_event_data && cookie->type == GenericEvent && cookie->extension == self->xi_opcode && cookie->evtype == XI_RawMotion) {
- updated = true;
- self->cursor_moved = true;
- }
- if(got_event_data)
- XFreeEventData(self->display, cookie);
if(xev->type == self->x_fixes_event_base + XFixesCursorNotify) {
XFixesCursorNotifyEvent *cursor_notify_event = (XFixesCursorNotifyEvent*)xev;
@@ -180,11 +131,6 @@ bool gsr_cursor_update(gsr_cursor *self, XEvent *xev) {
}
void gsr_cursor_tick(gsr_cursor *self, Window relative_to) {
- if(!self->cursor_moved)
- return;
-
- self->cursor_moved = false;
-
Window dummy_window;
int dummy_i;
unsigned int dummy_u;
diff --git a/src/damage.c b/src/damage.c
new file mode 100644
index 0000000..8e62762
--- /dev/null
+++ b/src/damage.c
@@ -0,0 +1,324 @@
+#include "../include/damage.h"
+#include "../include/utils.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <X11/extensions/Xdamage.h>
+#include <X11/extensions/Xrandr.h>
+
+typedef struct {
+ vec2i pos;
+ vec2i size;
+} gsr_rectangle;
+
+static bool rectangles_intersect(gsr_rectangle rect1, gsr_rectangle rect2) {
+ return rect1.pos.x < rect2.pos.x + rect2.size.x && rect1.pos.x + rect1.size.x > rect2.pos.x &&
+ rect1.pos.y < rect2.pos.y + rect2.size.y && rect1.pos.y + rect1.size.y > rect2.pos.y;
+}
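+
+/* Standard axis-aligned overlap test: {0,0 100x100} and {50,50 100x100}
+   intersect, while {0,0 100x100} and {100,0 100x100} merely touch and don't. */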
+
+static bool xrandr_is_supported(Display *display) {
+ int major_version = 0;
+ int minor_version = 0;
+ if(!XRRQueryVersion(display, &major_version, &minor_version))
+ return false;
+
+ return major_version > 1 || (major_version == 1 && minor_version >= 2);
+}
+
+bool gsr_damage_init(gsr_damage *self, gsr_egl *egl, bool track_cursor) {
+ memset(self, 0, sizeof(*self));
+ self->egl = egl;
+ self->track_cursor = track_cursor;
+
+ if(gsr_egl_get_display_server(egl) != GSR_DISPLAY_SERVER_X11) {
+ fprintf(stderr, "gsr warning: gsr_damage_init: damage tracking is not supported on wayland\n");
+ return false;
+ }
+
+ if(!XDamageQueryExtension(self->egl->x11.dpy, &self->damage_event, &self->damage_error)) {
+ fprintf(stderr, "gsr warning: gsr_damage_init: XDamage is not supported by your X11 server\n");
+ gsr_damage_deinit(self);
+ return false;
+ }
+
+ if(!XRRQueryExtension(self->egl->x11.dpy, &self->randr_event, &self->randr_error)) {
+ fprintf(stderr, "gsr warning: gsr_damage_init: XRandr is not supported by your X11 server\n");
+ gsr_damage_deinit(self);
+ return false;
+ }
+
+ if(!xrandr_is_supported(self->egl->x11.dpy)) {
+ fprintf(stderr, "gsr warning: gsr_damage_init: your X11 randr version is too old\n");
+ gsr_damage_deinit(self);
+ return false;
+ }
+
+ if(self->track_cursor)
+ self->track_cursor = gsr_cursor_init(&self->cursor, self->egl, self->egl->x11.dpy) == 0;
+
+ XRRSelectInput(self->egl->x11.dpy, DefaultRootWindow(self->egl->x11.dpy), RRScreenChangeNotifyMask | RRCrtcChangeNotifyMask | RROutputChangeNotifyMask);
+
+ self->damaged = true;
+ return true;
+}
+
+void gsr_damage_deinit(gsr_damage *self) {
+ if(self->damage) {
+ XDamageDestroy(self->egl->x11.dpy, self->damage);
+ self->damage = None;
+ }
+
+ gsr_cursor_deinit(&self->cursor);
+
+ self->damage_event = 0;
+ self->damage_error = 0;
+
+ self->randr_event = 0;
+ self->randr_error = 0;
+}
+
+bool gsr_damage_set_target_window(gsr_damage *self, uint64_t window) {
+ if(self->damage_event == 0)
+ return false;
+
+ if(window == self->window)
+ return true;
+
+ if(self->damage) {
+ XDamageDestroy(self->egl->x11.dpy, self->damage);
+ self->damage = None;
+ }
+
+ if(self->window)
+ XSelectInput(self->egl->x11.dpy, self->window, 0);
+
+ self->window = window;
+ XSelectInput(self->egl->x11.dpy, self->window, StructureNotifyMask | ExposureMask);
+
+ XWindowAttributes win_attr;
+ win_attr.x = 0;
+ win_attr.y = 0;
+ win_attr.width = 0;
+ win_attr.height = 0;
+ if(!XGetWindowAttributes(self->egl->x11.dpy, self->window, &win_attr))
+ fprintf(stderr, "gsr warning: gsr_damage_set_target_window failed: failed to get window attributes: %ld\n", (long)self->window);
+
+ //self->window_pos.x = win_attr.x;
+ //self->window_pos.y = win_attr.y;
+
+ self->window_size.x = win_attr.width;
+ self->window_size.y = win_attr.height;
+
+ self->damage = XDamageCreate(self->egl->x11.dpy, window, XDamageReportNonEmpty);
+ if(self->damage) {
+ XDamageSubtract(self->egl->x11.dpy, self->damage, None, None);
+ self->damaged = true;
+ self->track_type = GSR_DAMAGE_TRACK_WINDOW;
+ return true;
+ } else {
+ fprintf(stderr, "gsr warning: gsr_damage_set_target_window: XDamageCreate failed\n");
+ self->track_type = GSR_DAMAGE_TRACK_NONE;
+ return false;
+ }
+}
+
+bool gsr_damage_set_target_monitor(gsr_damage *self, const char *monitor_name) {
+ if(self->damage_event == 0)
+ return false;
+
+ if(strcmp(self->monitor_name, monitor_name) == 0)
+ return true;
+
+ if(self->damage) {
+ XDamageDestroy(self->egl->x11.dpy, self->damage);
+ self->damage = None;
+ }
+
+ memset(&self->monitor, 0, sizeof(self->monitor));
+ if(strcmp(monitor_name, "screen") != 0 && strcmp(monitor_name, "screen-direct") != 0 && strcmp(monitor_name, "screen-direct-force") != 0) {
+ if(!get_monitor_by_name(self->egl, GSR_CONNECTION_X11, monitor_name, &self->monitor))
+ fprintf(stderr, "gsr warning: gsr_damage_set_target_monitor: failed to find monitor: %s\n", monitor_name);
+ }
+
+ if(self->window)
+ XSelectInput(self->egl->x11.dpy, self->window, 0);
+
+ self->window = DefaultRootWindow(self->egl->x11.dpy);
+ self->damage = XDamageCreate(self->egl->x11.dpy, self->window, XDamageReportNonEmpty);
+ if(self->damage) {
+ XDamageSubtract(self->egl->x11.dpy, self->damage, None, None);
+ self->damaged = true;
+ snprintf(self->monitor_name, sizeof(self->monitor_name), "%s", monitor_name);
+ self->track_type = GSR_DAMAGE_TRACK_MONITOR;
+ return true;
+ } else {
+ fprintf(stderr, "gsr warning: gsr_damage_set_target_monitor: XDamageCreate failed\n");
+ self->track_type = GSR_DAMAGE_TRACK_NONE;
+ return false;
+ }
+}
+
+static void gsr_damage_on_crtc_change(gsr_damage *self, XEvent *xev) {
+ const XRRCrtcChangeNotifyEvent *rr_crtc_change_event = (XRRCrtcChangeNotifyEvent*)xev;
+ if(rr_crtc_change_event->crtc == 0 || self->monitor.monitor_identifier == 0)
+ return;
+
+ if(rr_crtc_change_event->crtc != self->monitor.monitor_identifier)
+ return;
+
+ if(rr_crtc_change_event->width == 0 || rr_crtc_change_event->height == 0)
+ return;
+
+ if(rr_crtc_change_event->x != self->monitor.pos.x || rr_crtc_change_event->y != self->monitor.pos.y ||
+ (int)rr_crtc_change_event->width != self->monitor.size.x || (int)rr_crtc_change_event->height != self->monitor.size.y) {
+ self->monitor.pos.x = rr_crtc_change_event->x;
+ self->monitor.pos.y = rr_crtc_change_event->y;
+
+ self->monitor.size.x = rr_crtc_change_event->width;
+ self->monitor.size.y = rr_crtc_change_event->height;
+ }
+}
+
+static void gsr_damage_on_output_change(gsr_damage *self, XEvent *xev) {
+ const XRROutputChangeNotifyEvent *rr_output_change_event = (XRROutputChangeNotifyEvent*)xev;
+ if(!rr_output_change_event->output || self->monitor.monitor_identifier == 0)
+ return;
+
+ XRRScreenResources *screen_res = XRRGetScreenResources(self->egl->x11.dpy, DefaultRootWindow(self->egl->x11.dpy));
+ if(!screen_res)
+ return;
+
+ XRROutputInfo *out_info = XRRGetOutputInfo(self->egl->x11.dpy, screen_res, rr_output_change_event->output);
+ if(out_info && out_info->crtc && out_info->crtc == self->monitor.monitor_identifier) {
+ XRRCrtcInfo *crtc_info = XRRGetCrtcInfo(self->egl->x11.dpy, screen_res, out_info->crtc);
+ if(crtc_info && (crtc_info->x != self->monitor.pos.x || crtc_info->y != self->monitor.pos.y ||
+ (int)crtc_info->width != self->monitor.size.x || (int)crtc_info->height != self->monitor.size.y))
+ {
+ self->monitor.pos.x = crtc_info->x;
+ self->monitor.pos.y = crtc_info->y;
+
+ self->monitor.size.x = crtc_info->width;
+ self->monitor.size.y = crtc_info->height;
+ }
+
+ if(crtc_info)
+ XRRFreeCrtcInfo(crtc_info);
+ }
+
+ if(out_info)
+ XRRFreeOutputInfo(out_info);
+
+ XRRFreeScreenResources(screen_res);
+}
+
+static void gsr_damage_on_randr_event(gsr_damage *self, XEvent *xev) {
+ const XRRNotifyEvent *rr_event = (XRRNotifyEvent*)xev;
+ switch(rr_event->subtype) {
+ case RRNotify_CrtcChange:
+ gsr_damage_on_crtc_change(self, xev);
+ break;
+ case RRNotify_OutputChange:
+ gsr_damage_on_output_change(self, xev);
+ break;
+ }
+}
+
+static void gsr_damage_on_damage_event(gsr_damage *self, XEvent *xev) {
+ const XDamageNotifyEvent *de = (XDamageNotifyEvent*)xev;
+ XserverRegion region = XFixesCreateRegion(self->egl->x11.dpy, NULL, 0);
+ /* Subtract all the damage, repairing the window */
+ XDamageSubtract(self->egl->x11.dpy, de->damage, None, region);
+
+ if(self->track_type == GSR_DAMAGE_TRACK_WINDOW || (self->track_type == GSR_DAMAGE_TRACK_MONITOR && self->monitor.connector_id == 0)) {
+ self->damaged = true;
+ } else {
+ int num_rectangles = 0;
+ XRectangle *rectangles = XFixesFetchRegion(self->egl->x11.dpy, region, &num_rectangles);
+ if(rectangles) {
+ const gsr_rectangle monitor_region = { self->monitor.pos, self->monitor.size };
+ for(int i = 0; i < num_rectangles; ++i) {
+ const gsr_rectangle damage_region = { (vec2i){rectangles[i].x, rectangles[i].y}, (vec2i){rectangles[i].width, rectangles[i].height} };
+ self->damaged = rectangles_intersect(monitor_region, damage_region);
+ if(self->damaged)
+ break;
+ }
+ XFree(rectangles);
+ }
+ }
+
+ XFixesDestroyRegion(self->egl->x11.dpy, region);
+ XFlush(self->egl->x11.dpy);
+}
+
+static void gsr_damage_on_tick_cursor(gsr_damage *self) {
+ vec2i prev_cursor_pos = self->cursor.position;
+ gsr_cursor_tick(&self->cursor, self->window);
+ if(self->cursor.position.x != prev_cursor_pos.x || self->cursor.position.y != prev_cursor_pos.y) {
+ const gsr_rectangle cursor_region = { self->cursor.position, self->cursor.size };
+ switch(self->track_type) {
+ case GSR_DAMAGE_TRACK_NONE: {
+ self->damaged = true;
+ break;
+ }
+ case GSR_DAMAGE_TRACK_WINDOW: {
+ const gsr_rectangle window_region = { (vec2i){0, 0}, self->window_size };
+ self->damaged = self->window_size.x == 0 || rectangles_intersect(window_region, cursor_region);
+ break;
+ }
+ case GSR_DAMAGE_TRACK_MONITOR: {
+ const gsr_rectangle monitor_region = { self->monitor.pos, self->monitor.size };
+ self->damaged = self->monitor.monitor_identifier == 0 || rectangles_intersect(monitor_region, cursor_region);
+ break;
+ }
+ }
+ }
+}
+
+static void gsr_damage_on_window_configure_notify(gsr_damage *self, XEvent *xev) {
+ if(xev->xconfigure.window != self->window)
+ return;
+
+ //self->window_pos.x = xev->xconfigure.x;
+ //self->window_pos.y = xev->xconfigure.y;
+
+ self->window_size.x = xev->xconfigure.width;
+ self->window_size.y = xev->xconfigure.height;
+}
+
+void gsr_damage_on_event(gsr_damage *self, XEvent *xev) {
+ if(self->damage_event == 0 || self->track_type == GSR_DAMAGE_TRACK_NONE)
+ return;
+
+ if(self->track_type == GSR_DAMAGE_TRACK_WINDOW && xev->type == ConfigureNotify)
+ gsr_damage_on_window_configure_notify(self, xev);
+
+ if(self->randr_event) {
+ if(xev->type == self->randr_event + RRScreenChangeNotify)
+ XRRUpdateConfiguration(xev);
+
+ if(xev->type == self->randr_event + RRNotify)
+ gsr_damage_on_randr_event(self, xev);
+ }
+
+ if(self->damage_event && xev->type == self->damage_event + XDamageNotify)
+ gsr_damage_on_damage_event(self, xev);
+
+ if(self->track_cursor)
+ gsr_cursor_on_event(&self->cursor, xev);
+}
+
+void gsr_damage_tick(gsr_damage *self) {
+ if(self->damage_event == 0 || self->track_type == GSR_DAMAGE_TRACK_NONE)
+ return;
+
+ if(self->track_cursor && self->cursor.visible && !self->damaged)
+ gsr_damage_on_tick_cursor(self);
+}
+
+bool gsr_damage_is_damaged(gsr_damage *self) {
+ return self->damage_event == 0 || !self->damage || self->damaged || self->track_type == GSR_DAMAGE_TRACK_NONE;
+}
+
+void gsr_damage_clear(gsr_damage *self) {
+ self->damaged = false;
+}
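+
+/* Illustrative sketch (not part of this file): how a capture loop is expected
+   to drive this damage tracker each frame. poll_x11_event is an assumed helper
+   for the example.
+
+       XEvent xev;
+       while(running) {
+           while(poll_x11_event(dpy, &xev))
+               gsr_damage_on_event(&damage, &xev);
+           gsr_damage_tick(&damage);
+           if(gsr_damage_is_damaged(&damage)) {
+               capture_and_encode_frame();
+               gsr_damage_clear(&damage);
+           }
+       }
+*/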
diff --git a/src/dbus.c b/src/dbus.c
new file mode 100644
index 0000000..5757b8b
--- /dev/null
+++ b/src/dbus.c
@@ -0,0 +1,876 @@
+#include "../include/dbus.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/random.h>
+
+/* TODO: Make non-blocking when GPU Screen Recorder is turned into a library */
+/* TODO: Make sure responses match the requests */
+
+#define DESKTOP_PORTAL_SIGNAL_RULE "type='signal',interface='org.freedesktop.Portal.Request'"
+
+typedef enum {
+ DICT_TYPE_STRING,
+ DICT_TYPE_UINT32,
+ DICT_TYPE_BOOL,
+} dict_value_type;
+
+typedef struct {
+ const char *key;
+ dict_value_type value_type;
+ union {
+ char *str;
+ dbus_uint32_t u32;
+ dbus_bool_t boolean;
+ };
+} dict_entry;
+
+static const char* dict_value_type_to_string(dict_value_type type) {
+ switch(type) {
+ case DICT_TYPE_STRING: return "string";
+ case DICT_TYPE_UINT32: return "uint32";
+ case DICT_TYPE_BOOL: return "boolean";
+ }
+ return "(unknown)";
+}
+
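+/* Note: mapping each random byte with % alphabet_size below introduces a slight
+   modulo bias (256 is not a multiple of the 62-character alphabet passed by the
+   caller). That is acceptable here since the generated tokens only need to be
+   unique, not cryptographically uniform. */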
+static bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
+ /* TODO: Use other functions on other platforms than linux */
+ if(getrandom(buffer, buffer_size, 0) < buffer_size) {
+ fprintf(stderr, "gsr error: generate_random_characters: failed to get random bytes, error: %s\n", strerror(errno));
+ return false;
+ }
+
+ for(int i = 0; i < buffer_size; ++i) {
+ unsigned char c = *(unsigned char*)&buffer[i];
+ buffer[i] = alphabet[c % alphabet_size];
+ }
+
+ return true;
+}
+
+bool gsr_dbus_init(gsr_dbus *self, const char *screencast_restore_token) {
+ memset(self, 0, sizeof(*self));
+ dbus_error_init(&self->err);
+
+ self->random_str[DBUS_RANDOM_STR_SIZE] = '\0';
+ if(!generate_random_characters(self->random_str, DBUS_RANDOM_STR_SIZE, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 62)) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: failed to generate random string\n");
+ return false;
+ }
+
+ self->con = dbus_bus_get(DBUS_BUS_SESSION, &self->err);
+ if(dbus_error_is_set(&self->err)) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: dbus_bus_get failed with error: %s\n", self->err.message);
+ return false;
+ }
+
+ if(!self->con) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: failed to get dbus session\n");
+ return false;
+ }
+
+ /* TODO: Check the name */
+ const int ret = dbus_bus_request_name(self->con, "com.dec05eba.gpu_screen_recorder", DBUS_NAME_FLAG_REPLACE_EXISTING, &self->err);
+ if(dbus_error_is_set(&self->err)) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: dbus_bus_request_name failed with error: %s\n", self->err.message);
+ gsr_dbus_deinit(self);
+ return false;
+ }
+
+ if(screencast_restore_token) {
+ self->screencast_restore_token = strdup(screencast_restore_token);
+ if(!self->screencast_restore_token) {
+ fprintf(stderr, "gsr error: gsr_dbus_init: failed to clone restore token\n");
+ gsr_dbus_deinit(self);
+ return false;
+ }
+ }
+
+ (void)ret;
+ // if(ret != DBUS_REQUEST_NAME_REPLY_PRIMARY_OWNER) {
+ // fprintf(stderr, "gsr error: gsr_capture_portal_setup_dbus: dbus_bus_request_name failed to get primary owner\n");
+ // return false;
+ // }
+
+ return true;
+}
+
+void gsr_dbus_deinit(gsr_dbus *self) {
+ if(self->screencast_restore_token) {
+ free(self->screencast_restore_token);
+ self->screencast_restore_token = NULL;
+ }
+
+ if(self->desktop_portal_rule_added) {
+ dbus_bus_remove_match(self->con, DESKTOP_PORTAL_SIGNAL_RULE, NULL);
+ // dbus_connection_flush(self->con);
+ self->desktop_portal_rule_added = false;
+ }
+
+ if(self->con) {
+ dbus_error_free(&self->err);
+
+ dbus_bus_release_name(self->con, "com.dec05eba.gpu_screen_recorder", NULL);
+
+        // dbus_connection_close shouldn't be called on a shared connection obtained with dbus_bus_get
+ //dbus_connection_close(self->con);
+ dbus_connection_unref(self->con);
+ self->con = NULL;
+ }
+}
+
+static bool gsr_dbus_desktop_portal_get_property(gsr_dbus *self, const char *interface, const char *property_name, uint32_t *result) {
+ *result = 0;
+
+ DBusMessage *msg = dbus_message_new_method_call(
+ "org.freedesktop.portal.Desktop", // target for the method call
+ "/org/freedesktop/portal/desktop", // object to call on
+ "org.freedesktop.DBus.Properties", // interface to call on
+ "Get"); // method name
+ if(!msg) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: dbus_message_new_method_call failed\n");
+ return false;
+ }
+
+ DBusMessageIter it;
+ dbus_message_iter_init_append(msg, &it);
+
+ if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_STRING, &interface)) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: failed to add interface\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+
+ if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_STRING, &property_name)) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: failed to add property_name\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+
+ DBusPendingCall *pending = NULL;
+ if(!dbus_connection_send_with_reply(self->con, msg, &pending, -1) || !pending) { // -1 is default timeout
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: dbus_connection_send_with_reply failed\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+ dbus_connection_flush(self->con);
+
+ //fprintf(stderr, "Request Sent\n");
+
+ dbus_message_unref(msg);
+ msg = NULL;
+
+ dbus_pending_call_block(pending);
+
+ msg = dbus_pending_call_steal_reply(pending);
+ if(!msg) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: dbus_pending_call_steal_reply failed\n");
+ dbus_pending_call_unref(pending);
+ return false;
+ }
+
+ dbus_pending_call_unref(pending);
+ pending = NULL;
+
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: response message is missing arguments\n");
+ dbus_message_unref(msg);
+ return false;
+ } else if(DBUS_TYPE_UINT32 == dbus_message_iter_get_arg_type(&resp_args)) {
+ dbus_message_iter_get_basic(&resp_args, result);
+ } else if(DBUS_TYPE_VARIANT == dbus_message_iter_get_arg_type(&resp_args)) {
+ DBusMessageIter variant_iter;
+ dbus_message_iter_recurse(&resp_args, &variant_iter);
+
+ if(dbus_message_iter_get_arg_type(&variant_iter) == DBUS_TYPE_UINT32) {
+ dbus_message_iter_get_basic(&variant_iter, result);
+ } else {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: response message is not a variant with an uint32, %c\n", dbus_message_iter_get_arg_type(&variant_iter));
+ dbus_message_unref(msg);
+ return false;
+ }
+ } else {
+ fprintf(stderr, "gsr error: gsr_dbus_desktop_portal_get_property: response message is not an uint32, %c\n", dbus_message_iter_get_arg_type(&resp_args));
+ dbus_message_unref(msg);
+ return false;
+ // TODO: Check dbus_error_is_set?
+ }
+
+ dbus_message_unref(msg);
+ return true;
+}
+
+static uint32_t gsr_dbus_get_screencast_version_cached(gsr_dbus *self) {
+ if(self->screencast_version == 0)
+ gsr_dbus_desktop_portal_get_property(self, "org.freedesktop.portal.ScreenCast", "version", &self->screencast_version);
+ return self->screencast_version;
+}
+
+static bool gsr_dbus_ensure_desktop_portal_rule_added(gsr_dbus *self) {
+ if(self->desktop_portal_rule_added)
+ return true;
+
+ dbus_bus_add_match(self->con, DESKTOP_PORTAL_SIGNAL_RULE, &self->err);
+ dbus_connection_flush(self->con);
+ if(dbus_error_is_set(&self->err)) {
+ fprintf(stderr, "gsr error: gsr_dbus_ensure_desktop_portal_rule_added: failed to add dbus rule %s, error: %s\n", DESKTOP_PORTAL_SIGNAL_RULE, self->err.message);
+ return false;
+ }
+ self->desktop_portal_rule_added = true;
+ return true;
+}
+
+static void gsr_dbus_portal_get_unique_handle_token(gsr_dbus *self, char *buffer, int size) {
+ snprintf(buffer, size, "gpu_screen_recorder_handle_%s_%u", self->random_str, self->handle_counter++);
+}
+
+static void gsr_dbus_portal_get_unique_session_token(gsr_dbus *self, char *buffer, int size) {
+ snprintf(buffer, size, "gpu_screen_recorder_session_%s", self->random_str);
+}
+
+static bool dbus_add_dict(DBusMessageIter *it, const dict_entry *entries, int num_entries) {
+ DBusMessageIter array_it;
+ if(!dbus_message_iter_open_container(it, DBUS_TYPE_ARRAY, "{sv}", &array_it))
+ return false;
+
+ for (int i = 0; i < num_entries; ++i) {
+ DBusMessageIter entry_it = DBUS_MESSAGE_ITER_INIT_CLOSED;
+ DBusMessageIter variant_it = DBUS_MESSAGE_ITER_INIT_CLOSED;
+
+ if(!dbus_message_iter_open_container(&array_it, DBUS_TYPE_DICT_ENTRY, NULL, &entry_it))
+ goto entry_err;
+
+ if(!dbus_message_iter_append_basic(&entry_it, DBUS_TYPE_STRING, &entries[i].key))
+ goto entry_err;
+
+ switch (entries[i].value_type) {
+ case DICT_TYPE_STRING: {
+ if(!dbus_message_iter_open_container(&entry_it, DBUS_TYPE_VARIANT, DBUS_TYPE_STRING_AS_STRING, &variant_it))
+ goto entry_err;
+ if(!dbus_message_iter_append_basic(&variant_it, DBUS_TYPE_STRING, &entries[i].str))
+ goto entry_err;
+ break;
+ }
+ case DICT_TYPE_UINT32: {
+ if(!dbus_message_iter_open_container(&entry_it, DBUS_TYPE_VARIANT, DBUS_TYPE_UINT32_AS_STRING, &variant_it))
+ goto entry_err;
+ if(!dbus_message_iter_append_basic(&variant_it, DBUS_TYPE_UINT32, &entries[i].u32))
+ goto entry_err;
+ break;
+ }
+ case DICT_TYPE_BOOL: {
+ if(!dbus_message_iter_open_container(&entry_it, DBUS_TYPE_VARIANT, DBUS_TYPE_BOOLEAN_AS_STRING, &variant_it))
+ goto entry_err;
+ if(!dbus_message_iter_append_basic(&variant_it, DBUS_TYPE_BOOLEAN, &entries[i].boolean))
+ goto entry_err;
+ break;
+ }
+ }
+
+ dbus_message_iter_close_container(&entry_it, &variant_it);
+ dbus_message_iter_close_container(&array_it, &entry_it);
+ continue;
+
+ entry_err:
+ dbus_message_iter_abandon_container_if_open(&array_it, &variant_it);
+ dbus_message_iter_abandon_container_if_open(&array_it, &entry_it);
+ dbus_message_iter_abandon_container_if_open(it, &array_it);
+ return false;
+ }
+
+ return dbus_message_iter_close_container(it, &array_it);
+}
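+
+/* Illustrative sketch: building the a{sv} options argument that the portal
+   methods below pass to dbus_add_dict. The iterator must already have been set
+   up with dbus_message_iter_init_append on an outgoing message:
+
+       dict_entry opts[2];
+       opts[0].key = "handle_token";
+       opts[0].value_type = DICT_TYPE_STRING;
+       opts[0].str = handle_token;
+       opts[1].key = "multiple";
+       opts[1].value_type = DICT_TYPE_BOOL;
+       opts[1].boolean = false;
+       if(!dbus_add_dict(&it, opts, 2))
+           return false;
+*/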
+
+/* If |response_msg| is NULL then we don't wait for a response signal */
+static bool gsr_dbus_call_screencast_method(gsr_dbus *self, const char *method_name, const char *session_handle, const char *parent_window, const dict_entry *entries, int num_entries, int *resp_fd, DBusMessage **response_msg) {
+ if(resp_fd)
+ *resp_fd = -1;
+
+ if(response_msg)
+ *response_msg = NULL;
+
+ if(!gsr_dbus_ensure_desktop_portal_rule_added(self))
+ return false;
+
+ DBusMessage *msg = dbus_message_new_method_call(
+ "org.freedesktop.portal.Desktop", // target for the method call
+ "/org/freedesktop/portal/desktop", // object to call on
+ "org.freedesktop.portal.ScreenCast", // interface to call on
+ method_name); // method name
+ if(!msg) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: dbus_message_new_method_call failed\n");
+ return false;
+ }
+
+ DBusMessageIter it;
+ dbus_message_iter_init_append(msg, &it);
+
+ if(session_handle) {
+ if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_OBJECT_PATH, &session_handle)) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed to add session_handle\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+ }
+
+ if(parent_window) {
+ if(!dbus_message_iter_append_basic(&it, DBUS_TYPE_STRING, &parent_window)) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed to add parent_window\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+ }
+
+ if(!dbus_add_dict(&it, entries, num_entries)) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed to add dict\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+
+ DBusPendingCall *pending = NULL;
+ if(!dbus_connection_send_with_reply(self->con, msg, &pending, -1) || !pending) { // -1 is default timeout
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: dbus_connection_send_with_reply failed\n");
+ dbus_message_unref(msg);
+ return false;
+ }
+ dbus_connection_flush(self->con);
+
+ //fprintf(stderr, "Request Sent\n");
+
+ dbus_message_unref(msg);
+ msg = NULL;
+
+ dbus_pending_call_block(pending);
+
+ msg = dbus_pending_call_steal_reply(pending);
+ if(!msg) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: dbus_pending_call_steal_reply failed\n");
+ dbus_pending_call_unref(pending);
+ return false;
+ }
+
+ dbus_pending_call_unref(pending);
+ pending = NULL;
+
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: response message is missing arguments\n");
+ dbus_message_unref(msg);
+ return false;
+ } else if (DBUS_TYPE_OBJECT_PATH == dbus_message_iter_get_arg_type(&resp_args)) {
+ const char *res = NULL;
+ dbus_message_iter_get_basic(&resp_args, &res);
+ } else if(DBUS_TYPE_UNIX_FD == dbus_message_iter_get_arg_type(&resp_args)) {
+ int fd = -1;
+ dbus_message_iter_get_basic(&resp_args, &fd);
+
+ if(resp_fd)
+ *resp_fd = fd;
+ } else if(DBUS_TYPE_STRING == dbus_message_iter_get_arg_type(&resp_args)) {
+ char *err = NULL;
+ dbus_message_iter_get_basic(&resp_args, &err);
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: failed with error: %s\n", err);
+
+ dbus_message_unref(msg);
+ return false;
+ // TODO: Check dbus_error_is_set?
+ } else {
+ fprintf(stderr, "gsr error: gsr_dbus_call_screencast_method: response message is not an object path or unix fd\n");
+ dbus_message_unref(msg);
+ return false;
+ // TODO: Check dbus_error_is_set?
+ }
+
+ dbus_message_unref(msg);
+ if(!response_msg)
+ return true;
+
+ /* TODO: Add timeout, but take into consideration user interactive signals (such as selecting a monitor to capture for ScreenCast) */
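+    /* Per the XDG desktop portal convention, the method call above only returns
+       a Request object path; the actual result is delivered later as an
+       org.freedesktop.portal.Request "Response" signal, so block here popping
+       messages until that signal arrives. */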
+ for (;;) {
+ const int timeout_milliseconds = 10;
+ dbus_connection_read_write(self->con, timeout_milliseconds);
+ *response_msg = dbus_connection_pop_message(self->con);
+
+ if(!*response_msg)
+ continue;
+
+ if(!dbus_message_is_signal(*response_msg, "org.freedesktop.portal.Request", "Response")) {
+ dbus_message_unref(*response_msg);
+ *response_msg = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ return true;
+}
+
+static int gsr_dbus_get_response_status(DBusMessageIter *resp_args) {
+ if(dbus_message_iter_get_arg_type(resp_args) != DBUS_TYPE_UINT32) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_response_status: missing uint32 in response\n");
+ return -1;
+ }
+
+ dbus_uint32_t response_status = 0;
+ dbus_message_iter_get_basic(resp_args, &response_status);
+
+ dbus_message_iter_next(resp_args);
+ return (int)response_status;
+}
+
+static dict_entry* find_dict_entry_by_key(dict_entry *entries, int num_entries, const char *key) {
+ for(int i = 0; i < num_entries; ++i) {
+ if(strcmp(entries[i].key, key) == 0)
+ return &entries[i];
+ }
+ return NULL;
+}
+
+static bool gsr_dbus_get_variant_value(DBusMessageIter *iter, dict_entry *entry) {
+ if(dbus_message_iter_get_arg_type(iter) != DBUS_TYPE_VARIANT) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: value is not a variant\n");
+ return false;
+ }
+
+ DBusMessageIter variant_iter;
+ dbus_message_iter_recurse(iter, &variant_iter);
+
+ switch(dbus_message_iter_get_arg_type(&variant_iter)) {
+ case DBUS_TYPE_STRING: {
+ if(entry->value_type != DICT_TYPE_STRING) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: expected entry value to be a(n) %s was a string\n", dict_value_type_to_string(entry->value_type));
+ return false;
+ }
+
+ const char *value = NULL;
+ dbus_message_iter_get_basic(&variant_iter, &value);
+
+ if(!value) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: failed to get entry value as value\n");
+ return false;
+ }
+
+ if(entry->str) {
+ free(entry->str);
+ entry->str = NULL;
+ }
+
+ entry->str = strdup(value);
+ if(!entry->str) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: failed to copy value\n");
+ return false;
+ }
+ return true;
+ }
+ case DBUS_TYPE_UINT32: {
+ if(entry->value_type != DICT_TYPE_UINT32) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: expected entry value to be a(n) %s was an uint32\n", dict_value_type_to_string(entry->value_type));
+ return false;
+ }
+
+ dbus_message_iter_get_basic(&variant_iter, &entry->u32);
+ return true;
+ }
+ case DBUS_TYPE_BOOLEAN: {
+ if(entry->value_type != DICT_TYPE_BOOL) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: expected entry value to be a(n) %s was a boolean\n", dict_value_type_to_string(entry->value_type));
+ return false;
+ }
+
+ dbus_message_iter_get_basic(&variant_iter, &entry->boolean);
+ return true;
+ }
+ }
+
+ fprintf(stderr, "gsr error: gsr_dbus_get_variant_value: got unexpected type, expected string, uint32 or boolean\n");
+ return false;
+}
+
+/*
+ Parses a{sv} into matching key entries in |entries|.
+    If the entry value is a string then it's allocated with malloc, is null-terminated
+    and has to be freed by the caller.
+    The entry values should be 0 before this method is called.
+    The string entries are freed if this function fails.
+*/
+static bool gsr_dbus_get_map(DBusMessageIter *resp_args, dict_entry *entries, int num_entries) {
+ if(dbus_message_iter_get_arg_type(resp_args) != DBUS_TYPE_ARRAY) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: missing array in response\n");
+ return false;
+ }
+
+ DBusMessageIter subiter;
+ dbus_message_iter_recurse(resp_args, &subiter);
+
+ while(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_INVALID) {
+ DBusMessageIter dictiter = DBUS_MESSAGE_ITER_INIT_CLOSED;
+ const char *key = NULL;
+ dict_entry *entry = NULL;
+
+ // fprintf(stderr, " array element type: %c, %s\n",
+ // dbus_message_iter_get_arg_type(&subiter),
+ // dbus_message_iter_get_signature(&subiter));
+ if(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_DICT_ENTRY) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: array value is not an entry\n");
+ return false;
+ }
+
+ dbus_message_iter_recurse(&subiter, &dictiter);
+
+ if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_STRING) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: entry key is not a string\n");
+ goto error;
+ }
+
+ dbus_message_iter_get_basic(&dictiter, &key);
+ if(!key) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: failed to get entry key as value\n");
+ goto error;
+ }
+
+ entry = find_dict_entry_by_key(entries, num_entries, key);
+ if(!entry) {
+ dbus_message_iter_next(&subiter);
+ continue;
+ }
+
+ if(!dbus_message_iter_next(&dictiter)) {
+ fprintf(stderr, "gsr error: gsr_dbus_get_map: missing entry value\n");
+ goto error;
+ }
+
+ if(!gsr_dbus_get_variant_value(&dictiter, entry))
+ goto error;
+
+ dbus_message_iter_next(&subiter);
+ }
+
+ return true;
+
+ error:
+ for(int i = 0; i < num_entries; ++i) {
+ if(entries[i].value_type == DICT_TYPE_STRING) {
+ free(entries[i].str);
+ entries[i].str = NULL;
+ }
+ }
+ return false;
+}
+
+int gsr_dbus_screencast_create_session(gsr_dbus *self, char **session_handle) {
+ assert(session_handle);
+ *session_handle = NULL;
+
+ char handle_token[64];
+ gsr_dbus_portal_get_unique_handle_token(self, handle_token, sizeof(handle_token));
+
+ char session_handle_token[64];
+ gsr_dbus_portal_get_unique_session_token(self, session_handle_token, sizeof(session_handle_token));
+
+ dict_entry args[2];
+ args[0].key = "handle_token";
+ args[0].value_type = DICT_TYPE_STRING;
+ args[0].str = handle_token;
+
+ args[1].key = "session_handle_token";
+ args[1].value_type = DICT_TYPE_STRING;
+ args[1].str = session_handle_token;
+
+ DBusMessage *response_msg = NULL;
+ if(!gsr_dbus_call_screencast_method(self, "CreateSession", NULL, NULL, args, 2, NULL, &response_msg)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: failed to setup ScreenCast session. Make sure you have a desktop portal running with support for the ScreenCast interface and that the desktop portal matches the Wayland compositor you are running.\n");
+ return -1;
+ }
+
+ // TODO: Verify signal path matches |res|, maybe check the below
+ // DBUS_TYPE_ARRAY value?
+ //fprintf(stderr, "signature: %s, sender: %s\n", dbus_message_get_signature(msg), dbus_message_get_sender(msg));
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(response_msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: missing response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ const int response_status = gsr_dbus_get_response_status(&resp_args);
+ if(response_status != 0) {
+ dbus_message_unref(response_msg);
+ return response_status;
+ }
+
+ dict_entry entries[1];
+ entries[0].key = "session_handle";
+ entries[0].str = NULL;
+ entries[0].value_type = DICT_TYPE_STRING;
+ if(!gsr_dbus_get_map(&resp_args, entries, 1)) {
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ if(!entries[0].str) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: missing \"session_handle\" in response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ *session_handle = entries[0].str;
+ //fprintf(stderr, "session handle: |%s|\n", entries[0].str);
+ //free(entries[0].str);
+
+ dbus_message_unref(response_msg);
+ return 0;
+}
+
+int gsr_dbus_screencast_select_sources(gsr_dbus *self, const char *session_handle, gsr_portal_capture_type capture_type, gsr_portal_cursor_mode cursor_mode) {
+ assert(session_handle);
+
+ char handle_token[64];
+ gsr_dbus_portal_get_unique_handle_token(self, handle_token, sizeof(handle_token));
+
+ int num_arg_dict = 4;
+ dict_entry args[6];
+ args[0].key = "types";
+ args[0].value_type = DICT_TYPE_UINT32;
+ args[0].u32 = capture_type;
+
+ args[1].key = "multiple";
+ args[1].value_type = DICT_TYPE_BOOL;
+    args[1].boolean = false; /* TODO: Wayland ignores this and still gives the option to select multiple sources. Support that case. */
+
+ args[2].key = "handle_token";
+ args[2].value_type = DICT_TYPE_STRING;
+ args[2].str = handle_token;
+
+ args[3].key = "cursor_mode";
+ args[3].value_type = DICT_TYPE_UINT32;
+ args[3].u32 = cursor_mode;
+
+ const int screencast_server_version = gsr_dbus_get_screencast_version_cached(self);
+ if(screencast_server_version >= 4) {
+ num_arg_dict = 5;
+ args[4].key = "persist_mode";
+ args[4].value_type = DICT_TYPE_UINT32;
+ args[4].u32 = 2; /* persist until explicitly revoked */
+
+ if(self->screencast_restore_token && self->screencast_restore_token[0]) {
+ num_arg_dict = 6;
+
+ args[5].key = "restore_token";
+ args[5].value_type = DICT_TYPE_STRING;
+ args[5].str = self->screencast_restore_token;
+ }
+ } else if(self->screencast_restore_token && self->screencast_restore_token[0]) {
+ fprintf(stderr, "gsr warning: gsr_dbus_screencast_select_sources: tried to use restore token but this option is only available in screencast version >= 4, your wayland compositors screencast version is %d\n", screencast_server_version);
+ }
+
+ DBusMessage *response_msg = NULL;
+ if(!gsr_dbus_call_screencast_method(self, "SelectSources", session_handle, NULL, args, num_arg_dict, NULL, &response_msg)) {
+ if(num_arg_dict == 6) {
+            /* We don't know exactly what the error is, but it may be caused by an invalid restore token. In that case, retry without the restore token */
+ fprintf(stderr, "gsr warning: gsr_dbus_screencast_select_sources: SelectSources failed, retrying without restore_token\n");
+ num_arg_dict = 5;
+ if(!gsr_dbus_call_screencast_method(self, "SelectSources", session_handle, NULL, args, num_arg_dict, NULL, &response_msg))
+ return -1;
+ } else {
+ return -1;
+ }
+ }
+
+ // TODO: Verify signal path matches |res|, maybe check the below
+ //fprintf(stderr, "signature: %s, sender: %s\n", dbus_message_get_signature(msg), dbus_message_get_sender(msg));
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(response_msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_create_session: missing response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ const int response_status = gsr_dbus_get_response_status(&resp_args);
+ if(response_status != 0) {
+ dbus_message_unref(response_msg);
+ return response_status;
+ }
+
+ dbus_message_unref(response_msg);
+ return 0;
+}
+
+static dbus_uint32_t screencast_stream_get_pipewire_node(DBusMessageIter *iter) {
+ DBusMessageIter subiter;
+ dbus_message_iter_recurse(iter, &subiter);
+
+ if(dbus_message_iter_get_arg_type(&subiter) == DBUS_TYPE_STRUCT) {
+ DBusMessageIter structiter;
+ dbus_message_iter_recurse(&subiter, &structiter);
+
+ if(dbus_message_iter_get_arg_type(&structiter) == DBUS_TYPE_UINT32) {
+ dbus_uint32_t data = 0;
+ dbus_message_iter_get_basic(&structiter, &data);
+ return data;
+ }
+ }
+
+ return 0;
+}
+
+int gsr_dbus_screencast_start(gsr_dbus *self, const char *session_handle, uint32_t *pipewire_node) {
+ assert(session_handle);
+ *pipewire_node = 0;
+
+ char handle_token[64];
+ gsr_dbus_portal_get_unique_handle_token(self, handle_token, sizeof(handle_token));
+
+ dict_entry args[1];
+ args[0].key = "handle_token";
+ args[0].value_type = DICT_TYPE_STRING;
+ args[0].str = handle_token;
+
+ DBusMessage *response_msg = NULL;
+ if(!gsr_dbus_call_screencast_method(self, "Start", session_handle, "", args, 1, NULL, &response_msg))
+ return -1;
+
+ // TODO: Verify signal path matches |res|, maybe check the below
+ //fprintf(stderr, "signature: %s, sender: %s\n", dbus_message_get_signature(msg), dbus_message_get_sender(msg));
+ DBusMessageIter resp_args;
+ if(!dbus_message_iter_init(response_msg, &resp_args)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ const int response_status = gsr_dbus_get_response_status(&resp_args);
+ if(response_status != 0) {
+ dbus_message_unref(response_msg);
+ return response_status;
+ }
+
+ if(dbus_message_iter_get_arg_type(&resp_args) != DBUS_TYPE_ARRAY) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing array in response\n");
+ dbus_message_unref(response_msg);
+ return -1;
+ }
+
+ DBusMessageIter subiter;
+ dbus_message_iter_recurse(&resp_args, &subiter);
+
+ while(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_INVALID) {
+ DBusMessageIter dictiter = DBUS_MESSAGE_ITER_INIT_CLOSED;
+ const char *key = NULL;
+
+ // fprintf(stderr, " array element type: %c, %s\n",
+ // dbus_message_iter_get_arg_type(&subiter),
+ // dbus_message_iter_get_signature(&subiter));
+ if(dbus_message_iter_get_arg_type(&subiter) != DBUS_TYPE_DICT_ENTRY) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: array value is not an entry\n");
+ goto error;
+ }
+
+ dbus_message_iter_recurse(&subiter, &dictiter);
+
+ if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_STRING) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: entry key is not a string\n");
+ goto error;
+ }
+
+ dbus_message_iter_get_basic(&dictiter, &key);
+ if(!key) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: failed to get entry key as value\n");
+ goto error;
+ }
+
+ if(strcmp(key, "restore_token") == 0) {
+ if(!dbus_message_iter_next(&dictiter)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing restore_token value\n");
+ goto error;
+ }
+
+ if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_VARIANT) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: restore_token is not a variant\n");
+ goto error;
+ }
+
+ DBusMessageIter variant_iter;
+ dbus_message_iter_recurse(&dictiter, &variant_iter);
+
+ if(dbus_message_iter_get_arg_type(&variant_iter) != DBUS_TYPE_STRING) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: restore_token is not a string\n");
+ goto error;
+ }
+
+ char *restore_token_str = NULL;
+ dbus_message_iter_get_basic(&variant_iter, &restore_token_str);
+
+ if(restore_token_str) {
+ if(self->screencast_restore_token) {
+ free(self->screencast_restore_token);
+ self->screencast_restore_token = NULL;
+ }
+ self->screencast_restore_token = strdup(restore_token_str);
+ //fprintf(stderr, "got restore token: %s\n", self->screencast_restore_token);
+ }
+ } else if(strcmp(key, "streams") == 0) {
+ if(!dbus_message_iter_next(&dictiter)) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: missing streams value\n");
+ goto error;
+ }
+
+ if(dbus_message_iter_get_arg_type(&dictiter) != DBUS_TYPE_VARIANT) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: streams value is not a variant\n");
+ goto error;
+ }
+
+ DBusMessageIter variant_iter;
+ dbus_message_iter_recurse(&dictiter, &variant_iter);
+
+ if(dbus_message_iter_get_arg_type(&variant_iter) != DBUS_TYPE_ARRAY) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: streams value is not an array\n");
+ goto error;
+ }
+
+ int num_streams = dbus_message_iter_get_element_count(&variant_iter);
+ //fprintf(stderr, "num streams: %d\n", num_streams);
+            /* Skip over all streams except the last one, since KDE can return multiple streams even if only one is requested. The last one is the valid one */
+ for(int i = 0; i < num_streams - 1; ++i) {
+ screencast_stream_get_pipewire_node(&variant_iter);
+ }
+
+ if(num_streams > 0) {
+ *pipewire_node = screencast_stream_get_pipewire_node(&variant_iter);
+ //fprintf(stderr, "pipewire node: %u\n", *pipewire_node);
+ }
+ }
+
+ dbus_message_iter_next(&subiter);
+ }
+
+ if(*pipewire_node == 0) {
+ fprintf(stderr, "gsr error: gsr_dbus_screencast_start: no pipewire node returned\n");
+ goto error;
+ }
+
+ dbus_message_unref(response_msg);
+ return 0;
+
+ error:
+ dbus_message_unref(response_msg);
+ return -1;
+}
+
+bool gsr_dbus_screencast_open_pipewire_remote(gsr_dbus *self, const char *session_handle, int *pipewire_fd) {
+ assert(session_handle);
+ *pipewire_fd = -1;
+ return gsr_dbus_call_screencast_method(self, "OpenPipeWireRemote", session_handle, NULL, NULL, 0, pipewire_fd, NULL);
+}
+
+const char* gsr_dbus_screencast_get_restore_token(gsr_dbus *self) {
+ return self->screencast_restore_token;
+}
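+
+/* Illustrative sketch of the full portal handshake implemented above (error
+   handling omitted; the capture type and cursor mode constants are assumed to
+   come from the matching header):
+
+       gsr_dbus dbus;
+       char *session_handle = NULL;
+       uint32_t pipewire_node = 0;
+       int pipewire_fd = -1;
+
+       gsr_dbus_init(&dbus, NULL);
+       gsr_dbus_screencast_create_session(&dbus, &session_handle);
+       gsr_dbus_screencast_select_sources(&dbus, session_handle, GSR_PORTAL_CAPTURE_TYPE_MONITOR, GSR_PORTAL_CURSOR_MODE_EMBEDDED);
+       gsr_dbus_screencast_start(&dbus, session_handle, &pipewire_node);
+       gsr_dbus_screencast_open_pipewire_remote(&dbus, session_handle, &pipewire_fd);
+       // ... hand pipewire_fd and pipewire_node over to the pipewire capture ...
+       free(session_handle);
+       gsr_dbus_deinit(&dbus);
+*/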
diff --git a/src/egl.c b/src/egl.c
index ec9ad07..87c2b84 100644
--- a/src/egl.c
+++ b/src/egl.c
@@ -1,18 +1,19 @@
#include "../include/egl.h"
#include "../include/library_loader.h"
#include "../include/utils.h"
+
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <assert.h>
+#include <unistd.h>
+#include <sys/capability.h>
#include <wayland-client.h>
#include <wayland-egl.h>
-#include <unistd.h>
-#include <sys/capability.h>
-// TODO: rename gsr_egl to something else since this includes both egl and eglx and in the future maybe vulkan too
+// TODO: rename gsr_egl to something else since this includes both egl and glx and in the future maybe vulkan too
// TODO: Move this shit to a separate wayland file, and have a separate file for x11.
@@ -93,7 +94,7 @@ static void registry_add_object(void *data, struct wl_registry *registry, uint32
}
if(egl->wayland.num_outputs == GSR_MAX_OUTPUTS) {
- fprintf(stderr, "gsr warning: reached maximum outputs (32), ignoring output %u\n", name);
+ fprintf(stderr, "gsr warning: reached maximum outputs (%d), ignoring output %u\n", GSR_MAX_OUTPUTS, name);
return;
}
@@ -134,21 +135,42 @@ static void reset_cap_nice(void) {
cap_free(caps);
}
-#define GLX_DRAWABLE_TYPE 0x8010
-#define GLX_RENDER_TYPE 0x8011
-#define GLX_RGBA_BIT 0x00000001
-#define GLX_WINDOW_BIT 0x00000001
-#define GLX_PIXMAP_BIT 0x00000002
+static void store_x11_monitor(const gsr_monitor *monitor, void *userdata) {
+ gsr_egl *egl = userdata;
+ if(egl->x11.num_outputs == GSR_MAX_OUTPUTS) {
+ fprintf(stderr, "gsr warning: reached maximum outputs (%d), ignoring output %s\n", GSR_MAX_OUTPUTS, monitor->name);
+ return;
+ }
+
+ char *monitor_name = strdup(monitor->name);
+ if(!monitor_name)
+ return;
+
+ const int index = egl->x11.num_outputs;
+ egl->x11.outputs[index].name = monitor_name;
+ egl->x11.outputs[index].pos = monitor->pos;
+ egl->x11.outputs[index].size = monitor->size;
+ egl->x11.outputs[index].connector_id = monitor->connector_id;
+ egl->x11.outputs[index].rotation = monitor->rotation;
+ egl->x11.outputs[index].monitor_identifier = monitor->monitor_identifier;
+ ++egl->x11.num_outputs;
+}
+
+#define GLX_DRAWABLE_TYPE 0x8010
+#define GLX_RENDER_TYPE 0x8011
+#define GLX_RGBA_BIT 0x00000001
+#define GLX_WINDOW_BIT 0x00000001
+#define GLX_PIXMAP_BIT 0x00000002
#define GLX_BIND_TO_TEXTURE_RGBA_EXT 0x20D1
#define GLX_BIND_TO_TEXTURE_TARGETS_EXT 0x20D3
#define GLX_TEXTURE_2D_BIT_EXT 0x00000002
-#define GLX_DOUBLEBUFFER 5
-#define GLX_RED_SIZE 8
-#define GLX_GREEN_SIZE 9
-#define GLX_BLUE_SIZE 10
-#define GLX_ALPHA_SIZE 11
-#define GLX_DEPTH_SIZE 12
-#define GLX_RGBA_TYPE 0x8014
+#define GLX_DOUBLEBUFFER 5
+#define GLX_RED_SIZE 8
+#define GLX_GREEN_SIZE 9
+#define GLX_BLUE_SIZE 10
+#define GLX_ALPHA_SIZE 11
+#define GLX_DEPTH_SIZE 12
+#define GLX_RGBA_TYPE 0x8014
#define GLX_CONTEXT_PRIORITY_LEVEL_EXT 0x3100
#define GLX_CONTEXT_PRIORITY_HIGH_EXT 0x3101
@@ -185,6 +207,7 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
EGLConfig ecfg;
int32_t num_config = 0;
+    // TODO: Use EGL_OPENGL_ES_BIT, as AMD requires it for external textures, but that breaks software encoding
const int32_t attr[] = {
EGL_BUFFER_SIZE, 24,
EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
@@ -226,6 +249,7 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
}
}
+    // TODO: Use EGL_OPENGL_ES_API, as AMD requires it for external textures, but that breaks software encoding
self->eglBindAPI(EGL_OPENGL_API);
self->egl_display = self->eglGetDisplay(self->wayland.dpy ? (EGLNativeDisplayType)self->wayland.dpy : (EGLNativeDisplayType)self->x11.dpy);
@@ -238,12 +262,12 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
fprintf(stderr, "gsr error: gsr_egl_create_window failed: eglInitialize failed\n");
goto fail;
}
-
+
if(!self->eglChooseConfig(self->egl_display, attr, &ecfg, 1, &num_config) || num_config != 1) {
fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to find a matching config\n");
goto fail;
}
-
+
self->egl_context = self->eglCreateContext(self->egl_display, ecfg, NULL, ctxattr);
if(!self->egl_context) {
fprintf(stderr, "gsr error: gsr_egl_create_window failed: failed to create egl context\n");
@@ -251,6 +275,7 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
}
if(wayland) {
+ // TODO: Error check?
self->wayland.surface = wl_compositor_create_surface(self->wayland.compositor);
self->wayland.window = wl_egl_window_create(self->wayland.surface, 16, 16);
self->egl_surface = self->eglCreateWindowSurface(self->egl_display, ecfg, (EGLNativeWindowType)self->wayland.window, NULL);
@@ -268,6 +293,11 @@ static bool gsr_egl_create_window(gsr_egl *self, bool wayland) {
goto fail;
}
+ if(!wayland) {
+ self->x11.num_outputs = 0;
+ for_each_active_monitor_output_x11_not_cached(self->x11.dpy, store_x11_monitor, self);
+ }
+
reset_cap_nice();
return true;
@@ -363,6 +393,17 @@ static bool gsr_egl_proc_load_egl(gsr_egl *self) {
self->glEGLImageTargetTexture2DOES = (FUNC_glEGLImageTargetTexture2DOES)self->eglGetProcAddress("glEGLImageTargetTexture2DOES");
self->eglQueryDisplayAttribEXT = (FUNC_eglQueryDisplayAttribEXT)self->eglGetProcAddress("eglQueryDisplayAttribEXT");
self->eglQueryDeviceStringEXT = (FUNC_eglQueryDeviceStringEXT)self->eglGetProcAddress("eglQueryDeviceStringEXT");
+ self->eglQueryDmaBufModifiersEXT = (FUNC_eglQueryDmaBufModifiersEXT)self->eglGetProcAddress("eglQueryDmaBufModifiersEXT");
+
+ if(!self->eglExportDMABUFImageQueryMESA) {
+ fprintf(stderr, "gsr error: gsr_egl_load failed: could not find eglExportDMABUFImageQueryMESA\n");
+ return false;
+ }
+
+ if(!self->eglExportDMABUFImageMESA) {
+ fprintf(stderr, "gsr error: gsr_egl_load failed: could not find eglExportDMABUFImageMESA\n");
+ return false;
+ }
if(!self->glEGLImageTargetTexture2DOES) {
fprintf(stderr, "gsr error: gsr_egl_load failed: could not find glEGLImageTargetTexture2DOES\n");
@@ -417,9 +458,7 @@ static bool gsr_egl_load_gl(gsr_egl *self, void *library) {
{ (void**)&self->glTexParameteriv, "glTexParameteriv" },
{ (void**)&self->glGetTexLevelParameteriv, "glGetTexLevelParameteriv" },
{ (void**)&self->glTexImage2D, "glTexImage2D" },
- { (void**)&self->glCopyImageSubData, "glCopyImageSubData" },
{ (void**)&self->glGetTexImage, "glGetTexImage" },
- { (void**)&self->glClearTexImage, "glClearTexImage" },
{ (void**)&self->glGenFramebuffers, "glGenFramebuffers" },
{ (void**)&self->glBindFramebuffer, "glBindFramebuffer" },
{ (void**)&self->glDeleteFramebuffers, "glDeleteFramebuffers" },
@@ -460,6 +499,9 @@ static bool gsr_egl_load_gl(gsr_egl *self, void *library) {
{ (void**)&self->glUniform2f, "glUniform2f" },
{ (void**)&self->glDebugMessageCallback, "glDebugMessageCallback" },
{ (void**)&self->glScissor, "glScissor" },
+ { (void**)&self->glReadPixels, "glReadPixels" },
+ { (void**)&self->glMapBuffer, "glMapBuffer" },
+ { (void**)&self->glUnmapBuffer, "glUnmapBuffer" },
{ NULL, NULL }
};
@@ -503,10 +545,6 @@ bool gsr_egl_load(gsr_egl *self, Display *dpy, bool wayland, bool is_monitor_cap
}
self->glx_library = dlopen("libGLX.so.0", RTLD_LAZY);
- if(!self->glx_library) {
- fprintf(stderr, "gsr error: gsr_egl_load: failed to load libGLX.so.0, error: %s\n", dlerror());
- goto fail;
- }
self->gl_library = dlopen("libGL.so.1", RTLD_LAZY);
if(!self->egl_library) {
@@ -517,7 +555,8 @@ bool gsr_egl_load(gsr_egl *self, Display *dpy, bool wayland, bool is_monitor_cap
if(!gsr_egl_load_egl(self, self->egl_library))
goto fail;
- if(!gsr_egl_load_glx(self, self->glx_library))
+    /* On some distros (Alpine, for example) libGLX doesn't exist, but libGL can be used instead */
+ if(!gsr_egl_load_glx(self, self->glx_library ? self->glx_library : self->gl_library))
goto fail;
if(!gsr_egl_load_gl(self, self->gl_library))
@@ -588,6 +627,14 @@ void gsr_egl_unload(gsr_egl *self) {
self->x11.window = None;
}
+ for(int i = 0; i < self->x11.num_outputs; ++i) {
+ if(self->x11.outputs[i].name) {
+ free(self->x11.outputs[i].name);
+ self->x11.outputs[i].name = NULL;
+ }
+ }
+ self->x11.num_outputs = 0;
+
if(self->wayland.window) {
wl_egl_window_destroy(self->wayland.window);
self->wayland.window = NULL;
@@ -644,10 +691,47 @@ void gsr_egl_unload(gsr_egl *self) {
memset(self, 0, sizeof(gsr_egl));
}
-void gsr_egl_update(gsr_egl *self) {
- if(!self->wayland.dpy)
- return;
+bool gsr_egl_process_event(gsr_egl *self) {
+ switch(gsr_egl_get_display_server(self)) {
+ case GSR_DISPLAY_SERVER_X11: {
+ if(XPending(self->x11.dpy)) {
+ XNextEvent(self->x11.dpy, &self->x11.xev);
+ return true;
+ }
+ return false;
+ }
+ case GSR_DISPLAY_SERVER_WAYLAND: {
+ // TODO: pselect on wl_display_get_fd before doing dispatch
+ const bool events_available = wl_display_dispatch_pending(self->wayland.dpy) > 0;
+ wl_display_flush(self->wayland.dpy);
+ return events_available;
+ }
+ }
+ return false;
+}
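+
+/* Illustrative sketch (not in this file): how a caller is expected to drain
+   events with this API each frame, forwarding X11 events to e.g. the damage
+   tracker:
+
+       while(gsr_egl_process_event(egl)) {
+           XEvent *xev = gsr_egl_get_event_data(egl);
+           if(xev)
+               gsr_damage_on_event(&damage, xev);
+       }
+*/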
+
+void gsr_egl_swap_buffers(gsr_egl *self) {
+    /* glFlush + glFinish use less CPU than swapping buffers on NVIDIA */
+    // TODO: Use these instead and remove the swap below
+ //self->glFlush();
+ //self->glFinish();
+ if(self->egl_display) {
+ self->eglSwapBuffers(self->egl_display, self->egl_surface);
+ } else if(self->x11.window) {
+ self->glXSwapBuffers(self->x11.dpy, self->x11.window);
+ }
+}
- // TODO: pselect on wl_display_get_fd before doing dispatch
- wl_display_dispatch(self->wayland.dpy);
+gsr_display_server gsr_egl_get_display_server(const gsr_egl *self) {
+ if(self->wayland.dpy)
+ return GSR_DISPLAY_SERVER_WAYLAND;
+ else
+ return GSR_DISPLAY_SERVER_X11;
+}
+
+XEvent* gsr_egl_get_event_data(gsr_egl *self) {
+ if(gsr_egl_get_display_server(self) == GSR_DISPLAY_SERVER_X11)
+ return &self->x11.xev;
+ else
+ return NULL;
}
diff --git a/src/encoder/video/cuda.c b/src/encoder/video/cuda.c
index 2568bc7..6d26cdd 100644
--- a/src/encoder/video/cuda.c
+++ b/src/encoder/video/cuda.c
@@ -12,6 +12,8 @@ typedef struct {
unsigned int target_textures[2];
+ AVBufferRef *device_ctx;
+
gsr_cuda cuda;
CUgraphicsResource cuda_graphics_resources[2];
CUarray mapped_arrays[2];
@@ -19,47 +21,46 @@ typedef struct {
} gsr_video_encoder_cuda;
static bool gsr_video_encoder_cuda_setup_context(gsr_video_encoder_cuda *self, AVCodecContext *video_codec_context) {
- AVBufferRef *device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
- if(!device_ctx) {
+ self->device_ctx = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_CUDA);
+ if(!self->device_ctx) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_context failed: failed to create hardware device context\n");
return false;
}
- AVHWDeviceContext *hw_device_context = (AVHWDeviceContext*)device_ctx->data;
+ AVHWDeviceContext *hw_device_context = (AVHWDeviceContext*)self->device_ctx->data;
AVCUDADeviceContext *cuda_device_context = (AVCUDADeviceContext*)hw_device_context->hwctx;
cuda_device_context->cuda_ctx = self->cuda.cu_ctx;
- if(av_hwdevice_ctx_init(device_ctx) < 0) {
+ if(av_hwdevice_ctx_init(self->device_ctx) < 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_context failed: failed to create hardware device context\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
return false;
}
- AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
+ AVBufferRef *frame_context = av_hwframe_ctx_alloc(self->device_ctx);
if(!frame_context) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_context failed: failed to create hwframe context\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
return false;
}
AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
hw_frame_context->width = video_codec_context->width;
hw_frame_context->height = video_codec_context->height;
- hw_frame_context->sw_format = self->params.hdr ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
+ hw_frame_context->sw_format = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
hw_frame_context->format = video_codec_context->pix_fmt;
- hw_frame_context->device_ref = device_ctx;
- hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
+ hw_frame_context->device_ctx = (AVHWDeviceContext*)self->device_ctx->data;
if (av_hwframe_ctx_init(frame_context) < 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_context failed: failed to initialize hardware frame context "
"(note: ffmpeg version needs to be > 4.0)\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
//av_buffer_unref(&frame_context);
return false;
}
self->cuda_stream = cuda_device_context->stream;
- video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
+ av_buffer_unref(&frame_context);
return true;
}
@@ -108,7 +109,7 @@ static bool gsr_video_encoder_cuda_setup_textures(gsr_video_encoder_cuda *self,
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
for(int i = 0; i < 2; ++i) {
- self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], !self->params.hdr ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
+ self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], self->params.color_depth == GSR_COLOR_DEPTH_8_BITS ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
if(self->target_textures[i] == 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_setup_textures: failed to create opengl texture\n");
return false;
@@ -125,22 +126,22 @@ static bool gsr_video_encoder_cuda_setup_textures(gsr_video_encoder_cuda *self,
static void gsr_video_encoder_cuda_stop(gsr_video_encoder_cuda *self, AVCodecContext *video_codec_context);
static bool gsr_video_encoder_cuda_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_video_encoder_cuda *encoder_cuda = encoder->priv;
+ gsr_video_encoder_cuda *self = encoder->priv;
- // TODO: Force set overclock to false if wayland
- if(!gsr_cuda_load(&encoder_cuda->cuda, encoder_cuda->params.egl->x11.dpy, encoder_cuda->params.overclock)) {
+ const bool overclock = gsr_egl_get_display_server(self->params.egl) == GSR_DISPLAY_SERVER_X11 ? self->params.overclock : false;
+ if(!gsr_cuda_load(&self->cuda, self->params.egl->x11.dpy, overclock)) {
fprintf(stderr, "gsr error: gsr_video_encoder_cuda_start: failed to load cuda\n");
- gsr_video_encoder_cuda_stop(encoder_cuda, video_codec_context);
+ gsr_video_encoder_cuda_stop(self, video_codec_context);
return false;
}
- if(!gsr_video_encoder_cuda_setup_context(encoder_cuda, video_codec_context)) {
- gsr_video_encoder_cuda_stop(encoder_cuda, video_codec_context);
+ if(!gsr_video_encoder_cuda_setup_context(self, video_codec_context)) {
+ gsr_video_encoder_cuda_stop(self, video_codec_context);
return false;
}
- if(!gsr_video_encoder_cuda_setup_textures(encoder_cuda, video_codec_context, frame)) {
- gsr_video_encoder_cuda_stop(encoder_cuda, video_codec_context);
+ if(!gsr_video_encoder_cuda_setup_textures(self, video_codec_context, frame)) {
+ gsr_video_encoder_cuda_stop(self, video_codec_context);
return false;
}
@@ -152,10 +153,10 @@ void gsr_video_encoder_cuda_stop(gsr_video_encoder_cuda *self, AVCodecContext *v
self->target_textures[0] = 0;
self->target_textures[1] = 0;
- if(video_codec_context->hw_device_ctx)
- av_buffer_unref(&video_codec_context->hw_device_ctx);
if(video_codec_context->hw_frames_ctx)
av_buffer_unref(&video_codec_context->hw_frames_ctx);
+ if(self->device_ctx)
+ av_buffer_unref(&self->device_ctx);
if(self->cuda.cu_ctx) {
for(int i = 0; i < 2; ++i) {
@@ -170,8 +171,8 @@ void gsr_video_encoder_cuda_stop(gsr_video_encoder_cuda *self, AVCodecContext *v
gsr_cuda_unload(&self->cuda);
}
-static void gsr_video_encoder_cuda_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame) {
- gsr_video_encoder_cuda *encoder_cuda = encoder->priv;
+static void gsr_video_encoder_cuda_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_video_encoder_cuda *self = encoder->priv;
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
for(int i = 0; i < 2; ++i) {
CUDA_MEMCPY2D memcpy_struct;
@@ -183,26 +184,26 @@ static void gsr_video_encoder_cuda_copy_textures_to_frame(gsr_video_encoder *enc
memcpy_struct.dstY = 0;
memcpy_struct.dstMemoryType = CU_MEMORYTYPE_DEVICE;
- memcpy_struct.srcArray = encoder_cuda->mapped_arrays[i];
+ memcpy_struct.srcArray = self->mapped_arrays[i];
memcpy_struct.srcPitch = frame->width / div[i];
memcpy_struct.dstDevice = (CUdeviceptr)frame->data[i];
memcpy_struct.dstPitch = frame->linesize[i];
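+        // P010 stores 2 bytes per component while NV12 stores 1, hence the width multiplier below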
- memcpy_struct.WidthInBytes = frame->width * (encoder_cuda->params.hdr ? 2 : 1);
+ memcpy_struct.WidthInBytes = frame->width * (self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? 2 : 1);
memcpy_struct.Height = frame->height / div[i];
// TODO: Remove this copy if possible
- encoder_cuda->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, encoder_cuda->cuda_stream);
+ self->cuda.cuMemcpy2DAsync_v2(&memcpy_struct, self->cuda_stream);
}
// TODO: needed?
- encoder_cuda->cuda.cuStreamSynchronize(encoder_cuda->cuda_stream);
+ self->cuda.cuStreamSynchronize(self->cuda_stream);
}
static void gsr_video_encoder_cuda_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
- gsr_video_encoder_cuda *encoder_cuda = encoder->priv;
- textures[0] = encoder_cuda->target_textures[0];
- textures[1] = encoder_cuda->target_textures[1];
+ gsr_video_encoder_cuda *self = encoder->priv;
+ textures[0] = self->target_textures[0];
+ textures[1] = self->target_textures[1];
*num_textures = 2;
- *destination_color = encoder_cuda->params.hdr ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
+ *destination_color = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
}
static void gsr_video_encoder_cuda_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
diff --git a/src/encoder/video/software.c b/src/encoder/video/software.c
index 4666ffd..be227f2 100644
--- a/src/encoder/video/software.c
+++ b/src/encoder/video/software.c
@@ -48,7 +48,7 @@ static bool gsr_video_encoder_software_setup_textures(gsr_video_encoder_software
const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
for(int i = 0; i < 2; ++i) {
- self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], !self->params.hdr ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
+ self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], self->params.color_depth == GSR_COLOR_DEPTH_8_BITS ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
if(self->target_textures[i] == 0) {
fprintf(stderr, "gsr error: gsr_capture_kms_setup_cuda_textures: failed to create opengl texture\n");
return false;
@@ -61,7 +61,7 @@ static bool gsr_video_encoder_software_setup_textures(gsr_video_encoder_software
static void gsr_video_encoder_software_stop(gsr_video_encoder_software *self, AVCodecContext *video_codec_context);
static bool gsr_video_encoder_software_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_video_encoder_software *encoder_software = encoder->priv;
+ gsr_video_encoder_software *self = encoder->priv;
video_codec_context->width = FFALIGN(video_codec_context->width, LINESIZE_ALIGNMENT);
video_codec_context->height = FFALIGN(video_codec_context->height, 2);
@@ -69,8 +69,8 @@ static bool gsr_video_encoder_software_start(gsr_video_encoder *encoder, AVCodec
frame->width = video_codec_context->width;
frame->height = video_codec_context->height;
- if(!gsr_video_encoder_software_setup_textures(encoder_software, video_codec_context, frame)) {
- gsr_video_encoder_software_stop(encoder_software, video_codec_context);
+ if(!gsr_video_encoder_software_setup_textures(self, video_codec_context, frame)) {
+ gsr_video_encoder_software_stop(self, video_codec_context);
return false;
}
@@ -84,24 +84,29 @@ void gsr_video_encoder_software_stop(gsr_video_encoder_software *self, AVCodecCo
self->target_textures[1] = 0;
}
-static void gsr_video_encoder_software_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame) {
- gsr_video_encoder_software *encoder_software = encoder->priv;
+static void gsr_video_encoder_software_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_video_encoder_software *self = encoder->priv;
// TODO: hdr support
const unsigned int formats[2] = { GL_RED, GL_RG };
for(int i = 0; i < 2; ++i) {
- encoder_software->params.egl->glBindTexture(GL_TEXTURE_2D, encoder_software->target_textures[i]);
- encoder_software->params.egl->glGetTexImage(GL_TEXTURE_2D, 0, formats[i], GL_UNSIGNED_BYTE, frame->data[i]);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[i]);
+ // We could use glGetTexSubImage and then we wouldn't have to use a specific linesize (LINESIZE_ALIGNMENT) that adds padding,
+ // but glGetTexSubImage is only available starting from opengl 4.5.
+ self->params.egl->glGetTexImage(GL_TEXTURE_2D, 0, formats[i], GL_UNSIGNED_BYTE, frame->data[i]);
}
- encoder_software->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+ self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
// cap_kms->kms.base.egl->eglSwapBuffers(cap_kms->kms.base.egl->egl_display, cap_kms->kms.base.egl->egl_surface);
+
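+    // glFinish blocks until all queued GL commands have completed, making sure the
+    // readback above is done before the frame is handed to the software encoder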
+ self->params.egl->glFlush();
+ self->params.egl->glFinish();
}
static void gsr_video_encoder_software_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
- gsr_video_encoder_software *encoder_software = encoder->priv;
- textures[0] = encoder_software->target_textures[0];
- textures[1] = encoder_software->target_textures[1];
+ gsr_video_encoder_software *self = encoder->priv;
+ textures[0] = self->target_textures[0];
+ textures[1] = self->target_textures[1];
*num_textures = 2;
- *destination_color = encoder_software->params.hdr ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
+ *destination_color = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
}
static void gsr_video_encoder_software_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
diff --git a/src/encoder/video/vaapi.c b/src/encoder/video/vaapi.c
index 2df140d..d558785 100644
--- a/src/encoder/video/vaapi.c
+++ b/src/encoder/video/vaapi.c
@@ -4,17 +4,20 @@
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext_vaapi.h>
+#include <libavutil/intreadwrite.h>
#include <va/va_drmcommon.h>
#include <stdlib.h>
#include <unistd.h>
+#include <fcntl.h>
typedef struct {
gsr_video_encoder_vaapi_params params;
unsigned int target_textures[2];
+ AVBufferRef *device_ctx;
VADisplay va_dpy;
VADRMPRIMESurfaceDescriptor prime;
} gsr_video_encoder_vaapi;
@@ -26,43 +29,40 @@ static bool gsr_video_encoder_vaapi_setup_context(gsr_video_encoder_vaapi *self,
return false;
}
- AVBufferRef *device_ctx;
- if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
+ if(av_hwdevice_ctx_create(&self->device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to create hardware device context\n");
return false;
}
- AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
+ AVBufferRef *frame_context = av_hwframe_ctx_alloc(self->device_ctx);
if(!frame_context) {
fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to create hwframe context\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
return false;
}
- AVHWFramesContext *hw_frame_context =
- (AVHWFramesContext *)frame_context->data;
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
hw_frame_context->width = video_codec_context->width;
hw_frame_context->height = video_codec_context->height;
- hw_frame_context->sw_format = self->params.hdr ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
+ hw_frame_context->sw_format = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
hw_frame_context->format = video_codec_context->pix_fmt;
- hw_frame_context->device_ref = device_ctx;
- hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
+ hw_frame_context->device_ctx = (AVHWDeviceContext*)self->device_ctx->data;
//hw_frame_context->initial_pool_size = 20;
- AVVAAPIDeviceContext *vactx =((AVHWDeviceContext*)device_ctx->data)->hwctx;
+ AVVAAPIDeviceContext *vactx = ((AVHWDeviceContext*)self->device_ctx->data)->hwctx;
self->va_dpy = vactx->display;
if (av_hwframe_ctx_init(frame_context) < 0) {
fprintf(stderr, "gsr error: gsr_video_encoder_vaapi_setup_context: failed to initialize hardware frame context "
"(note: ffmpeg version needs to be > 4.0)\n");
- av_buffer_unref(&device_ctx);
+ av_buffer_unref(&self->device_ctx);
//av_buffer_unref(&frame_context);
return false;
}
- video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
+ av_buffer_unref(&frame_context);
return true;
}
@@ -96,20 +96,22 @@ static bool gsr_video_encoder_vaapi_setup_textures(gsr_video_encoder_vaapi *self
self->params.egl->glGenTextures(2, self->target_textures);
for(int i = 0; i < 2; ++i) {
const int layer = i;
- const int plane = 0;
-
- const uint64_t modifier = self->prime.objects[self->prime.layers[layer].object_index[plane]].drm_format_modifier;
- const intptr_t img_attr[] = {
- EGL_LINUX_DRM_FOURCC_EXT, formats[i],
- EGL_WIDTH, self->prime.width / div[i],
- EGL_HEIGHT, self->prime.height / div[i],
- EGL_DMA_BUF_PLANE0_FD_EXT, self->prime.objects[self->prime.layers[layer].object_index[plane]].fd,
- EGL_DMA_BUF_PLANE0_OFFSET_EXT, self->prime.layers[layer].offset[plane],
- EGL_DMA_BUF_PLANE0_PITCH_EXT, self->prime.layers[layer].pitch[plane],
- EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, modifier & 0xFFFFFFFFULL,
- EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, modifier >> 32ULL,
- EGL_NONE
- };
+
+ int fds[4];
+ uint32_t offsets[4];
+ uint32_t pitches[4];
+ uint64_t modifiers[4];
+ for(uint32_t j = 0; j < self->prime.layers[layer].num_planes; ++j) {
+            // TODO: Close these in _stop, using self->prime?
+ fds[j] = self->prime.objects[self->prime.layers[layer].object_index[j]].fd;
+ offsets[j] = self->prime.layers[layer].offset[j];
+ pitches[j] = self->prime.layers[layer].pitch[j];
+ modifiers[j] = self->prime.objects[self->prime.layers[layer].object_index[j]].drm_format_modifier;
+ }
+
+ intptr_t img_attr[44];
+ setup_dma_buf_attrs(img_attr, formats[i], self->prime.width / div[i], self->prime.height / div[i],
+ fds, offsets, pitches, modifiers, self->prime.layers[layer].num_planes, true);
while(self->params.egl->eglGetError() != EGL_SUCCESS){}
EGLImage image = self->params.egl->eglCreateImage(self->params.egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
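
setup_dma_buf_attrs() is a project helper (this commit also extends src/utils.c, presumably where it lives) that builds this attribute list for any plane count; the img_attr[44] bound only has to cover the worst case. For reference, a hand-written two-plane (NV12-style) list under the EGL_EXT_image_dma_buf_import_modifiers extension looks roughly like this, where fourcc, fds, offsets, pitches and modifiers are the per-layer values gathered in the loop above:

// Sketch: explicit attribute list for a two-plane DMA-BUF import, the
// equivalent of what setup_dma_buf_attrs() produces for num_planes == 2.
const intptr_t img_attr[] = {
    EGL_LINUX_DRM_FOURCC_EXT,           fourcc,
    EGL_WIDTH,                          width,
    EGL_HEIGHT,                         height,
    EGL_DMA_BUF_PLANE0_FD_EXT,          fds[0],
    EGL_DMA_BUF_PLANE0_OFFSET_EXT,      offsets[0],
    EGL_DMA_BUF_PLANE0_PITCH_EXT,       pitches[0],
    EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT, (intptr_t)(modifiers[0] & 0xFFFFFFFFULL),
    EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT, (intptr_t)(modifiers[0] >> 32),
    EGL_DMA_BUF_PLANE1_FD_EXT,          fds[1],
    EGL_DMA_BUF_PLANE1_OFFSET_EXT,      offsets[1],
    EGL_DMA_BUF_PLANE1_PITCH_EXT,       pitches[1],
    EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT, (intptr_t)(modifiers[1] & 0xFFFFFFFFULL),
    EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT, (intptr_t)(modifiers[1] >> 32),
    EGL_NONE
};
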
@@ -149,13 +151,13 @@ static bool gsr_video_encoder_vaapi_setup_textures(gsr_video_encoder_vaapi *self
static void gsr_video_encoder_vaapi_stop(gsr_video_encoder_vaapi *self, AVCodecContext *video_codec_context);
static bool gsr_video_encoder_vaapi_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
- gsr_video_encoder_vaapi *encoder_vaapi = encoder->priv;
+ gsr_video_encoder_vaapi *self = encoder->priv;
- if(encoder_vaapi->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ if(self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_HEVC) {
        // TODO: don't do this if ffmpeg reports that it is not needed (AMD driver bug that was fixed recently)
video_codec_context->width = FFALIGN(video_codec_context->width, 64);
video_codec_context->height = FFALIGN(video_codec_context->height, 16);
- } else if(encoder_vaapi->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_AV1) {
+ } else if(self->params.egl->gpu_info.vendor == GSR_GPU_VENDOR_AMD && video_codec_context->codec_id == AV_CODEC_ID_AV1) {
        // TODO: Don't do this for VCN 5 and later, which should fix this hardware bug
video_codec_context->width = FFALIGN(video_codec_context->width, 64);
// AMD driver has special case handling for 1080 height to set it to 1082 instead of 1088 (1080 aligned to 16).
@@ -167,13 +169,40 @@ static bool gsr_video_encoder_vaapi_start(gsr_video_encoder *encoder, AVCodecCon
}
}
- if(!gsr_video_encoder_vaapi_setup_context(encoder_vaapi, video_codec_context)) {
- gsr_video_encoder_vaapi_stop(encoder_vaapi, video_codec_context);
+ const int crop_top = (video_codec_context->height - frame->height) / 2;
+ const int crop_left = (video_codec_context->width - frame->width) / 2;
+ if(crop_top != 0 || crop_left != 0) {
+        fprintf(stderr, "gsr warning: gsr_video_encoder_vaapi_start: black bars have been added to the video because of a bug in AMD drivers/hardware. Record with the h264 codec instead (-k h264) to work around this issue\n");
+#if 0
+ #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(61, 10, 100)
+ const int crop_bottom = crop_top;
+ const int crop_right = crop_left;
+        fprintf(stderr, "gsr info: cropping metadata has been added to the file to try to work around this issue. Video players that support this will remove the black bars when the video is playing\n");
+ const int frame_cropping_data_size = 4 * sizeof(uint32_t);
+ uint8_t *frame_cropping = av_malloc(frame_cropping_data_size);
+ if(frame_cropping) {
+ AV_WL32(frame_cropping + 0, crop_top);
+ AV_WL32(frame_cropping + 4, crop_bottom);
+ AV_WL32(frame_cropping + 8, crop_left);
+ AV_WL32(frame_cropping + 12, crop_right);
+ const bool sidedata_added = av_packet_side_data_add(&video_stream->codecpar->coded_side_data, &video_stream->codecpar->nb_coded_side_data, AV_PKT_DATA_FRAME_CROPPING, frame_cropping, frame_cropping_data_size, 0) != NULL;
+ if(!sidedata_added)
+ av_free(frame_cropping);
+ }
+ #endif
+#endif
+ }
+
+ frame->width = video_codec_context->width;
+ frame->height = video_codec_context->height;
+
+ if(!gsr_video_encoder_vaapi_setup_context(self, video_codec_context)) {
+ gsr_video_encoder_vaapi_stop(self, video_codec_context);
return false;
}
- if(!gsr_video_encoder_vaapi_setup_textures(encoder_vaapi, video_codec_context, frame)) {
- gsr_video_encoder_vaapi_stop(encoder_vaapi, video_codec_context);
+ if(!gsr_video_encoder_vaapi_setup_textures(self, video_codec_context, frame)) {
+ gsr_video_encoder_vaapi_stop(self, video_codec_context);
return false;
}
@@ -185,10 +214,10 @@ void gsr_video_encoder_vaapi_stop(gsr_video_encoder_vaapi *self, AVCodecContext
self->target_textures[0] = 0;
self->target_textures[1] = 0;
- if(video_codec_context->hw_device_ctx)
- av_buffer_unref(&video_codec_context->hw_device_ctx);
if(video_codec_context->hw_frames_ctx)
av_buffer_unref(&video_codec_context->hw_frames_ctx);
+ if(self->device_ctx)
+ av_buffer_unref(&self->device_ctx);
for(uint32_t i = 0; i < self->prime.num_objects; ++i) {
if(self->prime.objects[i].fd > 0) {
@@ -199,11 +228,11 @@ void gsr_video_encoder_vaapi_stop(gsr_video_encoder_vaapi *self, AVCodecContext
}
static void gsr_video_encoder_vaapi_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
- gsr_video_encoder_vaapi *encoder_vaapi = encoder->priv;
- textures[0] = encoder_vaapi->target_textures[0];
- textures[1] = encoder_vaapi->target_textures[1];
+ gsr_video_encoder_vaapi *self = encoder->priv;
+ textures[0] = self->target_textures[0];
+ textures[1] = self->target_textures[1];
*num_textures = 2;
- *destination_color = encoder_vaapi->params.hdr ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
+ *destination_color = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
}
static void gsr_video_encoder_vaapi_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
@@ -227,7 +256,6 @@ gsr_video_encoder* gsr_video_encoder_vaapi_create(const gsr_video_encoder_vaapi_
*encoder = (gsr_video_encoder) {
.start = gsr_video_encoder_vaapi_start,
- .copy_textures_to_frame = NULL,
.get_textures = gsr_video_encoder_vaapi_get_textures,
.destroy = gsr_video_encoder_vaapi_destroy,
.priv = encoder_vaapi
diff --git a/src/encoder/video/video.c b/src/encoder/video/video.c
index 9b0def0..76d53b0 100644
--- a/src/encoder/video/video.c
+++ b/src/encoder/video/video.c
@@ -9,10 +9,10 @@ bool gsr_video_encoder_start(gsr_video_encoder *encoder, AVCodecContext *video_c
return res;
}
-void gsr_video_encoder_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame) {
+void gsr_video_encoder_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
assert(encoder->started);
if(encoder->copy_textures_to_frame)
- encoder->copy_textures_to_frame(encoder, frame);
+ encoder->copy_textures_to_frame(encoder, frame, color_conversion);
}
void gsr_video_encoder_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
diff --git a/src/encoder/video/vulkan.c b/src/encoder/video/vulkan.c
new file mode 100644
index 0000000..0b6c380
--- /dev/null
+++ b/src/encoder/video/vulkan.c
@@ -0,0 +1,313 @@
+#include "../../../include/encoder/video/vulkan.h"
+#include "../../../include/utils.h"
+#include "../../../include/egl.h"
+
+#include <libavcodec/avcodec.h>
+#define VK_NO_PROTOTYPES
+#include <libavutil/hwcontext_vulkan.h>
+
+//#include <vulkan/vulkan_core.h>
+
+#define GL_TEXTURE_TILING_EXT 0x9580
+#define GL_OPTIMAL_TILING_EXT 0x9584
+#define GL_LINEAR_TILING_EXT 0x9585
+
+#define GL_PIXEL_PACK_BUFFER 0x88EB
+#define GL_PIXEL_UNPACK_BUFFER 0x88EC
+#define GL_STREAM_READ 0x88E1
+#define GL_STREAM_DRAW 0x88E0
+#define GL_READ_ONLY 0x88B8
+#define GL_WRITE_ONLY 0x88B9
+#define GL_READ_FRAMEBUFFER 0x8CA8
+
+typedef struct {
+ gsr_video_encoder_vulkan_params params;
+ unsigned int target_textures[2];
+ AVBufferRef *device_ctx;
+ AVVulkanDeviceContext* vv;
+ unsigned int pbo_y[2];
+ unsigned int pbo_uv[2];
+ AVFrame *sw_frame;
+} gsr_video_encoder_vulkan;
+
+static bool gsr_video_encoder_vulkan_setup_context(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context) {
+ AVDictionary *options = NULL;
+ //av_dict_set(&options, "linear_images", "1", 0);
+ //av_dict_set(&options, "disable_multiplane", "1", 0);
+
+ // TODO: Use correct device
+ if(av_hwdevice_ctx_create(&self->device_ctx, AV_HWDEVICE_TYPE_VULKAN, NULL, options, 0) < 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to create hardware device context\n");
+ return false;
+ }
+
+ AVBufferRef *frame_context = av_hwframe_ctx_alloc(self->device_ctx);
+ if(!frame_context) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to create hwframe context\n");
+ av_buffer_unref(&self->device_ctx);
+ return false;
+ }
+
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
+ hw_frame_context->width = video_codec_context->width;
+ hw_frame_context->height = video_codec_context->height;
+ hw_frame_context->sw_format = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? AV_PIX_FMT_P010LE : AV_PIX_FMT_NV12;
+ hw_frame_context->format = video_codec_context->pix_fmt;
+ hw_frame_context->device_ctx = (AVHWDeviceContext*)self->device_ctx->data;
+
+ //AVVulkanFramesContext *vk_frame_ctx = (AVVulkanFramesContext*)hw_frame_context->hwctx;
+ //hw_frame_context->initial_pool_size = 20;
+
+ if (av_hwframe_ctx_init(frame_context) < 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to initialize hardware frame context "
+ "(note: ffmpeg version needs to be > 4.0)\n");
+ av_buffer_unref(&self->device_ctx);
+ //av_buffer_unref(&frame_context);
+ return false;
+ }
+
+ video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
+ av_buffer_unref(&frame_context);
+ return true;
+}
+
+static unsigned int gl_create_texture(gsr_egl *egl, int width, int height, int internal_format, unsigned int format) {
+ unsigned int texture_id = 0;
+ egl->glGenTextures(1, &texture_id);
+ egl->glBindTexture(GL_TEXTURE_2D, texture_id);
+ //egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_TILING_EXT, GL_OPTIMAL_TILING_EXT);
+ egl->glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, GL_UNSIGNED_BYTE, NULL);
+
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+
+ egl->glBindTexture(GL_TEXTURE_2D, 0);
+ return texture_id;
+}
+
+static AVVulkanDeviceContext* video_codec_context_get_vulkan_data(AVCodecContext *video_codec_context) {
+ AVBufferRef *hw_frames_ctx = video_codec_context->hw_frames_ctx;
+ if(!hw_frames_ctx)
+ return NULL;
+
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)hw_frames_ctx->data;
+ AVHWDeviceContext *device_context = (AVHWDeviceContext*)hw_frame_context->device_ctx;
+ if(device_context->type != AV_HWDEVICE_TYPE_VULKAN)
+ return NULL;
+
+ return (AVVulkanDeviceContext*)device_context->hwctx;
+}
+
+static bool gsr_video_encoder_vulkan_setup_textures(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context, AVFrame *frame) {
+ const int res = av_hwframe_get_buffer(video_codec_context->hw_frames_ctx, frame, 0);
+ if(res < 0) {
+ fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_textures: av_hwframe_get_buffer failed: %d\n", res);
+ return false;
+ }
+
+ //AVVkFrame *target_surface_id = (AVVkFrame*)frame->data[0];
+ self->vv = video_codec_context_get_vulkan_data(video_codec_context);
+
+ const unsigned int internal_formats_nv12[2] = { GL_RGBA8, GL_RGBA8 };
+ const unsigned int internal_formats_p010[2] = { GL_R16, GL_RG16 };
+ const unsigned int formats[2] = { GL_RED, GL_RG };
+ const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
+
+ for(int i = 0; i < 2; ++i) {
+ self->target_textures[i] = gl_create_texture(self->params.egl, video_codec_context->width / div[i], video_codec_context->height / div[i], self->params.color_depth == GSR_COLOR_DEPTH_8_BITS ? internal_formats_nv12[i] : internal_formats_p010[i], formats[i]);
+ if(self->target_textures[i] == 0) {
+            fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_textures: failed to create opengl texture\n");
+ return false;
+ }
+ }
+
+ self->params.egl->glGenBuffers(2, self->pbo_y);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[0]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, frame->width * frame->height, 0, GL_STREAM_READ);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[1]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, frame->width * frame->height, 0, GL_STREAM_READ);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+ self->params.egl->glGenBuffers(2, self->pbo_uv);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[0]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, (frame->width/2 * frame->height/2) * 2, 0, GL_STREAM_READ);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[1]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, (frame->width/2 * frame->height/2) * 2, 0, GL_STREAM_READ);
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+ self->sw_frame = av_frame_alloc();
+ self->sw_frame->format = AV_PIX_FMT_NV12;
+ self->sw_frame->width = frame->width;
+ self->sw_frame->height = frame->height;
+
+ // TODO: Remove
+ if(av_frame_get_buffer(self->sw_frame, 0) < 0) {
+ fprintf(stderr, "failed to allocate sw frame\n");
+ }
+
+ // TODO: Remove
+ if(av_frame_make_writable(self->sw_frame) < 0) {
+ fprintf(stderr, "failed to make writable\n");
+ }
+ return true;
+}
+
+static void gsr_video_encoder_vulkan_stop(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context);
+
+static bool gsr_video_encoder_vulkan_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
+ gsr_video_encoder_vulkan *self = encoder->priv;
+
+ if(!gsr_video_encoder_vulkan_setup_context(self, video_codec_context)) {
+ gsr_video_encoder_vulkan_stop(self, video_codec_context);
+ return false;
+ }
+
+ if(!gsr_video_encoder_vulkan_setup_textures(self, video_codec_context, frame)) {
+ gsr_video_encoder_vulkan_stop(self, video_codec_context);
+ return false;
+ }
+
+ return true;
+}
+
+void gsr_video_encoder_vulkan_stop(gsr_video_encoder_vulkan *self, AVCodecContext *video_codec_context) {
+ self->params.egl->glDeleteTextures(2, self->target_textures);
+ self->target_textures[0] = 0;
+ self->target_textures[1] = 0;
+
+ if(video_codec_context->hw_frames_ctx)
+ av_buffer_unref(&video_codec_context->hw_frames_ctx);
+ if(self->device_ctx)
+ av_buffer_unref(&self->device_ctx);
+}
+
+static void nop_free(void *opaque, uint8_t *data) {
+
+}
+
+static void gsr_video_encoder_vulkan_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
+ gsr_video_encoder_vulkan *self = encoder->priv;
+
+ static int counter = 0;
+ ++counter;
+
+ // AVBufferRef *av_buffer_create(uint8_t *data, size_t size,
+ // void (*free)(void *opaque, uint8_t *data),
+ // void *opaque, int flags);
+
+ while(self->params.egl->glGetError()){}
+ self->params.egl->glBindFramebuffer(GL_READ_FRAMEBUFFER, color_conversion->framebuffers[0]);
+ //fprintf(stderr, "1 gl err: %d\n", self->params.egl->glGetError());
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[counter % 2]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, frame->width * frame->height, 0, GL_STREAM_READ);
+ self->params.egl->glReadPixels(0, 0, frame->width, frame->height, GL_RED, GL_UNSIGNED_BYTE, 0);
+ //fprintf(stderr, "2 gl err: %d\n", self->params.egl->glGetError());
+
+ const int next_pbo_y = (counter + 1) % 2;
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[next_pbo_y]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, frame->width * frame->height, 0, GL_STREAM_READ);
+ //fprintf(stderr, "3 gl err: %d\n", self->params.egl->glGetError());
+ uint8_t *ptr_y = (uint8_t*)self->params.egl->glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
+ //fprintf(stderr, "4 gl err: %d\n", self->params.egl->glGetError());
+ if(!ptr_y) {
+ fprintf(stderr, "failed to map buffer y!\n");
+ }
+
+ while(self->params.egl->glGetError()){}
+ self->params.egl->glBindFramebuffer(GL_READ_FRAMEBUFFER, color_conversion->framebuffers[1]);
+ //fprintf(stderr, "5 gl err: %d\n", self->params.egl->glGetError());
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[counter % 2]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, (frame->width/2 * frame->height/2) * 2, 0, GL_STREAM_READ);
+ //fprintf(stderr, "5.5 gl err: %d\n", self->params.egl->glGetError());
+ self->params.egl->glReadPixels(0, 0, frame->width/2, frame->height/2, GL_RG, GL_UNSIGNED_BYTE, 0);
+ //fprintf(stderr, "6 gl err: %d\n", self->params.egl->glGetError());
+
+ const int next_pbo_uv = (counter + 1) % 2;
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[next_pbo_uv]);
+ self->params.egl->glBufferData(GL_PIXEL_PACK_BUFFER, (frame->width/2 * frame->height/2) * 2, 0, GL_STREAM_READ);
+ //fprintf(stderr, "7 gl err: %d\n", self->params.egl->glGetError());
+ uint8_t *ptr_uv = (uint8_t*)self->params.egl->glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
+ //fprintf(stderr, "8 gl err: %d\n", self->params.egl->glGetError());
+ if(!ptr_uv) {
+ fprintf(stderr, "failed to map buffer uv!\n");
+ }
+
+ //self->sw_frame->buf[0] = av_buffer_create(ptr_y, 3840 * 2160, nop_free, NULL, 0);
+ //self->sw_frame->buf[1] = av_buffer_create(ptr_uv, 1920 * 1080 * 2, nop_free, NULL, 0);
+ //self->sw_frame->data[0] = self->sw_frame->buf[0]->data;
+ //self->sw_frame->data[1] = self->sw_frame->buf[1]->data;
+ //self->sw_frame->extended_data[0] = self->sw_frame->data[0];
+ //self->sw_frame->extended_data[1] = self->sw_frame->data[1];
+
+ self->sw_frame->data[0] = ptr_y;
+ self->sw_frame->data[1] = ptr_uv;
+
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+ self->params.egl->glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+
+ //self->params.egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[1]);
+ //self->params.egl->glGetTexImage(GL_TEXTURE_2D, 0, GL_RG, GL_UNSIGNED_BYTE, sw_frame->data[1]);
+
+ //self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ int ret = av_hwframe_transfer_data(frame, self->sw_frame, 0);
+ if(ret < 0) {
+ fprintf(stderr, "transfer data failed, error: %s\n", av_err2str(ret));
+ }
+
+ //av_buffer_unref(&self->sw_frame->buf[0]);
+ //av_buffer_unref(&self->sw_frame->buf[1]);
+
+ //av_frame_free(&sw_frame);
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_y[next_pbo_y]);
+ self->params.egl->glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+    self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, self->pbo_uv[next_pbo_uv]); // unmap the UV PBO, not the Y PBO twice
+ self->params.egl->glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+ self->params.egl->glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+}
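
The function above is the classic double-buffered (ping-pong) PBO readback: each call starts an asynchronous glReadPixels into one pixel-pack buffer and maps the other, so the CPU receives the pixels requested on the previous call instead of stalling on the in-flight transfer. Stripped of the project specifics, the pattern is (a sketch using the GL_* tokens defined at the top of this file, assuming the GL entry points are loaded; `consume` is a placeholder for feeding sw_frame):

#include <stdint.h>

// Sketch: double-buffered PBO readback of one plane. glReadPixels with a
// bound GL_PIXEL_PACK_BUFFER returns without waiting; mapping the *other*
// PBO hands back last call's pixels, hiding the GPU->CPU transfer latency.
static unsigned int pbo[2];
static int frame_index = 0;

static void readback_plane(int width, int height, void (*consume)(const uint8_t *data, int size)) {
    const int cur  = frame_index % 2;       // PBO that receives this frame
    const int prev = (frame_index + 1) % 2; // PBO holding the previous frame
    ++frame_index;

    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[cur]);
    glReadPixels(0, 0, width, height, GL_RED, GL_UNSIGNED_BYTE, 0); // async copy into pbo[cur]

    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[prev]);
    uint8_t *ptr = (uint8_t*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
    if(ptr) {
        consume(ptr, width * height); // e.g. point sw_frame->data[0] here, then av_hwframe_transfer_data()
        glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
    }
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
}
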
+
+static void gsr_video_encoder_vulkan_get_textures(gsr_video_encoder *encoder, unsigned int *textures, int *num_textures, gsr_destination_color *destination_color) {
+ gsr_video_encoder_vulkan *self = encoder->priv;
+ textures[0] = self->target_textures[0];
+ textures[1] = self->target_textures[1];
+ *num_textures = 2;
+ *destination_color = self->params.color_depth == GSR_COLOR_DEPTH_10_BITS ? GSR_DESTINATION_COLOR_P010 : GSR_DESTINATION_COLOR_NV12;
+}
+
+static void gsr_video_encoder_vulkan_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
+ gsr_video_encoder_vulkan_stop(encoder->priv, video_codec_context);
+ free(encoder->priv);
+ free(encoder);
+}
+
+gsr_video_encoder* gsr_video_encoder_vulkan_create(const gsr_video_encoder_vulkan_params *params) {
+ gsr_video_encoder *encoder = calloc(1, sizeof(gsr_video_encoder));
+ if(!encoder)
+ return NULL;
+
+ gsr_video_encoder_vulkan *encoder_vulkan = calloc(1, sizeof(gsr_video_encoder_vulkan));
+ if(!encoder_vulkan) {
+ free(encoder);
+ return NULL;
+ }
+
+ encoder_vulkan->params = *params;
+
+ *encoder = (gsr_video_encoder) {
+ .start = gsr_video_encoder_vulkan_start,
+ .copy_textures_to_frame = gsr_video_encoder_vulkan_copy_textures_to_frame,
+ .get_textures = gsr_video_encoder_vulkan_get_textures,
+ .destroy = gsr_video_encoder_vulkan_destroy,
+ .priv = encoder_vulkan
+ };
+
+ return encoder;
+}
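
As with the CUDA and VAAPI backends, callers never touch gsr_video_encoder_vulkan directly; everything goes through the gsr_video_encoder function-pointer table and the wrappers in src/encoder/video/video.c. A usage sketch (the params fields are assumptions based on the other backends, and egl, video_codec_context, frame and color_conversion are assumed to be set up already):

// Sketch: driving the backend through the vtable.
gsr_video_encoder_vulkan_params params = {0};
params.egl = egl; // previously initialized gsr_egl
params.color_depth = GSR_COLOR_DEPTH_8_BITS;

gsr_video_encoder *encoder = gsr_video_encoder_vulkan_create(&params);
if(encoder && gsr_video_encoder_start(encoder, video_codec_context, frame)) {
    unsigned int textures[2];
    int num_textures = 0;
    gsr_destination_color destination_color;
    gsr_video_encoder_get_textures(encoder, textures, &num_textures, &destination_color);
    // ... draw captured frames into `textures`, then once per encoded frame:
    // gsr_video_encoder_copy_textures_to_frame(encoder, frame, color_conversion);
}
if(encoder)
    encoder->destroy(encoder, video_codec_context);
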
diff --git a/src/main.cpp b/src/main.cpp
index 18a810a..de8c352 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -2,11 +2,20 @@ extern "C" {
#include "../include/capture/nvfbc.h"
#include "../include/capture/xcomposite.h"
#include "../include/capture/kms.h"
+#ifdef GSR_PORTAL
+#include "../include/capture/portal.h"
+#include "../include/dbus.h"
+#endif
#include "../include/encoder/video/cuda.h"
#include "../include/encoder/video/vaapi.h"
+#include "../include/encoder/video/vulkan.h"
#include "../include/encoder/video/software.h"
+#include "../include/codec_query/nvenc.h"
+#include "../include/codec_query/vaapi.h"
+#include "../include/codec_query/vulkan.h"
#include "../include/egl.h"
#include "../include/utils.h"
+#include "../include/damage.h"
#include "../include/color_conversion.h"
}
@@ -35,6 +44,7 @@ extern "C" {
#include <libswresample/swresample.h>
#include <libavutil/avutil.h>
#include <libavutil/time.h>
+#include <libavutil/mastering_display_metadata.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
@@ -43,6 +53,10 @@ extern "C" {
#include <deque>
#include <future>
+#ifndef GSR_VERSION
+#define GSR_VERSION "unknown"
+#endif
+
// TODO: If options are not supported then they are returned (allocated) in the options dict. This should be freed.
// TODO: Remove LIBAVUTIL_VERSION_MAJOR checks in the future when Ubuntu, Pop!_OS LTS etc. update ffmpeg to >= 5.0
@@ -85,8 +99,14 @@ enum class VideoCodec {
H264,
HEVC,
HEVC_HDR,
+ HEVC_10BIT,
AV1,
- AV1_HDR
+ AV1_HDR,
+ AV1_10BIT,
+ VP8,
+ VP9,
+ H264_VULKAN,
+ HEVC_VULKAN
};
enum class AudioCodec {
@@ -106,6 +126,11 @@ enum class FramerateMode {
CONTENT
};
+enum class BitrateMode {
+ QP,
+ VBR
+};
+
static int x11_error_handler(Display*, XErrorEvent*) {
return 0;
}
@@ -115,6 +140,7 @@ static int x11_io_error_handler(Display*) {
}
static bool video_codec_is_hdr(VideoCodec video_codec) {
+ // TODO: Vulkan
switch(video_codec) {
case VideoCodec::HEVC_HDR:
case VideoCodec::AV1_HDR:
@@ -124,6 +150,65 @@ static bool video_codec_is_hdr(VideoCodec video_codec) {
}
}
+static VideoCodec hdr_video_codec_to_sdr_video_codec(VideoCodec video_codec) {
+ // TODO: Vulkan
+ switch(video_codec) {
+ case VideoCodec::HEVC_HDR:
+ return VideoCodec::HEVC;
+ case VideoCodec::AV1_HDR:
+ return VideoCodec::AV1;
+ default:
+ return video_codec;
+ }
+}
+
+static gsr_color_depth video_codec_to_bit_depth(VideoCodec video_codec) {
+ // TODO: Vulkan
+ switch(video_codec) {
+ case VideoCodec::HEVC_HDR:
+ case VideoCodec::HEVC_10BIT:
+ case VideoCodec::AV1_HDR:
+ case VideoCodec::AV1_10BIT:
+ return GSR_COLOR_DEPTH_10_BITS;
+ default:
+ return GSR_COLOR_DEPTH_8_BITS;
+ }
+}
+
+// static bool video_codec_is_hevc(VideoCodec video_codec) {
+// TODO: Vulkan
+// switch(video_codec) {
+// case VideoCodec::HEVC:
+// case VideoCodec::HEVC_HDR:
+// case VideoCodec::HEVC_10BIT:
+// return true;
+// default:
+// return false;
+// }
+// }
+
+static bool video_codec_is_av1(VideoCodec video_codec) {
+ // TODO: Vulkan
+ switch(video_codec) {
+ case VideoCodec::AV1:
+ case VideoCodec::AV1_HDR:
+ case VideoCodec::AV1_10BIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool video_codec_is_vulkan(VideoCodec video_codec) {
+ switch(video_codec) {
+ case VideoCodec::H264_VULKAN:
+ case VideoCodec::HEVC_VULKAN:
+ return true;
+ default:
+ return false;
+ }
+}
+
struct PacketData {
PacketData() {}
PacketData(const PacketData&) = delete;
@@ -223,7 +308,8 @@ static AVCodecID audio_codec_get_id(AudioCodec audio_codec) {
return AV_CODEC_ID_AAC;
}
-static AVSampleFormat audio_codec_get_sample_format(AudioCodec audio_codec, const AVCodec *codec, bool mix_audio) {
+static AVSampleFormat audio_codec_get_sample_format(AVCodecContext *audio_codec_context, AudioCodec audio_codec, const AVCodec *codec, bool mix_audio) {
+ (void)audio_codec_context;
switch(audio_codec) {
case AudioCodec::AAC: {
return AV_SAMPLE_FMT_FLTP;
@@ -232,13 +318,32 @@ static AVSampleFormat audio_codec_get_sample_format(AudioCodec audio_codec, cons
bool supports_s16 = false;
bool supports_flt = false;
- for(size_t i = 0; codec->sample_fmts && codec->sample_fmts[i] != -1; ++i) {
+ #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(61, 15, 0)
+ for(size_t i = 0; codec->sample_fmts && codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; ++i) {
if(codec->sample_fmts[i] == AV_SAMPLE_FMT_S16) {
supports_s16 = true;
} else if(codec->sample_fmts[i] == AV_SAMPLE_FMT_FLT) {
supports_flt = true;
}
}
+ #else
+ const enum AVSampleFormat *sample_fmts = NULL;
+ if(avcodec_get_supported_config(audio_codec_context, codec, AV_CODEC_CONFIG_SAMPLE_FORMAT, 0, (const void**)&sample_fmts, NULL) >= 0) {
+ if(sample_fmts) {
+ for(size_t i = 0; sample_fmts[i] != AV_SAMPLE_FMT_NONE; ++i) {
+ if(sample_fmts[i] == AV_SAMPLE_FMT_S16) {
+ supports_s16 = true;
+ } else if(sample_fmts[i] == AV_SAMPLE_FMT_FLT) {
+ supports_flt = true;
+ }
+ }
+ } else {
+ // What a dumb API. It returns NULL if all formats are supported
+ supports_s16 = true;
+ supports_flt = true;
+ }
+ }
+ #endif
// Amix only works with float audio
if(mix_audio)
@@ -307,7 +412,7 @@ static AVCodecContext* create_audio_codec_context(int fps, AudioCodec audio_code
assert(codec->type == AVMEDIA_TYPE_AUDIO);
codec_context->codec_id = codec->id;
- codec_context->sample_fmt = audio_codec_get_sample_format(audio_codec, codec, mix_audio);
+ codec_context->sample_fmt = audio_codec_get_sample_format(codec_context, audio_codec, codec, mix_audio);
codec_context->bit_rate = audio_bitrate == 0 ? audio_codec_get_get_bitrate(audio_codec) : audio_bitrate;
codec_context->sample_rate = AUDIO_SAMPLE_RATE;
if(audio_codec == AudioCodec::AAC)
@@ -327,10 +432,62 @@ static AVCodecContext* create_audio_codec_context(int fps, AudioCodec audio_code
return codec_context;
}
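
The sample-format loop above straddles an FFmpeg API change: from libavcodec 61.15 the codec->sample_fmts array is superseded by avcodec_get_supported_config(), which reports NULL when every format is supported. The same logic, condensed into a reusable sketch:

#include <libavcodec/avcodec.h>
#include <stdbool.h>
#include <stddef.h>

// Sketch: version-portable check for whether `codec` supports sample format `fmt`.
static bool codec_supports_sample_fmt(AVCodecContext *ctx, const AVCodec *codec, enum AVSampleFormat fmt) {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(61, 15, 0)
    (void)ctx;
    for(size_t i = 0; codec->sample_fmts && codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; ++i) {
        if(codec->sample_fmts[i] == fmt)
            return true;
    }
    return false;
#else
    const enum AVSampleFormat *sample_fmts = NULL;
    if(avcodec_get_supported_config(ctx, codec, AV_CODEC_CONFIG_SAMPLE_FORMAT, 0, (const void**)&sample_fmts, NULL) < 0)
        return false;
    if(!sample_fmts)
        return true; // NULL means every format is supported
    for(size_t i = 0; sample_fmts[i] != AV_SAMPLE_FMT_NONE; ++i) {
        if(sample_fmts[i] == fmt)
            return true;
    }
    return false;
#endif
}
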
+static int vbr_get_quality_parameter(AVCodecContext *codec_context, VideoQuality video_quality, bool hdr) {
+ // 8 bit / 10 bit = 80%
+ const float qp_multiply = hdr ? 8.0f/10.0f : 1.0f;
+ if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ return 160 * qp_multiply;
+ case VideoQuality::HIGH:
+ return 130 * qp_multiply;
+ case VideoQuality::VERY_HIGH:
+ return 110 * qp_multiply;
+ case VideoQuality::ULTRA:
+ return 90 * qp_multiply;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_H264) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ return 35 * qp_multiply;
+ case VideoQuality::HIGH:
+ return 30 * qp_multiply;
+ case VideoQuality::VERY_HIGH:
+ return 25 * qp_multiply;
+ case VideoQuality::ULTRA:
+ return 22 * qp_multiply;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ return 35 * qp_multiply;
+ case VideoQuality::HIGH:
+ return 30 * qp_multiply;
+ case VideoQuality::VERY_HIGH:
+ return 25 * qp_multiply;
+ case VideoQuality::ULTRA:
+ return 22 * qp_multiply;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP8 || codec_context->codec_id == AV_CODEC_ID_VP9) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ return 35 * qp_multiply;
+ case VideoQuality::HIGH:
+ return 30 * qp_multiply;
+ case VideoQuality::VERY_HIGH:
+ return 25 * qp_multiply;
+ case VideoQuality::ULTRA:
+ return 22 * qp_multiply;
+ }
+ }
+ assert(false);
+ return 22 * qp_multiply;
+}
+
static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
VideoQuality video_quality,
int fps, const AVCodec *codec, bool low_latency, gsr_gpu_vendor vendor, FramerateMode framerate_mode,
- bool hdr, gsr_color_range color_range, float keyint) {
+ bool hdr, gsr_color_range color_range, float keyint, bool use_software_video_encoder, BitrateMode bitrate_mode, VideoCodec video_codec) {
AVCodecContext *codec_context = avcodec_alloc_context3(codec);
@@ -372,69 +529,91 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
}
//codec_context->chroma_sample_location = AVCHROMA_LOC_CENTER;
if(codec->id == AV_CODEC_ID_HEVC)
- codec_context->codec_tag = MKTAG('h', 'v', 'c', '1');
- switch(video_quality) {
- case VideoQuality::MEDIUM:
- //codec_context->qmin = 35;
- //codec_context->qmax = 35;
- codec_context->bit_rate = 100000;//4500000 + (codec_context->width * codec_context->height)*0.75;
- break;
- case VideoQuality::HIGH:
- //codec_context->qmin = 34;
- //codec_context->qmax = 34;
- codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
- break;
- case VideoQuality::VERY_HIGH:
- //codec_context->qmin = 28;
- //codec_context->qmax = 28;
- codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
- break;
- case VideoQuality::ULTRA:
- //codec_context->qmin = 22;
- //codec_context->qmax = 22;
- codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
- break;
- }
- //codec_context->profile = FF_PROFILE_H264_MAIN;
- if (codec_context->codec_id == AV_CODEC_ID_MPEG1VIDEO)
- codec_context->mb_decision = 2;
-
- // stream->time_base = codec_context->time_base;
- // codec_context->ticks_per_frame = 30;
- //av_opt_set(codec_context->priv_data, "tune", "hq", 0);
- // TODO: Do this for better file size? also allows setting qmin, qmax per frame? which can then be used to dynamically set bitrate to reduce quality
- // if live streaming is slow or if the users harddrive is cant handle writing megabytes of data per second.
- #if 0
- char qmin_str[32];
- snprintf(qmin_str, sizeof(qmin_str), "%d", codec_context->qmin);
-
- char qmax_str[32];
- snprintf(qmax_str, sizeof(qmax_str), "%d", codec_context->qmax);
-
- av_opt_set(codec_context->priv_data, "cq", qmax_str, 0);
- av_opt_set(codec_context->priv_data, "rc", "vbr", 0);
- av_opt_set(codec_context->priv_data, "qmin", qmin_str, 0);
- av_opt_set(codec_context->priv_data, "qmax", qmax_str, 0);
- codec_context->bit_rate = 0;
- #endif
+        codec_context->codec_tag = MKTAG('h', 'v', 'c', '1'); // QuickTime on macOS requires this or the video won't be playable
- // 8 bit / 10 bit = 80%, and increase it even more
- const float quality_multiply = hdr ? (8.0f/10.0f * 0.7f) : 1.0f;
- if(vendor != GSR_GPU_VENDOR_NVIDIA) {
+ if(bitrate_mode == BitrateMode::VBR) {
+ const int quality = vbr_get_quality_parameter(codec_context, video_quality, hdr);
switch(video_quality) {
case VideoQuality::MEDIUM:
- codec_context->global_quality = 180 * quality_multiply;
+ codec_context->qmin = quality;
+ codec_context->qmax = quality;
+ codec_context->bit_rate = 100000;//4500000 + (codec_context->width * codec_context->height)*0.75;
break;
case VideoQuality::HIGH:
- codec_context->global_quality = 140 * quality_multiply;
+ codec_context->qmin = quality;
+ codec_context->qmax = quality;
+ codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
break;
case VideoQuality::VERY_HIGH:
- codec_context->global_quality = 120 * quality_multiply;
+ codec_context->qmin = quality;
+ codec_context->qmax = quality;
+ codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
break;
case VideoQuality::ULTRA:
- codec_context->global_quality = 100 * quality_multiply;
+ codec_context->qmin = quality;
+ codec_context->qmax = quality;
+ codec_context->bit_rate = 100000;//10000000-9000000 + (codec_context->width * codec_context->height)*0.75;
break;
}
+
+ codec_context->rc_max_rate = codec_context->bit_rate;
+ codec_context->rc_min_rate = codec_context->bit_rate;
+ codec_context->rc_buffer_size = codec_context->bit_rate;//codec_context->bit_rate / 10;
+ codec_context->rc_initial_buffer_occupancy = 100000;//codec_context->bit_rate * 1000;
+ }
+ //codec_context->profile = FF_PROFILE_H264_MAIN;
+ if (codec_context->codec_id == AV_CODEC_ID_MPEG1VIDEO)
+ codec_context->mb_decision = 2;
+
+ if(!use_software_video_encoder && vendor != GSR_GPU_VENDOR_NVIDIA) {
+ // 8 bit / 10 bit = 80%, and increase it even more
+ const float quality_multiply = hdr ? (8.0f/10.0f * 0.7f) : 1.0f;
+ if(codec_context->codec_id == AV_CODEC_ID_AV1 || codec_context->codec_id == AV_CODEC_ID_H264 || codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ codec_context->global_quality = 150 * quality_multiply;
+ break;
+ case VideoQuality::HIGH:
+ codec_context->global_quality = 120 * quality_multiply;
+ break;
+ case VideoQuality::VERY_HIGH:
+ codec_context->global_quality = 100 * quality_multiply;
+ break;
+ case VideoQuality::ULTRA:
+ codec_context->global_quality = 90 * quality_multiply;
+ break;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP8) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ codec_context->global_quality = 35 * quality_multiply;
+ break;
+ case VideoQuality::HIGH:
+ codec_context->global_quality = 30 * quality_multiply;
+ break;
+ case VideoQuality::VERY_HIGH:
+ codec_context->global_quality = 20 * quality_multiply;
+ break;
+ case VideoQuality::ULTRA:
+ codec_context->global_quality = 10 * quality_multiply;
+ break;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP9) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ codec_context->global_quality = 35 * quality_multiply;
+ break;
+ case VideoQuality::HIGH:
+ codec_context->global_quality = 30 * quality_multiply;
+ break;
+ case VideoQuality::VERY_HIGH:
+ codec_context->global_quality = 20 * quality_multiply;
+ break;
+ case VideoQuality::ULTRA:
+ codec_context->global_quality = 10 * quality_multiply;
+ break;
+ }
+ }
}
av_opt_set_int(codec_context->priv_data, "b_ref_mode", 0, 0);
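
In VBR mode the code now relies on the generic AVCodecContext rate-control fields instead of encoder-private options: qmin/qmax pin the quantizer, bit_rate sets the average target, and the rc_* fields bound the VBV buffer. A condensed sketch of just those fields (target_bitrate and qp are assumed inputs):

// Sketch: the generic FFmpeg rate-control fields used above.
// Setting rc_min_rate == rc_max_rate approximates CBR behavior.
static void configure_vbr(AVCodecContext *ctx, int64_t target_bitrate, int qp) {
    ctx->qmin = qp; // pin the quantizer range to one value
    ctx->qmax = qp;
    ctx->bit_rate = target_bitrate;           // average target
    ctx->rc_max_rate = ctx->bit_rate;         // instantaneous cap
    ctx->rc_min_rate = ctx->bit_rate;
    ctx->rc_buffer_size = (int)ctx->bit_rate; // roughly a one-second VBV buffer
    ctx->rc_initial_buffer_occupancy = ctx->rc_buffer_size; // start full
}
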
@@ -443,158 +622,37 @@ static AVCodecContext *create_video_codec_context(AVPixelFormat pix_fmt,
if(vendor != GSR_GPU_VENDOR_NVIDIA) {
// TODO: More options, better options
//codec_context->bit_rate = codec_context->width * codec_context->height;
- av_opt_set(codec_context->priv_data, "rc_mode", "CQP", 0);
+ switch(bitrate_mode) {
+ case BitrateMode::QP: {
+ if(video_codec_is_vulkan(video_codec))
+ av_opt_set(codec_context->priv_data, "rc_mode", "cqp", 0);
+ else if(vendor == GSR_GPU_VENDOR_NVIDIA)
+ av_opt_set(codec_context->priv_data, "rc", "constqp", 0);
+ else
+ av_opt_set(codec_context->priv_data, "rc_mode", "CQP", 0);
+ break;
+ }
+ case BitrateMode::VBR: {
+ if(video_codec_is_vulkan(video_codec))
+ av_opt_set(codec_context->priv_data, "rc_mode", "vbr", 0);
+ else if(vendor == GSR_GPU_VENDOR_NVIDIA)
+ av_opt_set(codec_context->priv_data, "rc", "vbr", 0);
+ else
+ av_opt_set(codec_context->priv_data, "rc_mode", "VBR", 0);
+ break;
+ }
+ }
//codec_context->global_quality = 4;
//codec_context->compression_level = 2;
}
//av_opt_set(codec_context->priv_data, "bsf", "hevc_metadata=colour_primaries=9:transfer_characteristics=16:matrix_coefficients=9", 0);
- //codec_context->rc_max_rate = codec_context->bit_rate;
- //codec_context->rc_min_rate = codec_context->bit_rate;
- //codec_context->rc_buffer_size = codec_context->bit_rate / 10;
- // TODO: Do this when not using cqp
- //codec_context->rc_initial_buffer_occupancy = codec_context->bit_rate * 1000;
-
codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
return codec_context;
}
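
Note that this file reaches encoder-private options by two routes: create_video_codec_context() writes them straight to codec_context->priv_data with av_opt_set(), while the open_video_*() functions further down collect them in an AVDictionary that avcodec_open2() consumes. Both end up in the same AVOption storage; a sketch of the equivalence:

// Sketch: two equivalent ways to set an encoder-private option such as "rc".
// Route 1: directly on the private context, any time before avcodec_open2().
av_opt_set(codec_context->priv_data, "rc", "constqp", 0);

// Route 2: via a dictionary consumed by avcodec_open2(); entries the encoder
// does not recognize are left behind in `options` instead of failing the open.
AVDictionary *options = NULL;
av_dict_set(&options, "rc", "constqp", 0);
avcodec_open2(codec_context, codec_context->codec, &options);
av_dict_free(&options);
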
-static bool vaapi_create_codec_context(AVCodecContext *video_codec_context, const char *card_path) {
- char render_path[128];
- if(!gsr_card_path_get_render_path(card_path, render_path)) {
- fprintf(stderr, "gsr error: failed to get /dev/dri/renderDXXX file from %s\n", card_path);
- return false;
- }
-
- AVBufferRef *device_ctx;
- if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_VAAPI, render_path, NULL, 0) < 0) {
- fprintf(stderr, "Error: Failed to create hardware device context\n");
- return false;
- }
-
- AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
- if(!frame_context) {
- fprintf(stderr, "Error: Failed to create hwframe context\n");
- av_buffer_unref(&device_ctx);
- return false;
- }
-
- AVHWFramesContext *hw_frame_context =
- (AVHWFramesContext *)frame_context->data;
- hw_frame_context->width = video_codec_context->width;
- hw_frame_context->height = video_codec_context->height;
- hw_frame_context->sw_format = AV_PIX_FMT_NV12;
- hw_frame_context->format = video_codec_context->pix_fmt;
- hw_frame_context->device_ref = device_ctx;
- hw_frame_context->device_ctx = (AVHWDeviceContext*)device_ctx->data;
-
- //hw_frame_context->initial_pool_size = 1;
-
- if (av_hwframe_ctx_init(frame_context) < 0) {
- fprintf(stderr, "Error: Failed to initialize hardware frame context "
- "(note: ffmpeg version needs to be > 4.0)\n");
- av_buffer_unref(&device_ctx);
- //av_buffer_unref(&frame_context);
- return false;
- }
-
- video_codec_context->hw_device_ctx = av_buffer_ref(device_ctx);
- video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
- return true;
-}
-
-static bool check_if_codec_valid_for_hardware(const AVCodec *codec, gsr_gpu_vendor vendor, const char *card_path) {
- // Do not use AV_PIX_FMT_CUDA because we dont want to do full check with hardware context
- AVCodecContext *codec_context = create_video_codec_context(vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_VAAPI, VideoQuality::VERY_HIGH, 60, codec, false, vendor, FramerateMode::CONSTANT, false, GSR_COLOR_RANGE_LIMITED, 2);
- if(!codec_context)
- return false;
-
- codec_context->width = 512;
- codec_context->height = 512;
-
- if(vendor != GSR_GPU_VENDOR_NVIDIA) {
- if(!vaapi_create_codec_context(codec_context, card_path)) {
- avcodec_free_context(&codec_context);
- return false;
- }
- }
-
- bool success = false;
- success = avcodec_open2(codec_context, codec_context->codec, NULL) == 0;
- if(codec_context->hw_device_ctx)
- av_buffer_unref(&codec_context->hw_device_ctx);
- if(codec_context->hw_frames_ctx)
- av_buffer_unref(&codec_context->hw_frames_ctx);
- avcodec_free_context(&codec_context);
- return success;
-}
-
-static const AVCodec* find_h264_software_encoder() {
- return avcodec_find_encoder_by_name("libx264");
-}
-
-static const AVCodec* find_h264_encoder(gsr_gpu_vendor vendor, const char *card_path) {
- const AVCodec *codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "h264_nvenc" : "h264_vaapi");
- if(!codec)
- codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "nvenc_h264" : "vaapi_h264");
-
- if(!codec)
- return nullptr;
-
- static bool checked = false;
- static bool checked_success = true;
- if(!checked) {
- checked = true;
- if(!check_if_codec_valid_for_hardware(codec, vendor, card_path))
- checked_success = false;
- }
- return checked_success ? codec : nullptr;
-}
-
-static const AVCodec* find_hevc_encoder(gsr_gpu_vendor vendor, const char *card_path) {
- const AVCodec *codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "hevc_nvenc" : "hevc_vaapi");
- if(!codec)
- codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "nvenc_hevc" : "vaapi_hevc");
-
- if(!codec)
- return nullptr;
-
- static bool checked = false;
- static bool checked_success = true;
- if(!checked) {
- checked = true;
- if(!check_if_codec_valid_for_hardware(codec, vendor, card_path))
- checked_success = false;
- }
- return checked_success ? codec : nullptr;
-}
-
-static const AVCodec* find_av1_encoder(gsr_gpu_vendor vendor, const char *card_path) {
- // Workaround bug with av1 nvidia in older ffmpeg versions that causes the whole application to crash
- // when avcodec_open2 is opened with av1_nvenc
- if(vendor == GSR_GPU_VENDOR_NVIDIA && LIBAVCODEC_BUILD < AV_VERSION_INT(60, 30, 100)) {
- return nullptr;
- }
-
- const AVCodec *codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "av1_nvenc" : "av1_vaapi");
- if(!codec)
- codec = avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "nvenc_av1" : "vaapi_av1");
-
- if(!codec)
- return nullptr;
-
- static bool checked = false;
- static bool checked_success = true;
- if(!checked) {
- checked = true;
- if(!check_if_codec_valid_for_hardware(codec, vendor, card_path))
- checked_success = false;
- }
- return checked_success ? codec : nullptr;
-}
-
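
The helpers deleted above implemented encoder probing: finding an encoder by name only proves ffmpeg was built with it, so they opened a throwaway codec context once and cached the result, since the driver can still reject the encoder at open time. That logic moves into the new src/codec_query/ files; the general pattern, sketched with illustrative names (for VAAPI encoders a hw_frames_ctx must be attached first, as the removed vaapi_create_codec_context() did):

#include <libavcodec/avcodec.h>
#include <stdbool.h>

// Sketch: probe whether a named encoder can actually be opened on this system.
static bool encoder_usable(const char *name, enum AVPixelFormat pix_fmt) {
    const AVCodec *codec = avcodec_find_encoder_by_name(name);
    if(!codec)
        return false; // not compiled into this ffmpeg build

    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    if(!ctx)
        return false;

    // A tiny dummy context is enough to make the driver accept or reject it.
    ctx->width = 512;
    ctx->height = 512;
    ctx->pix_fmt = pix_fmt;
    ctx->time_base = (AVRational){1, 60};
    ctx->framerate = (AVRational){60, 1};

    const bool ok = avcodec_open2(ctx, codec, NULL) == 0;
    avcodec_free_context(&ctx);
    return ok;
}
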
static void open_audio(AVCodecContext *audio_codec_context) {
AVDictionary *options = nullptr;
av_dict_set(&options, "strict", "experimental", 0);
@@ -633,64 +691,110 @@ static AVFrame* create_audio_frame(AVCodecContext *audio_codec_context) {
return frame;
}
-static void open_video_software(AVCodecContext *codec_context, VideoQuality video_quality, PixelFormat pixel_format, bool hdr) {
- (void)pixel_format; // TODO:
- AVDictionary *options = nullptr;
+static void dict_set_profile(AVCodecContext *codec_context, gsr_gpu_vendor vendor, gsr_color_depth color_depth, AVDictionary **options) {
+ #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(61, 17, 100)
+ if(codec_context->codec_id == AV_CODEC_ID_H264) {
+ // TODO: Only for vaapi
+ //if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ // av_dict_set(options, "profile", "high10", 0);
+ //else
+ av_dict_set(options, "profile", "high", 0);
+ } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+ if(vendor == GSR_GPU_VENDOR_NVIDIA) {
+ if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ av_dict_set_int(options, "highbitdepth", 1, 0);
+ } else {
+ av_dict_set(options, "profile", "main", 0); // TODO: use professional instead?
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ av_dict_set(options, "profile", "main10", 0);
+ else
+ av_dict_set(options, "profile", "main", 0);
+ }
+ #else
+ if(codec_context->codec_id == AV_CODEC_ID_H264) {
+ // TODO: Only for vaapi
+ //if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ // av_dict_set_int(options, "profile", AV_PROFILE_H264_HIGH_10, 0);
+ //else
+ av_dict_set_int(options, "profile", AV_PROFILE_H264_HIGH, 0);
+ } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+ if(vendor == GSR_GPU_VENDOR_NVIDIA) {
+ if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ av_dict_set_int(options, "highbitdepth", 1, 0);
+ } else {
+ av_dict_set_int(options, "profile", AV_PROFILE_AV1_MAIN, 0); // TODO: use professional instead?
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ if(color_depth == GSR_COLOR_DEPTH_10_BITS)
+ av_dict_set_int(options, "profile", AV_PROFILE_HEVC_MAIN_10, 0);
+ else
+ av_dict_set_int(options, "profile", AV_PROFILE_HEVC_MAIN, 0);
+ }
+ #endif
+}
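
dict_set_profile() straddles the same kind of version split: older libavcodec takes the profile by name ("main10"), while with 61.17.100+ the AV_PROFILE_* constants can be passed through av_dict_set_int(), presumably because those constants are only guaranteed by newer headers. Either way the dictionary only stores strings, and the encoder's AVOption layer parses a symbolic name or a number equally at open time; a usage sketch:

// Sketch: the two branches produce the same effect at avcodec_open2() time.
AVDictionary *options = NULL;
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(61, 17, 100)
av_dict_set(&options, "profile", "main10", 0);                    // by name
#else
av_dict_set_int(&options, "profile", AV_PROFILE_HEVC_MAIN_10, 0); // by value, stored as a string
#endif
avcodec_open2(codec_context, codec_context->codec, &options);
av_dict_free(&options);
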
+static void video_software_set_qp(AVCodecContext *codec_context, VideoQuality video_quality, bool hdr, AVDictionary **options) {
+ // 8 bit / 10 bit = 80%
const float qp_multiply = hdr ? 8.0f/10.0f : 1.0f;
if(codec_context->codec_id == AV_CODEC_ID_AV1) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
} else if(codec_context->codec_id == AV_CODEC_ID_H264) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 34 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 34 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 30 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 26 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 23 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 22 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 20 * qp_multiply, 0);
break;
}
} else {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
}
+}
+
+static void open_video_software(AVCodecContext *codec_context, VideoQuality video_quality, PixelFormat pixel_format, bool hdr, gsr_color_depth color_depth, BitrateMode bitrate_mode) {
+ (void)pixel_format; // TODO:
+ AVDictionary *options = nullptr;
+
+ if(bitrate_mode == BitrateMode::QP)
+ video_software_set_qp(codec_context, video_quality, hdr, &options);
av_dict_set(&options, "preset", "medium", 0);
- if(hdr) {
- av_dict_set(&options, "profile", "high10", 0);
- } else {
- av_dict_set(&options, "profile", "high", 0);
- }
+ dict_set_profile(codec_context, GSR_GPU_VENDOR_INTEL, color_depth, &options);
// TODO: If streaming or piping output set this to zerolatency
av_dict_set(&options, "tune", "fastdecode", 0);
@@ -707,131 +811,94 @@ static void open_video_software(AVCodecContext *codec_context, VideoQuality vide
}
}
-static void open_video_hardware(AVCodecContext *codec_context, VideoQuality video_quality, bool very_old_gpu, gsr_gpu_vendor vendor, PixelFormat pixel_format, bool hdr) {
- (void)very_old_gpu;
- AVDictionary *options = nullptr;
+static void video_set_rc(VideoCodec video_codec, gsr_gpu_vendor vendor, BitrateMode bitrate_mode, AVDictionary **options) {
+ switch(bitrate_mode) {
+ case BitrateMode::QP: {
+ if(video_codec_is_vulkan(video_codec))
+ av_dict_set(options, "rc_mode", "cqp", 0);
+ else if(vendor == GSR_GPU_VENDOR_NVIDIA)
+ av_dict_set(options, "rc", "constqp", 0);
+ else
+ av_dict_set(options, "rc_mode", "CQP", 0);
+ break;
+ }
+ case BitrateMode::VBR: {
+ if(video_codec_is_vulkan(video_codec))
+ av_dict_set(options, "rc_mode", "vbr", 0);
+ else if(vendor == GSR_GPU_VENDOR_NVIDIA)
+ av_dict_set(options, "rc", "vbr", 0);
+ else
+ av_dict_set(options, "rc_mode", "VBR", 0);
+ break;
+ }
+ }
+}
+
+static void video_hardware_set_qp(AVCodecContext *codec_context, VideoQuality video_quality, gsr_gpu_vendor vendor, bool hdr, AVDictionary **options) {
// 8 bit / 10 bit = 80%
const float qp_multiply = hdr ? 8.0f/10.0f : 1.0f;
if(vendor == GSR_GPU_VENDOR_NVIDIA) {
- // Disable setting preset since some nvidia gpus cant handle it nicely and greatly reduce encoding performance (from more than 60 fps to less than 45 fps) (such as Nvidia RTX A2000)
- #if 0
- bool supports_p4 = false;
- bool supports_p5 = false;
-
- const AVOption *opt = nullptr;
- while((opt = av_opt_next(codec_context->priv_data, opt))) {
- if(opt->type == AV_OPT_TYPE_CONST) {
- if(strcmp(opt->name, "p4") == 0)
- supports_p4 = true;
- else if(strcmp(opt->name, "p5") == 0)
- supports_p5 = true;
- }
- }
- #endif
-
+ // TODO: Test if these should be in the same range as vaapi
if(codec_context->codec_id == AV_CODEC_ID_AV1) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
} else if(codec_context->codec_id == AV_CODEC_ID_H264) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 34 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 34 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 30 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 26 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 23 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 22 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 20 * qp_multiply, 0);
break;
}
- } else {
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
- }
-
- #if 0
- if(!supports_p4 && !supports_p5)
- fprintf(stderr, "Info: your ffmpeg version is outdated. It's recommended that you use the flatpak version of gpu-screen-recorder version instead, which you can find at https://flathub.org/apps/details/com.dec05eba.gpu_screen_recorder\n");
-
- //if(is_livestream) {
- // av_dict_set_int(&options, "zerolatency", 1, 0);
- // //av_dict_set(&options, "preset", "llhq", 0);
- //}
-
- // I want to use a good preset for the gpu but all gpus prefer different
- // presets. Nvidia and ffmpeg used to support "hq" preset that chose the best preset for the gpu
- // with pretty good performance but you now have to choose p1-p7, which are gpu agnostic and on
- // older gpus p5-p7 slow the gpu down to a crawl...
- // "hq" is now just an alias for p7 in ffmpeg :(
- // TODO: Temporary disable because of stuttering?
-
- // TODO: Preset is set to p5 for now but it should ideally be p6 or p7.
- // This change is needed because for certain sizes of a window (or monitor?) such as 971x780 causes encoding to freeze
- // when using h264 codec. This is a new(?) nvidia driver bug.
- if(very_old_gpu)
- av_dict_set(&options, "preset", supports_p4 ? "p4" : "medium", 0);
- else
- av_dict_set(&options, "preset", supports_p5 ? "p5" : "slow", 0);
- #endif
-
- av_dict_set(&options, "tune", "hq", 0);
- av_dict_set(&options, "rc", "constqp", 0);
-
- // TODO: Enable multipass
-
- if(codec_context->codec_id == AV_CODEC_ID_H264) {
- switch(pixel_format) {
- case PixelFormat::YUV420:
- av_dict_set(&options, "profile", "high", 0);
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP8 || codec_context->codec_id == AV_CODEC_ID_VP9) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
- case PixelFormat::YUV444:
- av_dict_set(&options, "profile", "high444p", 0);
+ case VideoQuality::HIGH:
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
- }
- } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
- switch(pixel_format) {
- case PixelFormat::YUV420:
- av_dict_set(&options, "rgb_mode", "yuv420", 0);
+ case VideoQuality::VERY_HIGH:
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
- case PixelFormat::YUV444:
- av_dict_set(&options, "rgb_mode", "yuv444", 0);
+ case VideoQuality::ULTRA:
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
- } else {
- //av_dict_set(&options, "profile", "main10", 0);
- //av_dict_set(&options, "pix_fmt", "yuv420p16le", 0);
- if(hdr) {
- av_dict_set(&options, "profile", "main10", 0);
- } else {
- av_dict_set(&options, "profile", "main", 0);
- }
}
} else {
if(codec_context->codec_id == AV_CODEC_ID_AV1) {
@@ -839,54 +906,109 @@ static void open_video_hardware(AVCodecContext *codec_context, VideoQuality vide
} else if(codec_context->codec_id == AV_CODEC_ID_H264) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 34 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 34 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 30 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 26 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 23 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 22 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 20 * qp_multiply, 0);
break;
}
- } else {
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
switch(video_quality) {
case VideoQuality::MEDIUM:
- av_dict_set_int(&options, "qp", 37 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
break;
case VideoQuality::HIGH:
- av_dict_set_int(&options, "qp", 32 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
break;
case VideoQuality::VERY_HIGH:
- av_dict_set_int(&options, "qp", 28 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
break;
case VideoQuality::ULTRA:
- av_dict_set_int(&options, "qp", 24 * qp_multiply, 0);
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
+ break;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_VP8 || codec_context->codec_id == AV_CODEC_ID_VP9) {
+ switch(video_quality) {
+ case VideoQuality::MEDIUM:
+ av_dict_set_int(options, "qp", 35 * qp_multiply, 0);
+ break;
+ case VideoQuality::HIGH:
+ av_dict_set_int(options, "qp", 30 * qp_multiply, 0);
+ break;
+ case VideoQuality::VERY_HIGH:
+ av_dict_set_int(options, "qp", 25 * qp_multiply, 0);
+ break;
+ case VideoQuality::ULTRA:
+ av_dict_set_int(options, "qp", 22 * qp_multiply, 0);
break;
}
}
+ }
+}
+
+static void open_video_hardware(AVCodecContext *codec_context, VideoQuality video_quality, bool very_old_gpu, gsr_gpu_vendor vendor, PixelFormat pixel_format, bool hdr, gsr_color_depth color_depth, BitrateMode bitrate_mode, VideoCodec video_codec, bool low_power) {
+ (void)very_old_gpu;
+ AVDictionary *options = nullptr;
+
+ if(bitrate_mode == BitrateMode::QP)
+ video_hardware_set_qp(codec_context, video_quality, vendor, hdr, &options);
+
+ video_set_rc(video_codec, vendor, bitrate_mode, &options);
+
+ // TODO: Enable multipass
+
+ if(vendor == GSR_GPU_VENDOR_NVIDIA) {
+ av_dict_set(&options, "tune", "hq", 0);
+
+ dict_set_profile(codec_context, vendor, color_depth, &options);
+ if(codec_context->codec_id == AV_CODEC_ID_H264) {
+ // TODO: h264 10bit?
+ // TODO:
+ // switch(pixel_format) {
+ // case PixelFormat::YUV420:
+ // av_dict_set_int(&options, "profile", AV_PROFILE_H264_HIGH, 0);
+ // break;
+ // case PixelFormat::YUV444:
+ // av_dict_set_int(&options, "profile", AV_PROFILE_H264_HIGH_444, 0);
+ // break;
+ // }
+ } else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
+ switch(pixel_format) {
+ case PixelFormat::YUV420:
+ av_dict_set(&options, "rgb_mode", "yuv420", 0);
+ break;
+ case PixelFormat::YUV444:
+ av_dict_set(&options, "rgb_mode", "yuv444", 0);
+ break;
+ }
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ //av_dict_set(&options, "pix_fmt", "yuv420p16le", 0);
+ }
+ } else {
// TODO: More quality options
- av_dict_set(&options, "rc_mode", "CQP", 0);
- //av_dict_set_int(&options, "low_power", 1, 0);
+ if(low_power)
+ av_dict_set_int(&options, "low_power", 1, 0);
+        // Improves performance but increases VRAM usage
+ //av_dict_set_int(&options, "async_depth", 8, 0);
if(codec_context->codec_id == AV_CODEC_ID_H264) {
- av_dict_set(&options, "profile", "high", 0);
// Removed because it causes stutter in games for some people
//av_dict_set_int(&options, "quality", 5, 0); // quality preset
} else if(codec_context->codec_id == AV_CODEC_ID_AV1) {
- av_dict_set(&options, "profile", "main", 0); // TODO: use professional instead?
av_dict_set(&options, "tier", "main", 0);
- } else {
- if(hdr) {
- av_dict_set(&options, "profile", "main10", 0);
+ } else if(codec_context->codec_id == AV_CODEC_ID_HEVC) {
+ if(hdr)
av_dict_set(&options, "sei", "hdr", 0);
- } else {
- av_dict_set(&options, "profile", "main", 0);
- }
}
+
+ // TODO: vp8/vp9 10bit
}
if(codec_context->codec_id == AV_CODEC_ID_H264) {
@@ -905,24 +1027,27 @@ static void open_video_hardware(AVCodecContext *codec_context, VideoQuality vide
static void usage_header() {
const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
- fprintf(stderr, "usage: %s -w <window_id|monitor|focused> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>] [-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|hevc_hdr|av1|av1_hdr] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-cr limited|full] [-mf yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>] [-encoder gpu|cpu] [-o <output_file>] [-v yes|no] [-h|--help]\n", program_name);
+ fprintf(stderr, "usage: %s -w <window_id|monitor|focused|portal> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>] [-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|av1|vp8|vp9|hevc_hdr|av1_hdr|hevc_10bit|av1_10bit] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-bm auto|qp|vbr] [-cr limited|full] [-df yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>] [-restore-portal-session yes|no] [-portal-session-token-filepath filepath] [-encoder gpu|cpu] [-o <output_file>] [-v yes|no] [--version] [-h|--help]\n", program_name);
}
+// TODO: Update with portal info
static void usage_full() {
const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
usage_header();
fprintf(stderr, "\n");
fprintf(stderr, "OPTIONS:\n");
- fprintf(stderr, " -w Window id to record, a display (monitor name), \"screen\", \"screen-direct-force\" or \"focused\".\n");
- fprintf(stderr, " If this is \"screen\" or \"screen-direct-force\" then all monitors are recorded.\n");
+ fprintf(stderr, " -w Window id to record, a display (monitor name), \"screen\", \"screen-direct-force\", \"focused\" or \"portal\".\n");
+ fprintf(stderr, " If this is \"portal\" then xdg desktop screencast portal with pipewire will be used. Portal option is only available on Wayland.\n");
+ fprintf(stderr, " If you select to save the session (token) in the desktop portal capture popup then the session will be saved for the next time you use \"portal\",\n");
+ fprintf(stderr, " but the session will be ignored unless you run GPU Screen Recorder with the '-restore-portal-session yes' option.\n");
+ fprintf(stderr, " If this is \"screen\" or \"screen-direct-force\" then all monitors are recorded on Nvidia X11. On AMD/Intel or wayland \"screen\" will record the first monitor found.\n");
fprintf(stderr, " \"screen-direct-force\" is not recommended unless you use a VRR (G-SYNC) monitor on Nvidia X11 and you are aware that using this option can cause games to freeze/crash or other issues because of Nvidia driver issues.\n");
fprintf(stderr, " \"screen-direct-force\" option is only available on Nvidia X11. VRR works without this option on other systems.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -c Container format for output file, for example mp4, or flv. Only required if no output file is specified or if recording in replay buffer mode.\n");
fprintf(stderr, " If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n");
- fprintf(stderr, " Only containers that support h264, hevc or av1 are supported, which means that only mp4, mkv, flv (and some others) are supported.\n");
- fprintf(stderr, " WebM is not supported yet (most hardware doesn't support WebM video encoding).\n");
+ fprintf(stderr, " Only containers that support h264, hevc, av1, vp8 or vp9 are supported, which means that only mp4, mkv, flv, webm (and some others) are supported.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -s The size (area) to record at in the format WxH, for example 1920x1080. This option is only supported (and required) when -w is \"focused\".\n");
fprintf(stderr, "\n");
@@ -934,6 +1059,7 @@ static void usage_full() {
fprintf(stderr, " -a Audio device to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device.\n");
fprintf(stderr, " A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\".\n");
fprintf(stderr, " Multiple audio devices can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"alsa_output1|alsa_output2\".\n");
+ fprintf(stderr, " The audio device can also be \"default_output\" in which case the default output device is used, or \"default_input\" in which case the default input device is used.\n");
fprintf(stderr, " If the audio device is an empty string then the audio device is ignored.\n");
fprintf(stderr, " Optional, no audio track is added by default.\n");
fprintf(stderr, "\n");
@@ -944,12 +1070,14 @@ static void usage_full() {
fprintf(stderr, " and the video will only be saved when the gpu-screen-recorder is closed. This feature is similar to Nvidia's instant replay feature.\n");
fprintf(stderr, " This option has be between 5 and 1200. Note that the replay buffer size will not always be precise, because of keyframes. Optional, disabled by default.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -k Video codec to use. Should be either 'auto', 'h264', 'hevc', 'av1', 'hevc_hdr' or 'av1_hdr'. Optional, defaults to 'auto' which defaults to 'h264'.\n");
- fprintf(stderr, " Forcefully set to 'h264' if the file container type is 'flv'.\n");
- fprintf(stderr, " 'hevc_hdr' and 'av1_hdr' option is not available on X11.\n");
- fprintf(stderr, " Note: hdr metadata is not included in the video when recording with 'hevc_hdr'/'av1_hdr' because of bugs in AMD, Intel and NVIDIA drivers (amazin', they are all bugged).\n");
+ fprintf(stderr, " -k Video codec to use. Should be either 'auto', 'h264', 'hevc', 'av1', 'vp8', 'vp9', 'hevc_hdr', 'av1_hdr', 'hevc_10bit' or 'av1_10bit'.\n");
+ fprintf(stderr, " Optional, set to 'auto' by default which defaults to 'h264'. Forcefully set to 'h264' if the file container type is 'flv'.\n");
+ fprintf(stderr, " 'hevc_hdr' and 'av1_hdr' option is not available on X11 nor when using the portal capture option.\n");
+ fprintf(stderr, " 'hevc_10bit' and 'av1_10bit' options allow you to select 10 bit color depth which can reduce banding and improve quality in darker areas, but not all video players support 10 bit color depth\n");
+ fprintf(stderr, " and if you upload the video to a website the website might reduce 10 bit to 8 bit.\n");
+ fprintf(stderr, " Note that when using 'hevc_hdr' or 'av1_hdr' the color depth is also 10 bits.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -ac Audio codec to use. Should be either 'aac', 'opus' or 'flac'. Defaults to 'opus' for .mp4/.mkv files, otherwise defaults to 'aac'.\n");
+ fprintf(stderr, " -ac Audio codec to use. Should be either 'aac', 'opus' or 'flac'. Optional, set to 'opus' for .mp4/.mkv files, otherwise set to 'aac'.\n");
fprintf(stderr, " 'opus' and 'flac' is only supported by .mp4/.mkv files. 'opus' is recommended for best performance and smallest audio size.\n");
fprintf(stderr, " Flac audio codec is option is disable at the moment because of a temporary issue.\n");
fprintf(stderr, "\n");
@@ -960,37 +1088,66 @@ static void usage_full() {
fprintf(stderr, " is dropped when you record a game. Only needed if you are recording a game that is bottlenecked by GPU. The same issue exists on Wayland but overclocking is not possible on Wayland.\n");
fprintf(stderr, " Works only if your have \"Coolbits\" set to \"12\" in NVIDIA X settings, see README for more information. Note! use at your own risk! Optional, disabled by default.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -fm Framerate mode. Should be either 'cfr' (constant frame rate), 'vfr' (variable frame rate) or 'content'. Defaults to 'vfr'.\n");
+ fprintf(stderr, " -fm Framerate mode. Should be either 'cfr' (constant frame rate), 'vfr' (variable frame rate) or 'content'. Optional, set to 'vfr' by default.\n");
fprintf(stderr, " 'vfr' is recommended for recording for less issue with very high system load but some applications such as video editors may not support it properly.\n");
- fprintf(stderr, " 'content' is currently only supported when recording a single window, on X11. The 'content' option matches the recording frame rate to the captured content.\n");
+ fprintf(stderr, " 'content' is currently only supported on X11 or when using portal capture option. The 'content' option matches the recording frame rate to the captured content.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " -bm Bitrate mode. Should be either 'auto', 'qp' (constant quality) or 'vbr' (variable bitrate). Optional, set to 'auto' by default which defaults to 'qp' on all devices\n");
+ fprintf(stderr, " except steam deck that has broken drivers and doesn't support qp.\n");
+ fprintf(stderr, " 'vbr' option is not supported when using '-encoder cpu' option.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -cr Color range. Should be either 'limited' (aka mpeg) or 'full' (aka jpeg). Defaults to 'limited'.\n");
+ fprintf(stderr, " -cr Color range. Should be either 'limited' (aka mpeg) or 'full' (aka jpeg). Optional, set to 'limited' by default.\n");
fprintf(stderr, " Limited color range means that colors are in range 16-235 (4112-60395 for hdr) while full color range means that colors are in range 0-255 (0-65535 for hdr).\n");
- fprintf(stderr, " Note that some buggy video players (such as vlc) are unable to correctly display videos in full color range.\n");
+ fprintf(stderr, " Note that some buggy video players (such as vlc) are unable to correctly display videos in full color range and when upload the video to websites the website\n");
+ fprintf(stderr, " might re-encoder the video to make the video limited color range.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -mf Organise replays in folders based on the current date.\n");
+ fprintf(stderr, " -df Organise replays in folders based on the current date.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -sc Run a script on the saved video file (non-blocking). The first argument to the script is the filepath to the saved video file and the second argument is the recording type (either \"regular\" or \"replay\").\n");
+ fprintf(stderr, " -sc Run a script on the saved video file (asynchronously). The first argument to the script is the filepath to the saved video file and the second argument is the recording type (either \"regular\" or \"replay\").\n");
fprintf(stderr, " Not applicable for live streams.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -cursor\n");
- fprintf(stderr, " Record cursor. Defaults to 'yes'.\n");
+ fprintf(stderr, " Record cursor. Optional, set to 'yes' by default.\n");
+ fprintf(stderr, "\n");
fprintf(stderr, " -keyint\n");
fprintf(stderr, " Specifies the keyframe interval in seconds, the max amount of time to wait to generate a keyframe. Keyframes can be generated more often than this.\n");
fprintf(stderr, " This also affects seeking in the video and may affect how the replay video is cut. If this is set to 10 for example then you can only seek in 10-second chunks in the video.\n");
fprintf(stderr, " Setting this to a higher value reduces the video file size if you are ok with the previously described downside. This option is expected to be a floating point number.\n");
fprintf(stderr, " By default this value is set to 2.0.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " -restore-portal-session\n");
+ fprintf(stderr, " If GPU Screen Recorder should use the same capture option as the last time. Using this option removes the popup asking what you want to record the next time you record with '-w portal' if you selected the option to save session (token) in the desktop portal screencast popup.\n");
+ fprintf(stderr, " This option may not have any effect on your Wayland compositor and your systems desktop portal needs to support ScreenCast version 5 or later. Optional, set to 'no' by default.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " -portal-session-token-filepath\n");
+ fprintf(stderr, " This option is used together with -restore-portal-session option to specify the file path to save/restore the portal session token to/from.\n");
+ fprintf(stderr, " This can be used to remember different portal capture options depending on different recording option (such as recording/replay).\n");
+ fprintf(stderr, " Optional, set to \"$XDG_CONFIG_HOME/gpu-screen-recorder/restore_token\" by default ($XDG_CONFIG_HOME defaults to \"$HOME/.config\").\n");
+ fprintf(stderr, " Note: the directory to the portal session token file is created automatically if it doesn't exist.\n");
+ fprintf(stderr, "\n");
fprintf(stderr, " -encoder\n");
fprintf(stderr, " Which device should be used for video encoding. Should either be 'gpu' or 'cpu'. Does currently only work with h264 codec option (-k).\n");
fprintf(stderr, " Optional, set to 'gpu' by default.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " --list-supported-video-codecs\n");
- fprintf(stderr, " List supported video codecs and exits. Prints h264, hevc, hevc_hdr, av1 and av1_hdr (if supported).\n");
+ fprintf(stderr, " --info\n");
+ fprintf(stderr, " List info about the system (for use by GPU Screen Recorder UI). Lists the following information (prints them to stdout and exits):\n");
+ fprintf(stderr, " Supported video codecs (h264, h264_software, hevc, hevc_hdr, hevc_10bit, av1, av1_hdr, av1_10bit, vp8, vp9 (if supported)).\n");
+ fprintf(stderr, " Supported capture options (window, focused, screen, monitors and portal, if supported by the system).\n");
+ fprintf(stderr, " If opengl initialization fails then the program exits with 22, if no usable drm device is found then it exits with 23. On success it exits with 0.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " --list-audio-devices\n");
+ fprintf(stderr, " List audio devices (for use by GPU Screen Recorder UI). Lists audio devices in the following format (prints them to stdout and exits):\n");
+ fprintf(stderr, " <audio_device_name>|<audio_device_name_in_human_readable_format>\n");
+ fprintf(stderr, " For example:\n");
+ fprintf(stderr, " bluez_input.88:C9:E8:66:A2:27|WH-1000XM4\n");
+ fprintf(stderr, " The <audio_device_name> is the name to pass to GPU Screen Recorder in a -a option.\n");
+ fprintf(stderr, " --version\n");
+ fprintf(stderr, " Print version (%s) and exit\n", GSR_VERSION);
fprintf(stderr, "\n");
- //fprintf(stderr, " -pixfmt The pixel format to use for the output video. yuv420 is the most common format and is best supported, but the color is compressed, so colors can look washed out and certain colors of text can look bad. Use yuv444 for no color compression, but the video may not work everywhere and it may not work with hardware video decoding. Optional, defaults to yuv420\n");
+ //fprintf(stderr, " -pixfmt The pixel format to use for the output video. yuv420 is the most common format and is best supported, but the color is compressed, so colors can look washed out and certain colors of text can look bad. Use yuv444 for no color compression, but the video may not work everywhere and it may not work with hardware video decoding. Optional, set to 'yuv420' by default\n");
fprintf(stderr, " -o The output file path. If omitted then the encoded data is sent to stdout. Required in replay mode (when using -r).\n");
fprintf(stderr, " In replay mode this has to be a directory instead of a file.\n");
- fprintf(stderr, " The directory to the file is created (recursively) if it doesn't already exist.\n");
+ fprintf(stderr, " Note: the directory to the file is created automatically if it doesn't already exist.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -v Prints per second, fps updates. Optional, set to 'yes' by default.\n");
fprintf(stderr, "\n");
@@ -1003,9 +1160,11 @@ static void usage_full() {
fprintf(stderr, " Send signal SIGUSR2 to gpu-screen-recorder (killall -SIGUSR2 gpu-screen-recorder) to pause/unpause recording. Only applicable and useful when recording (not streaming nor replay).\n");
fprintf(stderr, "\n");
fprintf(stderr, "EXAMPLES:\n");
- fprintf(stderr, " %s -w screen -f 60 -a \"$(pactl get-default-sink).monitor\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
- fprintf(stderr, " %s -w screen -f 60 -a \"$(pactl get-default-sink).monitor|$(pactl get-default-source)\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
- fprintf(stderr, " %s -w screen -f 60 -a \"$(pactl get-default-sink).monitor\" -c mkv -r 60 -o \"$HOME/Videos\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a default_output -o \"$HOME/Videos/video.mp4\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a \"default_output|default_input\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -r 60 -o \"$HOME/Videos\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -sc script.sh -r 60 -o \"$HOME/Videos\"\n", program_name);
+ fprintf(stderr, " %s -w portal -f 60 -a default_output -restore-portal-session yes -o \"$HOME/Videos/video.mp4\"\n", program_name);
//fprintf(stderr, " gpu-screen-recorder -w screen -f 60 -q ultra -pixfmt yuv444 -o video.mp4\n");
_exit(1);
}
@@ -1176,43 +1335,62 @@ struct AudioTrack {
int64_t pts = 0;
};
-static std::future<void> save_replay_thread;
-static std::vector<std::shared_ptr<PacketData>> save_replay_packets;
-static std::string save_replay_output_filepath;
+static bool add_hdr_metadata_to_video_stream(gsr_capture *cap, AVStream *video_stream) {
+ size_t light_metadata_size = 0;
+ size_t mastering_display_metadata_size = 0;
+ AVContentLightMetadata *light_metadata = av_content_light_metadata_alloc(&light_metadata_size);
+ #if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(59, 37, 100)
+ AVMasteringDisplayMetadata *mastering_display_metadata = av_mastering_display_metadata_alloc();
+ mastering_display_metadata_size = sizeof(*mastering_display_metadata);
+ #else
+ AVMasteringDisplayMetadata *mastering_display_metadata = av_mastering_display_metadata_alloc_size(&mastering_display_metadata_size);
+ #endif
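+ // libavutil >= 59.37.100 provides av_mastering_display_metadata_alloc_size(), which also returns
+ // the allocation size needed when attaching the side data; older versions only have
+ // av_mastering_display_metadata_alloc(), so the size is taken from sizeof instead.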
-static int create_directory_recursive(char *path) {
- int path_len = strlen(path);
- char *p = path;
- char *end = path + path_len;
- for(;;) {
- char *slash_p = strchr(p, '/');
+ if(!light_metadata || !mastering_display_metadata) {
+ if(light_metadata)
+ av_freep(&light_metadata);
- // Skips first '/', we don't want to try and create the root directory
- if(slash_p == path) {
- ++p;
- continue;
- }
+ if(mastering_display_metadata)
+ av_freep(&mastering_display_metadata);
- if(!slash_p)
- slash_p = end;
+ return false;
+ }
- char prev_char = *slash_p;
- *slash_p = '\0';
- int err = mkdir(path, S_IRWXU);
- *slash_p = prev_char;
+ if(!gsr_capture_set_hdr_metadata(cap, mastering_display_metadata, light_metadata)) {
+ av_freep(&light_metadata);
+ av_freep(&mastering_display_metadata);
+ return false;
+ }
- if(err == -1 && errno != EEXIST)
- return err;
+ // TODO: More error checking
- if(slash_p == end)
- break;
- else
- p = slash_p + 1;
- }
- return 0;
+ #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(60, 31, 102)
+ const bool content_light_level_added = av_stream_add_side_data(video_stream, AV_PKT_DATA_CONTENT_LIGHT_LEVEL, (uint8_t*)light_metadata, light_metadata_size) == 0;
+ #else
+ const bool content_light_level_added = av_packet_side_data_add(&video_stream->codecpar->coded_side_data, &video_stream->codecpar->nb_coded_side_data, AV_PKT_DATA_CONTENT_LIGHT_LEVEL, light_metadata, light_metadata_size, 0) != NULL;
+ #endif
+
+ #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(60, 31, 102)
+ const bool mastering_display_metadata_added = av_stream_add_side_data(video_stream, AV_PKT_DATA_MASTERING_DISPLAY_METADATA, (uint8_t*)mastering_display_metadata, mastering_display_metadata_size) == 0;
+ #else
+ const bool mastering_display_metadata_added = av_packet_side_data_add(&video_stream->codecpar->coded_side_data, &video_stream->codecpar->nb_coded_side_data, AV_PKT_DATA_MASTERING_DISPLAY_METADATA, mastering_display_metadata, mastering_display_metadata_size, 0) != NULL;
+ #endif
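+ // On success the stream side data takes ownership of these allocations,
+ // so they are only freed here if attaching them failed.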
+
+ if(!content_light_level_added)
+ av_freep(&light_metadata);
+
+ if(!mastering_display_metadata_added)
+ av_freep(&mastering_display_metadata);
+
+ // Return true even on failure because we don't want to retry adding hdr metadata on failure
+ return true;
}
-static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, std::deque<std::shared_ptr<PacketData>> &frame_data_queue, bool frames_erased, std::string output_dir, const char *container_format, const std::string &file_extension, std::mutex &write_output_mutex, bool make_folders) {
+static std::future<void> save_replay_thread;
+static std::vector<std::shared_ptr<PacketData>> save_replay_packets;
+static std::string save_replay_output_filepath;
+
+static void save_replay_async(AVCodecContext *video_codec_context, int video_stream_index, std::vector<AudioTrack> &audio_tracks, std::deque<std::shared_ptr<PacketData>> &frame_data_queue, bool frames_erased, std::string output_dir, const char *container_format, const std::string &file_extension, std::mutex &write_output_mutex, bool date_folders, bool hdr, gsr_capture *capture) {
if(save_replay_thread.valid())
return;
@@ -1255,7 +1433,7 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
}
}
- if (make_folders) {
+ if (date_folders) {
std::string output_folder = output_dir + '/' + get_date_only_str();
create_directory_recursive(&output_folder[0]);
save_replay_output_filepath = output_folder + "/Replay_" + get_time_only_str() + "." + file_extension;
@@ -1264,36 +1442,42 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
save_replay_output_filepath = output_dir + "/Replay_" + get_date_str() + "." + file_extension;
}
- save_replay_thread = std::async(std::launch::async, [video_stream_index, container_format, start_index, video_pts_offset, audio_pts_offset, video_codec_context, &audio_tracks]() mutable {
- AVFormatContext *av_format_context;
- avformat_alloc_output_context2(&av_format_context, nullptr, container_format, nullptr);
+ AVFormatContext *av_format_context;
+ avformat_alloc_output_context2(&av_format_context, nullptr, container_format, nullptr);
- AVStream *video_stream = create_stream(av_format_context, video_codec_context);
- avcodec_parameters_from_context(video_stream->codecpar, video_codec_context);
+ AVStream *video_stream = create_stream(av_format_context, video_codec_context);
+ avcodec_parameters_from_context(video_stream->codecpar, video_codec_context);
- std::unordered_map<int, AudioTrack*> stream_index_to_audio_track_map;
- for(AudioTrack &audio_track : audio_tracks) {
- stream_index_to_audio_track_map[audio_track.stream_index] = &audio_track;
- AVStream *audio_stream = create_stream(av_format_context, audio_track.codec_context);
- avcodec_parameters_from_context(audio_stream->codecpar, audio_track.codec_context);
- audio_track.stream = audio_stream;
- }
+ std::unordered_map<int, AudioTrack*> stream_index_to_audio_track_map;
+ for(AudioTrack &audio_track : audio_tracks) {
+ stream_index_to_audio_track_map[audio_track.stream_index] = &audio_track;
+ AVStream *audio_stream = create_stream(av_format_context, audio_track.codec_context);
+ avcodec_parameters_from_context(audio_stream->codecpar, audio_track.codec_context);
+ audio_track.stream = audio_stream;
+ }
- int ret = avio_open(&av_format_context->pb, save_replay_output_filepath.c_str(), AVIO_FLAG_WRITE);
- if (ret < 0) {
- fprintf(stderr, "Error: Could not open '%s': %s. Make sure %s is an existing directory with write access\n", save_replay_output_filepath.c_str(), av_error_to_string(ret), save_replay_output_filepath.c_str());
- return;
- }
+ const int open_ret = avio_open(&av_format_context->pb, save_replay_output_filepath.c_str(), AVIO_FLAG_WRITE);
+ if (open_ret < 0) {
+ fprintf(stderr, "Error: Could not open '%s': %s. Make sure %s is an existing directory with write access\n", save_replay_output_filepath.c_str(), av_error_to_string(open_ret), save_replay_output_filepath.c_str());
+ avformat_free_context(av_format_context);
+ return;
+ }
- AVDictionary *options = nullptr;
- av_dict_set(&options, "strict", "experimental", 0);
+ AVDictionary *options = nullptr;
+ av_dict_set(&options, "strict", "experimental", 0);
- ret = avformat_write_header(av_format_context, &options);
- if (ret < 0) {
- fprintf(stderr, "Error occurred when writing header to output file: %s\n", av_error_to_string(ret));
- return;
- }
+ const int header_write_ret = avformat_write_header(av_format_context, &options);
+ if (header_write_ret < 0) {
+ fprintf(stderr, "Error occurred when writing header to output file: %s\n", av_error_to_string(header_write_ret));
+ avio_close(av_format_context->pb);
+ avformat_free_context(av_format_context);
+ av_dict_free(&options);
+ return;
+ }
+
+ if(hdr)
+ add_hdr_metadata_to_video_stream(capture, video_stream);
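+
+ // The muxer, streams and header are now set up synchronously above; the async thread
+ // below only writes the already-buffered replay packets.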
+ save_replay_thread = std::async(std::launch::async, [video_stream_index, video_stream, start_index, video_pts_offset, audio_pts_offset, video_codec_context, &audio_tracks, stream_index_to_audio_track_map, av_format_context, options]() mutable {
for(size_t i = start_index; i < save_replay_packets.size(); ++i) {
// TODO: Check if successful
AVPacket av_packet;
@@ -1325,7 +1509,7 @@ static void save_replay_async(AVCodecContext *video_codec_context, int video_str
av_packet.stream_index = stream->index;
av_packet_rescale_ts(&av_packet, codec_context->time_base, stream->time_base);
- ret = av_write_frame(av_format_context, &av_packet);
+ const int ret = av_write_frame(av_format_context, &av_packet);
if(ret < 0)
fprintf(stderr, "Error: Failed to write frame index %d to muxer, reason: %s (%d)\n", stream->index, av_error_to_string(ret), ret);
@@ -1498,6 +1682,69 @@ static int init_filter_graph(AVCodecContext *audio_codec_context, AVFilterGraph
return 0;
}
+static gsr_video_encoder* create_video_encoder(gsr_egl *egl, bool overclock, gsr_color_depth color_depth, bool use_software_video_encoder, VideoCodec video_codec) {
+ gsr_video_encoder *video_encoder = nullptr;
+
+ if(use_software_video_encoder) {
+ gsr_video_encoder_software_params params;
+ params.egl = egl;
+ params.color_depth = color_depth;
+ video_encoder = gsr_video_encoder_software_create(&params);
+ return video_encoder;
+ }
+
+ if(video_codec_is_vulkan(video_codec)) {
+ gsr_video_encoder_vulkan_params params;
+ params.egl = egl;
+ params.color_depth = color_depth;
+ video_encoder = gsr_video_encoder_vulkan_create(&params);
+ return video_encoder;
+ }
+
+ switch(egl->gpu_info.vendor) {
+ case GSR_GPU_VENDOR_AMD:
+ case GSR_GPU_VENDOR_INTEL: {
+ gsr_video_encoder_vaapi_params params;
+ params.egl = egl;
+ params.color_depth = color_depth;
+ video_encoder = gsr_video_encoder_vaapi_create(&params);
+ break;
+ }
+ case GSR_GPU_VENDOR_NVIDIA: {
+ gsr_video_encoder_cuda_params params;
+ params.egl = egl;
+ params.overclock = overclock;
+ params.color_depth = color_depth;
+ video_encoder = gsr_video_encoder_cuda_create(&params);
+ break;
+ }
+ }
+
+ return video_encoder;
+}
+
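+// Queries which video codecs the system can encode with. The 'cleanup' parameter controls whether
+// the query context is torn down afterwards; some callers skip the cleanup to speed up shutdown.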
+static bool get_supported_video_codecs(gsr_egl *egl, VideoCodec video_codec, bool use_software_video_encoder, bool cleanup, gsr_supported_video_codecs *video_codecs) {
+ memset(video_codecs, 0, sizeof(*video_codecs));
+
+ if(use_software_video_encoder) {
+ video_codecs->h264.supported = true;
+ return true;
+ }
+
+ if(video_codec_is_vulkan(video_codec))
+ return gsr_get_supported_video_codecs_vulkan(video_codecs, egl->card_path, cleanup);
+
+ switch(egl->gpu_info.vendor) {
+ case GSR_GPU_VENDOR_AMD:
+ case GSR_GPU_VENDOR_INTEL:
+ return gsr_get_supported_video_codecs_vaapi(video_codecs, egl->card_path, cleanup);
+ case GSR_GPU_VENDOR_NVIDIA:
+ return gsr_get_supported_video_codecs_nvenc(video_codecs, cleanup);
+ }
+
+ return false;
+}
+
static void xwayland_check_callback(const gsr_monitor *monitor, void *userdata) {
bool *xwayland_found = (bool*)userdata;
if(monitor->name_len >= 8 && strncmp(monitor->name, "XWAYLAND", 8) == 0)
@@ -1512,11 +1759,195 @@ static bool is_xwayland(Display *display) {
return true;
bool xwayland_found = false;
- for_each_active_monitor_output_x11(display, xwayland_check_callback, &xwayland_found);
+ for_each_active_monitor_output_x11_not_cached(display, xwayland_check_callback, &xwayland_found);
return xwayland_found;
}
-static void list_supported_video_codecs() {
+static bool is_using_prime_run() {
+ const char *prime_render_offload = getenv("__NV_PRIME_RENDER_OFFLOAD");
+ return prime_render_offload && strcmp(prime_render_offload, "1") == 0;
+}
+
+static void disable_prime_run() {
+ unsetenv("__NV_PRIME_RENDER_OFFLOAD");
+ unsetenv("__NV_PRIME_RENDER_OFFLOAD_PROVIDER");
+ unsetenv("__GLX_VENDOR_LIBRARY_NAME");
+ unsetenv("__VK_LAYER_NV_optimus");
+}
+
+static void list_system_info(bool wayland) {
+ printf("display_server|%s\n", wayland ? "wayland" : "x11");
+}
+
+static void list_gpu_info(gsr_egl *egl) {
+ switch(egl->gpu_info.vendor) {
+ case GSR_GPU_VENDOR_AMD:
+ printf("vendor|amd\n");
+ break;
+ case GSR_GPU_VENDOR_INTEL:
+ printf("vendor|intel\n");
+ break;
+ case GSR_GPU_VENDOR_NVIDIA:
+ printf("vendor|nvidia\n");
+ break;
+ }
+}
+
+static const AVCodec* get_ffmpeg_video_codec(VideoCodec video_codec, gsr_gpu_vendor vendor) {
+ switch(video_codec) {
+ case VideoCodec::H264:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "h264_nvenc" : "h264_vaapi");
+ case VideoCodec::HEVC:
+ case VideoCodec::HEVC_HDR:
+ case VideoCodec::HEVC_10BIT:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "hevc_nvenc" : "hevc_vaapi");
+ case VideoCodec::AV1:
+ case VideoCodec::AV1_HDR:
+ case VideoCodec::AV1_10BIT:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "av1_nvenc" : "av1_vaapi");
+ case VideoCodec::VP8:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "vp8_nvenc" : "vp8_vaapi");
+ case VideoCodec::VP9:
+ return avcodec_find_encoder_by_name(vendor == GSR_GPU_VENDOR_NVIDIA ? "vp9_nvenc" : "vp9_vaapi");
+ case VideoCodec::H264_VULKAN:
+ return avcodec_find_encoder_by_name("h264_vulkan");
+ case VideoCodec::HEVC_VULKAN:
+ return avcodec_find_encoder_by_name("hevc_vulkan");
+ }
+ return nullptr;
+}
+
+static void set_supported_video_codecs_ffmpeg(gsr_supported_video_codecs *supported_video_codecs, gsr_supported_video_codecs *supported_video_codecs_vulkan, gsr_gpu_vendor vendor) {
+ if(!get_ffmpeg_video_codec(VideoCodec::H264, vendor)) {
+ supported_video_codecs->h264.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::HEVC, vendor)) {
+ supported_video_codecs->hevc.supported = false;
+ supported_video_codecs->hevc_hdr.supported = false;
+ supported_video_codecs->hevc_10bit.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::AV1, vendor)) {
+ supported_video_codecs->av1.supported = false;
+ supported_video_codecs->av1_hdr.supported = false;
+ supported_video_codecs->av1_10bit.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::VP8, vendor)) {
+ supported_video_codecs->vp8.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::VP9, vendor)) {
+ supported_video_codecs->vp9.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::H264_VULKAN, vendor)) {
+ supported_video_codecs_vulkan->h264.supported = false;
+ }
+
+ if(!get_ffmpeg_video_codec(VideoCodec::HEVC_VULKAN, vendor)) {
+ supported_video_codecs_vulkan->hevc.supported = false;
+ supported_video_codecs_vulkan->hevc_hdr.supported = false;
+ supported_video_codecs_vulkan->hevc_10bit.supported = false;
+ }
+}
+
+static void list_supported_video_codecs(gsr_egl *egl, bool wayland) {
+ // Don't clean it up on purpose, to speed up shutdown
+ gsr_supported_video_codecs supported_video_codecs;
+ get_supported_video_codecs(egl, VideoCodec::H264, false, false, &supported_video_codecs);
+
+ gsr_supported_video_codecs supported_video_codecs_vulkan;
+ get_supported_video_codecs(egl, VideoCodec::H264_VULKAN, false, false, &supported_video_codecs_vulkan);
+
+ set_supported_video_codecs_ffmpeg(&supported_video_codecs, &supported_video_codecs_vulkan, egl->gpu_info.vendor);
+
+ if(supported_video_codecs.h264.supported)
+ puts("h264");
+ if(avcodec_find_encoder_by_name("libx264"))
+ puts("h264_software");
+ if(supported_video_codecs.hevc.supported)
+ puts("hevc");
+ if(supported_video_codecs.hevc_hdr.supported && wayland)
+ puts("hevc_hdr");
+ if(supported_video_codecs.hevc_10bit.supported)
+ puts("hevc_10bit");
+ if(supported_video_codecs.av1.supported)
+ puts("av1");
+ if(supported_video_codecs.av1_hdr.supported && wayland)
+ puts("av1_hdr");
+ if(supported_video_codecs.av1_10bit.supported)
+ puts("av1_10bit");
+ if(supported_video_codecs.vp8.supported)
+ puts("vp8");
+ if(supported_video_codecs.vp9.supported)
+ puts("vp9");
+ //if(supported_video_codecs_vulkan.h264.supported)
+ // puts("h264_vulkan");
+ //if(supported_video_codecs_vulkan.hevc.supported)
+ // puts("hevc_vulkan"); // TODO: hdr, 10 bit
+}
+
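+// Monitor capture goes through DRM/KMS on Wayland and on non-NVIDIA GPUs;
+// NVIDIA on X11 uses NvFBC instead (see create_capture_impl).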
+static bool monitor_capture_use_drm(gsr_egl *egl, bool wayland) {
+ return wayland || egl->gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA;
+}
+
+typedef struct {
+ bool wayland;
+ gsr_egl *egl;
+} capture_options_callback;
+
+static void output_monitor_info(const gsr_monitor *monitor, void *userdata) {
+ const capture_options_callback *options = (capture_options_callback*)userdata;
+ if(options->wayland && monitor_capture_use_drm(options->egl, options->wayland)) {
+ vec2i monitor_size = monitor->size;
+ const gsr_monitor_rotation rot = drm_monitor_get_display_server_rotation(options->egl, monitor);
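+ // DRM reports the mode size in the panel's native orientation, so swap
+ // width/height for 90/270 degree rotations to get the logical monitor size.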
+ if(rot == GSR_MONITOR_ROT_90 || rot == GSR_MONITOR_ROT_270)
+ std::swap(monitor_size.x, monitor_size.y);
+ printf("%.*s|%dx%d\n", monitor->name_len, monitor->name, monitor_size.x, monitor_size.y);
+ } else {
+ printf("%.*s|%dx%d\n", monitor->name_len, monitor->name, monitor->size.x, monitor->size.y);
+ }
+}
+
+static void list_supported_capture_options(gsr_egl *egl, bool wayland) {
+ if(!wayland) {
+ puts("window");
+ puts("focused");
+ }
+
+ capture_options_callback options;
+ options.wayland = wayland;
+ options.egl = egl;
+ if(monitor_capture_use_drm(egl, wayland)) {
+ const bool is_x11 = gsr_egl_get_display_server(egl) == GSR_DISPLAY_SERVER_X11;
+ const gsr_connection_type connection_type = is_x11 ? GSR_CONNECTION_X11 : GSR_CONNECTION_DRM;
+ for_each_active_monitor_output(egl, connection_type, output_monitor_info, &options);
+ } else {
+ puts("screen"); // All monitors in one, only available on Nvidia X11
+ for_each_active_monitor_output(egl, GSR_CONNECTION_X11, output_monitor_info, &options);
+ }
+
+#ifdef GSR_PORTAL
+ // Desktop portal capture on x11 doesn't seem to be hardware accelerated
+ if(!wayland)
+ return;
+
+ gsr_dbus dbus;
+ if(!gsr_dbus_init(&dbus, NULL))
+ return;
+
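+ // Probe only: if a ScreenCast session can be created then portal capture is usable.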
+ char *session_handle = NULL;
+ if(gsr_dbus_screencast_create_session(&dbus, &session_handle) == 0) {
+ free(session_handle);
+ puts("portal");
+ }
+ gsr_dbus_deinit(&dbus);
+#endif
+}
+
+static void info_command() {
bool wayland = false;
Display *dpy = XOpenDisplay(nullptr);
if (!dpy) {
@@ -1530,46 +1961,81 @@ static void list_supported_video_codecs() {
if(!wayland)
wayland = is_xwayland(dpy);
+ if(!wayland && is_using_prime_run()) {
+ // Disable prime-run and similar options as they don't work here: the monitor to capture has to be connected to the same device that does the encoding.
+ // This is fine on wayland since nvidia uses the drm interface there and the monitor query checks the monitors connected
+ // to the drm device.
+ fprintf(stderr, "Warning: use of prime-run on X11 is not supported. Disabling prime-run\n");
+ disable_prime_run();
+ }
+
gsr_egl egl;
if(!gsr_egl_load(&egl, dpy, wayland, false)) {
fprintf(stderr, "gsr error: failed to load opengl\n");
- _exit(1);
+ _exit(22);
}
- char card_path[128];
- card_path[0] = '\0';
- if(wayland || egl.gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA) {
+ egl.card_path[0] = '\0';
+ if(monitor_capture_use_drm(&egl, wayland)) {
// TODO: Allow specifying another card, and in other places
- if(!gsr_get_valid_card_path(&egl, card_path, false)) {
- fprintf(stderr, "Error: no /dev/dri/cardX device found. If you are running GPU Screen Recorder with prime-run then try running without it. Also make sure that you have at least one connected monitor or record a single window instead on X11\n");
- _exit(2);
+ if(!gsr_get_valid_card_path(&egl, egl.card_path, false)) {
+ fprintf(stderr, "Error: no /dev/dri/cardX device found. Make sure that you have at least one monitor connected\n");
+ _exit(23);
}
}
av_log_set_level(AV_LOG_FATAL);
- // TODO: Output hdr
- if(find_h264_encoder(egl.gpu_info.vendor, card_path))
- puts("h264");
- if(find_hevc_encoder(egl.gpu_info.vendor, card_path))
- puts("hevc");
- if(find_av1_encoder(egl.gpu_info.vendor, card_path))
- puts("av1");
+ puts("section=system_info");
+ list_system_info(wayland);
+ if(egl.gpu_info.is_steam_deck)
+ puts("is_steam_deck|yes");
+ else
+ puts("is_steam_deck|no");
+ puts("section=gpu_info");
+ list_gpu_info(&egl);
+ puts("section=video_codecs");
+ list_supported_video_codecs(&egl, wayland);
+ puts("section=capture_options");
+ list_supported_capture_options(&egl, wayland);
fflush(stdout);
- gsr_egl_unload(&egl);
- if(dpy)
- XCloseDisplay(dpy);
+ // Not needed as this will just slow down shutdown
+ //gsr_egl_unload(&egl);
+ //if(dpy)
+ // XCloseDisplay(dpy);
+
+ _exit(0);
}
-static gsr_capture* create_capture_impl(const char *window_str, const char *screen_region, bool wayland, gsr_egl *egl, int fps, bool overclock, VideoCodec video_codec, gsr_color_range color_range, bool record_cursor, bool track_damage, bool use_software_video_encoder) {
+static void list_audio_devices_command() {
+ const AudioDevices audio_devices = get_pulseaudio_inputs();
+
+ if(!audio_devices.default_output.empty())
+ puts("default_output|Default output");
+
+ if(!audio_devices.default_input.empty())
+ puts("default_input|Default input");
+
+ for(const auto &audio_input : audio_devices.audio_inputs) {
+ printf("%s|%s\n", audio_input.name.c_str(), audio_input.description.c_str());
+ }
+
+ fflush(stdout);
+ _exit(0);
+}
+
+static gsr_capture* create_capture_impl(std::string &window_str, const char *screen_region, bool wayland, gsr_egl *egl, int fps, VideoCodec video_codec, gsr_color_range color_range,
+ bool record_cursor, bool use_software_video_encoder, bool restore_portal_session, const char *portal_session_token_filepath,
+ gsr_color_depth color_depth)
+{
vec2i region_size = { 0, 0 };
Window src_window_id = None;
bool follow_focused = false;
gsr_capture *capture = nullptr;
- if(strcmp(window_str, "focused") == 0) {
+ if(strcmp(window_str.c_str(), "focused") == 0) {
if(wayland) {
fprintf(stderr, "Error: GPU Screen Recorder window capture only works in a pure X11 session. Xwayland is not supported. You can record a monitor instead on wayland\n");
_exit(2);
@@ -1591,35 +2057,60 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
}
follow_focused = true;
- } else if(contains_non_hex_number(window_str)) {
- if(wayland || egl->gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA) {
- if(strcmp(window_str, "screen") == 0) {
+ } else if(strcmp(window_str.c_str(), "portal") == 0) {
+#ifdef GSR_PORTAL
+ // Desktop portal capture on x11 doesn't seem to be hardware accelerated
+ if(!wayland) {
+ fprintf(stderr, "Error: desktop portal capture is not supported on X11\n");
+ _exit(1);
+ }
+
+ gsr_capture_portal_params portal_params;
+ portal_params.egl = egl;
+ portal_params.color_depth = color_depth;
+ portal_params.color_range = color_range;
+ portal_params.record_cursor = record_cursor;
+ portal_params.restore_portal_session = restore_portal_session;
+ portal_params.portal_session_token_filepath = portal_session_token_filepath;
+ capture = gsr_capture_portal_create(&portal_params);
+ if(!capture)
+ _exit(1);
+#else
+ fprintf(stderr, "Error: option '-w portal' used but GPU Screen Recorder was compiled without desktop portal support\n");
+ _exit(2);
+#endif
+ } else if(contains_non_hex_number(window_str.c_str())) {
+ if(monitor_capture_use_drm(egl, wayland)) {
+ const bool is_x11 = gsr_egl_get_display_server(egl) == GSR_DISPLAY_SERVER_X11;
+ const gsr_connection_type connection_type = is_x11 ? GSR_CONNECTION_X11 : GSR_CONNECTION_DRM;
+
+ if(strcmp(window_str.c_str(), "screen") == 0) {
FirstOutputCallback first_output;
first_output.output_name = NULL;
- for_each_active_monitor_output(egl, GSR_CONNECTION_DRM, get_first_output, &first_output);
+ for_each_active_monitor_output(egl, connection_type, get_first_output, &first_output);
if(first_output.output_name) {
window_str = first_output.output_name;
} else {
- fprintf(stderr, "Error: no available output found\n");
+ fprintf(stderr, "Error: no usable output found\n");
+ _exit(1);
+ }
+ } else {
+ gsr_monitor gmon;
+ if(!get_monitor_by_name(egl, connection_type, window_str.c_str(), &gmon)) {
+ fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str.c_str());
+ fprintf(stderr, " \"screen\"\n");
+ for_each_active_monitor_output(egl, connection_type, monitor_output_callback_print, NULL);
_exit(1);
}
- }
-
- gsr_monitor gmon;
- if(!get_monitor_by_name(egl, GSR_CONNECTION_DRM, window_str, &gmon)) {
- fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str);
- fprintf(stderr, " \"screen\"\n");
- for_each_active_monitor_output(egl, GSR_CONNECTION_DRM, monitor_output_callback_print, NULL);
- _exit(1);
}
} else {
- if(strcmp(window_str, "screen") != 0 && strcmp(window_str, "screen-direct") != 0 && strcmp(window_str, "screen-direct-force") != 0) {
+ if(strcmp(window_str.c_str(), "screen") != 0 && strcmp(window_str.c_str(), "screen-direct") != 0 && strcmp(window_str.c_str(), "screen-direct-force") != 0) {
gsr_monitor gmon;
- if(!get_monitor_by_name(egl, GSR_CONNECTION_X11, window_str, &gmon)) {
+ if(!get_monitor_by_name(egl, GSR_CONNECTION_X11, window_str.c_str(), &gmon)) {
const int screens_width = XWidthOfScreen(DefaultScreenOfDisplay(egl->x11.dpy));
const int screens_height = XHeightOfScreen(DefaultScreenOfDisplay(egl->x11.dpy));
- fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str);
+ fprintf(stderr, "gsr error: display \"%s\" not found, expected one of:\n", window_str.c_str());
fprintf(stderr, " \"screen\" (%dx%d+%d+%d)\n", screens_width, screens_height, 0, 0);
fprintf(stderr, " \"screen-direct\" (%dx%d+%d+%d)\n", screens_width, screens_height, 0, 0);
fprintf(stderr, " \"screen-direct-force\" (%dx%d+%d+%d)\n", screens_width, screens_height, 0, 0);
@@ -1629,70 +2120,47 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
}
}
- if(use_software_video_encoder && (wayland || egl->gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA)) {
+ if(egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA && !wayland) {
+ const char *capture_target = window_str.c_str();
+ bool direct_capture = strcmp(window_str.c_str(), "screen-direct") == 0;
+ if(direct_capture) {
+ capture_target = "screen";
+ // TODO: Temporarily disable direct capture because push model causes stuttering when it's direct capturing. This might be an nvfbc bug. This does not happen when using a compositor.
+ direct_capture = false;
+ fprintf(stderr, "Warning: screen-direct has temporary been disabled as it causes stuttering. This is likely a NvFBC bug. Falling back to \"screen\".\n");
+ }
+
+ if(strcmp(window_str.c_str(), "screen-direct-force") == 0) {
+ direct_capture = true;
+ capture_target = "screen";
+ }
+
+ gsr_capture_nvfbc_params nvfbc_params;
+ nvfbc_params.egl = egl;
+ nvfbc_params.display_to_capture = capture_target;
+ nvfbc_params.fps = fps;
+ nvfbc_params.pos = { 0, 0 };
+ nvfbc_params.size = { 0, 0 };
+ nvfbc_params.direct_capture = direct_capture;
+ nvfbc_params.color_depth = color_depth;
+ nvfbc_params.color_range = color_range;
+ nvfbc_params.record_cursor = record_cursor;
+ nvfbc_params.use_software_video_encoder = use_software_video_encoder;
+ capture = gsr_capture_nvfbc_create(&nvfbc_params);
+ if(!capture)
+ _exit(1);
+ } else {
gsr_capture_kms_params kms_params;
kms_params.egl = egl;
- kms_params.display_to_capture = window_str;
- kms_params.hdr = video_codec_is_hdr(video_codec);
+ kms_params.display_to_capture = window_str.c_str();
+ kms_params.color_depth = color_depth;
kms_params.color_range = color_range;
kms_params.record_cursor = record_cursor;
+ kms_params.hdr = video_codec_is_hdr(video_codec);
+ kms_params.fps = fps;
capture = gsr_capture_kms_create(&kms_params);
if(!capture)
_exit(1);
- } else {
- if(egl->gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA) {
- if(wayland) {
- gsr_capture_kms_params kms_params;
- kms_params.egl = egl;
- kms_params.display_to_capture = window_str;
- kms_params.hdr = video_codec_is_hdr(video_codec);
- kms_params.color_range = color_range;
- kms_params.record_cursor = record_cursor;
- capture = gsr_capture_kms_create(&kms_params);
- if(!capture)
- _exit(1);
- } else {
- const char *capture_target = window_str;
- bool direct_capture = strcmp(window_str, "screen-direct") == 0;
- if(direct_capture) {
- capture_target = "screen";
- // TODO: Temporary disable direct capture because push model causes stuttering when it's direct capturing. This might be a nvfbc bug. This does not happen when using a compositor.
- direct_capture = false;
- fprintf(stderr, "Warning: screen-direct has temporary been disabled as it causes stuttering. This is likely a NvFBC bug. Falling back to \"screen\".\n");
- }
-
- if(strcmp(window_str, "screen-direct-force") == 0) {
- direct_capture = true;
- capture_target = "screen";
- }
-
- gsr_capture_nvfbc_params nvfbc_params;
- nvfbc_params.egl = egl;
- nvfbc_params.display_to_capture = capture_target;
- nvfbc_params.fps = fps;
- nvfbc_params.pos = { 0, 0 };
- nvfbc_params.size = { 0, 0 };
- nvfbc_params.direct_capture = direct_capture;
- nvfbc_params.overclock = overclock;
- nvfbc_params.hdr = video_codec_is_hdr(video_codec);
- nvfbc_params.color_range = color_range;
- nvfbc_params.record_cursor = record_cursor;
- nvfbc_params.use_software_video_encoder = use_software_video_encoder;
- capture = gsr_capture_nvfbc_create(&nvfbc_params);
- if(!capture)
- _exit(1);
- }
- } else {
- gsr_capture_kms_params kms_params;
- kms_params.egl = egl;
- kms_params.display_to_capture = window_str;
- kms_params.hdr = video_codec_is_hdr(video_codec);
- kms_params.color_range = color_range;
- kms_params.record_cursor = record_cursor;
- capture = gsr_capture_kms_create(&kms_params);
- if(!capture)
- _exit(1);
- }
}
} else {
if(wayland) {
@@ -1701,9 +2169,9 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
}
errno = 0;
- src_window_id = strtol(window_str, nullptr, 0);
+ src_window_id = strtol(window_str.c_str(), nullptr, 0);
if(src_window_id == None || errno == EINVAL) {
- fprintf(stderr, "Invalid window number %s\n", window_str);
+ fprintf(stderr, "Invalid window number %s\n", window_str.c_str());
usage();
}
}
@@ -1716,7 +2184,7 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
xcomposite_params.region_size = region_size;
xcomposite_params.color_range = color_range;
xcomposite_params.record_cursor = record_cursor;
- xcomposite_params.track_damage = track_damage;
+ xcomposite_params.color_depth = color_depth;
capture = gsr_capture_xcomposite_create(&xcomposite_params);
if(!capture)
_exit(1);
@@ -1725,44 +2193,14 @@ static gsr_capture* create_capture_impl(const char *window_str, const char *scre
return capture;
}
-static gsr_video_encoder* create_video_encoder(gsr_egl *egl, bool overclock, bool hdr, bool use_software_video_encoder) {
- gsr_video_encoder *video_encoder = nullptr;
-
- if(use_software_video_encoder) {
- gsr_video_encoder_software_params params;
- params.egl = egl;
- params.hdr = hdr;
- video_encoder = gsr_video_encoder_software_create(&params);
- return video_encoder;
- }
-
- switch(egl->gpu_info.vendor) {
- case GSR_GPU_VENDOR_AMD:
- case GSR_GPU_VENDOR_INTEL: {
- gsr_video_encoder_vaapi_params params;
- params.egl = egl;
- params.hdr = hdr;
- video_encoder = gsr_video_encoder_vaapi_create(&params);
- break;
- }
- case GSR_GPU_VENDOR_NVIDIA: {
- gsr_video_encoder_cuda_params params;
- params.egl = egl;
- params.overclock = overclock;
- params.hdr = hdr;
- video_encoder = gsr_video_encoder_cuda_create(&params);
- break;
- }
- }
-
- return video_encoder;
-}
-
-static AVPixelFormat get_pixel_format(gsr_gpu_vendor vendor, bool use_software_video_encoder) {
+static AVPixelFormat get_pixel_format(VideoCodec video_codec, gsr_gpu_vendor vendor, bool use_software_video_encoder) {
if(use_software_video_encoder) {
return AV_PIX_FMT_NV12;
} else {
- return vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_CUDA : AV_PIX_FMT_VAAPI;
+ if(video_codec_is_vulkan(video_codec))
+ return AV_PIX_FMT_VULKAN;
+ else
+ return vendor == GSR_GPU_VENDOR_NVIDIA ? AV_PIX_FMT_CUDA : AV_PIX_FMT_VAAPI;
}
}
@@ -1778,6 +2216,350 @@ struct Arg {
}
};
+// Manually check if the audio inputs we give exist. This is only needed for pipewire, not pulseaudio.
+// Pipewire instead DEFAULTS TO THE DEFAULT AUDIO INPUT. THAT'S RETARDED.
+// OH, YOU MISSPELLED THE AUDIO INPUT? FUCK YOU
+static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &audio_devices, const Arg &audio_input_arg, bool &uses_amix) {
+ std::vector<MergedAudioInputs> requested_audio_inputs;
+ uses_amix = false;
+
+ for(const char *audio_input : audio_input_arg.values) {
+ if(!audio_input || audio_input[0] == '\0')
+ continue;
+
+ requested_audio_inputs.push_back({parse_audio_input_arg(audio_input)});
+ if(requested_audio_inputs.back().audio_inputs.size() > 1)
+ uses_amix = true;
+
+ for(AudioInput &request_audio_input : requested_audio_inputs.back().audio_inputs) {
+ bool match = false;
+
+ if(!audio_devices.default_output.empty() && request_audio_input.name == "default_output") {
+ request_audio_input.name = audio_devices.default_output;
+ if(request_audio_input.description.empty())
+ request_audio_input.description = "gsr-Default output";
+ match = true;
+ }
+
+ if(!audio_devices.default_input.empty() && request_audio_input.name == "default_input") {
+ request_audio_input.name = audio_devices.default_input;
+ if(request_audio_input.description.empty())
+ request_audio_input.description = "gsr-Default input";
+ match = true;
+ }
+
+ for(const auto &existing_audio_input : audio_devices.audio_inputs) {
+ if(request_audio_input.name == existing_audio_input.name) {
+ if(request_audio_input.description.empty())
+ request_audio_input.description = "gsr-" + existing_audio_input.description;
+
+ match = true;
+ break;
+ }
+ }
+
+ if(!match) {
+ fprintf(stderr, "Error: Audio input device '%s' is not a valid audio device, expected one of:\n", request_audio_input.name.c_str());
+ if(!audio_devices.default_output.empty())
+ fprintf(stderr, " default_output (Default output)\n");
+ if(!audio_devices.default_input.empty())
+ fprintf(stderr, " default_input (Default input)\n");
+ for(const auto &existing_audio_input : audio_devices.audio_inputs) {
+ fprintf(stderr, " %s (%s)\n", existing_audio_input.name.c_str(), existing_audio_input.description.c_str());
+ }
+ _exit(2);
+ }
+ }
+ }
+
+ return requested_audio_inputs;
+}
+
+static AudioCodec select_audio_codec_with_fallback(AudioCodec audio_codec, const std::string &file_extension, bool uses_amix) {
+ switch(audio_codec) {
+ case AudioCodec::AAC: {
+ if(file_extension == "webm") {
+ //audio_codec_to_use = "opus";
+ audio_codec = AudioCodec::OPUS;
+ fprintf(stderr, "Warning: .webm files only support opus audio codec, changing audio codec from aac to opus\n");
+ }
+ break;
+ }
+ case AudioCodec::OPUS: {
+ // TODO: Also check mpegts?
+ if(file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm") {
+ //audio_codec_to_use = "aac";
+ audio_codec = AudioCodec::AAC;
+ fprintf(stderr, "Warning: opus audio codec is only supported by .mp4, .mkv and .webm files, falling back to aac instead\n");
+ }
+ break;
+ }
+ case AudioCodec::FLAC: {
+ // TODO: Also check mpegts?
+ if(file_extension == "webm") {
+ //audio_codec_to_use = "opus";
+ audio_codec = AudioCodec::OPUS;
+ fprintf(stderr, "Warning: .webm files only support opus audio codec, changing audio codec from flac to opus\n");
+ } else if(file_extension != "mp4" && file_extension != "mkv") {
+ //audio_codec_to_use = "aac";
+ audio_codec = AudioCodec::AAC;
+ fprintf(stderr, "Warning: flac audio codec is only supported by .mp4 and .mkv files, falling back to aac instead\n");
+ } else if(uses_amix) {
+ // TODO: remove this? is it true anymore?
+ //audio_codec_to_use = "opus";
+ audio_codec = AudioCodec::OPUS;
+ fprintf(stderr, "Warning: flac audio codec is not supported when mixing audio sources, falling back to opus instead\n");
+ }
+ break;
+ }
+ }
+ return audio_codec;
+}
+
+static const char* video_codec_to_string(VideoCodec video_codec) {
+ switch(video_codec) {
+ case VideoCodec::H264: return "h264";
+ case VideoCodec::HEVC: return "hevc";
+ case VideoCodec::HEVC_HDR: return "hevc_hdr";
+ case VideoCodec::HEVC_10BIT: return "hevc_10bit";
+ case VideoCodec::AV1: return "av1";
+ case VideoCodec::AV1_HDR: return "av1_hdr";
+ case VideoCodec::AV1_10BIT: return "av1_10bit";
+ case VideoCodec::VP8: return "vp8";
+ case VideoCodec::VP9: return "vp9";
+ case VideoCodec::H264_VULKAN: return "h264_vulkan";
+ case VideoCodec::HEVC_VULKAN: return "hevc_vulkan";
+ }
+ return "";
+}
+
+static bool video_codec_only_supports_low_power_mode(const gsr_supported_video_codecs &supported_video_codecs, VideoCodec video_codec) {
+ switch(video_codec) {
+ case VideoCodec::H264: return supported_video_codecs.h264.low_power;
+ case VideoCodec::HEVC: return supported_video_codecs.hevc.low_power;
+ case VideoCodec::HEVC_HDR: return supported_video_codecs.hevc_hdr.low_power;
+ case VideoCodec::HEVC_10BIT: return supported_video_codecs.hevc_10bit.low_power;
+ case VideoCodec::AV1: return supported_video_codecs.av1.low_power;
+ case VideoCodec::AV1_HDR: return supported_video_codecs.av1_hdr.low_power;
+ case VideoCodec::AV1_10BIT: return supported_video_codecs.av1_10bit.low_power;
+ case VideoCodec::VP8: return supported_video_codecs.vp8.low_power;
+ case VideoCodec::VP9: return supported_video_codecs.vp9.low_power;
+ case VideoCodec::H264_VULKAN: return supported_video_codecs.h264.low_power;
+ case VideoCodec::HEVC_VULKAN: return supported_video_codecs.hevc.low_power; // TODO: hdr, 10 bit
+ }
+ return false;
+}
+
+static const AVCodec* pick_video_codec(VideoCodec *video_codec, gsr_egl *egl, bool use_software_video_encoder, bool video_codec_auto, const char *video_codec_to_use, bool is_flv, bool *low_power) {
+ // TODO: software encoder for hevc, av1, vp8 and vp9
+ *low_power = false;
+
+ gsr_supported_video_codecs supported_video_codecs;
+ if(!get_supported_video_codecs(egl, *video_codec, use_software_video_encoder, true, &supported_video_codecs)) {
+ fprintf(stderr, "Error: failed to query for supported video codecs\n");
+ _exit(11);
+ }
+
+ const AVCodec *video_codec_f = nullptr;
+
+ switch(*video_codec) {
+ case VideoCodec::H264: {
+ if(use_software_video_encoder)
+ video_codec_f = avcodec_find_encoder_by_name("libx264");
+ else if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC: {
+ if(supported_video_codecs.hevc.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC_HDR: {
+ if(supported_video_codecs.hevc_hdr.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC_10BIT: {
+ if(supported_video_codecs.hevc_10bit.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::AV1: {
+ if(supported_video_codecs.av1.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::AV1_HDR: {
+ if(supported_video_codecs.av1_hdr.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::AV1_10BIT: {
+ if(supported_video_codecs.av1_10bit.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::VP8: {
+ if(supported_video_codecs.vp8.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::VP9: {
+ if(supported_video_codecs.vp9.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::H264_VULKAN: {
+ if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC_VULKAN: {
+ // TODO: hdr, 10 bit
+ if(supported_video_codecs.hevc.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ }
+
+ if(!video_codec_auto && !video_codec_f && !is_flv) {
+ switch(*video_codec) {
+ case VideoCodec::H264: {
+ fprintf(stderr, "Warning: selected video codec h264 is not supported, trying hevc instead\n");
+ video_codec_to_use = "hevc";
+ if(supported_video_codecs.hevc.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC:
+ case VideoCodec::HEVC_HDR:
+ case VideoCodec::HEVC_10BIT: {
+ fprintf(stderr, "Warning: selected video codec hevc is not supported, trying h264 instead\n");
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::AV1:
+ case VideoCodec::AV1_HDR:
+ case VideoCodec::AV1_10BIT: {
+ fprintf(stderr, "Warning: selected video codec av1 is not supported, trying h264 instead\n");
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::VP8:
+ case VideoCodec::VP9:
+ // TODO: Can't fall back to another codec because webm only supports vp8/vp9
+ break;
+ case VideoCodec::H264_VULKAN: {
+ fprintf(stderr, "Warning: selected video codec h264_vulkan is not supported, trying h264 instead\n");
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ // Need to do a query again because this time it's without vulkan
+ if(!get_supported_video_codecs(egl, *video_codec, use_software_video_encoder, true, &supported_video_codecs)) {
+ fprintf(stderr, "Error: failed to query for supported video codecs\n");
+ _exit(11);
+ }
+ if(supported_video_codecs.h264.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ case VideoCodec::HEVC_VULKAN: {
+ fprintf(stderr, "Warning: selected video codec hevc_vulkan is not supported, trying hevc instead\n");
+ video_codec_to_use = "hevc";
+ *video_codec = VideoCodec::HEVC;
+ // Need to do a query again because this time it's without vulkan
+ if(!get_supported_video_codecs(egl, *video_codec, use_software_video_encoder, true, &supported_video_codecs)) {
+ fprintf(stderr, "Error: failed to query for supported video codecs\n");
+ _exit(11);
+ }
+ if(supported_video_codecs.hevc.supported)
+ video_codec_f = get_ffmpeg_video_codec(*video_codec, egl->gpu_info.vendor);
+ break;
+ }
+ }
+ }
+
+ (void)video_codec_to_use;
+
+ if(!video_codec_f) {
+ const char *video_codec_name = video_codec_to_string(*video_codec);
+ fprintf(stderr, "Error: your gpu does not support '%s' video codec. If you are sure that your gpu does support '%s' video encoding and you are using an AMD/Intel GPU,\n"
+ " then make sure you have installed the GPU specific vaapi packages (intel-media-driver, libva-intel-driver, libva-mesa-driver and linux-firmware).\n"
+ " It's also possible that your distro has disabled hardware accelerated video encoding for '%s' video codec.\n"
+ " This may be the case on corporate distros such as Manjaro, Fedora or OpenSUSE.\n"
+ " You can test this by running 'vainfo | grep VAEntrypointEncSlice' to see if it matches any H264/HEVC/AV1/VP8/VP9 profile.\n"
+ " On such distros, you need to manually install mesa from source to enable H264/HEVC hardware acceleration, or use a more user friendly distro. Alternatively record with AV1 if supported by your GPU.\n"
+ " You can alternatively use the flatpak version of GPU Screen Recorder (https://flathub.org/apps/com.dec05eba.gpu_screen_recorder) which bypasses system issues with patented H264/HEVC codecs.\n"
+ " Make sure you have mesa-extra freedesktop runtime installed when using the flatpak (this should be the default), which can be installed with this command:\n"
+ " flatpak install --system org.freedesktop.Platform.GL.default//23.08-extra\n"
+ " If your GPU doesn't support hardware accelerated video encoding then you can use '-encoder cpu' option to encode with your cpu instead.\n", video_codec_name, video_codec_name, video_codec_name);
+ _exit(2);
+ }
+
+ *low_power = video_codec_only_supports_low_power_mode(supported_video_codecs, *video_codec);
+
+ return video_codec_f;
+}
+
+static const AVCodec* select_video_codec_with_fallback(VideoCodec *video_codec, const char *video_codec_to_use, const char *file_extension, bool use_software_video_encoder, gsr_egl *egl, bool *low_power) {
+ const bool video_codec_auto = strcmp(video_codec_to_use, "auto") == 0;
+ if(video_codec_auto) {
+ if(strcmp(file_extension, "webm") == 0) {
+ fprintf(stderr, "Info: using vp8 encoder because a codec was not specified and the file extension is .webm\n");
+ video_codec_to_use = "vp8";
+ *video_codec = VideoCodec::VP8;
+ } else {
+ fprintf(stderr, "Info: using h264 encoder because a codec was not specified\n");
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ }
+ }
+
+ // TODO: Allow hevc, vp9 and av1 in (enhanced) flv (supported since ffmpeg 6.1)
+ const bool is_flv = strcmp(file_extension, "flv") == 0;
+ if(is_flv) {
+ if(*video_codec != VideoCodec::H264) {
+ video_codec_to_use = "h264";
+ *video_codec = VideoCodec::H264;
+ fprintf(stderr, "Warning: hevc/av1 is not compatible with flv, falling back to h264 instead.\n");
+ }
+
+ // if(audio_codec != AudioCodec::AAC) {
+ // audio_codec_to_use = "aac";
+ // audio_codec = AudioCodec::AAC;
+ // fprintf(stderr, "Warning: flv only supports aac, falling back to aac instead.\n");
+ // }
+ }
+
+ const bool is_hls = strcmp(file_extension, "m3u8") == 0;
+ if(is_hls) {
+ if(video_codec_is_av1(*video_codec)) {
+ video_codec_to_use = "hevc";
+ *video_codec = VideoCodec::HEVC;
+ fprintf(stderr, "Warning: av1 is not compatible with hls (m3u8), falling back to hevc instead.\n");
+ }
+
+ // if(audio_codec != AudioCodec::AAC) {
+ // audio_codec_to_use = "aac";
+ // audio_codec = AudioCodec::AAC;
+ // fprintf(stderr, "Warning: hls (m3u8) only supports aac, falling back to aac instead.\n");
+ // }
+ }
+
+ if(use_software_video_encoder && *video_codec != VideoCodec::H264) {
+ fprintf(stderr, "Error: \"-encoder cpu\" option is currently only available when using h264 codec option (-k)\n");
+ usage();
+ }
+
+ return pick_video_codec(video_codec, egl, use_software_video_encoder, video_codec_auto, video_codec_to_use, is_flv, low_power);
+}
+
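The container rules in select_video_codec_with_fallback reduce to a small pure mapping from (file extension, requested codec) to the effective codec. A minimal standalone sketch of just those rules (effective_codec is a hypothetical name, not part of this patch; it uses only video_codec_is_av1 and the VideoCodec values from above):

    static VideoCodec effective_codec(const char *ext, VideoCodec requested, bool requested_auto) {
        if(requested_auto) // auto: vp8 for webm, h264 for everything else
            return strcmp(ext, "webm") == 0 ? VideoCodec::VP8 : VideoCodec::H264;
        if(strcmp(ext, "flv") == 0) // flv only carries h264 here (enhanced flv is a TODO above)
            return VideoCodec::H264;
        if(strcmp(ext, "m3u8") == 0 && video_codec_is_av1(requested)) // hls can't carry av1
            return VideoCodec::HEVC;
        return requested;
    }
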
int main(int argc, char **argv) {
signal(SIGINT, stop_handler);
signal(SIGUSR1, save_replay_handler);
@@ -1803,8 +2585,18 @@ int main(int argc, char **argv) {
if(argc == 2 && (strcmp(argv[1], "-h") == 0 || strcmp(argv[1], "--help") == 0))
usage_full();
- if(argc == 2 && strcmp(argv[1], "--list-supported-video-codecs") == 0) {
- list_supported_video_codecs();
+ if(argc == 2 && strcmp(argv[1], "--info") == 0) {
+ info_command();
+ _exit(0);
+ }
+
+ if(argc == 2 && strcmp(argv[1], "--list-audio-devices") == 0) {
+ list_audio_devices_command();
+ _exit(0);
+ }
+
+ if(argc == 2 && strcmp(argv[1], "--version") == 0) {
+ puts(GSR_VERSION);
_exit(0);
}
@@ -1824,14 +2616,17 @@ int main(int argc, char **argv) {
{ "-ab", Arg { {}, true, false } },
{ "-oc", Arg { {}, true, false } },
{ "-fm", Arg { {}, true, false } },
+ { "-bm", Arg { {}, true, false } },
{ "-pixfmt", Arg { {}, true, false } },
{ "-v", Arg { {}, true, false } },
- { "-mf", Arg { {}, true, false } },
+ { "-mf", Arg { {}, true, false } }, // TODO: Remove, this exists for backwards compatibility. -df should be used instead
+ { "-df", Arg { {}, true, false } },
{ "-sc", Arg { {}, true, false } },
{ "-cr", Arg { {}, true, false } },
{ "-cursor", Arg { {}, true, false } },
- { "-gopm", Arg { {}, true, false } }, // deprecated, used keyint instead
{ "-keyint", Arg { {}, true, false } },
+ { "-restore-portal-session", Arg { {}, true, false } },
+ { "-portal-session-token-filepath", Arg { {}, true, false } },
{ "-encoder", Arg { {}, true, false } },
};
@@ -1873,12 +2668,24 @@ int main(int argc, char **argv) {
video_codec = VideoCodec::HEVC;
} else if(strcmp(video_codec_to_use, "hevc_hdr") == 0) {
video_codec = VideoCodec::HEVC_HDR;
+ } else if(strcmp(video_codec_to_use, "hevc_10bit") == 0) {
+ video_codec = VideoCodec::HEVC_10BIT;
} else if(strcmp(video_codec_to_use, "av1") == 0) {
video_codec = VideoCodec::AV1;
} else if(strcmp(video_codec_to_use, "av1_hdr") == 0) {
video_codec = VideoCodec::AV1_HDR;
+ } else if(strcmp(video_codec_to_use, "av1_10bit") == 0) {
+ video_codec = VideoCodec::AV1_10BIT;
+ } else if(strcmp(video_codec_to_use, "vp8") == 0) {
+ video_codec = VideoCodec::VP8;
+ } else if(strcmp(video_codec_to_use, "vp9") == 0) {
+ video_codec = VideoCodec::VP9;
+ //} else if(strcmp(video_codec_to_use, "h264_vulkan") == 0) {
+ // video_codec = VideoCodec::H264_VULKAN;
+ //} else if(strcmp(video_codec_to_use, "hevc_vulkan") == 0) {
+ // video_codec = VideoCodec::HEVC_VULKAN;
} else if(strcmp(video_codec_to_use, "auto") != 0) {
- fprintf(stderr, "Error: -k should either be either 'auto', 'h264', 'hevc', 'hevc_hdr', 'av1' or 'av1_hdr', got: '%s'\n", video_codec_to_use);
+ fprintf(stderr, "Error: -k should either be either 'auto', 'h264', 'hevc', 'av1', 'vp8', 'vp9', 'hevc_hdr', 'av1_hdr', 'hevc_10bit' or 'av1_10bit', got: '%s'\n", video_codec_to_use);
usage();
}
@@ -1982,20 +2789,48 @@ int main(int argc, char **argv) {
usage();
}
- bool make_folders = false;
- const char *make_folders_str = args["-mf"].value();
- if(!make_folders_str)
- make_folders_str = "no";
+ bool date_folders = false;
+ const char *date_folders_str = args["-df"].value();
+ if(!date_folders_str) {
+ date_folders_str = args["-mf"].value();
+ if(date_folders_str)
+ fprintf(stderr, "Warning: -mf is deprecated, use -df instead\n");
+ }
+ if(!date_folders_str)
+ date_folders_str = "no";
+
+ if(strcmp(date_folders_str, "yes") == 0) {
+ date_folders = true;
+ } else if(strcmp(date_folders_str, "no") == 0) {
+ date_folders = false;
+ } else {
+ fprintf(stderr, "Error: -df should either be either 'yes' or 'no', got: '%s'\n", date_folders_str);
+ usage();
+ }
+
+ bool restore_portal_session = false;
+ const char *restore_portal_session_str = args["-restore-portal-session"].value();
+ if(!restore_portal_session_str)
+ restore_portal_session_str = "no";
- if(strcmp(make_folders_str, "yes") == 0) {
- make_folders = true;
- } else if(strcmp(make_folders_str, "no") == 0) {
- make_folders = false;
+ if(strcmp(restore_portal_session_str, "yes") == 0) {
+ restore_portal_session = true;
+ } else if(strcmp(restore_portal_session_str, "no") == 0) {
+ restore_portal_session = false;
} else {
- fprintf(stderr, "Error: -mf should either be either 'yes' or 'no', got: '%s'\n", make_folders_str);
+ fprintf(stderr, "Error: -restore-portal-session should either be either 'yes' or 'no', got: '%s'\n", restore_portal_session_str);
usage();
}
+ const char *portal_session_token_filepath = args["-portal-session-token-filepath"].value();
+ if(portal_session_token_filepath) {
+ int len = strlen(portal_session_token_filepath);
+ if(len > 0 && portal_session_token_filepath[len - 1] == '/') {
+ fprintf(stderr, "Error: -portal-session-token-filepath should be a path to a file but it ends with a /: %s\n", portal_session_token_filepath);
+ _exit(1);
+ }
+ }
+
const char *recording_saved_script = args["-sc"].value();
if(recording_saved_script) {
struct stat buf;
@@ -2025,44 +2860,12 @@ int main(int argc, char **argv) {
}
const Arg &audio_input_arg = args["-a"];
- std::vector<AudioInput> audio_inputs;
+ AudioDevices audio_devices;
if(!audio_input_arg.values.empty())
- audio_inputs = get_pulseaudio_inputs();
- std::vector<MergedAudioInputs> requested_audio_inputs;
- bool uses_amix = false;
+ audio_devices = get_pulseaudio_inputs();
- // Manually check if the audio inputs we give exist. This is only needed for pipewire, not pulseaudio.
- // Pipewire instead DEFAULTS TO THE DEFAULT AUDIO INPUT. THAT'S RETARDED.
- // OH, YOU MISSPELLED THE AUDIO INPUT? FUCK YOU
- for(const char *audio_input : audio_input_arg.values) {
- if(!audio_input || audio_input[0] == '\0')
- continue;
-
- requested_audio_inputs.push_back({parse_audio_input_arg(audio_input)});
- if(requested_audio_inputs.back().audio_inputs.size() > 1)
- uses_amix = true;
-
- for(AudioInput &request_audio_input : requested_audio_inputs.back().audio_inputs) {
- bool match = false;
- for(const auto &existing_audio_input : audio_inputs) {
- if(strcmp(request_audio_input.name.c_str(), existing_audio_input.name.c_str()) == 0) {
- if(request_audio_input.description.empty())
- request_audio_input.description = "gsr-" + existing_audio_input.description;
-
- match = true;
- break;
- }
- }
-
- if(!match) {
- fprintf(stderr, "Error: Audio input device '%s' is not a valid audio device, expected one of:\n", request_audio_input.name.c_str());
- for(const auto &existing_audio_input : audio_inputs) {
- fprintf(stderr, " %s (%s)\n", existing_audio_input.name.c_str(), existing_audio_input.description.c_str());
- }
- _exit(2);
- }
- }
- }
+ bool uses_amix = false;
+ std::vector<MergedAudioInputs> requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg, uses_amix);
const char *container_format = args["-c"].value();
if(container_format && strcmp(container_format, "mkv") == 0)
@@ -2105,7 +2908,12 @@ int main(int argc, char **argv) {
 replay_buffer_size_secs += std::ceil(keyint); // Add a few seconds to account for packets lost when non-keyframe packets are skipped
}
- const char *window_str = strdup(args["-w"].value());
+ std::string window_str = args["-w"].value();
+ const bool is_portal_capture = strcmp(window_str.c_str(), "portal") == 0;
+
+ if(!restore_portal_session && is_portal_capture) {
+ fprintf(stderr, "gsr info: option '-w portal' was used without '-restore-portal-session yes'. The previous screencast session will be ignored\n");
+ }
bool wayland = false;
Display *dpy = XOpenDisplay(nullptr);
@@ -2120,18 +2928,36 @@ int main(int argc, char **argv) {
if(!wayland)
wayland = is_xwayland(dpy);
+ if(!wayland && is_using_prime_run()) {
+ // Disable prime-run and similar options as it doesn't work, the monitor to capture has to be run on the same device.
+ // This is fine on wayland since nvidia uses drm interface there and the monitor query checks the monitors connected
+ // to the drm device.
+ fprintf(stderr, "Warning: use of prime-run on X11 is not supported. Disabling prime-run\n");
+ disable_prime_run();
+ }
+
+ if(is_portal_capture && is_using_prime_run()) {
+ fprintf(stderr, "Warning: use of prime-run with -w portal option is currently not supported. Disabling prime-run\n");
+ disable_prime_run();
+ }
+
if(video_codec_is_hdr(video_codec) && !wayland) {
fprintf(stderr, "Error: hdr video codec option %s is not available on X11\n", video_codec_to_use);
_exit(1);
}
- const bool is_monitor_capture = strcmp(window_str, "focused") != 0 && contains_non_hex_number(window_str);
+ const bool is_monitor_capture = strcmp(window_str.c_str(), "focused") != 0 && !is_portal_capture && contains_non_hex_number(window_str.c_str());
gsr_egl egl;
if(!gsr_egl_load(&egl, dpy, wayland, is_monitor_capture)) {
fprintf(stderr, "gsr error: failed to load opengl\n");
_exit(1);
}
+ if(egl.gpu_info.is_steam_deck) {
+ fprintf(stderr, "gsr warning: steam deck has multiple driver issues. One of them has been reported here: https://github.com/ValveSoftware/SteamOS/issues/1609\n"
+ "If you have issues with GPU Screen Recorder on steam deck that you don't have on a desktop computer then report the issue to Valve and/or AMD.\n");
+ }
+
bool very_old_gpu = false;
if(egl.gpu_info.vendor == GSR_GPU_VENDOR_NVIDIA && egl.gpu_info.gpu_version != 0 && egl.gpu_info.gpu_version < 900) {
@@ -2150,14 +2976,19 @@ int main(int argc, char **argv) {
}
egl.card_path[0] = '\0';
- if(wayland || egl.gpu_info.vendor != GSR_GPU_VENDOR_NVIDIA) {
+ if(monitor_capture_use_drm(&egl, wayland)) {
// TODO: Allow specifying another card, and in other places
if(!gsr_get_valid_card_path(&egl, egl.card_path, is_monitor_capture)) {
- fprintf(stderr, "Error: no /dev/dri/cardX device found. If you are running GPU Screen Recorder with prime-run then try running without it. Also make sure that you have at least one connected monitor or record a single window instead on X11\n");
+ fprintf(stderr, "Error: no /dev/dri/cardX device found. Make sure that you have at least one monitor connected or record a single window instead on X11 or record with the -w portal option\n");
_exit(2);
}
}
+ // if(wayland && is_monitor_capture) {
+ // fprintf(stderr, "gsr warning: it's not possible to sync video to recorded monitor exactly on wayland when recording a monitor."
+ // " If you experience stutter in the video then record with portal capture option instead (-w portal) or use X11 instead\n");
+ // }
+
// TODO: Fix constant framerate not working properly on amd/intel because capture framerate gets locked to the same framerate as
// game framerate, which doesn't work well when you need to encode multiple duplicate frames (AMD/Intel is slow at encoding!).
// It also appears to skip audio frames on nvidia wayland? why? that should be fine, but it causes video stuttering because of audio/video sync.
@@ -2177,11 +3008,35 @@ int main(int argc, char **argv) {
usage();
}
- if(framerate_mode == FramerateMode::CONTENT && (wayland || is_monitor_capture)) {
- fprintf(stderr, "Error: -fm 'content' is currently only supported on X11 and when capturing a single window.\n");
+ if(framerate_mode == FramerateMode::CONTENT && wayland && !is_portal_capture) {
+ fprintf(stderr, "Error: -fm 'content' is currently only supported on X11 or when using portal capture option\n");
+ usage();
+ }
+
+ BitrateMode bitrate_mode = BitrateMode::QP;
+ const char *bitrate_mode_str = args["-bm"].value();
+ if(!bitrate_mode_str)
+ bitrate_mode_str = "auto";
+
+ if(strcmp(bitrate_mode_str, "qp") == 0) {
+ bitrate_mode = BitrateMode::QP;
+ } else if(strcmp(bitrate_mode_str, "vbr") == 0) {
+ bitrate_mode = BitrateMode::VBR;
+ } else if(strcmp(bitrate_mode_str, "auto") != 0) {
+ fprintf(stderr, "Error: -bm should either be either 'auto', 'qp', 'vbr', got: '%s'\n", bitrate_mode_str);
usage();
}
+ if(strcmp(bitrate_mode_str, "auto") == 0) {
+ // QP is broken on steam deck, see https://github.com/ValveSoftware/SteamOS/issues/1609
+ bitrate_mode = egl.gpu_info.is_steam_deck ? BitrateMode::VBR : BitrateMode::QP;
+ }
+
+ if(use_software_video_encoder && bitrate_mode != BitrateMode::QP) {
+ fprintf(stderr, "Warning: bitrate mode has been forcefully set to qp because software encoding option doesn't support vbr option\n");
+ bitrate_mode = BitrateMode::QP;
+ }
+
gsr_color_range color_range = GSR_COLOR_RANGE_LIMITED;
const char *color_range_str = args["-cr"].value();
if(!color_range_str)
@@ -2198,7 +3053,7 @@ int main(int argc, char **argv) {
const char *screen_region = args["-s"].value();
- if(screen_region && strcmp(window_str, "focused") != 0) {
+ if(screen_region && strcmp(window_str.c_str(), "focused") != 0) {
fprintf(stderr, "Error: option -s is only available when using -w focused\n");
usage();
}
@@ -2215,7 +3070,7 @@ int main(int argc, char **argv) {
} else {
if(replay_buffer_size_secs == -1) {
char directory_buf[PATH_MAX];
- strcpy(directory_buf, filename);
+ snprintf(directory_buf, sizeof(directory_buf), "%s", filename);
char *directory = dirname(directory_buf);
if(strcmp(directory, ".") != 0 && strcmp(directory, "/") != 0) {
if(create_directory_recursive(directory) != 0) {
@@ -2275,172 +3130,19 @@ int main(int argc, char **argv) {
}
const bool force_no_audio_offset = is_livestream || is_output_piped || (file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm");
-
- switch(audio_codec) {
- case AudioCodec::AAC: {
- if(file_extension == "webm") {
- audio_codec_to_use = "opus";
- audio_codec = AudioCodec::OPUS;
- fprintf(stderr, "Warning: .webm files only support opus audio codec, changing audio codec from aac to opus\n");
- }
- break;
- }
- case AudioCodec::OPUS: {
- // TODO: Also check mpegts?
- if(file_extension != "mp4" && file_extension != "mkv" && file_extension != "webm") {
- audio_codec_to_use = "aac";
- audio_codec = AudioCodec::AAC;
- fprintf(stderr, "Warning: opus audio codec is only supported by .mp4, .mkv and .webm files, falling back to aac instead\n");
- }
- break;
- }
- case AudioCodec::FLAC: {
- // TODO: Also check mpegts?
- if(file_extension == "webm") {
- audio_codec_to_use = "opus";
- audio_codec = AudioCodec::OPUS;
- fprintf(stderr, "Warning: .webm files only support opus audio codec, changing audio codec from flac to opus\n");
- } else if(file_extension != "mp4" && file_extension != "mkv") {
- audio_codec_to_use = "aac";
- audio_codec = AudioCodec::AAC;
- fprintf(stderr, "Warning: flac audio codec is only supported by .mp4 and .mkv files, falling back to aac instead\n");
- } else if(uses_amix) {
- // TODO: remove this? is it true anymore?
- audio_codec_to_use = "opus";
- audio_codec = AudioCodec::OPUS;
- fprintf(stderr, "Warning: flac audio codec is not supported when mixing audio sources, falling back to opus instead\n");
- }
- break;
- }
- }
-
const double target_fps = 1.0 / (double)fps;
- const bool video_codec_auto = strcmp(video_codec_to_use, "auto") == 0;
- if(video_codec_auto) {
- fprintf(stderr, "Info: using h264 encoder because a codec was not specified\n");
- video_codec_to_use = "h264";
- video_codec = VideoCodec::H264;
+ if(video_codec_is_hdr(video_codec) && is_portal_capture) {
+ fprintf(stderr, "Warning: portal capture option doesn't support hdr yet (pipewire doesn't support hdr), the video will be tonemapped from hdr to sdr\n");
+ video_codec = hdr_video_codec_to_sdr_video_codec(video_codec);
}
- // TODO: Allow hevc, vp9 and av1 in (enhanced) flv (supported since ffmpeg 6.1)
- const bool is_flv = strcmp(file_extension.c_str(), "flv") == 0;
- if(is_flv) {
- if(video_codec != VideoCodec::H264) {
- video_codec_to_use = "h264";
- video_codec = VideoCodec::H264;
- fprintf(stderr, "Warning: hevc/av1 is not compatible with flv, falling back to h264 instead.\n");
- }
+ audio_codec = select_audio_codec_with_fallback(audio_codec, file_extension, uses_amix);
+ bool low_power = false;
+ const AVCodec *video_codec_f = select_video_codec_with_fallback(&video_codec, video_codec_to_use, file_extension.c_str(), use_software_video_encoder, &egl, &low_power);
- if(audio_codec != AudioCodec::AAC) {
- audio_codec_to_use = "aac";
- audio_codec = AudioCodec::AAC;
- fprintf(stderr, "Warning: flv only supports aac, falling back to aac instead.\n");
- }
- }
-
- const bool is_hls = strcmp(file_extension.c_str(), "m3u8") == 0;
- if(is_hls) {
- if(video_codec == VideoCodec::AV1 || video_codec == VideoCodec::AV1_HDR) {
- video_codec_to_use = "hevc";
- video_codec = VideoCodec::HEVC;
- fprintf(stderr, "Warning: av1 is not compatible with hls (m3u8), falling back to hevc instead.\n");
- }
-
- if(audio_codec != AudioCodec::AAC) {
- audio_codec_to_use = "aac";
- audio_codec = AudioCodec::AAC;
- fprintf(stderr, "Warning: hls (m3u8) only supports aac, falling back to aac instead.\n");
- }
- }
-
- if(use_software_video_encoder && video_codec != VideoCodec::H264) {
- fprintf(stderr, "Error: \"-encoder cpu\" option is currently only available when using h264 codec option (-k)\n");
- usage();
- }
-
- const AVCodec *video_codec_f = nullptr;
- switch(video_codec) {
- case VideoCodec::H264: {
- if(use_software_video_encoder) {
- video_codec_f = find_h264_software_encoder();
- } else {
- video_codec_f = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
- }
- break;
- }
- case VideoCodec::HEVC:
- case VideoCodec::HEVC_HDR:
- // TODO: software encoder
- video_codec_f = find_hevc_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- case VideoCodec::AV1:
- case VideoCodec::AV1_HDR:
- // TODO: software encoder
- video_codec_f = find_av1_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- }
-
- if(!video_codec_auto && !video_codec_f && !is_flv) {
- switch(video_codec) {
- case VideoCodec::H264: {
- fprintf(stderr, "Warning: selected video codec h264 is not supported, trying hevc instead\n");
- video_codec_to_use = "hevc";
- video_codec = VideoCodec::HEVC;
- video_codec_f = find_hevc_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- }
- case VideoCodec::HEVC:
- case VideoCodec::HEVC_HDR: {
- fprintf(stderr, "Warning: selected video codec hevc is not supported, trying h264 instead\n");
- video_codec_to_use = "h264";
- video_codec = VideoCodec::H264;
- video_codec_f = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- }
- case VideoCodec::AV1:
- case VideoCodec::AV1_HDR: {
- fprintf(stderr, "Warning: selected video codec av1 is not supported, trying h264 instead\n");
- video_codec_to_use = "h264";
- video_codec = VideoCodec::H264;
- video_codec_f = find_h264_encoder(egl.gpu_info.vendor, egl.card_path);
- break;
- }
- }
- }
-
- if(!video_codec_f) {
- const char *video_codec_name = "";
- switch(video_codec) {
- case VideoCodec::H264: {
- video_codec_name = "h264";
- break;
- }
- case VideoCodec::HEVC:
- case VideoCodec::HEVC_HDR: {
- video_codec_name = "hevc";
- break;
- }
- case VideoCodec::AV1:
- case VideoCodec::AV1_HDR: {
- video_codec_name = "av1";
- break;
- }
- }
-
- fprintf(stderr, "Error: your gpu does not support '%s' video codec. If you are sure that your gpu does support '%s' video encoding and you are using an AMD/Intel GPU,\n"
- " then make sure you have installed the GPU specific vaapi packages (intel-media-driver, libva-intel-driver or libva-mesa-driver).\n"
- " It's also possible that your distro has disabled hardware accelerated video encoding for '%s' video codec.\n"
- " This may be the case on corporate distros such as Manjaro, Fedora or OpenSUSE.\n"
- " You can test this by running 'vainfo | grep VAEntrypointEncSlice' to see if it matches any H264/HEVC profile.\n"
- " On such distros, you need to manually install mesa from source to enable H264/HEVC hardware acceleration, or use a more user friendly distro. Alternatively record with AV1 if supported by your GPU.\n"
- " You can alternatively use the flatpak version of GPU Screen Recorder (https://flathub.org/apps/com.dec05eba.gpu_screen_recorder) which bypasses system issues with patented H264/HEVC codecs.\n"
- " Make sure you have mesa-extra freedesktop runtime installed when using the flatpak (this should be the default), which can be installed with this command:\n"
- " flatpak install --system org.freedesktop.Platform.GL.default//23.08-extra", video_codec_name, video_codec_name, video_codec_name);
- _exit(2);
- }
-
- gsr_capture *capture = create_capture_impl(window_str, screen_region, wayland, &egl, fps, overclock, video_codec, color_range, record_cursor, framerate_mode == FramerateMode::CONTENT, use_software_video_encoder);
+ const gsr_color_depth color_depth = video_codec_to_bit_depth(video_codec);
+ gsr_capture *capture = create_capture_impl(window_str, screen_region, wayland, &egl, fps, video_codec, color_range, record_cursor, use_software_video_encoder, restore_portal_session, portal_session_token_filepath, color_depth);
// (Some?) livestreaming services require at least one audio track to work.
 // If no audio is provided then create one silent audio track.
@@ -2461,7 +3163,8 @@ int main(int argc, char **argv) {
const bool hdr = video_codec_is_hdr(video_codec);
const bool low_latency_recording = is_livestream || is_output_piped;
- AVCodecContext *video_codec_context = create_video_codec_context(get_pixel_format(egl.gpu_info.vendor, use_software_video_encoder), quality, fps, video_codec_f, low_latency_recording, egl.gpu_info.vendor, framerate_mode, hdr, color_range, keyint);
+ const enum AVPixelFormat video_pix_fmt = get_pixel_format(video_codec, egl.gpu_info.vendor, use_software_video_encoder);
+ AVCodecContext *video_codec_context = create_video_codec_context(video_pix_fmt, quality, fps, video_codec_f, low_latency_recording, egl.gpu_info.vendor, framerate_mode, hdr, color_range, keyint, use_software_video_encoder, bitrate_mode, video_codec);
if(replay_buffer_size_secs == -1)
video_stream = create_stream(av_format_context, video_codec_context);
@@ -2485,7 +3188,7 @@ int main(int argc, char **argv) {
_exit(capture_result);
}
- gsr_video_encoder *video_encoder = create_video_encoder(&egl, overclock, hdr, use_software_video_encoder);
+ gsr_video_encoder *video_encoder = create_video_encoder(&egl, overclock, color_depth, use_software_video_encoder, video_codec);
if(!video_encoder) {
fprintf(stderr, "Error: failed to create video encoder\n");
_exit(1);
@@ -2513,9 +3216,9 @@ int main(int argc, char **argv) {
gsr_color_conversion_clear(&color_conversion);
if(use_software_video_encoder) {
- open_video_software(video_codec_context, quality, pixel_format, hdr);
+ open_video_software(video_codec_context, quality, pixel_format, hdr, color_depth, bitrate_mode);
} else {
- open_video_hardware(video_codec_context, quality, very_old_gpu, egl.gpu_info.vendor, pixel_format, hdr);
+ open_video_hardware(video_codec_context, quality, very_old_gpu, egl.gpu_info.vendor, pixel_format, hdr, color_depth, bitrate_mode, video_codec, low_power);
}
if(video_stream)
avcodec_parameters_from_context(video_stream->codecpar, video_codec_context);
@@ -2561,7 +3264,7 @@ int main(int argc, char **argv) {
const double audio_startup_time_seconds = force_no_audio_offset ? 0 : audio_codec_get_desired_delay(audio_codec, fps);// * ((double)audio_codec_context->frame_size / 1024.0);
const double num_audio_frames_shift = audio_startup_time_seconds / timeout_sec;
- std::vector<AudioDevice> audio_devices;
+ std::vector<AudioDevice> audio_track_audio_devices;
for(size_t i = 0; i < merged_audio_inputs.audio_inputs.size(); ++i) {
auto &audio_input = merged_audio_inputs.audio_inputs[i];
AVFilterContext *src_ctx = nullptr;
@@ -2585,13 +3288,13 @@ int main(int argc, char **argv) {
audio_device.frame = create_audio_frame(audio_codec_context);
audio_device.frame->pts = -audio_codec_context->frame_size * num_audio_frames_shift;
- audio_devices.push_back(std::move(audio_device));
+ audio_track_audio_devices.push_back(std::move(audio_device));
}
AudioTrack audio_track;
audio_track.codec_context = audio_codec_context;
audio_track.stream = audio_stream;
- audio_track.audio_devices = std::move(audio_devices);
+ audio_track.audio_devices = std::move(audio_track_audio_devices);
audio_track.graph = graph;
audio_track.sink = sink;
audio_track.stream_index = audio_stream_index;
@@ -2627,7 +3330,7 @@ int main(int argc, char **argv) {
}
double fps_start_time = clock_get_monotonic_seconds();
- double frame_timer_start = fps_start_time - target_fps; // We want to capture the first frame immediately
+ double frame_timer_start = fps_start_time;
int fps_counter = 0;
int damage_fps_counter = 0;
@@ -2706,7 +3409,7 @@ int main(int argc, char **argv) {
if(paused) {
if(!audio_device.sound_device.handle)
- usleep(timeout_ms * 1000);
+ av_usleep(timeout_ms * 1000);
continue;
}
@@ -2769,7 +3472,7 @@ int main(int argc, char **argv) {
}
if(!audio_device.sound_device.handle)
- usleep(timeout_ms * 1000);
+ av_usleep(timeout_ms * 1000);
if(got_audio_data) {
// TODO: Instead of converting audio, get float audio from alsa. Or does alsa do conversion internally to get this format?
@@ -2806,57 +3509,105 @@ int main(int argc, char **argv) {
}
}
+ std::thread amix_thread;
+ if(uses_amix) {
+ amix_thread = std::thread([&]() {
+ AVFrame *aframe = av_frame_alloc();
+ while(running) {
+ {
+ std::lock_guard<std::mutex> lock(audio_filter_mutex);
+ for(AudioTrack &audio_track : audio_tracks) {
+ if(!audio_track.sink)
+ continue;
+
+ int err = 0;
+ while ((err = av_buffersink_get_frame(audio_track.sink, aframe)) >= 0) {
+ aframe->pts = audio_track.pts;
+ err = avcodec_send_frame(audio_track.codec_context, aframe);
+ if(err >= 0){
+ // TODO: Move to separate thread because this could write to network (for example when livestreaming)
+ receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, aframe->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex, paused_time_offset);
+ } else {
+ fprintf(stderr, "Failed to encode audio!\n");
+ }
+ av_frame_unref(aframe);
+ audio_track.pts += audio_track.codec_context->frame_size;
+ }
+ }
+ }
+ av_usleep(5 * 1000); // 5 milliseconds
+ }
+ av_frame_free(&aframe);
+ });
+ }
+
// Set update_fps to 24 to test if duplicate/delayed frames cause video/audio desync or too fast/slow video.
const double update_fps = fps + 190;
bool should_stop_error = false;
- AVFrame *aframe = av_frame_alloc();
-
int64_t video_pts_counter = 0;
int64_t video_prev_pts = 0;
+ bool hdr_metadata_set = false;
+
+ double damage_timeout_seconds = framerate_mode == FramerateMode::CONTENT ? 0.5 : 0.1;
+ damage_timeout_seconds = std::max(damage_timeout_seconds, target_fps);
+
+ bool use_damage_tracking = false;
+ gsr_damage damage;
+ memset(&damage, 0, sizeof(damage));
+ if(gsr_egl_get_display_server(&egl) == GSR_DISPLAY_SERVER_X11) {
+ gsr_damage_init(&damage, &egl, record_cursor);
+ use_damage_tracking = true;
+ }
+
+ if(is_monitor_capture)
+ gsr_damage_set_target_monitor(&damage, window_str.c_str());
+
while(running) {
- double frame_start = clock_get_monotonic_seconds();
+ const double frame_start = clock_get_monotonic_seconds();
+
+ while(gsr_egl_process_event(&egl)) {
+ gsr_damage_on_event(&damage, gsr_egl_get_event_data(&egl));
+ gsr_capture_on_event(capture, &egl);
+ }
+ gsr_damage_tick(&damage);
+ gsr_capture_tick(capture);
+
+ if(!is_monitor_capture) {
+ Window damage_target_window = 0;
+ if(capture->get_window_id)
+ damage_target_window = capture->get_window_id(capture);
+
+ if(damage_target_window != 0)
+ gsr_damage_set_target_window(&damage, damage_target_window);
+ }
- gsr_capture_tick(capture, video_codec_context);
should_stop_error = false;
if(gsr_capture_should_stop(capture, &should_stop_error)) {
running = 0;
break;
}
- // TODO: Move to another thread, since this shouldn't be locked to video encoding fps
- {
- std::lock_guard<std::mutex> lock(audio_filter_mutex);
- for(AudioTrack &audio_track : audio_tracks) {
- if(!audio_track.sink)
- continue;
+ bool damaged = false;
+ if(use_damage_tracking)
+ damaged = gsr_damage_is_damaged(&damage);
+ else if(capture->is_damaged)
+ damaged = capture->is_damaged(capture);
+ else
+ damaged = true;
- int err = 0;
- while ((err = av_buffersink_get_frame(audio_track.sink, aframe)) >= 0) {
- aframe->pts = audio_track.pts;
- err = avcodec_send_frame(audio_track.codec_context, aframe);
- if(err >= 0){
- // TODO: Move to separate thread because this could write to network (for example when livestreaming)
- receive_frames(audio_track.codec_context, audio_track.stream_index, audio_track.stream, aframe->pts, av_format_context, record_start_time, frame_data_queue, replay_buffer_size_secs, frames_erased, write_output_mutex, paused_time_offset);
- } else {
- fprintf(stderr, "Failed to encode audio!\n");
- }
- av_frame_unref(aframe);
- audio_track.pts += audio_track.codec_context->frame_size;
- }
- }
- }
+ // TODO: Readd wayland sync warning when removing this
+ if(framerate_mode != FramerateMode::CONTENT)
+ damaged = true;
- const bool damaged = !capture->is_damaged || capture->is_damaged(capture);
- if(damaged) {
+ if(damaged)
++damage_fps_counter;
- }
++fps_counter;
- double time_now = clock_get_monotonic_seconds();
- double frame_timer_elapsed = time_now - frame_timer_start;
- double elapsed = time_now - fps_start_time;
+ const double time_now = clock_get_monotonic_seconds();
+ const double frame_timer_elapsed = time_now - frame_timer_start;
+ const double elapsed = time_now - fps_start_time;
if (elapsed >= 1.0) {
if(verbose) {
fprintf(stderr, "update fps: %d, damage fps: %d\n", fps_counter, damage_fps_counter);
@@ -2867,10 +3618,11 @@ int main(int argc, char **argv) {
}
double frame_time_overflow = frame_timer_elapsed - target_fps;
- if (frame_time_overflow >= 0.0 && damaged) {
+ if ((frame_time_overflow >= 0.0 || video_pts_counter == 0) && damaged) {
+ gsr_damage_clear(&damage);
if(capture->clear_damage)
capture->clear_damage(capture);
- frame_time_overflow = std::min(frame_time_overflow, target_fps);
+ frame_time_overflow = std::min(std::max(0.0, frame_time_overflow), target_fps);
frame_timer_start = time_now - frame_time_overflow;
const double this_video_frame_time = clock_get_monotonic_seconds() - paused_time_offset;
@@ -2878,8 +3630,13 @@ int main(int argc, char **argv) {
const int num_frames = framerate_mode == FramerateMode::CONSTANT ? std::max((int64_t)0LL, expected_frames - video_pts_counter) : 1;
if(num_frames > 0 && !paused) {
+ egl.glClear(0);
gsr_capture_capture(capture, video_frame, &color_conversion);
- gsr_video_encoder_copy_textures_to_frame(video_encoder, video_frame);
+ gsr_egl_swap_buffers(&egl);
+ gsr_video_encoder_copy_textures_to_frame(video_encoder, video_frame, &color_conversion);
+
+ if(hdr && !hdr_metadata_set && replay_buffer_size_secs == -1 && add_hdr_metadata_to_video_stream(capture, video_stream))
+ hdr_metadata_set = true;
// TODO: Check if duplicate frame can be saved just by writing it with a different pts instead of sending it again
for(int i = 0; i < num_frames; ++i) {
@@ -2903,7 +3660,6 @@ int main(int argc, char **argv) {
}
}
- gsr_capture_capture_end(capture, video_frame);
video_pts_counter += num_frames;
}
}
@@ -2934,14 +3690,14 @@ int main(int argc, char **argv) {
if(save_replay == 1 && !save_replay_thread.valid() && replay_buffer_size_secs != -1) {
save_replay = 0;
- save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex, make_folders);
+ save_replay_async(video_codec_context, VIDEO_STREAM_INDEX, audio_tracks, frame_data_queue, frames_erased, filename, container_format, file_extension, write_output_mutex, date_folders, hdr, capture);
}
double frame_end = clock_get_monotonic_seconds();
double frame_sleep_fps = 1.0 / update_fps;
double sleep_time = frame_sleep_fps - (frame_end - frame_start);
if(sleep_time > 0.0)
- usleep(sleep_time * 1000.0 * 1000.0);
+ av_usleep(sleep_time * 1000.0 * 1000.0);
}
running = 0;
@@ -2963,7 +3719,8 @@ int main(int argc, char **argv) {
}
}
- av_frame_free(&aframe);
+ if(amix_thread.joinable())
+ amix_thread.join();
if (replay_buffer_size_secs == -1 && av_write_trailer(av_format_context) != 0) {
fprintf(stderr, "Failed to write trailer\n");
@@ -2972,6 +3729,7 @@ int main(int argc, char **argv) {
if(replay_buffer_size_secs == -1 && !(output_format->flags & AVFMT_NOFILE))
avio_close(av_format_context->pb);
+ gsr_damage_deinit(&damage);
gsr_color_conversion_deinit(&color_conversion);
gsr_video_encoder_destroy(video_encoder, video_codec_context);
gsr_capture_destroy(capture, video_codec_context);
@@ -2985,7 +3743,6 @@ int main(int argc, char **argv) {
}
//av_frame_free(&video_frame);
- free((void*)window_str);
free(empty_audio);
// We do an _exit here because cuda uses at_exit to do _something_ that causes the program to freeze,
// but only on some nvidia driver versions on some gpus (RTX?), and _exit exits the program without calling
diff --git a/src/overclock.c b/src/overclock.c
index 2cba623..df2ae66 100644
--- a/src/overclock.c
+++ b/src/overclock.c
@@ -4,12 +4,10 @@
#include <string.h>
#include <stdlib.h>
-// HACK!!!: When a program uses cuda (including nvenc) then the nvidia driver drops to performance level 2 (memory transfer rate is dropped and possibly graphics clock).
+// HACK!!!: When a program uses cuda (including nvenc), the nvidia driver drops to max performance level - 1 (the memory transfer rate is reduced, and possibly the graphics clock too).
// Nvidia does this because in some very extreme cases of cuda there can be memory corruption when running at max memory transfer rate.
// So to get around this we overclock memory transfer rate (maybe this should also be done for graphics clock?) to the best performance level while GPU Screen Recorder is running.
-// TODO: Does it always drop to performance level 2?
-
static int min_int(int a, int b) {
return a < b ? a : b;
}
diff --git a/src/pipewire.c b/src/pipewire.c
new file mode 100644
index 0000000..3bf54db
--- /dev/null
+++ b/src/pipewire.c
@@ -0,0 +1,788 @@
+#include "../include/pipewire.h"
+#include "../include/egl.h"
+#include "../include/utils.h"
+
+#include <pipewire/pipewire.h>
+#include <spa/param/video/format-utils.h>
+#include <spa/debug/types.h>
+
+#include <libdrm/drm_fourcc.h>
+
+#include <fcntl.h>
+#include <unistd.h>
+
+/* This code is partially based on xr-video-player's pipewire implementation, which is in turn based on obs-studio's pipewire implementation */
+
+/* TODO: Make gsr_pipewire_init asynchronous */
+/* TODO: Support 10-bit capture (hdr) when pipewire supports it */
+/* TODO: Test all of the image formats */
+
+#ifndef SPA_POD_PROP_FLAG_DONT_FIXATE
+#define SPA_POD_PROP_FLAG_DONT_FIXATE (1 << 4)
+#endif
+
+#define CURSOR_META_SIZE(width, height) \
+ (sizeof(struct spa_meta_cursor) + sizeof(struct spa_meta_bitmap) + \
+ (width) * (height) * 4)
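
For reference, the size this macro reserves is the two fixed metadata headers plus a full 4-byte-per-pixel cursor bitmap, matching the RGBA copy in on_process_cb below:

    /* Worked example (illustration only):
       CURSOR_META_SIZE(64, 64) = sizeof(struct spa_meta_cursor)
                                + sizeof(struct spa_meta_bitmap)
                                + 64 * 64 * 4   -> the two headers + 16384 bitmap bytes */
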
+
+static bool parse_pw_version(gsr_pipewire_data_version *dst, const char *version) {
+ const int n_matches = sscanf(version, "%d.%d.%d", &dst->major, &dst->minor, &dst->micro);
+ return n_matches == 3;
+}
+
+static bool check_pw_version(const gsr_pipewire_data_version *pw_version, int major, int minor, int micro) {
+ if (pw_version->major != major)
+ return pw_version->major > major;
+ if (pw_version->minor != minor)
+ return pw_version->minor > minor;
+ return pw_version->micro >= micro;
+}
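
parse_pw_version and check_pw_version together implement a plain lexicographic major.minor.micro comparison. A usage sketch (pw_version_example is a hypothetical name, not part of this patch) against the two thresholds this file checks later, 0.3.24 for DmaBuf buffers and 0.3.33 for modifier negotiation:

    static void pw_version_example(void) {
        gsr_pipewire_data_version v;
        if(!parse_pw_version(&v, "0.3.30"))
            return;
        const bool dmabuf_ok = check_pw_version(&v, 0, 3, 24);    /* true:  0.3.30 >= 0.3.24 */
        const bool modifiers_ok = check_pw_version(&v, 0, 3, 33); /* false: 0.3.30 <  0.3.33 */
        fprintf(stderr, "dmabuf: %d, modifiers: %d\n", dmabuf_ok, modifiers_ok);
    }
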
+
+static void update_pw_versions(gsr_pipewire *self, const char *version) {
+ fprintf(stderr, "gsr info: pipewire: server version: %s\n", version);
+ fprintf(stderr, "gsr info: pipewire: library version: %s\n", pw_get_library_version());
+ fprintf(stderr, "gsr info: pipewire: header version: %s\n", pw_get_headers_version());
+ if(!parse_pw_version(&self->server_version, version))
+ fprintf(stderr, "gsr error: pipewire: failed to parse server version\n");
+}
+
+static void on_core_info_cb(void *user_data, const struct pw_core_info *info) {
+ gsr_pipewire *self = user_data;
+ update_pw_versions(self, info->version);
+}
+
+static void on_core_error_cb(void *user_data, uint32_t id, int seq, int res, const char *message) {
+ gsr_pipewire *self = user_data;
+ fprintf(stderr, "gsr error: pipewire: error id:%u seq:%d res:%d: %s\n", id, seq, res, message);
+ pw_thread_loop_signal(self->thread_loop, false);
+}
+
+static void on_core_done_cb(void *user_data, uint32_t id, int seq) {
+ gsr_pipewire *self = user_data;
+ if (id == PW_ID_CORE && self->server_version_sync == seq)
+ pw_thread_loop_signal(self->thread_loop, false);
+}
+
+static bool is_cursor_format_supported(const enum spa_video_format format) {
+ switch(format) {
+ case SPA_VIDEO_FORMAT_RGBx: return true;
+ case SPA_VIDEO_FORMAT_BGRx: return true;
+ case SPA_VIDEO_FORMAT_xRGB: return true;
+ case SPA_VIDEO_FORMAT_xBGR: return true;
+ case SPA_VIDEO_FORMAT_RGBA: return true;
+ case SPA_VIDEO_FORMAT_BGRA: return true;
+ case SPA_VIDEO_FORMAT_ARGB: return true;
+ case SPA_VIDEO_FORMAT_ABGR: return true;
+ default: break;
+ }
+ return false;
+}
+
+static const struct pw_core_events core_events = {
+ PW_VERSION_CORE_EVENTS,
+ .info = on_core_info_cb,
+ .done = on_core_done_cb,
+ .error = on_core_error_cb,
+};
+
+static void on_process_cb(void *user_data) {
+ gsr_pipewire *self = user_data;
+ struct spa_meta_cursor *cursor = NULL;
+ //struct spa_meta *video_damage = NULL;
+
+ /* Find the most recent buffer */
+ struct pw_buffer *pw_buf = NULL;
+ for(;;) {
+ struct pw_buffer *aux = pw_stream_dequeue_buffer(self->stream);
+ if(!aux)
+ break;
+ if(pw_buf)
+ pw_stream_queue_buffer(self->stream, pw_buf);
+ pw_buf = aux;
+ }
+
+ if(!pw_buf) {
+ fprintf(stderr, "gsr info: pipewire: out of buffers!\n");
+ return;
+ }
+
+ struct spa_buffer *buffer = pw_buf->buffer;
+ const bool has_buffer = buffer->datas[0].chunk->size != 0;
+ if(!has_buffer)
+ goto read_metadata;
+
+ pthread_mutex_lock(&self->mutex);
+
+ if(buffer->datas[0].type == SPA_DATA_DmaBuf) {
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ if(self->dmabuf_data[i].fd > 0) {
+ close(self->dmabuf_data[i].fd);
+ self->dmabuf_data[i].fd = -1;
+ }
+ }
+
+ self->dmabuf_num_planes = buffer->n_datas;
+ if(self->dmabuf_num_planes > GSR_PIPEWIRE_DMABUF_MAX_PLANES)
+ self->dmabuf_num_planes = GSR_PIPEWIRE_DMABUF_MAX_PLANES;
+
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ self->dmabuf_data[i].fd = dup(buffer->datas[i].fd);
+ self->dmabuf_data[i].offset = buffer->datas[i].chunk->offset;
+ self->dmabuf_data[i].stride = buffer->datas[i].chunk->stride;
+ }
+
+ self->damaged = true;
+ } else {
+ // TODO:
+ }
+
+ // TODO: Move down to read_metadata
+ struct spa_meta_region *region = spa_buffer_find_meta_data(buffer, SPA_META_VideoCrop, sizeof(*region));
+ if(region && spa_meta_region_is_valid(region)) {
+ // fprintf(stderr, "gsr info: pipewire: crop Region available (%dx%d+%d+%d)\n",
+ // region->region.position.x, region->region.position.y,
+ // region->region.size.width, region->region.size.height);
+ self->crop.x = region->region.position.x;
+ self->crop.y = region->region.position.y;
+ self->crop.width = region->region.size.width;
+ self->crop.height = region->region.size.height;
+ self->crop.valid = true;
+ } else {
+ self->crop.valid = false;
+ }
+
+ pthread_mutex_unlock(&self->mutex);
+
+read_metadata:
+
+ // video_damage = spa_buffer_find_meta(buffer, SPA_META_VideoDamage);
+ // if(video_damage) {
+ // struct spa_meta_region *r = spa_meta_first(video_damage);
+ // if(spa_meta_check(r, video_damage)) {
+ // //fprintf(stderr, "damage: %d,%d %ux%u\n", r->region.position.x, r->region.position.y, r->region.size.width, r->region.size.height);
+ // pthread_mutex_lock(&self->mutex);
+ // self->damaged = true;
+ // pthread_mutex_unlock(&self->mutex);
+ // }
+ // }
+
+ cursor = spa_buffer_find_meta_data(buffer, SPA_META_Cursor, sizeof(*cursor));
+ self->cursor.valid = cursor && spa_meta_cursor_is_valid(cursor);
+
+ if (self->cursor.visible && self->cursor.valid) {
+ pthread_mutex_lock(&self->mutex);
+
+ struct spa_meta_bitmap *bitmap = NULL;
+ if (cursor->bitmap_offset)
+ bitmap = SPA_MEMBER(cursor, cursor->bitmap_offset, struct spa_meta_bitmap);
+
+ if (bitmap && bitmap->size.width > 0 && bitmap->size.height > 0 && is_cursor_format_supported(bitmap->format)) {
+ const uint8_t *bitmap_data = SPA_MEMBER(bitmap, bitmap->offset, uint8_t);
+ fprintf(stderr, "gsr info: pipewire: cursor bitmap update, size: %dx%d, format: %s\n",
+ (int)bitmap->size.width, (int)bitmap->size.height, spa_debug_type_find_name(spa_type_video_format, bitmap->format));
+
+ const size_t bitmap_size = bitmap->size.width * bitmap->size.height * 4;
+ uint8_t *new_bitmap_data = realloc(self->cursor.data, bitmap_size);
+ if(new_bitmap_data) {
+ self->cursor.data = new_bitmap_data;
+ /* TODO: Convert bgr and other image formats to rgb here */
+ memcpy(self->cursor.data, bitmap_data, bitmap_size);
+ }
+
+ self->cursor.hotspot_x = cursor->hotspot.x;
+ self->cursor.hotspot_y = cursor->hotspot.y;
+ self->cursor.width = bitmap->size.width;
+ self->cursor.height = bitmap->size.height;
+ }
+
+ self->cursor.x = cursor->position.x;
+ self->cursor.y = cursor->position.y;
+ pthread_mutex_unlock(&self->mutex);
+
+ //fprintf(stderr, "gsr info: pipewire: cursor: %d %d %d %d\n", cursor->hotspot.x, cursor->hotspot.y, cursor->position.x, cursor->position.y);
+ }
+
+ pw_stream_queue_buffer(self->stream, pw_buf);
+}
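
The dequeue loop at the top of on_process_cb implements a drain-to-latest policy: dequeue everything, keep only the newest buffer, and immediately requeue the older ones so PipeWire never runs out of buffers. The same pattern as a standalone sketch (dequeue_latest_buffer is a hypothetical helper, using only the pw_stream calls already used above):

    static struct pw_buffer* dequeue_latest_buffer(struct pw_stream *stream) {
        struct pw_buffer *latest = NULL;
        for(;;) {
            struct pw_buffer *aux = pw_stream_dequeue_buffer(stream);
            if(!aux)
                break;                                  /* queue drained */
            if(latest)
                pw_stream_queue_buffer(stream, latest); /* return the older buffer */
            latest = aux;
        }
        return latest; /* NULL if nothing was queued; the caller requeues it when done */
    }
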
+
+static void on_param_changed_cb(void *user_data, uint32_t id, const struct spa_pod *param) {
+ gsr_pipewire *self = user_data;
+
+ if (!param || id != SPA_PARAM_Format)
+ return;
+
+ int result = spa_format_parse(param, &self->format.media_type, &self->format.media_subtype);
+ if (result < 0)
+ return;
+
+ if (self->format.media_type != SPA_MEDIA_TYPE_video || self->format.media_subtype != SPA_MEDIA_SUBTYPE_raw)
+ return;
+
+ pthread_mutex_lock(&self->mutex);
+ spa_format_video_raw_parse(param, &self->format.info.raw);
+ pthread_mutex_unlock(&self->mutex);
+
+ uint32_t buffer_types = 0;
+ const bool has_modifier = spa_pod_find_prop(param, NULL, SPA_FORMAT_VIDEO_modifier) != NULL;
+ if(has_modifier || check_pw_version(&self->server_version, 0, 3, 24))
+ buffer_types |= 1 << SPA_DATA_DmaBuf;
+
+ fprintf(stderr, "gsr info: pipewire: negotiated format:\n");
+
+ fprintf(stderr, "gsr info: pipewire: Format: %d (%s)\n",
+ self->format.info.raw.format,
+ spa_debug_type_find_name(spa_type_video_format, self->format.info.raw.format));
+
+ if(has_modifier) {
+ fprintf(stderr, "gsr info: pipewire: Modifier: 0x%" PRIx64 "\n", self->format.info.raw.modifier);
+ }
+
+ fprintf(stderr, "gsr info: pipewire: Size: %dx%d\n", self->format.info.raw.size.width, self->format.info.raw.size.height);
+ fprintf(stderr, "gsr info: pipewire: Framerate: %d/%d\n", self->format.info.raw.framerate.num, self->format.info.raw.framerate.denom);
+
+ uint8_t params_buffer[1024];
+ struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+ const struct spa_pod *params[4];
+
+ params[0] = spa_pod_builder_add_object(
+ &pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoCrop),
+ SPA_PARAM_META_size,
+ SPA_POD_Int(sizeof(struct spa_meta_region)));
+
+ params[1] = spa_pod_builder_add_object(
+ &pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_VideoDamage),
+ SPA_PARAM_META_size, SPA_POD_CHOICE_RANGE_Int(
+ sizeof(struct spa_meta_region) * 16,
+ sizeof(struct spa_meta_region) * 1,
+ sizeof(struct spa_meta_region) * 16));
+
+ params[2] = spa_pod_builder_add_object(
+ &pod_builder, SPA_TYPE_OBJECT_ParamMeta, SPA_PARAM_Meta,
+ SPA_PARAM_META_type, SPA_POD_Id(SPA_META_Cursor),
+ SPA_PARAM_META_size,
+ SPA_POD_CHOICE_RANGE_Int(CURSOR_META_SIZE(64, 64),
+ CURSOR_META_SIZE(1, 1),
+ CURSOR_META_SIZE(1024, 1024)));
+
+ params[3] = spa_pod_builder_add_object(
+ &pod_builder, SPA_TYPE_OBJECT_ParamBuffers, SPA_PARAM_Buffers,
+ SPA_PARAM_BUFFERS_dataType, SPA_POD_Int(buffer_types));
+
+ pw_stream_update_params(self->stream, params, 4);
+ self->negotiated = true;
+}
+
+static void on_state_changed_cb(void *user_data, enum pw_stream_state old, enum pw_stream_state state, const char *error) {
+ (void)old;
+ gsr_pipewire *self = user_data;
+
+ fprintf(stderr, "gsr info: pipewire: stream %p state: \"%s\" (error: %s)\n",
+ (void*)self->stream, pw_stream_state_as_string(state),
+ error ? error : "none");
+}
+
+static const struct pw_stream_events stream_events = {
+ PW_VERSION_STREAM_EVENTS,
+ .state_changed = on_state_changed_cb,
+ .param_changed = on_param_changed_cb,
+ .process = on_process_cb,
+};
+
+static inline struct spa_pod *build_format(struct spa_pod_builder *b,
+ const gsr_pipewire_video_info *ovi,
+ uint32_t format, const uint64_t *modifiers,
+ size_t modifier_count)
+{
+ struct spa_pod_frame format_frame;
+
+ spa_pod_builder_push_object(b, &format_frame, SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat);
+ spa_pod_builder_add(b, SPA_FORMAT_mediaType, SPA_POD_Id(SPA_MEDIA_TYPE_video), 0);
+ spa_pod_builder_add(b, SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw), 0);
+
+ spa_pod_builder_add(b, SPA_FORMAT_VIDEO_format, SPA_POD_Id(format), 0);
+
+ if (modifier_count > 0) {
+ struct spa_pod_frame modifier_frame;
+
+ spa_pod_builder_prop(b, SPA_FORMAT_VIDEO_modifier, SPA_POD_PROP_FLAG_MANDATORY | SPA_POD_PROP_FLAG_DONT_FIXATE);
+ spa_pod_builder_push_choice(b, &modifier_frame, SPA_CHOICE_Enum, 0);
+
+ /* The first element of choice pods is the preferred value. Here
+ * we arbitrarily pick the first modifier as the preferred one.
+ */
+ // TODO:
+ spa_pod_builder_long(b, modifiers[0]);
+
+ for(uint32_t i = 0; i < modifier_count; i++)
+ spa_pod_builder_long(b, modifiers[i]);
+
+ spa_pod_builder_pop(b, &modifier_frame);
+ }
+
+ spa_pod_builder_add(b, SPA_FORMAT_VIDEO_size,
+ SPA_POD_CHOICE_RANGE_Rectangle(
+ &SPA_RECTANGLE(32, 32),
+ &SPA_RECTANGLE(1, 1),
+ &SPA_RECTANGLE(16384, 16384)),
+ SPA_FORMAT_VIDEO_framerate,
+ SPA_POD_CHOICE_RANGE_Fraction(
+ &SPA_FRACTION(ovi->fps_num, ovi->fps_den),
+ &SPA_FRACTION(0, 1), &SPA_FRACTION(500, 1)),
+ 0);
+ return spa_pod_builder_pop(b, &format_frame);
+}
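
For modifiers = {M0, M1}, the modifier branch above emits a property whose value is a choice pod; the first entry of a choice pod is the default, which is why modifiers[0] is written once before the full list (illustration, not part of the patch):

    /* Prop: SPA_FORMAT_VIDEO_modifier (MANDATORY | DONT_FIXATE)
     *   Choice Enum long { M0 (default), M0, M1 }
     * DONT_FIXATE lets the server answer with a narrowed choice rather than
     * fixating on a single modifier during negotiation. */
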
+
+/* https://gstreamer.freedesktop.org/documentation/additional/design/mediatype-video-raw.html?gi-language=c#formats */
+/* GStreamer/SPA format names list the components in memory byte order, while drm fourccs name a little-endian packed word, so the component order appears reversed */
+static int64_t spa_video_format_to_drm_format(const enum spa_video_format format) {
+ switch(format) {
+ case SPA_VIDEO_FORMAT_RGBx: return DRM_FORMAT_XBGR8888;
+ case SPA_VIDEO_FORMAT_BGRx: return DRM_FORMAT_XRGB8888;
+ case SPA_VIDEO_FORMAT_RGBA: return DRM_FORMAT_ABGR8888;
+ case SPA_VIDEO_FORMAT_BGRA: return DRM_FORMAT_ARGB8888;
+ case SPA_VIDEO_FORMAT_RGB: return DRM_FORMAT_XBGR8888;
+ case SPA_VIDEO_FORMAT_BGR: return DRM_FORMAT_XRGB8888;
+ default: break;
+ }
+ return DRM_FORMAT_INVALID;
+}
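
A worked example of that reversal (illustration; it relies only on headers this file already pulls in): SPA/GStreamer's "RGBx" means the bytes R,G,B,x in memory order, and reading those four bytes as one little-endian word gives x,B,G,R from the high byte down, which is exactly what DRM_FORMAT_XBGR8888 names:

    static void format_name_example(void) {
        const uint8_t rgbx_pixel[4] = { 0x11 /*R*/, 0x22 /*G*/, 0x33 /*B*/, 0x00 /*x*/ };
        uint32_t packed;
        memcpy(&packed, rgbx_pixel, 4); /* little-endian: packed == 0x00332211, i.e. x-B-G-R */
        fprintf(stderr, "packed word: 0x%08X (the layout DRM_FORMAT_XBGR8888 describes)\n", packed);
    }
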
+
+static const enum spa_video_format video_formats[] = {
+ SPA_VIDEO_FORMAT_BGRA,
+ SPA_VIDEO_FORMAT_BGRx,
+ SPA_VIDEO_FORMAT_BGR,
+ SPA_VIDEO_FORMAT_RGBx,
+ SPA_VIDEO_FORMAT_RGBA,
+ SPA_VIDEO_FORMAT_RGB,
+};
+
+static bool gsr_pipewire_build_format_params(gsr_pipewire *self, struct spa_pod_builder *pod_builder, struct spa_pod **params, uint32_t *num_params) {
+ *num_params = 0;
+
+ if(!check_pw_version(&self->server_version, 0, 3, 33))
+ return false;
+
+ for(size_t i = 0; i < GSR_PIPEWIRE_NUM_VIDEO_FORMATS; i++) {
+ if(self->supported_video_formats[i].modifiers_size == 0)
+ continue;
+ params[*num_params] = build_format(pod_builder, &self->video_info, self->supported_video_formats[i].format, self->modifiers + self->supported_video_formats[i].modifiers_index, self->supported_video_formats[i].modifiers_size); // index with *num_params so formats skipped above don't leave uninitialized holes in params
+ ++(*num_params);
+ }
+
+ return true;
+}
+
+static void renegotiate_format(void *data, uint64_t expirations) {
+ (void)expirations;
+ gsr_pipewire *self = (gsr_pipewire*)data;
+
+ pw_thread_loop_lock(self->thread_loop);
+
+ struct spa_pod *params[GSR_PIPEWIRE_NUM_VIDEO_FORMATS];
+ uint32_t num_video_formats = 0;
+ uint8_t params_buffer[2048];
+ struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+ if (!gsr_pipewire_build_format_params(self, &pod_builder, params, &num_video_formats)) {
+ pw_thread_loop_unlock(self->thread_loop);
+ return;
+ }
+
+ pw_stream_update_params(self->stream, (const struct spa_pod**)params, num_video_formats);
+ pw_thread_loop_unlock(self->thread_loop);
+}
+
+static bool spa_video_format_get_modifiers(gsr_pipewire *self, const enum spa_video_format format, uint64_t *modifiers, int32_t max_modifiers, int32_t *num_modifiers) {
+ *num_modifiers = 0;
+
+ if(max_modifiers == 0) {
+ fprintf(stderr, "gsr error: spa_video_format_get_modifiers: no space for modifiers left\n");
+ //modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+ //modifiers[1] = DRM_FORMAT_MOD_INVALID;
+ //*num_modifiers = 2;
+ return false;
+ }
+
+ if(!self->egl->eglQueryDmaBufModifiersEXT) {
+ fprintf(stderr, "gsr error: spa_video_format_get_modifiers: failed to initialize modifiers because eglQueryDmaBufModifiersEXT is not available\n");
+ //modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+ //modifiers[1] = DRM_FORMAT_MOD_INVALID;
+ //*num_modifiers = 2;
+ return false;
+ }
+
+ const int64_t drm_format = spa_video_format_to_drm_format(format);
+ if(!self->egl->eglQueryDmaBufModifiersEXT(self->egl->egl_display, drm_format, max_modifiers, modifiers, NULL, num_modifiers)) {
+ fprintf(stderr, "gsr error: spa_video_format_get_modifiers: eglQueryDmaBufModifiersEXT failed with drm format %d, %" PRIi64 "\n", (int)format, drm_format);
+ //modifiers[0] = DRM_FORMAT_MOD_LINEAR;
+ //modifiers[1] = DRM_FORMAT_MOD_INVALID;
+ //*num_modifiers = 2;
+ *num_modifiers = 0;
+ return false;
+ }
+
+ // if(*num_modifiers + 2 <= max_modifiers) {
+ // modifiers[*num_modifiers + 0] = DRM_FORMAT_MOD_LINEAR;
+ // modifiers[*num_modifiers + 1] = DRM_FORMAT_MOD_INVALID;
+ // *num_modifiers += 2;
+ // }
+ return true;
+}
+
+static void gsr_pipewire_init_modifiers(gsr_pipewire *self) {
+ for(size_t i = 0; i < GSR_PIPEWIRE_NUM_VIDEO_FORMATS; i++) {
+ self->supported_video_formats[i].format = video_formats[i];
+ int32_t num_modifiers = 0;
+ spa_video_format_get_modifiers(self, self->supported_video_formats[i].format, self->modifiers + self->num_modifiers, GSR_PIPEWIRE_MAX_MODIFIERS - self->num_modifiers, &num_modifiers);
+ self->supported_video_formats[i].modifiers_index = self->num_modifiers;
+ self->supported_video_formats[i].modifiers_size = num_modifiers;
+ self->num_modifiers += num_modifiers;
+ }
+}
+
+static bool gsr_pipewire_setup_stream(gsr_pipewire *self) {
+ struct spa_pod *params[GSR_PIPEWIRE_NUM_VIDEO_FORMATS];
+ uint32_t num_video_formats = 0;
+ uint8_t params_buffer[2048];
+ struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
+
+ self->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL);
+ if(!self->thread_loop) {
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to create pipewire thread\n");
+ goto error;
+ }
+
+ self->context = pw_context_new(pw_thread_loop_get_loop(self->thread_loop), NULL, 0);
+ if(!self->context) {
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to create pipewire context\n");
+ goto error;
+ }
+
+ if(pw_thread_loop_start(self->thread_loop) < 0) {
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to start thread\n");
+ goto error;
+ }
+
+ pw_thread_loop_lock(self->thread_loop);
+
+ // F_DUPFD_CLOEXEC duplicates the fd to the lowest free descriptor >= 5 (the third fcntl argument), keeping it clear of the standard stdio descriptors
+ self->core = pw_context_connect_fd(self->context, fcntl(self->fd, F_DUPFD_CLOEXEC, 5), NULL, 0);
+ if(!self->core) {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to connect to fd %d\n", self->fd);
+ goto error;
+ }
+
+ // TODO: Error check
+ pw_core_add_listener(self->core, &self->core_listener, &core_events, self);
+
+ gsr_pipewire_init_modifiers(self);
+
+ // TODO: Cleanup?
+ self->reneg = pw_loop_add_event(pw_thread_loop_get_loop(self->thread_loop), renegotiate_format, self);
+ if(!self->reneg) {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: pw_loop_add_event failed\n");
+ goto error;
+ }
+
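+ // Round-trip to the server: the core event handlers (core_events, defined
+ // earlier in this file) are expected to record the server version and wake
+ // this wait, so the version check in gsr_pipewire_build_format_params sees
+ // valid data.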
+ self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, 0);
+ pw_thread_loop_wait(self->thread_loop);
+
+ self->stream = pw_stream_new(self->core, "com.dec05eba.gpu_screen_recorder",
+ pw_properties_new(PW_KEY_MEDIA_TYPE, "Video",
+ PW_KEY_MEDIA_CATEGORY, "Capture",
+ PW_KEY_MEDIA_ROLE, "Screen", NULL));
+ if(!self->stream) {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to create stream\n");
+ goto error;
+ }
+ pw_stream_add_listener(self->stream, &self->stream_listener, &stream_events, self);
+
+ if(!gsr_pipewire_build_format_params(self, &pod_builder, params, &num_video_formats)) {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to build format params\n");
+ goto error;
+ }
+
+ if(pw_stream_connect(
+ self->stream, PW_DIRECTION_INPUT, self->node,
+ PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_MAP_BUFFERS, (const struct spa_pod**)params,
+ num_video_formats) < 0)
+ {
+ pw_thread_loop_unlock(self->thread_loop);
+ fprintf(stderr, "gsr error: gsr_pipewire_setup_stream: failed to connect stream\n");
+ goto error;
+ }
+
+ pw_thread_loop_unlock(self->thread_loop);
+ return true;
+
+ error:
+ if(self->thread_loop) {
+ //pw_thread_loop_wait(self->thread_loop);
+ pw_thread_loop_stop(self->thread_loop);
+ }
+
+ if(self->stream) {
+ pw_stream_disconnect(self->stream);
+ pw_stream_destroy(self->stream);
+ self->stream = NULL;
+ }
+
+ if(self->core) {
+ pw_core_disconnect(self->core);
+ self->core = NULL;
+ }
+
+ if(self->context) {
+ pw_context_destroy(self->context);
+ self->context = NULL;
+ }
+
+ if(self->thread_loop) {
+ pw_thread_loop_destroy(self->thread_loop);
+ self->thread_loop = NULL;
+ }
+ return false;
+}
+
+static int pw_init_counter = 0;
+bool gsr_pipewire_init(gsr_pipewire *self, int pipewire_fd, uint32_t pipewire_node, int fps, bool capture_cursor, gsr_egl *egl) {
+ if(pw_init_counter == 0)
+ pw_init(NULL, NULL);
+ ++pw_init_counter;
+
+ memset(self, 0, sizeof(*self));
+ self->egl = egl;
+ self->fd = pipewire_fd;
+ self->node = pipewire_node;
+ if(pthread_mutex_init(&self->mutex, NULL) != 0) {
+ fprintf(stderr, "gsr error: gsr_pipewire_init: failed to initialize mutex\n");
+ gsr_pipewire_deinit(self);
+ return false;
+ }
+ self->mutex_initialized = true;
+ self->video_info.fps_num = fps;
+ self->video_info.fps_den = 1;
+ self->cursor.visible = capture_cursor;
+
+ if(!gsr_pipewire_setup_stream(self)) {
+ gsr_pipewire_deinit(self);
+ return false;
+ }
+
+ return true;
+}
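+
+// Usage sketch (hypothetical names; assumes a PipeWire fd and node id obtained
+// from an xdg-desktop-portal ScreenCast session and an initialized gsr_egl):
+//   gsr_pipewire pw;
+//   if(gsr_pipewire_init(&pw, portal_fd, portal_node, 60, true, egl)) {
+//       /* each frame: gsr_pipewire_map_texture(&pw, ...); */
+//       gsr_pipewire_deinit(&pw);
+//   }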
+
+void gsr_pipewire_deinit(gsr_pipewire *self) {
+ if(self->thread_loop) {
+ //pw_thread_loop_wait(self->thread_loop);
+ pw_thread_loop_stop(self->thread_loop);
+ }
+
+ if(self->stream) {
+ pw_stream_disconnect(self->stream);
+ pw_stream_destroy(self->stream);
+ self->stream = NULL;
+ }
+
+ if(self->core) {
+ pw_core_disconnect(self->core);
+ self->core = NULL;
+ }
+
+ if(self->context) {
+ pw_context_destroy(self->context);
+ self->context = NULL;
+ }
+
+ if(self->thread_loop) {
+ pw_thread_loop_destroy(self->thread_loop);
+ self->thread_loop = NULL;
+ }
+
+ if(self->fd > 0) {
+ close(self->fd);
+ self->fd = -1;
+ }
+
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ if(self->dmabuf_data[i].fd > 0) {
+ close(self->dmabuf_data[i].fd);
+ self->dmabuf_data[i].fd = -1;
+ }
+ }
+ self->dmabuf_num_planes = 0;
+
+ self->negotiated = false;
+
+ if(self->mutex_initialized) {
+ pthread_mutex_destroy(&self->mutex);
+ self->mutex_initialized = false;
+ }
+
+ if(self->cursor.data) {
+ free(self->cursor.data);
+ self->cursor.data = NULL;
+ }
+
+ --pw_init_counter;
+ if(pw_init_counter == 0) {
+#if PW_CHECK_VERSION(0, 3, 49)
+ pw_deinit();
+#endif
+ }
+}
+
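+// Imports the negotiated dma-buf planes as an EGLImage. The empty loop below
+// drains stale EGL errors so the eglGetError() check reflects only this call.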
+static EGLImage gsr_pipewire_create_egl_image(gsr_pipewire *self, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, bool use_modifiers) {
+ intptr_t img_attr[47]; // large enough for the worst case in setup_dma_buf_attrs
+ setup_dma_buf_attrs(img_attr, spa_video_format_to_drm_format(self->format.info.raw.format), self->format.info.raw.size.width, self->format.info.raw.size.height,
+ fds, offsets, pitches, modifiers, self->dmabuf_num_planes, use_modifiers);
+ while(self->egl->eglGetError() != EGL_SUCCESS){}
+ EGLImage image = self->egl->eglCreateImage(self->egl->egl_display, 0, EGL_LINUX_DMA_BUF_EXT, NULL, img_attr);
+ if(!image || self->egl->eglGetError() != EGL_SUCCESS) {
+ if(image)
+ self->egl->eglDestroyImage(self->egl->egl_display, image);
+ return NULL;
+ }
+ return image;
+}
+
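+// Tries the import with explicit modifiers first; if that ever fails, the
+// fallback is remembered (no_modifiers_fallback) and all later imports use
+// the implicit-modifier path.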
+static EGLImage gsr_pipewire_create_egl_image_with_fallback(gsr_pipewire *self) {
+ int fds[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ uint32_t offsets[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ uint32_t pitches[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ uint64_t modifiers[GSR_PIPEWIRE_DMABUF_MAX_PLANES];
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ fds[i] = self->dmabuf_data[i].fd;
+ offsets[i] = self->dmabuf_data[i].offset;
+ pitches[i] = self->dmabuf_data[i].stride;
+ modifiers[i] = self->format.info.raw.modifier;
+ }
+
+ EGLImage image = NULL;
+ if(self->no_modifiers_fallback) {
+ image = gsr_pipewire_create_egl_image(self, fds, offsets, pitches, modifiers, false);
+ } else {
+ image = gsr_pipewire_create_egl_image(self, fds, offsets, pitches, modifiers, true);
+ if(!image) {
+ fprintf(stderr, "gsr error: gsr_pipewire_create_egl_image_with_fallback: failed to create egl image with modifiers, trying without modifiers\n");
+ self->no_modifiers_fallback = true;
+ image = gsr_pipewire_create_egl_image(self, fds, offsets, pitches, modifiers, false);
+ }
+ }
+ return image;
+}
+
+static bool gsr_pipewire_bind_image_to_texture(gsr_pipewire *self, EGLImage image, unsigned int texture_id, bool external_texture) {
+ const int texture_target = external_texture ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
+ while(self->egl->glGetError() != 0){}
+ self->egl->glBindTexture(texture_target, texture_id);
+ self->egl->glEGLImageTargetTexture2DOES(texture_target, image);
+ const bool success = self->egl->glGetError() == 0;
+ self->egl->glBindTexture(texture_target, 0);
+ return success;
+}
+
+static void gsr_pipewire_bind_image_to_texture_with_fallback(gsr_pipewire *self, gsr_texture_map texture_map, EGLImage image) {
+ if(self->external_texture_fallback) {
+ gsr_pipewire_bind_image_to_texture(self, image, texture_map.external_texture_id, true);
+ } else {
+ if(!gsr_pipewire_bind_image_to_texture(self, image, texture_map.texture_id, false)) {
+ fprintf(stderr, "gsr error: gsr_pipewire_map_texture: failed to bind image to texture, trying with external texture\n");
+ self->external_texture_fallback = true;
+ gsr_pipewire_bind_image_to_texture(self, image, texture_map.external_texture_id, true);
+ }
+ }
+}
+
+static void gsr_pipewire_update_cursor_texture(gsr_pipewire *self, gsr_texture_map texture_map) {
+ if(!self->cursor.data)
+ return;
+
+ self->egl->glBindTexture(GL_TEXTURE_2D, texture_map.cursor_texture_id);
+ // TODO: glTextureSubImage2D if same size
+ self->egl->glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, self->cursor.width, self->cursor.height, 0, GL_RGBA, GL_UNSIGNED_BYTE, self->cursor.data);
+ self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ self->egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ self->egl->glBindTexture(GL_TEXTURE_2D, 0);
+
+ free(self->cursor.data);
+ self->cursor.data = NULL;
+}
+
+bool gsr_pipewire_map_texture(gsr_pipewire *self, gsr_texture_map texture_map, gsr_pipewire_region *region, gsr_pipewire_region *cursor_region, gsr_pipewire_dmabuf_data *dmabuf_data, int *num_dmabuf_data, uint32_t *fourcc, uint64_t *modifiers, bool *using_external_image) {
+ for(int i = 0; i < GSR_PIPEWIRE_DMABUF_MAX_PLANES; ++i) {
+ memset(&dmabuf_data[i], 0, sizeof(gsr_pipewire_dmabuf_data));
+ }
+ *num_dmabuf_data = 0;
+ *using_external_image = self->external_texture_fallback;
+ *fourcc = 0;
+ *modifiers = 0;
+ pthread_mutex_lock(&self->mutex);
+
+ if(!self->negotiated || self->dmabuf_data[0].fd <= 0) {
+ pthread_mutex_unlock(&self->mutex);
+ return false;
+ }
+
+ EGLImage image = gsr_pipewire_create_egl_image_with_fallback(self);
+ if(image) {
+ gsr_pipewire_bind_image_to_texture_with_fallback(self, texture_map, image);
+ *using_external_image = self->external_texture_fallback;
+ self->egl->eglDestroyImage(self->egl->egl_display, image);
+ }
+
+ gsr_pipewire_update_cursor_texture(self, texture_map);
+
+ region->x = 0;
+ region->y = 0;
+
+ region->width = self->format.info.raw.size.width;
+ region->height = self->format.info.raw.size.height;
+
+ if(self->crop.valid) {
+ region->x = self->crop.x;
+ region->y = self->crop.y;
+
+ region->width = self->crop.width;
+ region->height = self->crop.height;
+ }
+
+ /* TODO: Test if cursor hotspot is correct */
+ cursor_region->x = self->cursor.x - self->cursor.hotspot_x;
+ cursor_region->y = self->cursor.y - self->cursor.hotspot_y;
+
+ cursor_region->width = self->cursor.width;
+ cursor_region->height = self->cursor.height;
+
+ for(size_t i = 0; i < self->dmabuf_num_planes; ++i) {
+ dmabuf_data[i] = self->dmabuf_data[i];
+ self->dmabuf_data[i].fd = -1;
+ }
+ *num_dmabuf_data = self->dmabuf_num_planes;
+ *fourcc = spa_video_format_to_drm_format(self->format.info.raw.format);
+ *modifiers = self->format.info.raw.modifier;
+ self->dmabuf_num_planes = 0;
+
+ pthread_mutex_unlock(&self->mutex);
+ return true;
+}
+
+bool gsr_pipewire_is_damaged(gsr_pipewire *self) {
+ bool damaged = false;
+ pthread_mutex_lock(&self->mutex);
+ damaged = self->damaged;
+ pthread_mutex_unlock(&self->mutex);
+ return damaged;
+}
+
+void gsr_pipewire_clear_damage(gsr_pipewire *self) {
+ pthread_mutex_lock(&self->mutex);
+ self->damaged = false;
+ pthread_mutex_unlock(&self->mutex);
+}
diff --git a/src/sound.cpp b/src/sound.cpp
index 53000bd..d0f2a80 100644
--- a/src/sound.cpp
+++ b/src/sound.cpp
@@ -327,12 +327,66 @@ static void pa_sourcelist_cb(pa_context *ctx, const pa_source_info *source_info,
if(eol > 0)
return;
- std::vector<AudioInput> *inputs = (std::vector<AudioInput>*)userdata;
- inputs->push_back({ source_info->name, source_info->description });
+ AudioDevices *audio_devices = (AudioDevices*)userdata;
+ audio_devices->audio_inputs.push_back({ source_info->name, source_info->description });
}
-std::vector<AudioInput> get_pulseaudio_inputs() {
- std::vector<AudioInput> inputs;
+static void pa_server_info_cb(pa_context*, const pa_server_info *server_info, void *userdata) {
+ AudioDevices *audio_devices = (AudioDevices*)userdata;
+ if(server_info->default_sink_name)
+ audio_devices->default_output = std::string(server_info->default_sink_name) + ".monitor";
+ if(server_info->default_source_name)
+ audio_devices->default_input = server_info->default_source_name;
+}
+
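+// Runs a minimal PulseAudio mainloop until the server info request completes
+// (or the connection fails), recording the default sink monitor and default
+// source names.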
+static void get_pulseaudio_default_inputs(AudioDevices &audio_devices) {
+ pa_mainloop *main_loop = pa_mainloop_new();
+
+ pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder-gtk");
+ pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL);
+ int state = 0;
+ int pa_ready = 0;
+ pa_context_set_state_callback(ctx, pa_state_cb, &pa_ready);
+
+ pa_operation *pa_op = NULL;
+
+ for(;;) {
+ // Not ready
+ if(pa_ready == 0) {
+ pa_mainloop_iterate(main_loop, 1, NULL);
+ continue;
+ }
+
+ switch(state) {
+ case 0: {
+ pa_op = pa_context_get_server_info(ctx, pa_server_info_cb, &audio_devices);
+ ++state;
+ break;
+ }
+ }
+
+ // Connection to the server failed, or the server info request finished
+ if(pa_ready == 2 || (state == 1 && pa_op && pa_operation_get_state(pa_op) == PA_OPERATION_DONE)) {
+ if(pa_op)
+ pa_operation_unref(pa_op);
+ pa_context_disconnect(ctx);
+ pa_context_unref(ctx);
+ pa_mainloop_free(main_loop);
+ return;
+ }
+
+ pa_mainloop_iterate(main_loop, 1, NULL);
+ }
+}
+
+AudioDevices get_pulseaudio_inputs() {
+ AudioDevices audio_devices;
+
+ // TODO: Do this in the same connection below instead of two separate connections
+ get_pulseaudio_default_inputs(audio_devices);
+
pa_mainloop *main_loop = pa_mainloop_new();
pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder");
@@ -352,7 +406,7 @@ std::vector<AudioInput> get_pulseaudio_inputs() {
switch(state) {
case 0: {
- pa_op = pa_context_get_source_info_list(ctx, pa_sourcelist_cb, &inputs);
+ pa_op = pa_context_get_source_info_list(ctx, pa_sourcelist_cb, &audio_devices);
++state;
break;
}
@@ -371,5 +425,5 @@ std::vector<AudioInput> get_pulseaudio_inputs() {
}
pa_mainloop_free(main_loop);
- return inputs;
+ return audio_devices;
}
diff --git a/src/utils.c b/src/utils.c
index e00f3c5..42f4c40 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -1,13 +1,23 @@
#include "../include/utils.h"
+
#include <time.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <assert.h>
+
#include <xf86drmMode.h>
#include <xf86drm.h>
-#include <stdlib.h>
+#include <libdrm/drm_fourcc.h>
#include <X11/Xatom.h>
+#include <X11/extensions/Xrandr.h>
+#include <va/va_drmcommon.h>
+#include <libavcodec/avcodec.h>
+#include <libavutil/hwcontext_vaapi.h>
double clock_get_monotonic_seconds(void) {
struct timespec ts;
@@ -17,6 +27,16 @@ double clock_get_monotonic_seconds(void) {
return (double)ts.tv_sec + (double)ts.tv_nsec * 0.000000001;
}
+static gsr_monitor_rotation wayland_transform_to_gsr_rotation(int32_t rot) {
+ switch(rot) {
+ case 0: return GSR_MONITOR_ROT_0;
+ case 1: return GSR_MONITOR_ROT_90;
+ case 2: return GSR_MONITOR_ROT_180;
+ case 3: return GSR_MONITOR_ROT_270;
+ }
+ return GSR_MONITOR_ROT_0;
+}
+
static const XRRModeInfo* get_mode_info(const XRRScreenResources *sr, RRMode id) {
for(int i = 0; i < sr->nmode; ++i) {
if(sr->modes[i].id == id)
@@ -35,16 +55,6 @@ static gsr_monitor_rotation x11_rotation_to_gsr_rotation(int rot) {
return GSR_MONITOR_ROT_0;
}
-static gsr_monitor_rotation wayland_transform_to_gsr_rotation(int32_t rot) {
- switch(rot) {
- case 0: return GSR_MONITOR_ROT_0;
- case 1: return GSR_MONITOR_ROT_90;
- case 2: return GSR_MONITOR_ROT_180;
- case 3: return GSR_MONITOR_ROT_270;
- }
- return GSR_MONITOR_ROT_0;
-}
-
static uint32_t x11_output_get_connector_id(Display *dpy, RROutput output, Atom randr_connector_id_atom) {
Atom type = 0;
int format = 0;
@@ -61,7 +71,7 @@ static uint32_t x11_output_get_connector_id(Display *dpy, RROutput output, Atom
return result;
}
-void for_each_active_monitor_output_x11(Display *display, active_monitor_callback callback, void *userdata) {
+void for_each_active_monitor_output_x11_not_cached(Display *display, active_monitor_callback callback, void *userdata) {
XRRScreenResources *screen_res = XRRGetScreenResources(display, DefaultRootWindow(display));
if(!screen_res)
return;
@@ -76,18 +86,15 @@ void for_each_active_monitor_output_x11(Display *display, active_monitor_callbac
if(crt_info && crt_info->mode) {
const XRRModeInfo *mode_info = get_mode_info(screen_res, crt_info->mode);
if(mode_info && out_info->nameLen < (int)sizeof(display_name)) {
- memcpy(display_name, out_info->name, out_info->nameLen);
- display_name[out_info->nameLen] = '\0';
-
+ snprintf(display_name, sizeof(display_name), "%.*s", (int)out_info->nameLen, out_info->name);
const gsr_monitor monitor = {
.name = display_name,
.name_len = out_info->nameLen,
.pos = { .x = crt_info->x, .y = crt_info->y },
.size = { .x = (int)crt_info->width, .y = (int)crt_info->height },
- .crt_info = crt_info,
.connector_id = x11_output_get_connector_id(display, screen_res->outputs[i], randr_connector_id_atom),
.rotation = x11_rotation_to_gsr_rotation(crt_info->rotation),
- .monitor_identifier = 0
+ .monitor_identifier = out_info->crtc
};
callback(&monitor, userdata);
}
@@ -102,6 +109,22 @@ void for_each_active_monitor_output_x11(Display *display, active_monitor_callbac
XRRFreeScreenResources(screen_res);
}
+void for_each_active_monitor_output_x11(const gsr_egl *egl, active_monitor_callback callback, void *userdata) {
+ for(int i = 0; i < egl->x11.num_outputs; ++i) {
+ const gsr_x11_output *output = &egl->x11.outputs[i];
+ const gsr_monitor monitor = {
+ .name = output->name,
+ .name_len = strlen(output->name),
+ .pos = output->pos,
+ .size = output->size,
+ .connector_id = output->connector_id,
+ .rotation = output->rotation,
+ .monitor_identifier = output->monitor_identifier
+ };
+ callback(&monitor, userdata);
+ }
+}
+
typedef struct {
int type;
int count;
@@ -142,7 +165,7 @@ static bool connector_get_property_by_name(int drmfd, drmModeConnectorPtr props,
return false;
}
-/* TODO: Support more connector types*/
+/* TODO: Support more connector types */
static int get_connector_type_by_name(const char *name) {
int len = strlen(name);
if(len >= 5 && strncmp(name, "HDMI-", 5) == 0)
@@ -185,7 +208,6 @@ static void for_each_active_monitor_output_wayland(const gsr_egl *egl, active_mo
.name_len = strlen(output->name),
.pos = { .x = output->pos.x, .y = output->pos.y },
.size = { .x = output->size.x, .y = output->size.y },
- .crt_info = NULL,
.connector_id = 0,
.rotation = wayland_transform_to_gsr_rotation(output->transform),
.monitor_identifier = connector_type ? monitor_identifier_from_type_and_count(connector_type_index, connector_type->count_active) : 0
@@ -233,12 +255,11 @@ static void for_each_active_monitor_output_drm(const gsr_egl *egl, active_monito
if(connector_type && crtc_id > 0 && crtc && connection_name_len + 5 < (int)sizeof(display_name)) {
const int display_name_len = snprintf(display_name, sizeof(display_name), "%s-%d", connection_name, connector_type->count);
const int connector_type_index_name = get_connector_type_by_name(display_name);
- const gsr_monitor monitor = {
+ gsr_monitor monitor = {
.name = display_name,
.name_len = display_name_len,
.pos = { .x = crtc->x, .y = crtc->y },
.size = { .x = (int)crtc->width, .y = (int)crtc->height },
- .crt_info = NULL,
.connector_id = connector->connector_id,
.rotation = GSR_MONITOR_ROT_0,
.monitor_identifier = connector_type_index_name != -1 ? monitor_identifier_from_type_and_count(connector_type_index_name, connector_type->count_active) : 0
@@ -260,7 +281,7 @@ static void for_each_active_monitor_output_drm(const gsr_egl *egl, active_monito
void for_each_active_monitor_output(const gsr_egl *egl, gsr_connection_type connection_type, active_monitor_callback callback, void *userdata) {
switch(connection_type) {
case GSR_CONNECTION_X11:
- for_each_active_monitor_output_x11(egl->x11.dpy, callback, userdata);
+ for_each_active_monitor_output_x11(egl, callback, userdata);
break;
case GSR_CONNECTION_WAYLAND:
for_each_active_monitor_output_wayland(egl, callback, userdata);
@@ -322,7 +343,7 @@ static void get_monitor_by_connector_id_callback(const gsr_monitor *monitor, voi
}
gsr_monitor_rotation drm_monitor_get_display_server_rotation(const gsr_egl *egl, const gsr_monitor *monitor) {
- if(egl->wayland.dpy) {
+ if(gsr_egl_get_display_server(egl) == GSR_DISPLAY_SERVER_WAYLAND) {
{
get_monitor_by_connector_id_userdata userdata;
userdata.monitor = monitor;
@@ -345,7 +366,7 @@ gsr_monitor_rotation drm_monitor_get_display_server_rotation(const gsr_egl *egl,
userdata.monitor = monitor;
userdata.rotation = GSR_MONITOR_ROT_0;
userdata.match_found = false;
- for_each_active_monitor_output_x11(egl->x11.dpy, get_monitor_by_connector_id_callback, &userdata);
+ for_each_active_monitor_output_x11(egl, get_monitor_by_connector_id_callback, &userdata);
return userdata.rotation;
}
@@ -359,6 +380,7 @@ bool gl_get_gpu_info(gsr_egl *egl, gsr_gpu_info *info) {
const unsigned char *gl_renderer = egl->glGetString(GL_RENDERER);
info->gpu_version = 0;
+ info->is_steam_deck = false;
if(!gl_vendor) {
fprintf(stderr, "gsr error: failed to get gpu vendor\n");
@@ -391,6 +413,7 @@ bool gl_get_gpu_info(gsr_egl *egl, gsr_gpu_info *info) {
if(gl_renderer) {
if(info->vendor == GSR_GPU_VENDOR_NVIDIA)
sscanf((const char*)gl_renderer, "%*s %*s %*s %d", &info->gpu_version);
+ info->is_steam_deck = strstr((const char*)gl_renderer, "vangogh") != NULL;
}
end:
@@ -480,3 +503,384 @@ bool gsr_card_path_get_render_path(const char *card_path, char *render_path) {
close(fd);
return false;
}
+
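+// Creates every missing component of the path, like `mkdir -p`. The string is
+// temporarily modified in place, so the caller must pass a writable buffer.
+// Returns 0 on success, -1 on the first mkdir failure other than EEXIST.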
+int create_directory_recursive(char *path) {
+ int path_len = strlen(path);
+ char *p = path;
+ char *end = path + path_len;
+ for(;;) {
+ char *slash_p = strchr(p, '/');
+
+ // Skip the first '/'; we don't want to try to create the root directory
+ if(slash_p == path) {
+ ++p;
+ continue;
+ }
+
+ if(!slash_p)
+ slash_p = end;
+
+ char prev_char = *slash_p;
+ *slash_p = '\0';
+ int err = mkdir(path, S_IRWXU);
+ *slash_p = prev_char;
+
+ if(err == -1 && errno != EEXIST)
+ return err;
+
+ if(slash_p == end)
+ break;
+ else
+ p = slash_p + 1;
+ }
+ return 0;
+}
+
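+// Builds the EGL attribute list for importing a multi-plane dma-buf: three
+// base pairs (fourcc, width, height), then fd/offset/pitch pairs per plane
+// plus optional 64-bit modifier lo/hi pairs, terminated by EGL_NONE.
+// The worst case is 6 + 4 planes * 10 + 1 = 47 entries.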
+void setup_dma_buf_attrs(intptr_t *img_attr, uint32_t format, uint32_t width, uint32_t height, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, int num_planes, bool use_modifier) {
+ size_t img_attr_index = 0;
+
+ img_attr[img_attr_index++] = EGL_LINUX_DRM_FOURCC_EXT;
+ img_attr[img_attr_index++] = format;
+
+ img_attr[img_attr_index++] = EGL_WIDTH;
+ img_attr[img_attr_index++] = width;
+
+ img_attr[img_attr_index++] = EGL_HEIGHT;
+ img_attr[img_attr_index++] = height;
+
+ if(num_planes >= 1) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_FD_EXT;
+ img_attr[img_attr_index++] = fds[0];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT;
+ img_attr[img_attr_index++] = offsets[0];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_PITCH_EXT;
+ img_attr[img_attr_index++] = pitches[0];
+
+ if(use_modifier) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
+ img_attr[img_attr_index++] = modifiers[0] & 0xFFFFFFFFULL;
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
+ img_attr[img_attr_index++] = modifiers[0] >> 32ULL;
+ }
+ }
+
+ if(num_planes >= 2) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_FD_EXT;
+ img_attr[img_attr_index++] = fds[1];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_OFFSET_EXT;
+ img_attr[img_attr_index++] = offsets[1];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_PITCH_EXT;
+ img_attr[img_attr_index++] = pitches[1];
+
+ if(use_modifier) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT;
+ img_attr[img_attr_index++] = modifiers[1] & 0xFFFFFFFFULL;
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT;
+ img_attr[img_attr_index++] = modifiers[1] >> 32ULL;
+ }
+ }
+
+ if(num_planes >= 3) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_FD_EXT;
+ img_attr[img_attr_index++] = fds[2];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_OFFSET_EXT;
+ img_attr[img_attr_index++] = offsets[2];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_PITCH_EXT;
+ img_attr[img_attr_index++] = pitches[2];
+
+ if(use_modifier) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT;
+ img_attr[img_attr_index++] = modifiers[2] & 0xFFFFFFFFULL;
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT;
+ img_attr[img_attr_index++] = modifiers[2] >> 32ULL;
+ }
+ }
+
+ if(num_planes >= 4) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_FD_EXT;
+ img_attr[img_attr_index++] = fds[3];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_OFFSET_EXT;
+ img_attr[img_attr_index++] = offsets[3];
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_PITCH_EXT;
+ img_attr[img_attr_index++] = pitches[3];
+
+ if(use_modifier) {
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT;
+ img_attr[img_attr_index++] = modifiers[3] & 0xFFFFFFFFULL;
+
+ img_attr[img_attr_index++] = EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT;
+ img_attr[img_attr_index++] = modifiers[3] >> 32ULL;
+ }
+ }
+
+ img_attr[img_attr_index++] = EGL_NONE;
+ assert(img_attr_index <= 47); // 6 base pairs + 4 planes * 10 + EGL_NONE
+}
+
+static VADisplay video_codec_context_get_vaapi_display(AVCodecContext *video_codec_context) {
+ AVBufferRef *hw_frames_ctx = video_codec_context->hw_frames_ctx;
+ if(!hw_frames_ctx)
+ return NULL;
+
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)hw_frames_ctx->data;
+ AVHWDeviceContext *device_context = (AVHWDeviceContext*)hw_frame_context->device_ctx;
+ if(device_context->type != AV_HWDEVICE_TYPE_VAAPI)
+ return NULL;
+
+ AVVAAPIDeviceContext *vactx = device_context->hwctx;
+ return vactx->display;
+}
+
+bool video_codec_context_is_vaapi(AVCodecContext *video_codec_context) {
+ AVBufferRef *hw_frames_ctx = video_codec_context->hw_frames_ctx;
+ if(!hw_frames_ctx)
+ return false;
+
+ AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)hw_frames_ctx->data;
+ AVHWDeviceContext *device_context = (AVHWDeviceContext*)hw_frame_context->device_ctx;
+ return device_context->type == AV_HWDEVICE_TYPE_VAAPI;
+}
+
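+// DRM fourccs describe a packed little-endian word while VA fourccs name the
+// bytes in memory order, so the channel order flips between the two.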
+static uint32_t drm_fourcc_to_va_fourcc(uint32_t drm_fourcc) {
+ switch(drm_fourcc) {
+ case DRM_FORMAT_XRGB8888: return VA_FOURCC_BGRX;
+ case DRM_FORMAT_XBGR8888: return VA_FOURCC_RGBX;
+ case DRM_FORMAT_RGBX8888: return VA_FOURCC_XBGR;
+ case DRM_FORMAT_BGRX8888: return VA_FOURCC_XRGB;
+ case DRM_FORMAT_ARGB8888: return VA_FOURCC_BGRA;
+ case DRM_FORMAT_ABGR8888: return VA_FOURCC_RGBA;
+ case DRM_FORMAT_RGBA8888: return VA_FOURCC_ABGR;
+ case DRM_FORMAT_BGRA8888: return VA_FOURCC_ARGB;
+ default: return drm_fourcc;
+ }
+}
+
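+// Wraps the given dma-buf planes in a VAAPI surface (DRM PRIME 2 import) and
+// runs a video processing pipeline to blit it into the frame's encode surface,
+// letting the driver handle scaling and RGB to YUV conversion.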
+bool vaapi_copy_drm_planes_to_video_surface(AVCodecContext *video_codec_context, AVFrame *video_frame, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, uint32_t format, vec2i size, const int *fds, const uint32_t *offsets, const uint32_t *pitches, const uint64_t *modifiers, int num_planes) {
+ VAConfigID config_id = 0;
+ VAContextID context_id = 0;
+ VASurfaceID input_surface_id = 0;
+ VABufferID buffer_id = 0;
+ bool success = true;
+
+ VADisplay va_dpy = video_codec_context_get_vaapi_display(video_codec_context);
+ if(!va_dpy) {
+ success = false;
+ goto done;
+ }
+
+ VAStatus va_status = vaCreateConfig(va_dpy, VAProfileNone, VAEntrypointVideoProc, NULL, 0, &config_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaCreateConfig failed, error: %s\n", vaErrorStr(va_status));
+ success = false;
+ goto done;
+ }
+
+ VASurfaceID output_surface_id = (uintptr_t)video_frame->data[3];
+ va_status = vaCreateContext(va_dpy, config_id, size.x, size.y, VA_PROGRESSIVE, &output_surface_id, 1, &context_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaCreateContext failed, error: %s\n", vaErrorStr(va_status));
+ success = false;
+ goto done;
+ }
+
+ VADRMPRIMESurfaceDescriptor buf = {0};
+ buf.fourcc = drm_fourcc_to_va_fourcc(format); // TODO: Handle VA_FOURCC_BGRA and VA_FOURCC_X2R10G10B10 (HDR) explicitly
+ buf.width = size.x;
+ buf.height = size.y;
+ buf.num_objects = num_planes;
+ buf.num_layers = 1;
+ buf.layers[0].drm_format = format;
+ buf.layers[0].num_planes = buf.num_objects;
+ for(int i = 0; i < num_planes; ++i) {
+ buf.objects[i].fd = fds[i];
+ buf.objects[i].size = size.y * pitches[i]; // TODO: Use the real dma-buf object size instead of estimating it from height * pitch
+ buf.objects[i].drm_format_modifier = modifiers[i];
+
+ buf.layers[0].object_index[i] = i;
+ buf.layers[0].offset[i] = offsets[i];
+ buf.layers[0].pitch[i] = pitches[i];
+ }
+
+ VASurfaceAttrib attribs[2] = {0};
+ attribs[0].type = VASurfaceAttribMemoryType;
+ attribs[0].flags = VA_SURFACE_ATTRIB_SETTABLE;
+ attribs[0].value.type = VAGenericValueTypeInteger;
+ attribs[0].value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2;
+ attribs[1].type = VASurfaceAttribExternalBufferDescriptor;
+ attribs[1].flags = VA_SURFACE_ATTRIB_SETTABLE;
+ attribs[1].value.type = VAGenericValueTypePointer;
+ attribs[1].value.value.p = &buf;
+
+ // TODO: RT_FORMAT with 10 bit/hdr, VA_RT_FORMAT_RGB32_10
+ // TODO: Max size same as source_size
+ va_status = vaCreateSurfaces(va_dpy, VA_RT_FORMAT_RGB32, size.x, size.y, &input_surface_id, 1, attribs, 2);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaCreateSurfaces failed, error: %s\n", vaErrorStr(va_status));
+ success = false;
+ goto done;
+ }
+
+ const VARectangle source_region = {
+ .x = source_pos.x,
+ .y = source_pos.y,
+ .width = source_size.x,
+ .height = source_size.y
+ };
+
+ const VARectangle output_region = {
+ .x = dest_pos.x,
+ .y = dest_pos.y,
+ .width = dest_size.x,
+ .height = dest_size.y
+ };
+
+ // Copying a surface to another surface will automatically perform the color conversion. Thanks vaapi!
+ VAProcPipelineParameterBuffer params = {0};
+ params.surface = input_surface_id;
+ params.surface_region = &source_region;
+ params.output_region = &output_region;
+ params.output_background_color = 0;
+ params.filter_flags = VA_FRAME_PICTURE;
+ params.pipeline_flags = VA_PROC_PIPELINE_FAST;
+
+ params.input_color_properties.colour_primaries = 1;
+ params.input_color_properties.transfer_characteristics = 1;
+ params.input_color_properties.matrix_coefficients = 1;
+ params.surface_color_standard = VAProcColorStandardBT709; // TODO: Derive from the actual input color space
+ params.input_color_properties.color_range = video_frame->color_range == AVCOL_RANGE_JPEG ? VA_SOURCE_RANGE_FULL : VA_SOURCE_RANGE_REDUCED;
+
+ params.output_color_properties.colour_primaries = 1;
+ params.output_color_properties.transfer_characteristics = 1;
+ params.output_color_properties.matrix_coefficients = 1;
+ params.output_color_standard = VAProcColorStandardBT709; // TODO: Derive from the actual output color space
+ params.output_color_properties.color_range = video_frame->color_range == AVCOL_RANGE_JPEG ? VA_SOURCE_RANGE_FULL : VA_SOURCE_RANGE_REDUCED;
+
+ params.processing_mode = VAProcPerformanceMode;
+
+ // VAProcPipelineCaps pipeline_caps = {0};
+ // va_status = vaQueryVideoProcPipelineCaps(self->va_dpy,
+ // self->context_id,
+ // NULL, 0,
+ // &pipeline_caps);
+ // if(va_status == VA_STATUS_SUCCESS) {
+ // fprintf(stderr, "pipeline_caps: %u, %u\n", (unsigned int)pipeline_caps.rotation_flags, pipeline_caps.blend_flags);
+ // }
+
+ // TODO: params.output_hdr_metadata
+
+ // TODO:
+ // if (first surface to render)
+ // pipeline_param->output_background_color = 0xff000000; // black
+
+ va_status = vaCreateBuffer(va_dpy, context_id, VAProcPipelineParameterBufferType, sizeof(params), 1, &params, &buffer_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaCreateBuffer failed, error: %d\n", va_status);
+ success = false;
+ goto done;
+ }
+
+ va_status = vaBeginPicture(va_dpy, context_id, output_surface_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaBeginPicture failed, error: %d\n", va_status);
+ success = false;
+ goto done;
+ }
+
+ va_status = vaRenderPicture(va_dpy, context_id, &buffer_id, 1);
+ if(va_status != VA_STATUS_SUCCESS) {
+ vaEndPicture(va_dpy, context_id);
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaRenderPicture failed, error: %d\n", va_status);
+ success = false;
+ goto done;
+ }
+
+ va_status = vaEndPicture(va_dpy, context_id);
+ if(va_status != VA_STATUS_SUCCESS) {
+ fprintf(stderr, "gsr error: vaapi_copy_drm_planes_to_video_surface: vaEndPicture failed, error: %d\n", va_status);
+ success = false;
+ goto done;
+ }
+
+ // vaSyncBuffer(va_dpy, buffer_id, 1000 * 1000 * 1000);
+ // vaSyncSurface(va_dpy, input_surface_id);
+ // vaSyncSurface(va_dpy, output_surface_id);
+
+ done:
+ if(buffer_id)
+ vaDestroyBuffer(va_dpy, buffer_id);
+
+ if(input_surface_id)
+ vaDestroySurfaces(va_dpy, &input_surface_id, 1);
+
+ if(context_id)
+ vaDestroyContext(va_dpy, context_id);
+
+ if(config_id)
+ vaDestroyConfig(va_dpy, config_id);
+
+ return success;
+}
+
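+// Exports the EGLImage back into dma-buf planes with the MESA export
+// extension, then reuses vaapi_copy_drm_planes_to_video_surface to copy them
+// into the video frame.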
+bool vaapi_copy_egl_image_to_video_surface(gsr_egl *egl, EGLImage image, vec2i source_pos, vec2i source_size, vec2i dest_pos, vec2i dest_size, AVCodecContext *video_codec_context, AVFrame *video_frame) {
+ if(!image)
+ return false;
+
+ int texture_fourcc = 0;
+ int texture_num_planes = 0;
+ uint64_t texture_modifiers = 0;
+ if(!egl->eglExportDMABUFImageQueryMESA(egl->egl_display, image, &texture_fourcc, &texture_num_planes, &texture_modifiers)) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: eglExportDMABUFImageQueryMESA failed\n");
+ return false;
+ }
+
+ if(texture_num_planes <= 0 || texture_num_planes > 8) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: expected planes size to be 0<planes<8 for drm buf, got %d planes\n", texture_num_planes);
+ return false;
+ }
+
+ int texture_fds[8];
+ int32_t texture_strides[8];
+ int32_t texture_offsets[8];
+
+ while(egl->eglGetError() != EGL_SUCCESS){}
+ if(!egl->eglExportDMABUFImageMESA(egl->egl_display, image, texture_fds, texture_strides, texture_offsets)) {
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_vaapi_tick: eglExportDMABUFImageMESA failed, error: %d\n", egl->eglGetError());
+ return false;
+ }
+
+ int fds[8];
+ uint32_t offsets[8];
+ uint32_t pitches[8];
+ uint64_t modifiers[8];
+ for(int i = 0; i < texture_num_planes; ++i) {
+ fds[i] = texture_fds[i];
+ offsets[i] = texture_offsets[i];
+ pitches[i] = texture_strides[i];
+ modifiers[i] = texture_modifiers;
+
+ if(fds[i] == -1)
+ texture_num_planes = i;
+ }
+ const bool success = texture_num_planes > 0 && vaapi_copy_drm_planes_to_video_surface(video_codec_context, video_frame, source_pos, source_size, dest_pos, dest_size, texture_fourcc, source_size, fds, offsets, pitches, modifiers, texture_num_planes);
+
+ for(int i = 0; i < texture_num_planes; ++i) {
+ if(texture_fds[i] > 0) {
+ close(texture_fds[i]);
+ texture_fds[i] = -1;
+ }
+ }
+
+ return success;
+}
diff --git a/src/window_texture.c b/src/window_texture.c
index 0f4aa2c..8eef4c9 100644
--- a/src/window_texture.c
+++ b/src/window_texture.c
@@ -16,6 +16,7 @@ int window_texture_init(WindowTexture *window_texture, Display *display, Window
window_texture->display = display;
window_texture->window = window;
window_texture->pixmap = None;
+ window_texture->image = NULL;
window_texture->texture_id = 0;
window_texture->redirected = 0;
window_texture->egl = egl;
@@ -34,6 +35,11 @@ static void window_texture_cleanup(WindowTexture *self, int delete_texture) {
self->texture_id = 0;
}
+ if(self->image) {
+ self->egl->eglDestroyImage(self->egl->egl_display, self->image);
+ self->image = NULL;
+ }
+
if(self->pixmap) {
XFreePixmap(self->display, self->pixmap);
self->pixmap = None;
@@ -101,14 +107,14 @@ int window_texture_on_resize(WindowTexture *self) {
self->pixmap = pixmap;
self->texture_id = texture_id;
+ self->image = image;
cleanup:
self->egl->glBindTexture(GL_TEXTURE_2D, 0);
- if(image)
- self->egl->eglDestroyImage(self->egl->egl_display, image);
-
if(result != 0) {
+ if(image)
+ self->egl->eglDestroyImage(self->egl->egl_display, image);
if(texture_id != 0)
self->egl->glDeleteTextures(1, &texture_id);
if(pixmap)