-rw-r--r--  TODO                         19
-rw-r--r--  include/pipewire_audio.h    108
-rw-r--r--  include/sound.hpp            21
-rw-r--r--  include/utils.h               2
-rw-r--r--  kms/client/kms_client.c      19
-rw-r--r--  src/color_conversion.c        8
-rw-r--r--  src/dbus.c                   19
-rw-r--r--  src/main.cpp                263
-rw-r--r--  src/pipewire_audio.c        406
-rw-r--r--  src/pipewire_video.c          2
-rw-r--r--  src/sound.cpp               108
-rw-r--r--  src/utils.c                  20
12 files changed, 877 insertions, 118 deletions
diff --git a/TODO b/TODO
index de803f8..cee04ac 100644
--- a/TODO
+++ b/TODO
@@ -176,14 +176,19 @@ Default to hevc if capture size is larger than 4096 in width or height.
Set low latency mode on vulkan encoding.
-Support pipewire audio capture which also allows capturing audio from a single application. This can also be done with pulseaudio by creating a virtual sink:
- pactl load-module module-combine-sink sink_name=gsr2 slaves=$(pactl get-default-sink) sink_properties=device.description="gsr"
- pactl move-sink-input 2944 gsr2 # 2944 comes from 'pactl list sink-inputs'
- and then record gsr2.monitor.
- Or use pa_stream_set_monitor_stream, which also takes the sink-input as input. However need to track when the sink disconnects to mute and then reconnect again.
-
Support recording/replay/livestreaming at the same time by allowing commands to be run on an existing gpu screen recorder instance.
Test if `xrandr --output DP-1 --scale 1.5` captures correct size on nvidia.
-Fix cursor position and scale when scaling x11 display.
\ No newline at end of file
+Fix cursor position and scale when scaling x11 display.
+
+Support surround audio in application audio recording. Right now only stereo sound is supported.
+
+Support application audio recording without pulseaudio combined sink.
+
+If recording application audio, set up gsr_pipewire_audio and add gsr_pipewire_audio_add_link_from_app_to_stream for the application and the created combined sink.
+ Use gsr_pipewire_audio_add_link_from_app_to_stream_inverted for the inverted scenario. Parse the inverted scenario as well.
+
+Support transposing (rotating) with vaapi. This isn't supported on many devices with an rgb buffer, but it's supported with an nv12 buffer (on intel at least).
+
+Link name for stream_name input is wrong. It should be Playback_FL/Playback_FR now with combined sink. Add function for combined sink, or rewrite existing code.
\ No newline at end of file
diff --git a/include/pipewire_audio.h b/include/pipewire_audio.h
index ae14cb3..8cfb2d2 100644
--- a/include/pipewire_audio.h
+++ b/include/pipewire_audio.h
@@ -1,4 +1,112 @@
#ifndef GSR_PIPEWIRE_AUDIO_H
#define GSR_PIPEWIRE_AUDIO_H
+#include <pipewire/thread-loop.h>
+#include <pipewire/context.h>
+#include <pipewire/core.h>
+#include <spa/utils/hook.h>
+
+#include <stdbool.h>
+
+#define GSR_PIPEWIRE_AUDIO_MAX_STREAM_NODES 64
+#define GSR_PIPEWIRE_AUDIO_MAX_PORTS 64
+#define GSR_PIPEWIRE_AUDIO_MAX_REQUESTED_LINKS 32
+
+typedef enum {
+ GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, /* Application audio */
+ GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT, /* Audio recording input */
+ GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK
+} gsr_pipewire_audio_node_type;
+
+typedef struct {
+ uint32_t id;
+ char *name;
+ gsr_pipewire_audio_node_type type;
+} gsr_pipewire_audio_node;
+
+typedef enum {
+ GSR_PIPEWIRE_AUDIO_PORT_DIRECTION_INPUT,
+ GSR_PIPEWIRE_AUDIO_PORT_DIRECTION_OUTPUT
+} gsr_pipewire_audio_port_direction;
+
+typedef struct {
+ uint32_t id;
+ uint32_t node_id;
+ gsr_pipewire_audio_port_direction direction;
+ char *name;
+} gsr_pipewire_audio_port;
+
+typedef enum {
+ GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM,
+ GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK
+} gsr_pipewire_audio_link_output_type;
+
+typedef struct {
+ char **app_names;
+ int num_app_names;
+ char *output_name;
+ bool inverted;
+ gsr_pipewire_audio_link_output_type output_type;
+} gsr_pipewire_audio_requested_link;
+
+typedef struct {
+ struct pw_thread_loop *thread_loop;
+ struct pw_context *context;
+ struct pw_core *core;
+ struct spa_hook core_listener;
+ struct pw_registry *registry;
+ struct spa_hook registry_listener;
+ int server_version_sync;
+
+ gsr_pipewire_audio_node stream_nodes[GSR_PIPEWIRE_AUDIO_MAX_STREAM_NODES];
+ int num_stream_nodes;
+
+ gsr_pipewire_audio_port ports[GSR_PIPEWIRE_AUDIO_MAX_PORTS];
+ int num_ports;
+
+ gsr_pipewire_audio_requested_link requested_links[GSR_PIPEWIRE_AUDIO_MAX_REQUESTED_LINKS];
+ int num_requested_links;
+} gsr_pipewire_audio;
+
+bool gsr_pipewire_audio_init(gsr_pipewire_audio *self);
+void gsr_pipewire_audio_deinit(gsr_pipewire_audio *self);
+
+/*
+ This function links audio source outputs from applications that match the name |app_names_output| to the input
+ that matches the name |stream_name_input|.
+ If an application or a new application starts outputting audio after this function is called and the app name matches
+ then it will automatically link the audio sources.
+ |app_names_output| and |stream_name_input| are case-insensitive matches.
+*/
+bool gsr_pipewire_audio_add_link_from_apps_to_stream(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input);
+/*
+ This function links audio source outputs from all applications except the ones that match the name |app_names_output| to the input
+ that matches the name |stream_name_input|.
+ If an application or a new application starts outputting audio after this function is called and the app name doesn't match
+ then it will automatically link the audio sources.
+ |app_names_output| and |stream_name_input| are case-insensitive matches.
+*/
+bool gsr_pipewire_audio_add_link_from_apps_to_stream_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input);
+
+/*
+ This function links audio source outputs from applications that match the name |app_names_output| to the input
+ that matches the name |sink_name_input|.
+ If an application or a new application starts outputting audio after this function is called and the app name matches
+ then it will automatically link the audio sources.
+ |app_names_output| and |sink_name_input| are case-insensitive matches.
+*/
+bool gsr_pipewire_audio_add_link_from_apps_to_sink(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input);
+/*
+ This function links audio source outputs from all applications except the ones that match the name |app_names_output| to the input
+ that matches the name |sink_name_input|.
+ If an application or a new application starts outputting audio after this function is called and the app name doesn't match
+ then it will automatically link the audio sources.
+ |app_names_output| and |sink_name_input| are case-insensitive matches.
+*/
+bool gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input);
+
+/* Return true to continue */
+typedef bool (*gsr_pipewire_audio_app_query_callback)(const char *app_name, void *userdata);
+void gsr_pipewire_audio_for_each_app(gsr_pipewire_audio *self, gsr_pipewire_audio_app_query_callback callback, void *userdata);
+
#endif /* GSR_PIPEWIRE_AUDIO_H */
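The four gsr_pipewire_audio_add_link_* variants above differ only in the target node type (a capture stream's input ports vs. a sink's playback ports) and in whether the application-name match is inverted. A minimal usage sketch, assuming PipeWire is the running sound server; the sink name "my-sink" is a hypothetical placeholder and error handling is reduced to early returns:

    #include "../include/pipewire_audio.h"
    #include <stdio.h>

    static bool print_app(const char *app_name, void *userdata) {
        (void)userdata;
        puts(app_name);  /* one application audio source per line */
        return true;     /* returning false would stop the iteration */
    }

    int main(void) {
        gsr_pipewire_audio audio;
        if(!gsr_pipewire_audio_init(&audio))
            return 1; /* most likely the sound server is not PipeWire */

        /* List the applications currently outputting audio */
        gsr_pipewire_audio_for_each_app(&audio, print_app, NULL);

        /* Link firefox (and any matching stream that appears later, case-insensitive)
           to the sink named "my-sink" (hypothetical). The _inverted variant would
           instead link every application except the listed ones. */
        const char *app_names[] = { "firefox" };
        if(!gsr_pipewire_audio_add_link_from_apps_to_sink(&audio, app_names, 1, "my-sink"))
            fprintf(stderr, "failed to request application audio link\n");

        /* ... record audio from the sink ... */
        gsr_pipewire_audio_deinit(&audio);
        return 0;
    }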
diff --git a/include/sound.hpp b/include/sound.hpp
index 7bcc120..018ff4a 100644
--- a/include/sound.hpp
+++ b/include/sound.hpp
@@ -26,6 +26,11 @@ typedef struct {
unsigned int frames;
} SoundDevice;
+enum class AudioInputType {
+ DEVICE,
+ APPLICATION
+};
+
struct AudioInput {
std::string name;
std::string description;
@@ -37,8 +42,14 @@ struct AudioDevices {
std::vector<AudioInput> audio_inputs;
};
+struct ApplicationAudio {
+ std::string name;
+};
+
struct MergedAudioInputs {
std::vector<AudioInput> audio_inputs;
+ AudioInputType type = AudioInputType::DEVICE;
+ bool inverted = false;
};
typedef enum {
@@ -48,12 +59,15 @@ typedef enum {
} AudioFormat;
/*
- Get a sound device by name, returning the device into the @device parameter.
- The device should be closed with @sound_device_close after it has been used
- to clean up internal resources.
+ Get a sound device by name, returning the device into the |device| parameter.
Returns 0 on success, or a negative value on failure.
*/
int sound_device_get_by_name(SoundDevice *device, const char *device_name, const char *description, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format);
+/*
+ Creates a module-combine-sink and connects to it for recording, returning the device into the |device| parameter.
+ Returns 0 on success, or a negative value on failure.
+*/
+int sound_device_create_combined_sink_connect(SoundDevice *device, const char *combined_sink_name, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format);
void sound_device_close(SoundDevice *device);
@@ -64,5 +78,6 @@ void sound_device_close(SoundDevice *device);
int sound_device_read_next_chunk(SoundDevice *device, void **buffer, double timeout_sec, double *latency_seconds);
AudioDevices get_pulseaudio_inputs();
+std::vector<ApplicationAudio> get_pulseaudio_applications();
#endif /* GPU_SCREEN_RECORDER_H */
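sound_device_create_combined_sink_connect() pairs with the PipeWire linking API: it loads a module-combine-sink with the given name and records from that sink's monitor source. A rough sketch of the capture side, assuming stereo (the TODO notes only stereo is supported so far), an example period of 1024 frames, and that S16 is one of the AudioFormat values; the sink name is a placeholder rather than the randomly suffixed one the real code generates:

    #include "../include/sound.hpp"
    #include <cstdio>

    int main() {
        SoundDevice device;
        if(sound_device_create_combined_sink_connect(&device, "gsr-combined-example", 2, 1024, S16) != 0) {
            fprintf(stderr, "failed to create combined sink\n");
            return 1;
        }

        // Applications are routed into the sink via gsr_pipewire_audio_add_link_from_apps_to_sink();
        // this loop then reads the mixed audio from the sink's monitor (return value handling omitted).
        for(int i = 0; i < 100; ++i) {
            void *buffer = nullptr;
            double latency_seconds = 0.0;
            sound_device_read_next_chunk(&device, &buffer, 0.1, &latency_seconds);
        }

        sound_device_close(&device);
        return 0;
    }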
diff --git a/include/utils.h b/include/utils.h
index 9ccd26e..984b963 100644
--- a/include/utils.h
+++ b/include/utils.h
@@ -28,6 +28,8 @@ typedef struct {
} get_monitor_by_name_userdata;
double clock_get_monotonic_seconds(void);
+bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size);
+bool generate_random_characters_standard_alphabet(char *buffer, int buffer_size);
typedef void (*active_monitor_callback)(const gsr_monitor *monitor, void *userdata);
void for_each_active_monitor_output_x11_not_cached(Display *display, active_monitor_callback callback, void *userdata);
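Note that both helpers fill exactly buffer_size characters and do not write a null terminator; the callers in kms_client.c, dbus.c and main.cpp terminate or length-append the buffer themselves. A small sketch:

    #include "../include/utils.h"
    #include <stdio.h>

    int main(void) {
        char suffix[11];
        /* fills 10 characters from [A-Za-z0-9]; terminating the string is the caller's job */
        if(!generate_random_characters_standard_alphabet(suffix, 10))
            return 1;
        suffix[10] = '\0';
        printf(".gsr-kms-socket-%s\n", suffix);
        return 0;
    }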
diff --git a/kms/client/kms_client.c b/kms/client/kms_client.c
index 6f11244..9fadf05 100644
--- a/kms/client/kms_client.c
+++ b/kms/client/kms_client.c
@@ -1,4 +1,5 @@
#include "kms_client.h"
+#include "../../include/utils.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
@@ -12,7 +13,6 @@
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/capability.h>
-#include <sys/random.h>
#define GSR_SOCKET_PAIR_LOCAL 0
#define GSR_SOCKET_PAIR_REMOTE 1
@@ -20,21 +20,6 @@
static void cleanup_socket(gsr_kms_client *self, bool kill_server);
static int gsr_kms_client_replace_connection(gsr_kms_client *self);
-static bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
- /* TODO: Use other functions on other platforms than linux */
- if(getrandom(buffer, buffer_size, 0) < buffer_size) {
- fprintf(stderr, "Failed to get random bytes, error: %s\n", strerror(errno));
- return false;
- }
-
- for(int i = 0; i < buffer_size; ++i) {
- unsigned char c = *(unsigned char*)&buffer[i];
- buffer[i] = alphabet[c % alphabet_size];
- }
-
- return true;
-}
-
static void close_fds(gsr_kms_response *response) {
for(int i = 0; i < response->num_items; ++i) {
for(int j = 0; j < response->items[i].num_dma_bufs; ++j) {
@@ -139,7 +124,7 @@ static bool create_socket_path(char *output_path, size_t output_path_size) {
char random_characters[11];
random_characters[10] = '\0';
- if(!generate_random_characters(random_characters, 10, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 62))
+ if(!generate_random_characters_standard_alphabet(random_characters, 10))
return false;
snprintf(output_path, output_path_size, "%s/.gsr-kms-socket-%s", home, random_characters);
diff --git a/src/color_conversion.c b/src/color_conversion.c
index f34512c..f3748b4 100644
--- a/src/color_conversion.c
+++ b/src/color_conversion.c
@@ -28,26 +28,26 @@ static float abs_f(float v) {
#define RGB_TO_P010_FULL "const mat4 RGBtoYUV = mat4(0.262700, -0.139630, 0.500000, 0.000000,\n" \
" 0.678000, -0.360370, -0.459786, 0.000000,\n" \
" 0.059300, 0.500000, -0.040214, 0.000000,\n" \
- " 0.000000, 0.500000, 0.500000, 1.000000);"
+ " 0.000000, 0.500000, 0.500000, 1.000000);\n"
/* ITU-R BT2020, limited (full multiplied by (235-16)/255, adding 16/255 to luma) */
#define RGB_TO_P010_LIMITED "const mat4 RGBtoYUV = mat4(0.225613, -0.119918, 0.429412, 0.000000,\n" \
" 0.582282, -0.309494, -0.394875, 0.000000,\n" \
" 0.050928, 0.429412, -0.034537, 0.000000,\n" \
- " 0.062745, 0.500000, 0.500000, 1.000000);"
+ " 0.062745, 0.500000, 0.500000, 1.000000);\n"
/* ITU-R BT709, full, custom values: 0.2110 0.7110 0.0710 */
/* https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.709-6-201506-I!!PDF-E.pdf */
#define RGB_TO_NV12_FULL "const mat4 RGBtoYUV = mat4(0.211000, -0.113563, 0.500000, 0.000000,\n" \
" 0.711000, -0.382670, -0.450570, 0.000000,\n" \
" 0.071000, 0.500000, -0.044994, 0.000000,\n" \
- " 0.000000, 0.500000, 0.500000, 1.000000);"
+ " 0.000000, 0.500000, 0.500000, 1.000000);\n"
/* ITU-R BT709, limited, custom values: 0.2100 0.7100 0.0700 (full multiplied by (235-16)/255, adding 16/255 to luma) */
#define RGB_TO_NV12_LIMITED "const mat4 RGBtoYUV = mat4(0.180353, -0.096964, 0.429412, 0.000000,\n" \
" 0.609765, -0.327830, -0.385927, 0.000000,\n" \
" 0.060118, 0.429412, -0.038049, 0.000000,\n" \
- " 0.062745, 0.500000, 0.500000, 1.000000);"
+ " 0.062745, 0.500000, 0.500000, 1.000000);\n"
static const char* color_format_range_get_transform_matrix(gsr_destination_color color_format, gsr_color_range color_range) {
switch(color_format) {
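The limited-range matrices are derived exactly as the comments state: the coefficients built from the custom Kr/Kg/Kb values are scaled by (235-16)/255 and 16/255 is added to the luma offset. A quick arithmetic check of the BT.709 limited-range luma coefficients, using the custom values 0.2100/0.7100/0.0700 from the comment above:

    #include <stdio.h>

    int main(void) {
        const double scale = (235.0 - 16.0) / 255.0; /* limited-range scaling */
        printf("%f %f %f\n", 0.2100 * scale, 0.7100 * scale, 0.0700 * scale);
        /* prints ~0.180353 0.609765 0.060118, matching the luma coefficients
           (first entry of each column) in RGB_TO_NV12_LIMITED */
        printf("%f\n", 16.0 / 255.0); /* luma offset, ~0.062745 */
        return 0;
    }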
diff --git a/src/dbus.c b/src/dbus.c
index 5757b8b..2087c35 100644
--- a/src/dbus.c
+++ b/src/dbus.c
@@ -1,11 +1,11 @@
#include "../include/dbus.h"
+#include "../include/utils.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
-#include <sys/random.h>
/* TODO: Make non-blocking when GPU Screen Recorder is turned into a library */
/* TODO: Make sure responses matches the requests */
@@ -37,27 +37,12 @@ static const char* dict_value_type_to_string(dict_value_type type) {
return "(unknown)";
}
-static bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
- /* TODO: Use other functions on other platforms than linux */
- if(getrandom(buffer, buffer_size, 0) < buffer_size) {
- fprintf(stderr, "gsr error: generate_random_characters: failed to get random bytes, error: %s\n", strerror(errno));
- return false;
- }
-
- for(int i = 0; i < buffer_size; ++i) {
- unsigned char c = *(unsigned char*)&buffer[i];
- buffer[i] = alphabet[c % alphabet_size];
- }
-
- return true;
-}
-
bool gsr_dbus_init(gsr_dbus *self, const char *screencast_restore_token) {
memset(self, 0, sizeof(*self));
dbus_error_init(&self->err);
self->random_str[DBUS_RANDOM_STR_SIZE] = '\0';
- if(!generate_random_characters(self->random_str, DBUS_RANDOM_STR_SIZE, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 62)) {
+ if(!generate_random_characters_standard_alphabet(self->random_str, DBUS_RANDOM_STR_SIZE)) {
fprintf(stderr, "gsr error: gsr_dbus_init: failed to generate random string\n");
return false;
}
diff --git a/src/main.cpp b/src/main.cpp
index 21425d9..b5c5a72 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -6,6 +6,9 @@ extern "C" {
#include "../include/capture/portal.h"
#include "../include/dbus.h"
#endif
+#ifdef GSR_APP_AUDIO
+#include "../include/pipewire_audio.h"
+#endif
#include "../include/encoder/video/nvenc.h"
#include "../include/encoder/video/vaapi.h"
#include "../include/encoder/video/vulkan.h"
@@ -1075,7 +1078,7 @@ static void usage_full() {
fprintf(stderr, "\n");
fprintf(stderr, "OPTIONS:\n");
fprintf(stderr, " -w Window id to record, a display (monitor name), \"screen\", \"screen-direct-force\", \"focused\" or \"portal\".\n");
- fprintf(stderr, " If this is \"portal\" then xdg desktop screencast portal with pipewire will be used. Portal option is only available on Wayland.\n");
+ fprintf(stderr, " If this is \"portal\" then xdg desktop screencast portal with PipeWire will be used. Portal option is only available on Wayland.\n");
fprintf(stderr, " If you select to save the session (token) in the desktop portal capture popup then the session will be saved for the next time you use \"portal\",\n");
fprintf(stderr, " but the session will be ignored unless you run GPU Screen Recorder with the '-restore-portal-session yes' option.\n");
fprintf(stderr, " If this is \"screen\" or \"screen-direct-force\" then all monitors are recorded on Nvidia X11. On AMD/Intel or wayland \"screen\" will record the first monitor found.\n");
@@ -1099,16 +1102,27 @@ static void usage_full() {
fprintf(stderr, " A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\".\n");
fprintf(stderr, " Multiple audio devices can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"alsa_output1|alsa_output2\".\n");
fprintf(stderr, " The audio device can also be \"default_output\" in which case the default output device is used, or \"default_input\" in which case the default input device is used.\n");
- fprintf(stderr, " If the audio device is an empty string then the audio device is ignored.\n");
+ fprintf(stderr, " If the audio device is an empty string then the argument is ignored.\n");
fprintf(stderr, " Optional, no audio track is added by default.\n");
+ fprintf(stderr, " Run GPU Screen Recorder with the --list-audio-devices option to list valid audio devices to use with this -a option.\n");
fprintf(stderr, "\n");
#ifdef GSR_APP_AUDIO
- fprintf(stderr, " -aa Audio device to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device.\n");
- fprintf(stderr, " A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\".\n");
- fprintf(stderr, " Multiple audio devices can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"alsa_output1|alsa_output2\".\n");
- fprintf(stderr, " The audio device can also be \"default_output\" in which case the default output device is used, or \"default_input\" in which case the default input device is used.\n");
- fprintf(stderr, " If the audio device is an empty string then the audio device is ignored.\n");
- fprintf(stderr, " Optional, no audio track is added by default.\n");
+ fprintf(stderr, " -aa Application to record audio from (case-insensitive). Can be specified multiple times. Each time this is specified a new audio track is added for the specified application audio.\n");
+ fprintf(stderr, " Multiple application audio sources can be merged into one audio track by using \"|\" as a separator in one -aa argument, for example: -aa \"firefox|csgo\".\n");
+ fprintf(stderr, " If the application name is an empty string then the argument is ignored.\n");
+ fprintf(stderr, " Optional, no application audio is added by default.\n");
+ fprintf(stderr, " Note: this option is only available when the sound server on the system is PipeWire.\n");
+ fprintf(stderr, " Run GPU Screen Recorder with the --list-application-audio option to list valid application names to use with this -aa option.\n");
+ fprintf(stderr, " It's possible to use an application name that is not listed in --list-application-audio, for example when trying to record audio from an application that hasn't started yet.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " -aai Record audio from all applications except the ones specified with this option (case-insensitive). Can be specified multiple times.\n");
+ fprintf(stderr, " Each time this is specified a new audio track is added that records all applications except the ones specified.\n");
+ fprintf(stderr, " Multiple application audio sources can be merged into one audio track by using \"|\" as a separator in one -aai argument, for example: -aai \"firefox|csgo\".\n");
+ fprintf(stderr, " If the application name is an empty string then the argument is ignored.\n");
+ fprintf(stderr, " Optional, no application audio is added by default.\n");
+ fprintf(stderr, " Note: this option is only available when the sound server on the system is PipeWire.\n");
+ fprintf(stderr, " Run GPU Screen Recorder with the --list-application-audio option to list valid application names to use with this -aai option.\n");
+ fprintf(stderr, " It's possible to use an application name that is not listed in --list-application-audio, for example when trying to record audio and the target application hasn't started yet.\n");
fprintf(stderr, "\n");
#endif
fprintf(stderr, " -q Video quality. Should be either 'medium', 'high', 'very_high' or 'ultra' when using '-bm qp' or '-bm vbr' options, and '-bm qp' is the default option used.\n");
@@ -1181,18 +1195,27 @@ static void usage_full() {
fprintf(stderr, " Optional, set to 'gpu' by default.\n");
fprintf(stderr, "\n");
fprintf(stderr, " --info\n");
- fprintf(stderr, " List info about the system (for use by GPU Screen Recorder UI). Lists the following information (prints them to stdout and exits):\n");
+ fprintf(stderr, " List info about the system. Lists the following information (prints them to stdout and exits):\n");
fprintf(stderr, " Supported video codecs (h264, h264_software, hevc, hevc_hdr, hevc_10bit, av1, av1_hdr, av1_10bit, vp8, vp9 (if supported)).\n");
fprintf(stderr, " Supported capture options (window, focused, screen, monitors and portal, if supported by the system).\n");
fprintf(stderr, " If opengl initialization fails then the program exits with 22, if no usable drm device is found then it exits with 23. On success it exits with 0.\n");
fprintf(stderr, "\n");
fprintf(stderr, " --list-audio-devices\n");
- fprintf(stderr, " List audio devices (for use by GPU Screen Recorder UI). Lists audio devices in the following format (prints them to stdout and exits):\n");
+ fprintf(stderr, " List audio devices. Lists audio devices in the following format (prints them to stdout and exits):\n");
fprintf(stderr, " <audio_device_name>|<audio_device_name_in_human_readable_format>\n");
fprintf(stderr, " For example:\n");
fprintf(stderr, " bluez_input.88:C9:E8:66:A2:27|WH-1000XM4\n");
- fprintf(stderr, " The <audio_device_name> is the name to pass to GPU Screen Recorder in a -a option.\n");
+ fprintf(stderr, " alsa_output.pci-0000_0c_00.4.iec958-stereo|Monitor of Starship/Matisse HD Audio Controller Digital Stereo (IEC958)\n");
+ fprintf(stderr, " The <audio_device_name> is the name that can be passed to GPU Screen Recorder with the -a option.\n");
fprintf(stderr, "\n");
+#ifdef GSR_APP_AUDIO
+ fprintf(stderr, " --list-application-audio\n");
+ fprintf(stderr, " List application audio sources. Lists application audio sources (prints them to stdout and exits), for example:\n");
+ fprintf(stderr, " firefox\n");
+ fprintf(stderr, " csgo\n");
+ fprintf(stderr, " These names are the application audio names that can be passed to GPU Screen Recorder with the -aa option.\n");
+ fprintf(stderr, "\n");
+#endif
fprintf(stderr, " --version\n");
fprintf(stderr, " Print version (%s) and exit\n", GSR_VERSION);
fprintf(stderr, "\n");
@@ -1219,6 +1242,10 @@ static void usage_full() {
fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -sc script.sh -r 60 -o \"$HOME/Videos\"\n", program_name);
fprintf(stderr, " %s -w portal -f 60 -a default_output -restore-portal-session yes -o \"$HOME/Videos/video.mp4\"\n", program_name);
fprintf(stderr, " %s -w screen -f 60 -a default_output -bm cbr -q 15000 -o \"$HOME/Videos/video.mp4\"\n", program_name);
+#ifdef GSR_APP_AUDIO
+ fprintf(stderr, " %s -w screen -f 60 -aa \"firefox|csgo\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -aai \"firefox|csgo\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+#endif
//fprintf(stderr, " gpu-screen-recorder -w screen -f 60 -q ultra -pixfmt yuv444 -o video.mp4\n");
_exit(1);
}
@@ -1375,6 +1402,7 @@ struct AudioDevice {
AVFilterContext *src_filter_ctx = nullptr;
AVFrame *frame = nullptr;
std::thread thread; // TODO: Instead of having a thread for each track, have one thread for all threads and read the data with non-blocking read
+ std::string combined_sink_name;
};
// TODO: Cleanup
@@ -1624,6 +1652,18 @@ static std::vector<AudioInput> parse_audio_input_arg(const char *str, const Audi
return audio_inputs;
}
+static std::vector<AudioInput> parse_app_audio_input_arg(const char *str) {
+ std::vector<AudioInput> audio_inputs;
+ split_string(str, '|', [&](const char *sub, size_t size) {
+ AudioInput audio_input;
+ audio_input.name.assign(sub, size);
+ audio_input.description = audio_input.name;
+ audio_inputs.push_back(std::move(audio_input));
+ return true;
+ });
+ return audio_inputs;
+}
+
// TODO: Does this match all livestreaming cases?
static bool is_livestream_path(const char *str) {
const int len = strlen(str);
@@ -2091,6 +2131,22 @@ static void list_audio_devices_command() {
_exit(0);
}
+static bool app_audio_query_callback(const char *app_name, void*) {
+ puts(app_name);
+ return true;
+}
+
+static void list_application_audio_command() {
+ gsr_pipewire_audio audio;
+ if(gsr_pipewire_audio_init(&audio)) {
+ gsr_pipewire_audio_for_each_app(&audio, app_audio_query_callback, NULL);
+ gsr_pipewire_audio_deinit(&audio);
+ }
+
+ fflush(stdout);
+ _exit(0);
+}
+
static gsr_capture* create_capture_impl(std::string &window_str, vec2i output_resolution, bool wayland, gsr_egl *egl, int fps, VideoCodec video_codec, gsr_color_range color_range,
bool record_cursor, bool use_software_video_encoder, bool restore_portal_session, const char *portal_session_token_filepath,
gsr_color_depth color_depth)
@@ -2273,10 +2329,30 @@ struct Arg {
}
};
+static void match_app_audio_input_to_available_apps(const std::vector<AudioInput> &requested_audio_inputs, const std::vector<std::string> &app_audio_names) {
+ for(const AudioInput &request_audio_input : requested_audio_inputs) {
+ bool match = false;
+ for(const std::string &app_name : app_audio_names) {
+ if(strcasecmp(app_name.c_str(), request_audio_input.name.c_str()) == 0) {
+ match = true;
+ break;
+ }
+ }
+
+ if(!match) {
+ fprintf(stderr, "gsr warning: no audio application with the name \"%s\" was found, expected one of the following:\n", request_audio_input.name.c_str());
+ for(const std::string &app_name : app_audio_names) {
+ fprintf(stderr, " * %s\n", app_name.c_str());
+ }
+ fprintf(stderr, " assuming this is intentional (if you are trying to record audio for applications that haven't started yet).\n");
+ }
+ }
+}
+
// Manually check if the audio inputs we give exist. This is only needed for pipewire, not pulseaudio.
// Pipewire instead DEFAULTS TO THE DEFAULT AUDIO INPUT. THAT'S RETARDED.
// OH, YOU MISSPELLED THE AUDIO INPUT? FUCK YOU
-static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &audio_devices, const Arg &audio_input_arg, bool &uses_amix) {
+static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &audio_devices, const Arg &audio_input_arg, const Arg &app_audio_input_arg, const Arg &app_audio_input_inverted_arg, bool &uses_amix, const std::vector<std::string> &app_audio_names) {
std::vector<MergedAudioInputs> requested_audio_inputs;
uses_amix = false;
@@ -2284,7 +2360,7 @@ static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &aud
if(!audio_input || audio_input[0] == '\0')
continue;
- requested_audio_inputs.push_back({parse_audio_input_arg(audio_input, audio_devices)});
+ requested_audio_inputs.push_back({parse_audio_input_arg(audio_input, audio_devices), AudioInputType::DEVICE, false});
if(requested_audio_inputs.back().audio_inputs.size() > 1)
uses_amix = true;
@@ -2326,6 +2402,22 @@ static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &aud
}
}
+ for(const char *app_audio_input : app_audio_input_arg.values) {
+ if(!app_audio_input || app_audio_input[0] == '\0')
+ continue;
+
+ requested_audio_inputs.push_back({parse_app_audio_input_arg(app_audio_input), AudioInputType::APPLICATION, false});
+ match_app_audio_input_to_available_apps(requested_audio_inputs.back().audio_inputs, app_audio_names);
+ }
+
+ for(const char *app_audio_input : app_audio_input_inverted_arg.values) {
+ if(!app_audio_input || app_audio_input[0] == '\0')
+ continue;
+
+ requested_audio_inputs.push_back({parse_app_audio_input_arg(app_audio_input), AudioInputType::APPLICATION, true});
+ match_app_audio_input_to_available_apps(requested_audio_inputs.back().audio_inputs, app_audio_names);
+ }
+
return requested_audio_inputs;
}
@@ -2614,6 +2706,75 @@ static const AVCodec* select_video_codec_with_fallback(VideoCodec *video_codec,
return pick_video_codec(video_codec, egl, use_software_video_encoder, video_codec_auto, video_codec_to_use, is_flv, low_power);
}
+static std::vector<AudioDevice> create_device_audio_inputs(const std::vector<AudioInput> &audio_inputs, AVCodecContext *audio_codec_context, int num_channels, double num_audio_frames_shift, std::vector<AVFilterContext*> &src_filter_ctx, bool use_amix) {
+ std::vector<AudioDevice> audio_track_audio_devices;
+ for(size_t i = 0; i < audio_inputs.size(); ++i) {
+ const auto &audio_input = audio_inputs[i];
+ AVFilterContext *src_ctx = nullptr;
+ if(use_amix)
+ src_ctx = src_filter_ctx[i];
+
+ AudioDevice audio_device;
+ audio_device.audio_input = audio_input;
+ audio_device.src_filter_ctx = src_ctx;
+
+ if(audio_input.name.empty()) {
+ audio_device.sound_device.handle = NULL;
+ audio_device.sound_device.frames = 0;
+ } else {
+ if(sound_device_get_by_name(&audio_device.sound_device, audio_input.name.c_str(), audio_input.description.c_str(), num_channels, audio_codec_context->frame_size, audio_codec_context_get_audio_format(audio_codec_context)) != 0) {
+ fprintf(stderr, "Error: failed to get \"%s\" audio device\n", audio_input.name.c_str());
+ _exit(1);
+ }
+ }
+
+ audio_device.frame = create_audio_frame(audio_codec_context);
+ audio_device.frame->pts = -audio_codec_context->frame_size * num_audio_frames_shift;
+
+ audio_track_audio_devices.push_back(std::move(audio_device));
+ }
+ return audio_track_audio_devices;
+}
+
+static AudioDevice create_application_audio_audio_input(const MergedAudioInputs &merged_audio_inputs, AVCodecContext *audio_codec_context, int num_channels, double num_audio_frames_shift, gsr_pipewire_audio *pipewire_audio) {
+ AudioDevice audio_device;
+ audio_device.frame = create_audio_frame(audio_codec_context);
+ audio_device.frame->pts = -audio_codec_context->frame_size * num_audio_frames_shift;
+
+ char random_str[8];
+ if(!generate_random_characters_standard_alphabet(random_str, sizeof(random_str))) {
+ fprintf(stderr, "gsr error: failed to generate random string\n");
+ _exit(1);
+ }
+ audio_device.combined_sink_name = "gsr-combined-";
+ audio_device.combined_sink_name.append(random_str, sizeof(random_str));
+
+ if(sound_device_create_combined_sink_connect(&audio_device.sound_device, audio_device.combined_sink_name.c_str(), num_channels, audio_codec_context->frame_size, audio_codec_context_get_audio_format(audio_codec_context)) != 0) {
+ fprintf(stderr, "Error: failed to setup audio recording to combined sink\n");
+ _exit(1);
+ }
+
+ std::vector<const char*> app_names;
+ app_names.reserve(merged_audio_inputs.audio_inputs.size());
+ for(const auto &audio_input : merged_audio_inputs.audio_inputs) {
+ app_names.push_back(audio_input.name.c_str());
+ }
+
+ if(merged_audio_inputs.inverted) {
+ if(!gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(pipewire_audio, app_names.data(), app_names.size(), audio_device.combined_sink_name.c_str())) {
+ fprintf(stderr, "gsr error: failed to add application audio link\n");
+ _exit(1);
+ }
+ } else {
+ if(!gsr_pipewire_audio_add_link_from_apps_to_sink(pipewire_audio, app_names.data(), app_names.size(), audio_device.combined_sink_name.c_str())) {
+ fprintf(stderr, "gsr error: failed to add application audio link\n");
+ _exit(1);
+ }
+ }
+
+ return audio_device;
+}
+
int main(int argc, char **argv) {
setlocale(LC_ALL, "C"); // Sigh... stupid C
@@ -2658,6 +2819,11 @@ int main(int argc, char **argv) {
_exit(0);
}
+ if(argc == 2 && strcmp(argv[1], "--list-application-audio") == 0) {
+ list_application_audio_command();
+ _exit(0);
+ }
+
if(argc == 2 && strcmp(argv[1], "--version") == 0) {
puts(GSR_VERSION);
_exit(0);
@@ -2671,6 +2837,10 @@ int main(int argc, char **argv) {
{ "-f", Arg { {}, false, false } },
{ "-s", Arg { {}, true, false } },
{ "-a", Arg { {}, true, true } },
+#ifdef GSR_APP_AUDIO
+ { "-aa", Arg { {}, true, true } },
+ { "-aai", Arg { {}, true, true } },
+#endif
{ "-q", Arg { {}, true, false } },
{ "-o", Arg { {}, true, false } },
{ "-r", Arg { {}, true, false } },
@@ -2682,7 +2852,6 @@ int main(int argc, char **argv) {
{ "-bm", Arg { {}, true, false } },
{ "-pixfmt", Arg { {}, true, false } },
{ "-v", Arg { {}, true, false } },
- { "-mf", Arg { {}, true, false } }, // TODO: Remove, this exists for backwards compatibility. -df should be used instead
{ "-df", Arg { {}, true, false } },
{ "-sc", Arg { {}, true, false } },
{ "-cr", Arg { {}, true, false } },
@@ -2866,11 +3035,6 @@ int main(int argc, char **argv) {
bool date_folders = false;
const char *date_folders_str = args["-df"].value();
- if(!date_folders_str) {
- date_folders_str = args["-mf"].value();
- if(date_folders_str)
- fprintf(stderr, "Warning: -mf is deprecated, use -df instead\n");
- }
if(!date_folders_str)
date_folders_str = "no";
@@ -2935,12 +3099,35 @@ int main(int argc, char **argv) {
}
const Arg &audio_input_arg = args["-a"];
+ const Arg &app_audio_input_arg = args["-aa"];
+ const Arg &app_audio_input_inverted_arg = args["-aai"];
+
AudioDevices audio_devices;
if(!audio_input_arg.values.empty())
audio_devices = get_pulseaudio_inputs();
+ bool uses_app_audio = false;
+ if(!app_audio_input_arg.values.empty() || !app_audio_input_inverted_arg.values.empty())
+ uses_app_audio = true;
+
+ std::vector<std::string> app_audio_names;
+ gsr_pipewire_audio pipewire_audio;
+ memset(&pipewire_audio, 0, sizeof(pipewire_audio));
+ if(uses_app_audio) {
+ if(!gsr_pipewire_audio_init(&pipewire_audio)) {
+ fprintf(stderr, "gsr error: failed to setup PipeWire audio for application audio capture. The likely reason for this failure is that your sound server is not PipeWire\n");
+ _exit(2);
+ }
+
+ gsr_pipewire_audio_for_each_app(&pipewire_audio, [](const char *app_name, void *userdata) {
+ std::vector<std::string> *app_audio_names = (std::vector<std::string>*)userdata;
+ app_audio_names->push_back(app_name);
+ return true;
+ }, &app_audio_names);
+ }
+
bool uses_amix = false;
- std::vector<MergedAudioInputs> requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg, uses_amix);
+ std::vector<MergedAudioInputs> requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg, app_audio_input_arg, app_audio_input_inverted_arg, uses_amix, app_audio_names);
const char *container_format = args["-c"].value();
if(container_format && strcmp(container_format, "mkv") == 0)
@@ -3248,7 +3435,7 @@ int main(int argc, char **argv) {
const double target_fps = 1.0 / (double)fps;
if(video_codec_is_hdr(video_codec) && is_portal_capture) {
- fprintf(stderr, "Warning: portal capture option doesn't support hdr yet (pipewire doesn't support hdr), the video will be tonemapped from hdr to sdr\n");
+ fprintf(stderr, "Warning: portal capture option doesn't support hdr yet (PipeWire doesn't support hdr), the video will be tonemapped from hdr to sdr\n");
video_codec = hdr_video_codec_to_sdr_video_codec(video_codec);
}
@@ -3362,7 +3549,7 @@ int main(int argc, char **argv) {
std::vector<AVFilterContext*> src_filter_ctx;
AVFilterGraph *graph = nullptr;
AVFilterContext *sink = nullptr;
- if(use_amix) {
+ if(use_amix && merged_audio_inputs.type == AudioInputType::DEVICE) {
int err = init_filter_graph(audio_codec_context, &graph, &sink, src_filter_ctx, merged_audio_inputs.audio_inputs.size());
if(err < 0) {
fprintf(stderr, "Error: failed to create audio filter\n");
@@ -3379,30 +3566,13 @@ int main(int argc, char **argv) {
const double num_audio_frames_shift = audio_startup_time_seconds / timeout_sec;
std::vector<AudioDevice> audio_track_audio_devices;
- for(size_t i = 0; i < merged_audio_inputs.audio_inputs.size(); ++i) {
- auto &audio_input = merged_audio_inputs.audio_inputs[i];
- AVFilterContext *src_ctx = nullptr;
- if(use_amix)
- src_ctx = src_filter_ctx[i];
-
- AudioDevice audio_device;
- audio_device.audio_input = audio_input;
- audio_device.src_filter_ctx = src_ctx;
-
- if(audio_input.name.empty()) {
- audio_device.sound_device.handle = NULL;
- audio_device.sound_device.frames = 0;
- } else {
- if(sound_device_get_by_name(&audio_device.sound_device, audio_input.name.c_str(), audio_input.description.c_str(), num_channels, audio_codec_context->frame_size, audio_codec_context_get_audio_format(audio_codec_context)) != 0) {
- fprintf(stderr, "Error: failed to get \"%s\" sound device\n", audio_input.name.c_str());
- _exit(1);
- }
- }
-
- audio_device.frame = create_audio_frame(audio_codec_context);
- audio_device.frame->pts = -audio_codec_context->frame_size * num_audio_frames_shift;
-
- audio_track_audio_devices.push_back(std::move(audio_device));
+ switch(merged_audio_inputs.type) {
+ case AudioInputType::DEVICE:
+ audio_track_audio_devices = create_device_audio_inputs(merged_audio_inputs.audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, src_filter_ctx, use_amix);
+ break;
+ case AudioInputType::APPLICATION:
+ audio_track_audio_devices.push_back(create_application_audio_audio_input(merged_audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, &pipewire_audio));
+ break;
}
AudioTrack audio_track;
@@ -3883,6 +4053,7 @@ int main(int argc, char **argv) {
gsr_color_conversion_deinit(&color_conversion);
gsr_video_encoder_destroy(video_encoder, video_codec_context);
gsr_capture_destroy(capture, video_codec_context);
+ gsr_pipewire_audio_deinit(&pipewire_audio);
if(replay_buffer_size_secs == -1 && recording_saved_script)
run_recording_saved_script_async(recording_saved_script, filename, "regular");
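With the -aa/-aai plumbing above, each argument value becomes one MergedAudioInputs entry (one audio track), and names separated by "|" within a single value share that track. A sketch of roughly what parse_audio_inputs() produces for -aa "firefox|csgo" plus -aai "discord", using the types from include/sound.hpp (the application names are just examples):

    #include "../include/sound.hpp"

    // Sketch of the track layout for: -aa "firefox|csgo" -aai "discord"
    static std::vector<MergedAudioInputs> example_tracks() {
        std::vector<MergedAudioInputs> tracks;

        // Track 0, from -aa "firefox|csgo": one combined sink, firefox and csgo linked into it
        MergedAudioInputs apps;
        apps.type = AudioInputType::APPLICATION;
        apps.inverted = false;
        AudioInput firefox; firefox.name = "firefox"; firefox.description = "firefox";
        AudioInput csgo;    csgo.name = "csgo";       csgo.description = "csgo";
        apps.audio_inputs.push_back(firefox);
        apps.audio_inputs.push_back(csgo);
        tracks.push_back(apps);

        // Track 1, from -aai "discord": everything except discord linked into a second sink
        MergedAudioInputs everything_else;
        everything_else.type = AudioInputType::APPLICATION;
        everything_else.inverted = true;
        AudioInput discord; discord.name = "discord"; discord.description = "discord";
        everything_else.audio_inputs.push_back(discord);
        tracks.push_back(everything_else);

        return tracks; // one combined sink is created per track
    }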
diff --git a/src/pipewire_audio.c b/src/pipewire_audio.c
index 2c18432..122895a 100644
--- a/src/pipewire_audio.c
+++ b/src/pipewire_audio.c
@@ -1 +1,405 @@
-#include "../include/pipewire_audio.h"
\ No newline at end of file
+#include "../include/pipewire_audio.h"
+
+#include <pipewire/pipewire.h>
+
+static void on_core_info_cb(void *user_data, const struct pw_core_info *info) {
+ gsr_pipewire_audio *self = user_data;
+ //fprintf(stderr, "server name: %s\n", info->name);
+}
+
+static void on_core_error_cb(void *user_data, uint32_t id, int seq, int res, const char *message) {
+ gsr_pipewire_audio *self = user_data;
+ //fprintf(stderr, "gsr error: pipewire: error id:%u seq:%d res:%d: %s\n", id, seq, res, message);
+ pw_thread_loop_signal(self->thread_loop, false);
+}
+
+static void on_core_done_cb(void *user_data, uint32_t id, int seq) {
+ gsr_pipewire_audio *self = user_data;
+ if(id == PW_ID_CORE && self->server_version_sync == seq)
+ pw_thread_loop_signal(self->thread_loop, false);
+}
+
+static const struct pw_core_events core_events = {
+ PW_VERSION_CORE_EVENTS,
+ .info = on_core_info_cb,
+ .done = on_core_done_cb,
+ .error = on_core_error_cb,
+};
+
+static gsr_pipewire_audio_node* gsr_pipewire_audio_get_node_by_name_case_insensitive(gsr_pipewire_audio *self, const char *node_name, gsr_pipewire_audio_node_type node_type) {
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ const gsr_pipewire_audio_node *node = &self->stream_nodes[i];
+ if(node->type == node_type && strcasecmp(node->name, node_name) == 0)
+ return &self->stream_nodes[i];
+ }
+ return NULL;
+}
+
+static gsr_pipewire_audio_port* gsr_pipewire_audio_get_node_port_by_name(gsr_pipewire_audio *self, uint32_t node_id, const char *port_name) {
+ for(int i = 0; i < self->num_ports; ++i) {
+ if(self->ports[i].node_id == node_id && strcmp(self->ports[i].name, port_name) == 0)
+ return &self->ports[i];
+ }
+ return NULL;
+}
+
+static bool requested_link_matches_name_case_insensitive(const gsr_pipewire_audio_requested_link *requested_link, const char *name) {
+ for(int i = 0; i < requested_link->num_app_names; ++i) {
+ if(strcasecmp(requested_link->app_names[i], name) == 0)
+ return true;
+ }
+ return false;
+}
+
+static void gsr_pipewire_audio_create_link(gsr_pipewire_audio *self, const gsr_pipewire_audio_requested_link *requested_link) {
+ const gsr_pipewire_audio_node_type requested_link_node_type = requested_link->output_type == GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM ? GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT : GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK;
+ const gsr_pipewire_audio_node *stream_input_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, requested_link->output_name, requested_link_node_type);
+ if(!stream_input_node)
+ return;
+
+ const gsr_pipewire_audio_port *stream_input_fl_port = NULL;
+ const gsr_pipewire_audio_port *stream_input_fr_port = NULL;
+
+ switch(requested_link->output_type) {
+ case GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM: {
+ stream_input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "input_FL");
+ stream_input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "input_FR");
+ break;
+ }
+ case GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK: {
+ stream_input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "playback_FL");
+ stream_input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "playback_FR");
+ break;
+ }
+ }
+
+ if(!stream_input_fl_port || !stream_input_fr_port)
+ return;
+
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ const gsr_pipewire_audio_node *app_node = &self->stream_nodes[i];
+ if(app_node->type != GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT)
+ continue;
+
+ const bool requested_link_matches_app = requested_link_matches_name_case_insensitive(requested_link, app_node->name);
+ if(requested_link->inverted) {
+ if(requested_link_matches_app)
+ continue;
+ } else {
+ if(!requested_link_matches_app)
+ continue;
+ }
+
+ const gsr_pipewire_audio_port *app_output_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, app_node->id, "output_FL");
+ const gsr_pipewire_audio_port *app_output_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, app_node->id, "output_FR");
+ if(!app_output_fl_port || !app_output_fr_port)
+ continue;
+
+ // TODO: Detect if link already exists before so we dont create these proxies when not needed
+
+ //fprintf(stderr, "linking!\n");
+ // TODO: error check and cleanup
+ {
+ struct pw_properties *props = pw_properties_new(NULL, NULL);
+ pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%u", app_output_fl_port->id);
+ pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%u", stream_input_fl_port->id);
+ // TODO: Clean this up when removing node
+ struct pw_proxy *proxy = pw_core_create_object(self->core, "link-factory", PW_TYPE_INTERFACE_Link, PW_VERSION_LINK, &props->dict, 0);
+ //self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
+ pw_properties_free(props);
+ }
+
+ {
+ struct pw_properties *props = pw_properties_new(NULL, NULL);
+ pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%u", app_output_fr_port->id);
+ pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%u", stream_input_fr_port->id);
+ // TODO: Clean this up when removing node
+ struct pw_proxy *proxy = pw_core_create_object(self->core, "link-factory", PW_TYPE_INTERFACE_Link, PW_VERSION_LINK, &props->dict, 0);
+ //self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
+ pw_properties_free(props);
+ }
+ }
+}
+
+static void gsr_pipewire_audio_create_links(gsr_pipewire_audio *self) {
+ for(int j = 0; j < self->num_requested_links; ++j) {
+ gsr_pipewire_audio_create_link(self, &self->requested_links[j]);
+ }
+}
+
+static void registry_event_global(void *data, uint32_t id, uint32_t permissions,
+ const char *type, uint32_t version,
+ const struct spa_dict *props)
+{
+ //fprintf(stderr, "add: id: %d, type: %s\n", (int)id, type);
+ if (props == NULL)
+ return;
+
+ //pw_properties_new_dict(props);
+
+ gsr_pipewire_audio *self = (gsr_pipewire_audio*)data;
+ if(strcmp(type, PW_TYPE_INTERFACE_Node) == 0) {
+ const char *node_name = spa_dict_lookup(props, PW_KEY_NODE_NAME);
+ const char *media_class = spa_dict_lookup(props, PW_KEY_MEDIA_CLASS);
+ //fprintf(stderr, " node name: %s, media class: %s\n", node_name, media_class);
+ const bool is_stream_output = media_class && strcmp(media_class, "Stream/Output/Audio") == 0;
+ const bool is_stream_input = media_class && strcmp(media_class, "Stream/Input/Audio") == 0;
+ const bool is_sink = media_class && strcmp(media_class, "Audio/Sink") == 0;
+ if(self->num_stream_nodes < GSR_PIPEWIRE_AUDIO_MAX_STREAM_NODES && node_name && (is_stream_output || is_stream_input || is_sink)) {
+ //const char *application_binary = spa_dict_lookup(props, PW_KEY_APP_PROCESS_BINARY);
+ //const char *application_name = spa_dict_lookup(props, PW_KEY_APP_NAME);
+ //fprintf(stderr, " node name: %s, app binary: %s, app name: %s\n", node_name, application_binary, application_name);
+
+ char *node_name_copy = strdup(node_name);
+ if(node_name_copy) {
+ self->stream_nodes[self->num_stream_nodes].id = id;
+ self->stream_nodes[self->num_stream_nodes].name = node_name_copy;
+ if(is_stream_output)
+ self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT;
+ else if(is_stream_input)
+ self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT;
+ else if(is_sink)
+ self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK;
+ ++self->num_stream_nodes;
+
+ gsr_pipewire_audio_create_links(self);
+ }
+ }
+ } else if(strcmp(type, PW_TYPE_INTERFACE_Port) == 0) {
+ const char *port_name = spa_dict_lookup(props, PW_KEY_PORT_NAME);
+
+ const char *port_direction = spa_dict_lookup(props, PW_KEY_PORT_DIRECTION);
+ gsr_pipewire_audio_port_direction direction = -1;
+ if(port_direction && strcmp(port_direction, "in") == 0)
+ direction = GSR_PIPEWIRE_AUDIO_PORT_DIRECTION_INPUT;
+ else if(port_direction && strcmp(port_direction, "out") == 0)
+ direction = GSR_PIPEWIRE_AUDIO_PORT_DIRECTION_OUTPUT;
+
+ const char *node_id = spa_dict_lookup(props, PW_KEY_NODE_ID);
+ const int node_id_num = node_id ? atoi(node_id) : 0;
+
+ if(self->num_ports < GSR_PIPEWIRE_AUDIO_MAX_PORTS && port_name && direction >= 0 && node_id_num > 0) {
+ //fprintf(stderr, " port name: %s, node id: %d, direction: %s\n", port_name, node_id_num, port_direction);
+ char *port_name_copy = strdup(port_name);
+ if(port_name_copy) {
+ self->ports[self->num_ports].id = id;
+ self->ports[self->num_ports].node_id = node_id_num;
+ self->ports[self->num_ports].direction = direction;
+ self->ports[self->num_ports].name = port_name_copy;
+ ++self->num_ports;
+
+ gsr_pipewire_audio_create_links(self);
+ }
+ }
+ }
+}
+
+static bool gsr_pipewire_audio_remove_node_by_id(gsr_pipewire_audio *self, uint32_t node_id) {
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ if(self->stream_nodes[i].id != node_id)
+ continue;
+
+ free(self->stream_nodes[i].name);
+ for(int j = i + 1; j < self->num_stream_nodes; ++j) {
+ self->stream_nodes[j - 1] = self->stream_nodes[j];
+ }
+ --self->num_stream_nodes;
+ return true;
+ }
+ return false;
+}
+
+static bool gsr_pipewire_audio_remove_port_by_id(gsr_pipewire_audio *self, uint32_t port_id) {
+ for(int i = 0; i < self->num_ports; ++i) {
+ if(self->ports[i].id != port_id)
+ continue;
+
+ free(self->ports[i].name);
+ for(int j = i + 1; j < self->num_ports; ++j) {
+ self->ports[j - 1] = self->ports[j];
+ }
+ --self->num_ports;
+ return true;
+ }
+ return false;
+}
+
+static void registry_event_global_remove(void *data, uint32_t id) {
+ //fprintf(stderr, "remove: %d\n", (int)id);
+ gsr_pipewire_audio *self = (gsr_pipewire_audio*)data;
+ if(gsr_pipewire_audio_remove_node_by_id(self, id)) {
+ //fprintf(stderr, "removed node\n");
+ return;
+ }
+
+ if(gsr_pipewire_audio_remove_port_by_id(self, id)) {
+ //fprintf(stderr, "removed port\n");
+ return;
+ }
+}
+
+static const struct pw_registry_events registry_events = {
+ PW_VERSION_REGISTRY_EVENTS,
+ .global = registry_event_global,
+ .global_remove = registry_event_global_remove,
+};
+
+bool gsr_pipewire_audio_init(gsr_pipewire_audio *self) {
+ memset(self, 0, sizeof(*self));
+
+ pw_init(NULL, NULL);
+
+ self->thread_loop = pw_thread_loop_new("gsr screen capture", NULL);
+ if(!self->thread_loop) {
+ fprintf(stderr, "gsr error: gsr_pipewire_audio_init: failed to create pipewire thread\n");
+ gsr_pipewire_audio_deinit(self);
+ return false;
+ }
+
+ self->context = pw_context_new(pw_thread_loop_get_loop(self->thread_loop), NULL, 0);
+ if(!self->context) {
+ fprintf(stderr, "gsr error: gsr_pipewire_audio_init: failed to create pipewire context\n");
+ gsr_pipewire_audio_deinit(self);
+ return false;
+ }
+
+ if(pw_thread_loop_start(self->thread_loop) < 0) {
+ fprintf(stderr, "gsr error: gsr_pipewire_audio_init: failed to start thread\n");
+ gsr_pipewire_audio_deinit(self);
+ return false;
+ }
+
+ pw_thread_loop_lock(self->thread_loop);
+
+ self->core = pw_context_connect(self->context, pw_properties_new(PW_KEY_REMOTE_NAME, NULL, NULL), 0);
+ if(!self->core) {
+ pw_thread_loop_unlock(self->thread_loop);
+ gsr_pipewire_audio_deinit(self);
+ return false;
+ }
+
+ // TODO: Error check
+ pw_core_add_listener(self->core, &self->core_listener, &core_events, self);
+
+ self->registry = pw_core_get_registry(self->core, PW_VERSION_REGISTRY, 0);
+ pw_registry_add_listener(self->registry, &self->registry_listener, &registry_events, self);
+
+ self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, 0);
+ pw_thread_loop_wait(self->thread_loop);
+
+ pw_thread_loop_unlock(self->thread_loop);
+ return true;
+}
+
+void gsr_pipewire_audio_deinit(gsr_pipewire_audio *self) {
+ if(self->thread_loop) {
+ //pw_thread_loop_wait(self->thread_loop);
+ pw_thread_loop_stop(self->thread_loop);
+ }
+
+ if(self->core) {
+ pw_core_disconnect(self->core);
+ self->core = NULL;
+ }
+
+ if(self->context) {
+ pw_context_destroy(self->context);
+ self->context = NULL;
+ }
+
+ if(self->thread_loop) {
+ pw_thread_loop_destroy(self->thread_loop);
+ self->thread_loop = NULL;
+ }
+
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ free(self->stream_nodes[i].name);
+ }
+ self->num_stream_nodes = 0;
+
+ for(int i = 0; i < self->num_ports; ++i) {
+ free(self->ports[i].name);
+ }
+ self->num_ports = 0;
+
+ for(int i = 0; i < self->num_requested_links; ++i) {
+ for(int j = 0; j < self->requested_links[i].num_app_names; ++j) {
+ free(self->requested_links[i].app_names[j]);
+ }
+ free(self->requested_links[i].app_names);
+ free(self->requested_links[i].output_name);
+ }
+ self->num_requested_links = 0;
+
+#if PW_CHECK_VERSION(0, 3, 49)
+ pw_deinit();
+#endif
+}
+
+static bool gsr_pipewire_audio_add_link_from_apps_to_output(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *output_name, gsr_pipewire_audio_link_output_type output_type, bool inverted) {
+ if(self->num_requested_links >= GSR_PIPEWIRE_AUDIO_MAX_REQUESTED_LINKS)
+ return false;
+
+ char **app_names_output_copy = calloc(num_app_names_output, sizeof(char*));
+ if(!app_names_output_copy)
+ return false;
+
+ char *output_name_copy = strdup(output_name);
+ if(!output_name_copy)
+ goto error;
+
+ for(int i = 0; i < num_app_names_output; ++i) {
+ app_names_output_copy[i] = strdup(app_names_output[i]);
+ if(!app_names_output_copy[i])
+ goto error;
+ }
+
+ pw_thread_loop_lock(self->thread_loop);
+ self->requested_links[self->num_requested_links].app_names = app_names_output_copy;
+ self->requested_links[self->num_requested_links].num_app_names = num_app_names_output;
+ self->requested_links[self->num_requested_links].output_name = output_name_copy;
+ self->requested_links[self->num_requested_links].output_type = output_type;
+ self->requested_links[self->num_requested_links].inverted = inverted;
+ ++self->num_requested_links;
+ gsr_pipewire_audio_create_link(self, &self->requested_links[self->num_requested_links - 1]);
+ pw_thread_loop_unlock(self->thread_loop);
+
+ return true;
+
+ error:
+ free(output_name_copy);
+ for(int i = 0; i < num_app_names_output; ++i) {
+ free(app_names_output_copy[i]);
+ }
+ free(app_names_output_copy);
+ return false;
+}
+
+bool gsr_pipewire_audio_add_link_from_apps_to_stream(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, stream_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM, false);
+}
+
+bool gsr_pipewire_audio_add_link_from_apps_to_stream_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, stream_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM, true);
+}
+
+bool gsr_pipewire_audio_add_link_from_apps_to_sink(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, sink_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK, false);
+}
+
+bool gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, sink_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK, true);
+}
+
+void gsr_pipewire_audio_for_each_app(gsr_pipewire_audio *self, gsr_pipewire_audio_app_query_callback callback, void *userdata) {
+ pw_thread_loop_lock(self->thread_loop);
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ const gsr_pipewire_audio_node *node = &self->stream_nodes[i];
+ if(node->type != GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT)
+ continue;
+
+ if(!callback(node->name, userdata))
+ break;
+ }
+ pw_thread_loop_unlock(self->thread_loop);
+}
diff --git a/src/pipewire_video.c b/src/pipewire_video.c
index b5f1562..3c6965e 100644
--- a/src/pipewire_video.c
+++ b/src/pipewire_video.c
@@ -446,7 +446,7 @@ static bool gsr_pipewire_video_setup_stream(gsr_pipewire_video *self) {
uint8_t params_buffer[2048];
struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
- self->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL);
+ self->thread_loop = pw_thread_loop_new("gsr screen capture", NULL);
if(!self->thread_loop) {
fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: failed to create pipewire thread\n");
goto error;
diff --git a/src/sound.cpp b/src/sound.cpp
index aea5b91..a5f37af 100644
--- a/src/sound.cpp
+++ b/src/sound.cpp
@@ -42,34 +42,79 @@ struct pa_handle {
int operation_success;
double latency_seconds;
+
+ uint32_t combined_sink_module_index;
};
-static void pa_sound_device_free(pa_handle *s) {
- assert(s);
+static void destroy_combined_sink(pa_handle *p) {
+ // TODO: error handling
+ pa_operation *module_pa = pa_context_unload_module(p->context, p->combined_sink_module_index, NULL, NULL);
+ for(;;) {
+ if(pa_operation_get_state(module_pa) == PA_OPERATION_DONE) {
+ pa_operation_unref(module_pa);
+ break;
+ }
+ pa_mainloop_iterate(p->mainloop, 1, NULL);
+ }
+}
- if (s->stream)
- pa_stream_unref(s->stream);
+static void pa_sound_device_free(pa_handle *p) {
+ assert(p);
- if (s->context) {
- pa_context_disconnect(s->context);
- pa_context_unref(s->context);
+ if(p->combined_sink_module_index != PA_INVALID_INDEX) {
+ destroy_combined_sink(p);
+ p->combined_sink_module_index = PA_INVALID_INDEX;
}
- if (s->mainloop)
- pa_mainloop_free(s->mainloop);
+ if (p->stream) {
+ pa_stream_unref(p->stream);
+ p->stream = NULL;
+ }
- if (s->output_data) {
- free(s->output_data);
- s->output_data = NULL;
+ if (p->context) {
+ pa_context_disconnect(p->context);
+ pa_context_unref(p->context);
+ p->context = NULL;
}
- pa_xfree(s);
+ if (p->mainloop) {
+ pa_mainloop_free(p->mainloop);
+ p->mainloop = NULL;
+ }
+
+ if (p->output_data) {
+ free(p->output_data);
+ p->output_data = NULL;
+ }
+
+ pa_xfree(p);
+}
+
+static void module_index_callback(pa_context*, uint32_t idx, void *userdata) {
+ pa_handle *p = (pa_handle*)userdata;
+ p->combined_sink_module_index = idx;
+}
+
+static bool create_combined_sink(pa_handle *p, const char *combined_sink_name) {
+ // TODO: Error handling
+ char module_argument[256];
+ snprintf(module_argument, sizeof(module_argument), "sink_name=\"%s\" slaves= adjust_time=0", combined_sink_name);
+ pa_operation *module_pa = pa_context_load_module(p->context, "module-combine-sink", module_argument, module_index_callback, p);
+ for(;;) {
+ if(pa_operation_get_state(module_pa) == PA_OPERATION_DONE) {
+ pa_operation_unref(module_pa);
+ break;
+ }
+ pa_mainloop_iterate(p->mainloop, 1, NULL);
+ }
+ return p->combined_sink_module_index != PA_INVALID_INDEX;
}
static pa_handle* pa_sound_device_new(const char *server,
const char *name,
const char *dev,
const char *stream_name,
+ const char *combined_sink_name,
const pa_sample_spec *ss,
const pa_buffer_attr *attr,
int *rerror) {
@@ -77,10 +122,7 @@ static pa_handle* pa_sound_device_new(const char *server,
int error = PA_ERR_INTERNAL, r;
p = pa_xnew0(pa_handle, 1);
- p->read_data = NULL;
- p->read_length = 0;
- p->read_index = 0;
- p->latency_seconds = 0.0;
+ p->combined_sink_module_index = PA_INVALID_INDEX;
const int buffer_size = attr->fragsize;
void *buffer = malloc(buffer_size);
@@ -119,12 +161,23 @@ static pa_handle* pa_sound_device_new(const char *server,
pa_mainloop_iterate(p->mainloop, 1, NULL);
}
+ char device_to_record[256];
+ if(combined_sink_name) {
+ if(!create_combined_sink(p, combined_sink_name)) {
+ fprintf(stderr, "gsr error: pa_sound_device_new: failed to create module-combine-sink\n");
+ goto fail;
+ }
+ snprintf(device_to_record, sizeof(device_to_record), "%s.monitor", combined_sink_name);
+ } else {
+ snprintf(device_to_record, sizeof(device_to_record), "%s", dev);
+ }
+
if (!(p->stream = pa_stream_new(p->context, stream_name, ss, NULL))) {
error = pa_context_errno(p->context);
goto fail;
}
- r = pa_stream_connect_record(p->stream, dev, attr,
+ r = pa_stream_connect_record(p->stream, device_to_record, attr,
(pa_stream_flags_t)(PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_ADJUST_LATENCY|PA_STREAM_AUTO_TIMING_UPDATE));
if (r < 0) {
@@ -259,7 +312,7 @@ static int audio_format_to_get_bytes_per_sample(AudioFormat audio_format) {
return 2;
}
-int sound_device_get_by_name(SoundDevice *device, const char *device_name, const char *description, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format) {
+static int sound_device_setup_record(SoundDevice *device, const char *device_name, const char *description, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format, const char *combined_sink_name) {
pa_sample_spec ss;
ss.format = audio_format_to_pulse_audio_format(audio_format);
ss.rate = 48000;
@@ -273,7 +326,7 @@ int sound_device_get_by_name(SoundDevice *device, const char *device_name, const
buffer_attr.maxlength = buffer_attr.fragsize;
int error = 0;
- pa_handle *handle = pa_sound_device_new(nullptr, description, device_name, description, &ss, &buffer_attr, &error);
+ pa_handle *handle = pa_sound_device_new(nullptr, description, device_name, description, combined_sink_name, &ss, &buffer_attr, &error);
if(!handle) {
fprintf(stderr, "pa_sound_device_new() failed: %s. Audio input device %s might not be valid\n", pa_strerror(error), description);
return -1;
@@ -284,6 +337,14 @@ int sound_device_get_by_name(SoundDevice *device, const char *device_name, const
return 0;
}
+int sound_device_get_by_name(SoundDevice *device, const char *device_name, const char *description, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format) {
+ return sound_device_setup_record(device, device_name, description, num_channels, period_frame_size, audio_format, NULL);
+}
+
+int sound_device_create_combined_sink_connect(SoundDevice *device, const char *combined_sink_name, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format) {
+ return sound_device_setup_record(device, "gpu-screen-recorder", "gpu-screen-recorder", num_channels, period_frame_size, audio_format, combined_sink_name);
+}
+
void sound_device_close(SoundDevice *device) {
if(device->handle)
pa_sound_device_free((pa_handle*)device->handle);
@@ -322,8 +383,7 @@ static void pa_state_cb(pa_context *c, void *userdata) {
}
}
-static void pa_sourcelist_cb(pa_context *ctx, const pa_source_info *source_info, int eol, void *userdata) {
- (void)ctx;
+static void pa_sourcelist_cb(pa_context*, const pa_source_info *source_info, int eol, void *userdata) {
if(eol > 0)
return;
@@ -345,6 +405,8 @@ static void get_pulseaudio_default_inputs(AudioDevices &audio_devices) {
pa_operation *pa_op = NULL;
pa_mainloop *main_loop = pa_mainloop_new();
+ if(!main_loop)
+ return;
pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder");
if(pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL) < 0)
@@ -392,6 +454,8 @@ AudioDevices get_pulseaudio_inputs() {
get_pulseaudio_default_inputs(audio_devices);
pa_mainloop *main_loop = pa_mainloop_new();
+ if(!main_loop)
+ return audio_devices;
pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder");
if(pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL) < 0)
diff --git a/src/utils.c b/src/utils.c
index a8e4dc2..e871126 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -7,6 +7,7 @@
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
+#include <sys/random.h>
#include <errno.h>
#include <assert.h>
@@ -27,6 +28,25 @@ double clock_get_monotonic_seconds(void) {
return (double)ts.tv_sec + (double)ts.tv_nsec * 0.000000001;
}
+bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
+ /* TODO: Use other functions on other platforms than linux */
+ if(getrandom(buffer, buffer_size, 0) < buffer_size) {
+ fprintf(stderr, "Failed to get random bytes, error: %s\n", strerror(errno));
+ return false;
+ }
+
+ for(int i = 0; i < buffer_size; ++i) {
+ unsigned char c = *(unsigned char*)&buffer[i];
+ buffer[i] = alphabet[c % alphabet_size];
+ }
+
+ return true;
+}
+
+bool generate_random_characters_standard_alphabet(char *buffer, int buffer_size) {
+ return generate_random_characters(buffer, buffer_size, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 62);
+}
+
static gsr_monitor_rotation wayland_transform_to_gsr_rotation(int32_t rot) {
switch(rot) {
case 0: return GSR_MONITOR_ROT_0;