-rw-r--r--  README.md                 |   9
-rw-r--r--  include/pipewire_audio.h  |  58
-rw-r--r--  include/sound.hpp         |   4
-rw-r--r--  src/main.cpp              | 240
-rw-r--r--  src/pipewire_audio.c      | 149
5 files changed, 300 insertions(+), 160 deletions(-)
diff --git a/README.md b/README.md
index 8982bdd..559e0db 100644
--- a/README.md
+++ b/README.md
@@ -129,15 +129,6 @@ The replay buffer is stored in ram (as encoded video), so don't use a too large
To save a video in replay mode, you need to send signal SIGUSR1 to gpu screen recorder. You can do this by running `killall -SIGUSR1 gpu-screen-recorder`.\
To stop recording send SIGINT to gpu screen recorder. You can do this by running `killall -SIGINT gpu-screen-recorder` or pressing `Ctrl-C` in the terminal that runs gpu screen recorder. When recording a regular non-replay video this will also save the video.\
To pause/unpause recording send SIGUSR2 to gpu screen recorder. You can do this by running `killall -SIGUSR2 gpu-screen-recorder`. This is only applicable and useful when recording (not streaming nor replay).\
-## Audio device name
-To record the default output device (desktop audio) you can use the `default_output` option, for example `-a default_output`.\
-To record the default input device (microphone) you can use the `default_input` option, for example `-a default_input`.\
-To list all available audio devices run `gpu-screen-recorder --list-audio-devices`. The name to use with GPU Screen Recorder will be on the left side and the human readable name is on the right side.\
-To record multiple audio devices to multiple audio tracks specify the `-a` option multiple times, for example `-a default_output -a default_input`.\
-To record multiple audio devices into one audio track (merged) specify the `-a` option once split with `|` for each audio device, for example `-a "default_output|default_input"`.\
-In wireplumber the name of the audio will be in the format `gsr-<audio_device>`, but you can change that name by prefixing the audio device with a name and then a forward slash, for example: `-a "name/default_output"`.\
-You can also record audio from specific applications instead of from audio devices by using the `-aa` option to only record the specified applications or `-aai` to record all applications except the ones provided.\
-To list all available application to record from run `gpu-screen-recorder --list-application-audio`.
## Simple way to run replay without gui
Run the script `scripts/start-replay.sh` to start replay and then `scripts/save-replay.sh` to save a replay and `scripts/stop-replay.sh` to stop the replay. The videos are saved to `$HOME/Videos`.
You can use these scripts to start replay at system startup if you add `scripts/start-replay.sh` to startup (this can be done differently depending on your desktop environment / window manager) and then go into
diff --git a/include/pipewire_audio.h b/include/pipewire_audio.h
index 8cfb2d2..1d37eb8 100644
--- a/include/pipewire_audio.h
+++ b/include/pipewire_audio.h
@@ -8,14 +8,14 @@
#include <stdbool.h>
-#define GSR_PIPEWIRE_AUDIO_MAX_STREAM_NODES 64
-#define GSR_PIPEWIRE_AUDIO_MAX_PORTS 64
+#define GSR_PIPEWIRE_AUDIO_MAX_STREAM_NODES 128
+#define GSR_PIPEWIRE_AUDIO_MAX_PORTS 128
#define GSR_PIPEWIRE_AUDIO_MAX_REQUESTED_LINKS 32
typedef enum {
GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, /* Application audio */
GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT, /* Audio recording input */
- GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK
+ GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE /* Audio output or input device or combined (virtual) sink */
} gsr_pipewire_audio_node_type;
typedef struct {
@@ -37,16 +37,17 @@ typedef struct {
} gsr_pipewire_audio_port;
typedef enum {
- GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM,
- GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK
-} gsr_pipewire_audio_link_output_type;
+ GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM, /* Application */
+ GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK /* Combined (virtual) sink */
+} gsr_pipewire_audio_link_input_type;
typedef struct {
- char **app_names;
- int num_app_names;
- char *output_name;
+ char **output_names;
+ int num_output_names;
+ char *input_name;
bool inverted;
- gsr_pipewire_audio_link_output_type output_type;
+ gsr_pipewire_audio_node_type output_type;
+ gsr_pipewire_audio_link_input_type input_type;
} gsr_pipewire_audio_requested_link;
typedef struct {
@@ -72,38 +73,47 @@ bool gsr_pipewire_audio_init(gsr_pipewire_audio *self);
void gsr_pipewire_audio_deinit(gsr_pipewire_audio *self);
/*
- This function links audio source outputs from applications that match the name |app_names_output| to the input
+ This function links audio source outputs from applications that match the name |app_names| to the input
that matches the name |stream_name_input|.
If an application or a new application starts outputting audio after this function is called and the app name matches
then it will automatically link the audio sources.
- |app_names_output| and |stream_name_input| are case-insensitive matches.
+ |app_names| and |stream_name_input| are case-insensitive matches.
*/
-bool gsr_pipewire_audio_add_link_from_apps_to_stream(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input);
+bool gsr_pipewire_audio_add_link_from_apps_to_stream(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *stream_name_input);
/*
- This function links audio source outputs from all applications except the ones that match the name |app_names_output| to the input
+ This function links audio source outputs from all applications except the ones that match the name |app_names| to the input
that matches the name |stream_name_input|.
- If an application or a new application starts outputting audio after this function is called and the app name doesn't matche
+ If an application or a new application starts outputting audio after this function is called and the app name doesn't match
then it will automatically link the audio sources.
- |app_names_output| and |stream_name_input| are case-insensitive matches.
+ |app_names| and |stream_name_input| are case-insensitive matches.
*/
-bool gsr_pipewire_audio_add_link_from_apps_to_stream_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input);
+bool gsr_pipewire_audio_add_link_from_apps_to_stream_inverted(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *stream_name_input);
/*
- This function links audio source outputs from applications that match the name |app_names_output| to the input
+ This function links audio source outputs from applications that match the name |app_names| to the input
that matches the name |sink_name_input|.
If an application or a new application starts outputting audio after this function is called and the app name matches
then it will automatically link the audio sources.
- |app_names_output| and |sink_name_input| are case-insensitive matches.
+ |app_names| and |sink_name_input| are case-insensitive matches.
*/
-bool gsr_pipewire_audio_add_link_from_apps_to_sink(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input);
+bool gsr_pipewire_audio_add_link_from_apps_to_sink(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *sink_name_input);
/*
- This function links audio source outputs from all applications except the ones that match the name |app_names_output| to the input
+ This function links audio source outputs from all applications except the ones that match the name |app_names| to the input
that matches the name |sink_name_input|.
- If an application or a new application starts outputting audio after this function is called and the app name doesn't matche
+ If an application or a new application starts outputting audio after this function is called and the app name doesn't match
then it will automatically link the audio sources.
- |app_names_output| and |sink_name_input| are case-insensitive matches.
+ |app_names| and |sink_name_input| are case-insensitive matches.
*/
-bool gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input);
+bool gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *sink_name_input);
+
+/*
+ This function links audio source outputs from devices that match the name |source_names| to the input
+ that matches the name |sink_name_input|.
+ If a device or a new device starts outputting audio after this function is called and the device name matches
+ then it will automatically link the audio sources.
+ |source_names| and |sink_name_input| are case-insensitive matches.
+*/
+bool gsr_pipewire_audio_add_link_from_sources_to_sink(gsr_pipewire_audio *self, const char **source_names, int num_source_names, const char *sink_name_input);
/* Return true to continue */
typedef bool (*gsr_pipewire_audio_app_query_callback)(const char *app_name, void *userdata);
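
As a usage illustration (not part of this patch), the reworked API can wire a capture device and an application into one combined sink. This is a minimal sketch: the sink name, device name and application name are placeholders, and the combined (virtual) sink is assumed to have been created elsewhere, as src/main.cpp does for application-audio tracks via its combined_sink_name.

```cpp
#include <cstdio>
#include <cstring>
#include "pipewire_audio.h" // included from C++ the same way src/main.cpp uses it

int main() {
    gsr_pipewire_audio audio;
    memset(&audio, 0, sizeof(audio));
    if(!gsr_pipewire_audio_init(&audio)) {
        fprintf(stderr, "failed to init pipewire audio\n");
        return 1;
    }

    // Route a device (Audio/Sink monitor or Audio/Source node) into the combined sink.
    const char *sources[] = { "alsa_output.pci-0000_00_1b.0.analog-stereo.monitor" };
    gsr_pipewire_audio_add_link_from_sources_to_sink(&audio, sources, 1, "gsr-combined-sink");

    // Route one application's output streams into the same sink.
    const char *apps[] = { "Brave" };
    gsr_pipewire_audio_add_link_from_apps_to_sink(&audio, apps, 1, "gsr-combined-sink");

    // ... capture from the combined sink, then tear down.
    gsr_pipewire_audio_deinit(&audio);
    return 0;
}
```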
diff --git a/include/sound.hpp b/include/sound.hpp
index f71b84d..b3e34cc 100644
--- a/include/sound.hpp
+++ b/include/sound.hpp
@@ -34,6 +34,8 @@ enum class AudioInputType {
struct AudioInput {
std::string name;
std::string description;
+ AudioInputType type = AudioInputType::DEVICE;
+ bool inverted = false;
};
struct AudioDevices {
@@ -44,8 +46,6 @@ struct AudioDevices {
struct MergedAudioInputs {
std::vector<AudioInput> audio_inputs;
- AudioInputType type = AudioInputType::DEVICE;
- bool inverted = false;
};
typedef enum {
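
For illustration (not part of the patch), this is roughly how one merged track for `-a "default_output|app:Brave"` is represented now that the type and inverted flags live on each AudioInput instead of on the whole track; the include path is assumed.

```cpp
#include "sound.hpp" // assumed include path

// Roughly what parsing -a "default_output|app:Brave" should yield: one track,
// two inputs, each carrying its own type/inverted flags.
static MergedAudioInputs example_track() {
    AudioInput desktop;
    desktop.name = "default_output";            // resolved to the default output device later
    desktop.type = AudioInputType::DEVICE;

    AudioInput app;
    app.name = "Brave";
    app.description = "Brave";
    app.type = AudioInputType::APPLICATION;
    app.inverted = false;                       // app-inverse: would set this to true

    MergedAudioInputs track;
    track.audio_inputs = { desktop, app };
    return track;
}
```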
diff --git a/src/main.cpp b/src/main.cpp
index 55ad7fa..81b325c 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1067,12 +1067,7 @@ static void open_video_hardware(AVCodecContext *codec_context, VideoQuality vide
static void usage_header() {
const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
-#ifdef GSR_APP_AUDIO
- const char *app_audio_options = " [-aa <application_name>] [-aai <application_name>] ";
-#else
- const char *app_audio_options = "";
-#endif
- fprintf(stderr, "usage: %s -w <window_id|monitor|focused|portal> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>]%s[-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|av1|vp8|vp9|hevc_hdr|av1_hdr|hevc_10bit|av1_10bit] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-bm auto|qp|vbr|cbr] [-cr limited|full] [-df yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>] [-restore-portal-session yes|no] [-portal-session-token-filepath filepath] [-encoder gpu|cpu] [-o <output_file>] [-v yes|no] [--version] [-h|--help]\n", program_name, app_audio_options);
+ fprintf(stderr, "usage: %s -w <window_id|monitor|focused|portal> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>] [-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|av1|vp8|vp9|hevc_hdr|av1_hdr|hevc_10bit|av1_10bit] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-bm auto|qp|vbr|cbr] [-cr limited|full] [-df yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>] [-restore-portal-session yes|no] [-portal-session-token-filepath filepath] [-encoder gpu|cpu] [-o <output_file>] [-v yes|no] [--version] [-h|--help]\n", program_name);
}
// TODO: Update with portal info
@@ -1106,33 +1101,24 @@ static void usage_full() {
fprintf(stderr, " For variable frame rate mode this option is the max frame rate and if the capture frame rate is below this target frame rate then frames will not be duplicated.\n");
fprintf(stderr, " Content frame rate is similar to variable frame rate mode, except the frame rate will match the frame rate of the captured content when possible, but not capturing above the frame rate set in this -f option.\n");
fprintf(stderr, "\n");
- fprintf(stderr, " -a Audio device to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device.\n");
+ fprintf(stderr, " -a Audio device or application to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device or application.\n");
fprintf(stderr, " A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\".\n");
- fprintf(stderr, " Multiple audio devices can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"alsa_output1|alsa_output2\".\n");
+ fprintf(stderr, " Multiple audio sources can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"alsa_output1|alsa_output2\".\n");
fprintf(stderr, " The audio device can also be \"default_output\" in which case the default output device is used, or \"default_input\" in which case the default input device is used.\n");
- fprintf(stderr, " If the audio device is an empty string then the argument is ignored.\n");
- fprintf(stderr, " Optional, no audio track is added by default.\n");
- fprintf(stderr, " Run GPU Screen Recorder with the --list-audio-devices option to list valid audio devices to use with this -a option.\n");
- fprintf(stderr, "\n");
+ fprintf(stderr, " The audio name can also be prefixed with \"device:\", for example: -a \"device:alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\".\n");
#ifdef GSR_APP_AUDIO
- fprintf(stderr, " -aa Application to record audio from (case-insensitive). Can be specified multiple times. Each time this is specified a new audio track is added for the specified application audio.\n");
- fprintf(stderr, " Multiple application audio can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"firefox|csgo\".\n");
- fprintf(stderr, " If the application name is an empty string then the argument is ignored.\n");
- fprintf(stderr, " Optional, no application audio is added by default.\n");
- fprintf(stderr, " Note: this option is only available when the sound server on the system is PipeWire.\n");
- fprintf(stderr, " Run GPU Screen Recorder with the --list-application-audio option to list valid application names to use with this -aa option.\n");
- fprintf(stderr, " It's possible to use an application name that is not listed in --list-application-audio, for example when trying to record audio from an application that hasn't started yet.\n");
- fprintf(stderr, "\n");
- fprintf(stderr, " -aai Record audio from all applications except the ones specified with this option (case-insensitive). Can be specified multiple times.\n");
- fprintf(stderr, " Each time this is specified a new audio track is added that records audio from all applications except the ones specified.\n");
- fprintf(stderr, " Multiple application audio can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"firefox|csgo\".\n");
- fprintf(stderr, " If the application name is an empty string then the argument is ignored.\n");
- fprintf(stderr, " Optional, no application audio is added by default.\n");
- fprintf(stderr, " Note: this option is only available when the sound server on the system is PipeWire.\n");
- fprintf(stderr, " Run GPU Screen Recorder with the --list-application-audio option to list valid application names to use with this -aai option.\n");
- fprintf(stderr, " It's possible to use an application name that is not listed in --list-application-audio, for example when trying to record audio and the target application hasn't started yet.\n");
- fprintf(stderr, "\n");
+ fprintf(stderr, " To record audio from an application then prefix the audio name with \"app:\", for example: -a \"app:Brave\".\n");
+ fprintf(stderr, " To record audio from all applications except the provided use prefix the audio name with \"app-inverse:\", for example: -a \"app-inverse:Brave\".\n");
+ fprintf(stderr, " \"app:\" and \"app-inverse:\" can't be mixed in one audio track.\n");
+ fprintf(stderr, " One audio track can contain both audio devices and application audio, for example: -a \"default_output|device:alsa_output.pci-0000_00_1b.0.analog-stereo.monitor|app:Brave\".\n");
+ fprintf(stderr, " Recording application audio is only possible when the sound server on the system is PipeWire.\n");
#endif
+ fprintf(stderr, " If the audio name is an empty string then the argument is ignored.\n");
+ fprintf(stderr, " Optional, no audio track is added by default.\n");
+ fprintf(stderr, " Run GPU Screen Recorder with the --list-audio-devices option to list valid audio device names.\n");
+ fprintf(stderr, " Run GPU Screen Recorder with the --list-application-audio option to list valid application names. It's possible to use an application name that is not listed in --list-application-audio,\n");
+ fprintf(stderr, " for example when trying to record audio from an application that hasn't started yet.\n");
+ fprintf(stderr, "\n");
fprintf(stderr, " -q Video quality. Should be either 'medium', 'high', 'very_high' or 'ultra' when using '-bm qp' or '-bm vbr' options, and '-bm qp' is the default option used.\n");
fprintf(stderr, " 'high' is the recommended option when live streaming or when you have a slower harddrive.\n");
fprintf(stderr, " When using '-bm cbr' option then this is option is instead used to specify the video bitrate in kbps.\n");
@@ -1260,8 +1246,9 @@ static void usage_full() {
fprintf(stderr, " %s -w portal -f 60 -a default_output -restore-portal-session yes -o \"$HOME/Videos/video.mp4\"\n", program_name);
fprintf(stderr, " %s -w screen -f 60 -a default_output -bm cbr -q 15000 -o \"$HOME/Videos/video.mp4\"\n", program_name);
#ifdef GSR_APP_AUDIO
- fprintf(stderr, " %s -w screen -f 60 -aa \"firefox|csgo\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
- fprintf(stderr, " %s -w screen -f 60 -aai \"firefox|csgo\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a \"app:firefox|app:csgo\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a \"app-inverse:firefox|app-inverse:csgo\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -a \"default-input|app-inverse:Brave\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
#endif
//fprintf(stderr, " gpu-screen-recorder -w screen -f 60 -q ultra -pixfmt yuv444 -o video.mp4\n");
_exit(1);
@@ -1642,6 +1629,11 @@ static void split_string(const std::string &str, char delimiter, std::function<b
}
}
+static bool string_starts_with(const std::string &str, const char *substr) {
+ int len = strlen(substr);
+ return (int)str.size() >= len && memcmp(str.data(), substr, len) == 0;
+}
+
static const AudioInput* get_audio_device_by_name(const std::vector<AudioInput> &audio_inputs, const std::string &name) {
for(const auto &audio_input : audio_inputs) {
if(audio_input.name == name)
@@ -1656,15 +1648,36 @@ static std::vector<AudioInput> parse_audio_input_arg(const char *str, const Audi
AudioInput audio_input;
audio_input.name.assign(sub, size);
- const bool name_is_existing_audio_device = get_audio_device_by_name(audio_devices.audio_inputs, audio_input.name);
- const size_t index = audio_input.name.find('/');
- if(!name_is_existing_audio_device && index != std::string::npos) {
- audio_input.description = audio_input.name.substr(0, index);
- audio_input.name.erase(audio_input.name.begin(), audio_input.name.begin() + index + 1);
+ if(string_starts_with(audio_input.name.c_str(), "app:")) {
+ audio_input.name.erase(audio_input.name.begin(), audio_input.name.begin() + 4);
+ audio_input.description = audio_input.name;
+ audio_input.type = AudioInputType::APPLICATION;
+ audio_input.inverted = false;
+ audio_inputs.push_back(std::move(audio_input));
+ return true;
+ } else if(string_starts_with(audio_input.name.c_str(), "app-inverse:")) {
+ audio_input.name.erase(audio_input.name.begin(), audio_input.name.begin() + 12);
+ audio_input.description = audio_input.name;
+ audio_input.type = AudioInputType::APPLICATION;
+ audio_input.inverted = true;
+ audio_inputs.push_back(std::move(audio_input));
+ return true;
+ } else if(string_starts_with(audio_input.name.c_str(), "device:")) {
+ audio_input.name.erase(audio_input.name.begin(), audio_input.name.begin() + 7);
+ audio_input.type = AudioInputType::DEVICE;
+ audio_inputs.push_back(std::move(audio_input));
+ return true;
+ } else {
+ const bool name_is_existing_audio_device = get_audio_device_by_name(audio_devices.audio_inputs, audio_input.name);
+ const size_t index = audio_input.name.find('/');
+ if(!name_is_existing_audio_device && index != std::string::npos) {
+ audio_input.description = audio_input.name.substr(0, index);
+ audio_input.name.erase(audio_input.name.begin(), audio_input.name.begin() + index + 1);
+ }
+ audio_input.type = AudioInputType::DEVICE;
+ audio_inputs.push_back(std::move(audio_input));
+ return true;
}
-
- audio_inputs.push_back(std::move(audio_input));
- return true;
});
return audio_inputs;
}
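
A standalone sketch of the prefix dispatch added above; it mirrors what parse_audio_input_arg does with a single "|"-separated token (the function name here is made up for illustration only).

```cpp
#include <cstring>
#include <string>

enum class TokenKind { Device, Application, ApplicationInverted };

// Mirrors the new parsing rules: "app:" and "app-inverse:" mark application
// audio, "device:" (or no prefix) marks a device; the prefix is stripped.
static TokenKind classify_audio_token(std::string &token) {
    const auto strip = [&](const char *prefix) {
        const size_t len = strlen(prefix);
        if(token.size() >= len && memcmp(token.data(), prefix, len) == 0) {
            token.erase(0, len);
            return true;
        }
        return false;
    };

    if(strip("app:"))         return TokenKind::Application;
    if(strip("app-inverse:")) return TokenKind::ApplicationInverted;
    strip("device:"); // optional; a plain name (or legacy "<name>/<device>") is also a device
    return TokenKind::Device;
}
```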
@@ -1675,6 +1688,7 @@ static std::vector<AudioInput> parse_app_audio_input_arg(const char *str) {
AudioInput audio_input;
audio_input.name.assign(sub, size);
audio_input.description = audio_input.name;
+ audio_input.type = AudioInputType::APPLICATION;
audio_inputs.push_back(std::move(audio_input));
return true;
});
@@ -2410,6 +2424,9 @@ struct Arg {
static void match_app_audio_input_to_available_apps(const std::vector<AudioInput> &requested_audio_inputs, const std::vector<std::string> &app_audio_names) {
for(const AudioInput &request_audio_input : requested_audio_inputs) {
+ if(request_audio_input.type != AudioInputType::APPLICATION || request_audio_input.inverted)
+ continue;
+
bool match = false;
for(const std::string &app_name : app_audio_names) {
if(strcasecmp(app_name.c_str(), request_audio_input.name.c_str()) == 0) {
@@ -2431,19 +2448,18 @@ static void match_app_audio_input_to_available_apps(const std::vector<AudioInput
// Manually check if the audio inputs we give exist. This is only needed for pipewire, not pulseaudio.
// Pipewire instead DEFAULTS TO THE DEFAULT AUDIO INPUT. THAT'S RETARDED.
// OH, YOU MISSPELLED THE AUDIO INPUT? FUCK YOU
-static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &audio_devices, const Arg &audio_input_arg, const Arg &app_audio_input_arg, const Arg &app_audio_input_inverted_arg, bool &uses_amix, const std::vector<std::string> &app_audio_names) {
+static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &audio_devices, const Arg &audio_input_arg, const Arg &app_audio_input_arg, const Arg &app_audio_input_inverted_arg) {
std::vector<MergedAudioInputs> requested_audio_inputs;
- uses_amix = false;
for(const char *audio_input : audio_input_arg.values) {
if(!audio_input || audio_input[0] == '\0')
continue;
- requested_audio_inputs.push_back({parse_audio_input_arg(audio_input, audio_devices), AudioInputType::DEVICE, false});
- if(requested_audio_inputs.back().audio_inputs.size() > 1)
- uses_amix = true;
-
+ requested_audio_inputs.push_back({parse_audio_input_arg(audio_input, audio_devices)});
for(AudioInput &request_audio_input : requested_audio_inputs.back().audio_inputs) {
+ if(request_audio_input.type != AudioInputType::DEVICE)
+ continue;
+
bool match = false;
if(!audio_devices.default_output.empty() && request_audio_input.name == "default_output") {
@@ -2485,22 +2501,85 @@ static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &aud
if(!app_audio_input || app_audio_input[0] == '\0')
continue;
- requested_audio_inputs.push_back({parse_app_audio_input_arg(app_audio_input), AudioInputType::APPLICATION, false});
- match_app_audio_input_to_available_apps(requested_audio_inputs.back().audio_inputs, app_audio_names);
+ requested_audio_inputs.push_back({parse_app_audio_input_arg(app_audio_input)});
}
for(const char *app_audio_input : app_audio_input_inverted_arg.values) {
if(!app_audio_input || app_audio_input[0] == '\0')
continue;
- requested_audio_inputs.push_back({parse_app_audio_input_arg(app_audio_input), AudioInputType::APPLICATION, true});
- match_app_audio_input_to_available_apps(requested_audio_inputs.back().audio_inputs, app_audio_names);
+ requested_audio_inputs.push_back({parse_app_audio_input_arg(app_audio_input)});
+ for(auto &audio_input : requested_audio_inputs.back().audio_inputs) {
+ audio_input.inverted = true;
+ }
}
return requested_audio_inputs;
}
-static AudioCodec select_audio_codec_with_fallback(AudioCodec audio_codec, const std::string &file_extension,bool uses_amix) {
+static bool audio_inputs_has_app_audio(const std::vector<AudioInput> &audio_inputs) {
+ for(const auto &audio_input : audio_inputs) {
+ if(audio_input.type == AudioInputType::APPLICATION)
+ return true;
+ }
+ return false;
+}
+
+static bool merged_audio_inputs_has_app_audio(const std::vector<MergedAudioInputs> &merged_audio_inputs) {
+ for(const auto &merged_audio_input : merged_audio_inputs) {
+ if(audio_inputs_has_app_audio(merged_audio_input.audio_inputs))
+ return true;
+ }
+ return false;
+}
+
+// A merged track should use amix only when it has more than 1 audio device and no application audio
+static bool audio_inputs_should_use_amix(const std::vector<AudioInput> &audio_inputs) {
+ int num_audio_devices = 0;
+ int num_app_audio = 0;
+
+ for(const auto &audio_input : audio_inputs) {
+ if(audio_input.type == AudioInputType::DEVICE)
+ ++num_audio_devices;
+ else if(audio_input.type == AudioInputType::APPLICATION)
+ ++num_app_audio;
+ }
+
+ return num_audio_devices > 1 && num_app_audio == 0;
+}
+
+static bool merged_audio_inputs_should_use_amix(const std::vector<MergedAudioInputs> &merged_audio_inputs) {
+ for(const auto &merged_audio_input : merged_audio_inputs) {
+ if(audio_inputs_should_use_amix(merged_audio_input.audio_inputs))
+ return true;
+ }
+ return false;
+}
+
+static void validate_merged_audio_inputs_app_audio(const std::vector<MergedAudioInputs> &merged_audio_inputs, const std::vector<std::string> &app_audio_names) {
+ for(const auto &merged_audio_input : merged_audio_inputs) {
+ int num_app_audio = 0;
+ int num_app_inverted_audio = 0;
+
+ for(const auto &audio_input : merged_audio_input.audio_inputs) {
+ if(audio_input.type == AudioInputType::APPLICATION) {
+ if(audio_input.inverted)
+ ++num_app_inverted_audio;
+ else
+ ++num_app_audio;
+ }
+ }
+
+ match_app_audio_input_to_available_apps(merged_audio_input.audio_inputs, app_audio_names);
+
+ if(num_app_audio > 0 && num_app_inverted_audio > 0) {
+ fprintf(stderr, "gsr error: argument -a was provided with both app: and app-inverse:, only one of them can be used for one audio track\n");
+ _exit(2);
+ }
+ }
+}
+
+static AudioCodec select_audio_codec_with_fallback(AudioCodec audio_codec, const std::string &file_extension, bool uses_amix) {
switch(audio_codec) {
case AudioCodec::AAC: {
if(file_extension == "webm") {
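
The amix rule above can be restated with a small standalone sketch (placeholders only): a merged track goes through the amix filter only when it merges more than one device and contains no application audio, because application audio is merged through PipeWire links into the combined sink instead.

```cpp
#include <vector>

enum class Kind { Device, Application };

// Same decision as audio_inputs_should_use_amix, expressed over plain kinds.
static bool track_should_use_amix(const std::vector<Kind> &inputs) {
    int devices = 0, apps = 0;
    for(const Kind k : inputs) {
        if(k == Kind::Device)
            ++devices;
        else
            ++apps;
    }
    return devices > 1 && apps == 0;
}

// track_should_use_amix({Kind::Device, Kind::Device})      -> true   (-a "out1|out2")
// track_should_use_amix({Kind::Device, Kind::Application}) -> false  (-a "default_output|app:Brave")
// track_should_use_amix({Kind::Application})               -> false  (-a "app:Brave")
```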
@@ -2834,13 +2913,29 @@ static AudioDevice create_application_audio_audio_input(const MergedAudioInputs
_exit(1);
}
+ std::vector<const char*> audio_sources;
+ for(const auto &audio_input : merged_audio_inputs.audio_inputs) {
+ if(audio_input.type == AudioInputType::DEVICE)
+ audio_sources.push_back(audio_input.name.c_str());
+ }
+
+ bool app_audio_inverted = false;
std::vector<const char*> app_names;
- app_names.reserve(merged_audio_inputs.audio_inputs.size());
for(const auto &audio_input : merged_audio_inputs.audio_inputs) {
- app_names.push_back(audio_input.name.c_str());
+ if(audio_input.type == AudioInputType::APPLICATION) {
+ app_names.push_back(audio_input.name.c_str());
+ app_audio_inverted = audio_input.inverted;
+ }
+ }
+
+ if(!audio_sources.empty()) {
+ if(!gsr_pipewire_audio_add_link_from_sources_to_sink(pipewire_audio, audio_sources.data(), audio_sources.size(), audio_device.combined_sink_name.c_str())) {
+ fprintf(stderr, "gsr error: failed to add application audio link\n");
+ _exit(1);
+ }
}
- if(merged_audio_inputs.inverted) {
+ if(app_audio_inverted) {
if(!gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(pipewire_audio, app_names.data(), app_names.size(), audio_device.combined_sink_name.c_str())) {
fprintf(stderr, "gsr error: failed to add application audio link\n");
_exit(1);
@@ -2924,8 +3019,8 @@ int main(int argc, char **argv) {
{ "-s", Arg { {}, true, false } },
{ "-a", Arg { {}, true, true } },
#ifdef GSR_APP_AUDIO
- { "-aa", Arg { {}, true, true } },
- { "-aai", Arg { {}, true, true } },
+ { "-aa", Arg { {}, true, true } }, // TODO: Remove soon since this is deprecated. User should use -a with app: instead
+ { "-aai", Arg { {}, true, true } }, // TODO: Remove soon since this is deprecated. User should use -a with app-inverse: instead
#endif
{ "-q", Arg { {}, true, false } },
{ "-o", Arg { {}, true, false } },
@@ -3191,23 +3286,28 @@ int main(int argc, char **argv) {
AudioDevices audio_devices;
if(!audio_input_arg.values.empty())
audio_devices = get_pulseaudio_inputs();
+
+ if(!app_audio_input_arg.values.empty())
+ fprintf(stderr, "gsr warning: argument -aa is deprecated, use -a with app: prefix instead, for example: -a \"app:Brave\"\n");
- bool uses_app_audio = false;
- if(!app_audio_input_arg.values.empty() || !app_audio_input_inverted_arg.values.empty())
- uses_app_audio = true;
+ if(!app_audio_input_inverted_arg.values.empty())
+ fprintf(stderr, "gsr warning: argument -aai is deprecated, use -a with app-inverse: prefix instead, for example: -a \"app-inverse:Brave\"\n");
+ std::vector<MergedAudioInputs> requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg, app_audio_input_arg, app_audio_input_inverted_arg);
+
+ const bool uses_app_audio = merged_audio_inputs_has_app_audio(requested_audio_inputs);
std::vector<std::string> app_audio_names;
#ifdef GSR_APP_AUDIO
gsr_pipewire_audio pipewire_audio;
memset(&pipewire_audio, 0, sizeof(pipewire_audio));
if(uses_app_audio) {
if(!gsr_pipewire_audio_init(&pipewire_audio)) {
- fprintf(stderr, "gsr error: failed to setup PipeWire audio for application audio capture. The likely reason for this failure is that your sound server is not PipeWire. The options -aa and -aai are only available when running PipeWire audio server.\n");
+ fprintf(stderr, "gsr error: failed to setup PipeWire audio for application audio capture. The likely reason for this failure is that your sound server is not PipeWire. Application audio is only available when running PipeWire audio server.\n");
_exit(2);
}
if(!pulseaudio_server_is_pipewire()) {
- fprintf(stderr, "gsr error: your sound server is not PipeWire. The options -aa and -aai are only available when running PipeWire audio server.\n");
+ fprintf(stderr, "gsr error: your sound server is not PipeWire. Application audio is only available when running PipeWire audio server.\n");
_exit(2);
}
@@ -3219,8 +3319,7 @@ int main(int argc, char **argv) {
}
#endif
- bool uses_amix = false;
- std::vector<MergedAudioInputs> requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg, app_audio_input_arg, app_audio_input_inverted_arg, uses_amix, app_audio_names);
+ validate_merged_audio_inputs_app_audio(requested_audio_inputs, app_audio_names);
const char *container_format = args["-c"].value();
if(container_format && strcmp(container_format, "mkv") == 0)
@@ -3532,6 +3631,7 @@ int main(int argc, char **argv) {
video_codec = hdr_video_codec_to_sdr_video_codec(video_codec);
}
+ const bool uses_amix = merged_audio_inputs_should_use_amix(requested_audio_inputs);
audio_codec = select_audio_codec_with_fallback(audio_codec, file_extension, uses_amix);
bool low_power = false;
const AVCodec *video_codec_f = select_video_codec_with_fallback(&video_codec, video_codec_to_use, file_extension.c_str(), use_software_video_encoder, &egl, &low_power);
@@ -3620,7 +3720,7 @@ int main(int argc, char **argv) {
int audio_max_frame_size = 1024;
int audio_stream_index = VIDEO_STREAM_INDEX + 1;
for(const MergedAudioInputs &merged_audio_inputs : requested_audio_inputs) {
- const bool use_amix = merged_audio_inputs.audio_inputs.size() > 1;
+ const bool use_amix = audio_inputs_should_use_amix(merged_audio_inputs.audio_inputs);
AVCodecContext *audio_codec_context = create_audio_codec_context(fps, audio_codec, use_amix, audio_bitrate);
AVStream *audio_stream = nullptr;
@@ -3642,7 +3742,7 @@ int main(int argc, char **argv) {
std::vector<AVFilterContext*> src_filter_ctx;
AVFilterGraph *graph = nullptr;
AVFilterContext *sink = nullptr;
- if(use_amix && merged_audio_inputs.type == AudioInputType::DEVICE) {
+ if(use_amix) {
int err = init_filter_graph(audio_codec_context, &graph, &sink, src_filter_ctx, merged_audio_inputs.audio_inputs.size());
if(err < 0) {
fprintf(stderr, "Error: failed to create audio filter\n");
@@ -3659,15 +3759,13 @@ int main(int argc, char **argv) {
const double num_audio_frames_shift = audio_startup_time_seconds / timeout_sec;
std::vector<AudioDevice> audio_track_audio_devices;
- switch(merged_audio_inputs.type) {
- case AudioInputType::DEVICE:
- audio_track_audio_devices = create_device_audio_inputs(merged_audio_inputs.audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, src_filter_ctx, use_amix);
- break;
- case AudioInputType::APPLICATION:
+ if(audio_inputs_has_app_audio(merged_audio_inputs.audio_inputs)) {
+ assert(!use_amix);
#ifdef GSR_APP_AUDIO
- audio_track_audio_devices.push_back(create_application_audio_audio_input(merged_audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, &pipewire_audio));
+ audio_track_audio_devices.push_back(create_application_audio_audio_input(merged_audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, &pipewire_audio));
#endif
- break;
+ } else {
+ audio_track_audio_devices = create_device_audio_inputs(merged_audio_inputs.audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, src_filter_ctx, use_amix);
}
AudioTrack audio_track;
diff --git a/src/pipewire_audio.c b/src/pipewire_audio.c
index 4f9f05f..4af41e4 100644
--- a/src/pipewire_audio.c
+++ b/src/pipewire_audio.c
@@ -44,44 +44,44 @@ static gsr_pipewire_audio_port* gsr_pipewire_audio_get_node_port_by_name(gsr_pip
}
static bool requested_link_matches_name_case_insensitive(const gsr_pipewire_audio_requested_link *requested_link, const char *name) {
- for(int i = 0; i < requested_link->num_app_names; ++i) {
- if(strcasecmp(requested_link->app_names[i], name) == 0)
+ for(int i = 0; i < requested_link->num_output_names; ++i) {
+ if(strcasecmp(requested_link->output_names[i], name) == 0)
return true;
}
return false;
}
static void gsr_pipewire_audio_create_link(gsr_pipewire_audio *self, const gsr_pipewire_audio_requested_link *requested_link) {
- const gsr_pipewire_audio_node_type requested_link_node_type = requested_link->output_type == GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM ? GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT : GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK;
- const gsr_pipewire_audio_node *stream_input_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, requested_link->output_name, requested_link_node_type);
+ const gsr_pipewire_audio_node_type requested_link_node_type = requested_link->input_type == GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM ? GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT : GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE;
+ const gsr_pipewire_audio_node *stream_input_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, requested_link->input_name, requested_link_node_type);
if(!stream_input_node)
return;
- const gsr_pipewire_audio_port *stream_input_fl_port = NULL;
- const gsr_pipewire_audio_port *stream_input_fr_port = NULL;
+ const gsr_pipewire_audio_port *input_fl_port = NULL;
+ const gsr_pipewire_audio_port *input_fr_port = NULL;
- switch(requested_link->output_type) {
- case GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM: {
- stream_input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "input_FL");
- stream_input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "input_FR");
+ switch(requested_link->input_type) {
+ case GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM: {
+ input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "input_FL");
+ input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "input_FR");
break;
}
- case GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK: {
- stream_input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "playback_FL");
- stream_input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "playback_FR");
+ case GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK: {
+ input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "playback_FL");
+ input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "playback_FR");
break;
}
}
- if(!stream_input_fl_port || !stream_input_fr_port)
+ if(!input_fl_port || !input_fr_port)
return;
for(int i = 0; i < self->num_stream_nodes; ++i) {
- const gsr_pipewire_audio_node *app_node = &self->stream_nodes[i];
- if(app_node->type != GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT)
+ const gsr_pipewire_audio_node *output_node = &self->stream_nodes[i];
+ if(output_node->type != requested_link->output_type)
continue;
- const bool requested_link_matches_app = requested_link_matches_name_case_insensitive(requested_link, app_node->name);
+ const bool requested_link_matches_app = requested_link_matches_name_case_insensitive(requested_link, output_node->name);
if(requested_link->inverted) {
if(requested_link_matches_app)
continue;
@@ -90,9 +90,30 @@ static void gsr_pipewire_audio_create_link(gsr_pipewire_audio *self, const gsr_p
continue;
}
- const gsr_pipewire_audio_port *app_output_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, app_node->id, "output_FL");
- const gsr_pipewire_audio_port *app_output_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, app_node->id, "output_FR");
- if(!app_output_fl_port || !app_output_fr_port)
+ const gsr_pipewire_audio_port *output_fl_port = NULL;
+ const gsr_pipewire_audio_port *output_fr_port = NULL;
+
+ switch(requested_link->output_type) {
+ case GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT:
+ output_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, output_node->id, "output_FL");
+ output_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, output_node->id, "output_FR");
+ break;
+ case GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT:
+ output_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, output_node->id, "monitor_FL");
+ output_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, output_node->id, "monitor_FR");
+ break;
+ case GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE: {
+ output_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, output_node->id, "monitor_FL");
+ output_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, output_node->id, "monitor_FR");
+ if(!output_fl_port || !output_fr_port) {
+ output_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, output_node->id, "capture_FL");
+ output_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, output_node->id, "capture_FR");
+ }
+ break;
+ }
+ }
+
+ if(!output_fl_port || !output_fr_port)
continue;
// TODO: Detect if link already exists before so we dont create these proxies when not needed
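
The port-name selection above follows PipeWire's stereo port naming; a compact restatement of the rule (not part of the patch, FR ports follow the same pattern) assuming the enum from pipewire_audio.h:

```cpp
// Output-port name used for the left channel of each node type; sinks expose
// monitor ports while plain capture sources expose capture ports, hence the fallback.
static const char* output_port_name_fl(gsr_pipewire_audio_node_type type, bool has_monitor_ports) {
    switch(type) {
        case GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT:
            return "output_FL";   // application stream
        case GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT:
            return "monitor_FL";  // recording stream monitor
        case GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE:
            return has_monitor_ports ? "monitor_FL"   // sink monitor
                                     : "capture_FL";  // capture device / source
    }
    return NULL;
}
```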
@@ -101,8 +122,8 @@ static void gsr_pipewire_audio_create_link(gsr_pipewire_audio *self, const gsr_p
// TODO: error check and cleanup
{
struct pw_properties *props = pw_properties_new(NULL, NULL);
- pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%u", app_output_fl_port->id);
- pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%u", stream_input_fl_port->id);
+ pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%u", output_fl_port->id);
+ pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%u", input_fl_port->id);
// TODO: Clean this up when removing node
struct pw_proxy *proxy = pw_core_create_object(self->core, "link-factory", PW_TYPE_INTERFACE_Link, PW_VERSION_LINK, &props->dict, 0);
//self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
@@ -111,8 +132,8 @@ static void gsr_pipewire_audio_create_link(gsr_pipewire_audio *self, const gsr_p
{
struct pw_properties *props = pw_properties_new(NULL, NULL);
- pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%u", app_output_fr_port->id);
- pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%u", stream_input_fr_port->id);
+ pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%u", output_fr_port->id);
+ pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%u", input_fr_port->id);
// TODO: Clean this up when removing node
struct pw_proxy *proxy = pw_core_create_object(self->core, "link-factory", PW_TYPE_INTERFACE_Link, PW_VERSION_LINK, &props->dict, 0);
//self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
@@ -145,7 +166,8 @@ static void registry_event_global(void *data, uint32_t id, uint32_t permissions,
const bool is_stream_output = media_class && strcmp(media_class, "Stream/Output/Audio") == 0;
const bool is_stream_input = media_class && strcmp(media_class, "Stream/Input/Audio") == 0;
const bool is_sink = media_class && strcmp(media_class, "Audio/Sink") == 0;
- if(self->num_stream_nodes < GSR_PIPEWIRE_AUDIO_MAX_STREAM_NODES && node_name && (is_stream_output || is_stream_input || is_sink)) {
+ const bool is_source = media_class && strcmp(media_class, "Audio/Source") == 0;
+ if(self->num_stream_nodes < GSR_PIPEWIRE_AUDIO_MAX_STREAM_NODES && node_name && (is_stream_output || is_stream_input || is_sink || is_source)) {
//const char *application_binary = spa_dict_lookup(props, PW_KEY_APP_PROCESS_BINARY);
//const char *application_name = spa_dict_lookup(props, PW_KEY_APP_NAME);
//fprintf(stderr, " node name: %s, app binary: %s, app name: %s\n", node_name, application_binary, application_name);
@@ -158,8 +180,8 @@ static void registry_event_global(void *data, uint32_t id, uint32_t permissions,
self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT;
else if(is_stream_input)
self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT;
- else if(is_sink)
- self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK;
+ else if(is_sink || is_source)
+ self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE;
++self->num_stream_nodes;
gsr_pipewire_audio_create_links(self);
@@ -323,11 +345,11 @@ void gsr_pipewire_audio_deinit(gsr_pipewire_audio *self) {
self->num_ports = 0;
for(int i = 0; i < self->num_requested_links; ++i) {
- for(int j = 0; j < self->requested_links[i].num_app_names; ++j) {
- free(self->requested_links[i].app_names[j]);
+ for(int j = 0; j < self->requested_links[i].num_output_names; ++j) {
+ free(self->requested_links[i].output_names[j]);
}
- free(self->requested_links[i].app_names);
- free(self->requested_links[i].output_name);
+ free(self->requested_links[i].output_names);
+ free(self->requested_links[i].input_name);
}
self->num_requested_links = 0;
@@ -336,29 +358,44 @@ void gsr_pipewire_audio_deinit(gsr_pipewire_audio *self) {
#endif
}
-static bool gsr_pipewire_audio_add_link_from_apps_to_output(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *output_name, gsr_pipewire_audio_link_output_type output_type, bool inverted) {
+static bool string_remove_suffix(char *str, const char *suffix) {
+ int str_len = strlen(str);
+ int suffix_len = strlen(suffix);
+ if(str_len >= suffix_len && memcmp(str + str_len - suffix_len, suffix, suffix_len) == 0) {
+ str[str_len - suffix_len] = '\0';
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static bool gsr_pipewire_audio_add_link_from_apps_to_output(gsr_pipewire_audio *self, const char **output_names, int num_output_names, const char *input_name, gsr_pipewire_audio_node_type output_type, gsr_pipewire_audio_link_input_type input_type, bool inverted) {
if(self->num_requested_links >= GSR_PIPEWIRE_AUDIO_MAX_REQUESTED_LINKS)
return false;
- char **app_names_output_copy = calloc(num_app_names_output, sizeof(char*));
- if(!app_names_output_copy)
+ char **output_names_copy = calloc(num_output_names, sizeof(char*));
+ if(!output_names_copy)
return false;
- char *output_name_copy = strdup(output_name);
- if(!output_name_copy)
+ char *input_name_copy = strdup(input_name);
+ if(!input_name_copy)
goto error;
- for(int i = 0; i < num_app_names_output; ++i) {
- app_names_output_copy[i] = strdup(app_names_output[i]);
- if(!app_names_output_copy[i])
+ for(int i = 0; i < num_output_names; ++i) {
+ output_names_copy[i] = strdup(output_names[i]);
+ if(!output_names_copy[i])
goto error;
+
+ if(output_type == GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE)
+ string_remove_suffix(output_names_copy[i], ".monitor");
}
pw_thread_loop_lock(self->thread_loop);
- self->requested_links[self->num_requested_links].app_names = app_names_output_copy;
- self->requested_links[self->num_requested_links].num_app_names = num_app_names_output;
- self->requested_links[self->num_requested_links].output_name = output_name_copy;
+ self->requested_links[self->num_requested_links].output_names = output_names_copy;
+ self->requested_links[self->num_requested_links].num_output_names = num_output_names;
+ self->requested_links[self->num_requested_links].input_name = input_name_copy;
self->requested_links[self->num_requested_links].output_type = output_type;
+ self->requested_links[self->num_requested_links].input_type = input_type;
self->requested_links[self->num_requested_links].inverted = inverted;
++self->num_requested_links;
gsr_pipewire_audio_create_link(self, &self->requested_links[self->num_requested_links - 1]);
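
A quick usage note for string_remove_suffix above (the device name is a placeholder): device names passed on the command line are typically PulseAudio-style monitor sources ending in ".monitor", while the PipeWire node to link from is the sink itself, so the suffix is stripped before name matching.

```cpp
char name[] = "alsa_output.pci-0000_00_1b.0.analog-stereo.monitor";
if(string_remove_suffix(name, ".monitor")) {
    // name is now "alsa_output.pci-0000_00_1b.0.analog-stereo",
    // which matches the Audio/Sink node whose monitor ports get linked.
}
```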
@@ -367,28 +404,32 @@ static bool gsr_pipewire_audio_add_link_from_apps_to_output(gsr_pipewire_audio *
return true;
error:
- free(output_name_copy);
- for(int i = 0; i < num_app_names_output; ++i) {
- free(app_names_output_copy[i]);
+ free(input_name_copy);
+ for(int i = 0; i < num_output_names; ++i) {
+ free(output_names_copy[i]);
}
- free(app_names_output_copy);
+ free(output_names_copy);
return false;
}
-bool gsr_pipewire_audio_add_link_from_apps_to_stream(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input) {
- return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, stream_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM, false);
+bool gsr_pipewire_audio_add_link_from_apps_to_stream(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *stream_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names, num_app_names, stream_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM, false);
+}
+
+bool gsr_pipewire_audio_add_link_from_apps_to_stream_inverted(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *stream_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names, num_app_names, stream_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_STREAM, true);
}
-bool gsr_pipewire_audio_add_link_from_apps_to_stream_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input) {
- return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, stream_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM, true);
+bool gsr_pipewire_audio_add_link_from_apps_to_sink(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *sink_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names, num_app_names, sink_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK, false);
}
-bool gsr_pipewire_audio_add_link_from_apps_to_sink(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input) {
- return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, sink_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK, false);
+bool gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(gsr_pipewire_audio *self, const char **app_names, int num_app_names, const char *sink_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names, num_app_names, sink_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK, true);
}
-bool gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input) {
- return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, sink_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK, true);
+bool gsr_pipewire_audio_add_link_from_sources_to_sink(gsr_pipewire_audio *self, const char **source_names, int num_source_names, const char *sink_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, source_names, num_source_names, sink_name_input, GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK_OR_SOURCE, GSR_PIPEWIRE_AUDIO_LINK_INPUT_TYPE_SINK, false);
}
void gsr_pipewire_audio_for_each_app(gsr_pipewire_audio *self, gsr_pipewire_audio_app_query_callback callback, void *userdata) {