Diffstat (limited to 'src')
-rw-r--r--  src/capture/capture.c        4
-rw-r--r--  src/capture/kms.c           12
-rw-r--r--  src/capture/nvfbc.c          8
-rw-r--r--  src/capture/portal.c        10
-rw-r--r--  src/capture/xcomposite.c    10
-rw-r--r--  src/color_conversion.c      22
-rw-r--r--  src/cursor.c                10
-rw-r--r--  src/dbus.c                  19
-rw-r--r--  src/main.cpp               352
-rw-r--r--  src/pipewire_audio.c       406
-rw-r--r--  src/pipewire_video.c         2
-rw-r--r--  src/sound.cpp              108
-rw-r--r--  src/utils.c                 20
13 files changed, 841 insertions, 142 deletions
diff --git a/src/capture/capture.c b/src/capture/capture.c
index ec10854..2a4a689 100644
--- a/src/capture/capture.c
+++ b/src/capture/capture.c
@@ -34,10 +34,6 @@ int gsr_capture_capture(gsr_capture *cap, AVFrame *frame, gsr_color_conversion *
return cap->capture(cap, frame, color_conversion);
}
-gsr_source_color gsr_capture_get_source_color(gsr_capture *cap) {
- return cap->get_source_color(cap);
-}
-
bool gsr_capture_uses_external_image(gsr_capture *cap) {
if(cap->uses_external_image)
return cap->uses_external_image(cap);
diff --git a/src/capture/kms.c b/src/capture/kms.c
index 6f021b9..c85811e 100644
--- a/src/capture/kms.c
+++ b/src/capture/kms.c
@@ -507,7 +507,7 @@ static void render_drm_cursor(gsr_capture_kms *self, gsr_color_conversion *color
gsr_color_conversion_draw(color_conversion, self->cursor_texture_id,
cursor_pos, (vec2i){cursor_size.x * scale.x, cursor_size.y * scale.y},
(vec2i){0, 0}, cursor_size,
- texture_rotation, cursor_texture_id_is_external);
+ texture_rotation, cursor_texture_id_is_external, GSR_SOURCE_COLOR_RGB);
self->params.egl->glDisable(GL_SCISSOR_TEST);
}
@@ -534,7 +534,7 @@ static void render_x11_cursor(gsr_capture_kms *self, gsr_color_conversion *color
gsr_color_conversion_draw(color_conversion, self->x11_cursor.texture_id,
cursor_pos, (vec2i){self->x11_cursor.size.x * scale.x, self->x11_cursor.size.y * scale.y},
(vec2i){0, 0}, self->x11_cursor.size,
- 0.0f, false);
+ 0.0f, false, GSR_SOURCE_COLOR_RGB);
self->params.egl->glDisable(GL_SCISSOR_TEST);
}
@@ -629,7 +629,7 @@ static int gsr_capture_kms_capture(gsr_capture *cap, AVFrame *frame, gsr_color_c
gsr_color_conversion_draw(color_conversion, self->external_texture_fallback ? self->external_input_texture_id : self->input_texture_id,
target_pos, output_size,
capture_pos, self->capture_size,
- texture_rotation, self->external_texture_fallback);
+ texture_rotation, self->external_texture_fallback, GSR_SOURCE_COLOR_RGB);
}
if(self->params.record_cursor) {
@@ -661,11 +661,6 @@ static bool gsr_capture_kms_should_stop(gsr_capture *cap, bool *err) {
return false;
}
-static gsr_source_color gsr_capture_kms_get_source_color(gsr_capture *cap) {
- (void)cap;
- return GSR_SOURCE_COLOR_RGB;
-}
-
static bool gsr_capture_kms_uses_external_image(gsr_capture *cap) {
(void)cap;
return true;
@@ -752,7 +747,6 @@ gsr_capture* gsr_capture_kms_create(const gsr_capture_kms_params *params) {
//.tick = gsr_capture_kms_tick,
.should_stop = gsr_capture_kms_should_stop,
.capture = gsr_capture_kms_capture,
- .get_source_color = gsr_capture_kms_get_source_color,
.uses_external_image = gsr_capture_kms_uses_external_image,
.set_hdr_metadata = gsr_capture_kms_set_hdr_metadata,
//.is_damaged = gsr_capture_kms_is_damaged,
diff --git a/src/capture/nvfbc.c b/src/capture/nvfbc.c
index 96f3894..d5a270e 100644
--- a/src/capture/nvfbc.c
+++ b/src/capture/nvfbc.c
@@ -434,7 +434,7 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color
gsr_color_conversion_draw(color_conversion, self->setup_params.dwTextures[grab_params.dwTextureIndex],
target_pos, (vec2i){output_size.x, output_size.y},
(vec2i){0, 0}, frame_size,
- 0.0f, false);
+ 0.0f, false, GSR_SOURCE_COLOR_BGR);
self->params.egl->glFlush();
self->params.egl->glFinish();
@@ -442,11 +442,6 @@ static int gsr_capture_nvfbc_capture(gsr_capture *cap, AVFrame *frame, gsr_color
return 0;
}
-static gsr_source_color gsr_capture_nvfbc_get_source_color(gsr_capture *cap) {
- (void)cap;
- return GSR_SOURCE_COLOR_BGR;
-}
-
static void gsr_capture_nvfbc_destroy(gsr_capture *cap, AVCodecContext *video_codec_context) {
(void)video_codec_context;
gsr_capture_nvfbc *self = cap->priv;
@@ -492,7 +487,6 @@ gsr_capture* gsr_capture_nvfbc_create(const gsr_capture_nvfbc_params *params) {
.tick = NULL,
.should_stop = NULL,
.capture = gsr_capture_nvfbc_capture,
- .get_source_color = gsr_capture_nvfbc_get_source_color,
.uses_external_image = NULL,
.destroy = gsr_capture_nvfbc_destroy,
.priv = cap_nvfbc
diff --git a/src/capture/portal.c b/src/capture/portal.c
index a42bac0..d68e86f 100644
--- a/src/capture/portal.c
+++ b/src/capture/portal.c
@@ -377,7 +377,7 @@ static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_colo
gsr_color_conversion_draw(color_conversion, using_external_image ? self->texture_map.external_texture_id : self->texture_map.texture_id,
target_pos, output_size,
(vec2i){region.x, region.y}, self->capture_size,
- 0.0f, using_external_image);
+ 0.0f, using_external_image, GSR_SOURCE_COLOR_RGB);
}
if(self->params.record_cursor && self->texture_map.cursor_texture_id > 0 && cursor_region.width > 0) {
@@ -396,7 +396,7 @@ static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_colo
gsr_color_conversion_draw(color_conversion, self->texture_map.cursor_texture_id,
(vec2i){cursor_pos.x, cursor_pos.y}, (vec2i){cursor_region.width * scale.x, cursor_region.height * scale.y},
(vec2i){0, 0}, (vec2i){cursor_region.width, cursor_region.height},
- 0.0f, false);
+ 0.0f, false, GSR_SOURCE_COLOR_RGB);
self->params.egl->glDisable(GL_SCISSOR_TEST);
}
@@ -408,11 +408,6 @@ static int gsr_capture_portal_capture(gsr_capture *cap, AVFrame *frame, gsr_colo
return 0;
}
-static gsr_source_color gsr_capture_portal_get_source_color(gsr_capture *cap) {
- (void)cap;
- return GSR_SOURCE_COLOR_RGB;
-}
-
static bool gsr_capture_portal_uses_external_image(gsr_capture *cap) {
(void)cap;
return true;
@@ -462,7 +457,6 @@ gsr_capture* gsr_capture_portal_create(const gsr_capture_portal_params *params)
.tick = NULL,
.should_stop = NULL,
.capture = gsr_capture_portal_capture,
- .get_source_color = gsr_capture_portal_get_source_color,
.uses_external_image = gsr_capture_portal_uses_external_image,
.is_damaged = gsr_capture_portal_is_damaged,
.clear_damage = gsr_capture_portal_clear_damage,
diff --git a/src/capture/xcomposite.c b/src/capture/xcomposite.c
index 6a3be16..2867b45 100644
--- a/src/capture/xcomposite.c
+++ b/src/capture/xcomposite.c
@@ -281,7 +281,7 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_
gsr_color_conversion_draw(color_conversion, window_texture_get_opengl_texture_id(&self->window_texture),
target_pos, output_size,
(vec2i){0, 0}, self->texture_size,
- 0.0f, false);
+ 0.0f, false, GSR_SOURCE_COLOR_RGB);
}
if(self->params.record_cursor && self->cursor.visible) {
@@ -303,7 +303,7 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_
gsr_color_conversion_draw(color_conversion, self->cursor.texture_id,
cursor_pos, (vec2i){self->cursor.size.x * scale.x, self->cursor.size.y * scale.y},
(vec2i){0, 0}, self->cursor.size,
- 0.0f, false);
+ 0.0f, false, GSR_SOURCE_COLOR_RGB);
self->params.egl->glDisable(GL_SCISSOR_TEST);
}
@@ -314,11 +314,6 @@ static int gsr_capture_xcomposite_capture(gsr_capture *cap, AVFrame *frame, gsr_
return 0;
}
-static gsr_source_color gsr_capture_xcomposite_get_source_color(gsr_capture *cap) {
- (void)cap;
- return GSR_SOURCE_COLOR_RGB;
-}
-
static uint64_t gsr_capture_xcomposite_get_window_id(gsr_capture *cap) {
gsr_capture_xcomposite *self = cap->priv;
return self->window;
@@ -358,7 +353,6 @@ gsr_capture* gsr_capture_xcomposite_create(const gsr_capture_xcomposite_params *
.tick = gsr_capture_xcomposite_tick,
.should_stop = gsr_capture_xcomposite_should_stop,
.capture = gsr_capture_xcomposite_capture,
- .get_source_color = gsr_capture_xcomposite_get_source_color,
.uses_external_image = NULL,
.get_window_id = gsr_capture_xcomposite_get_window_id,
.destroy = gsr_capture_xcomposite_destroy,
diff --git a/src/color_conversion.c b/src/color_conversion.c
index 3962dd4..65f3775 100644
--- a/src/color_conversion.c
+++ b/src/color_conversion.c
@@ -28,26 +28,26 @@ static float abs_f(float v) {
#define RGB_TO_P010_FULL "const mat4 RGBtoYUV = mat4(0.262700, -0.139630, 0.500000, 0.000000,\n" \
" 0.678000, -0.360370, -0.459786, 0.000000,\n" \
" 0.059300, 0.500000, -0.040214, 0.000000,\n" \
- " 0.000000, 0.500000, 0.500000, 1.000000);"
+ " 0.000000, 0.500000, 0.500000, 1.000000);\n"
/* ITU-R BT2020, limited (full multiplied by (235-16)/255, adding 16/255 to luma) */
#define RGB_TO_P010_LIMITED "const mat4 RGBtoYUV = mat4(0.225613, -0.119918, 0.429412, 0.000000,\n" \
" 0.582282, -0.309494, -0.394875, 0.000000,\n" \
" 0.050928, 0.429412, -0.034537, 0.000000,\n" \
- " 0.062745, 0.500000, 0.500000, 1.000000);"
+ " 0.062745, 0.500000, 0.500000, 1.000000);\n"
/* ITU-R BT709, full, custom values: 0.2110 0.7110 0.0710 */
/* https://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.709-6-201506-I!!PDF-E.pdf */
#define RGB_TO_NV12_FULL "const mat4 RGBtoYUV = mat4(0.211000, -0.113563, 0.500000, 0.000000,\n" \
" 0.711000, -0.382670, -0.450570, 0.000000,\n" \
" 0.071000, 0.500000, -0.044994, 0.000000,\n" \
- " 0.000000, 0.500000, 0.500000, 1.000000);"
+ " 0.000000, 0.500000, 0.500000, 1.000000);\n"
/* ITU-R BT709, limited, custom values: 0.2100 0.7100 0.0700 (full multiplied by (235-16)/255, adding 16/255 to luma) */
#define RGB_TO_NV12_LIMITED "const mat4 RGBtoYUV = mat4(0.180353, -0.096964, 0.429412, 0.000000,\n" \
" 0.609765, -0.327830, -0.385927, 0.000000,\n" \
" 0.060118, 0.429412, -0.038049, 0.000000,\n" \
- " 0.062745, 0.500000, 0.500000, 1.000000);"
+ " 0.062745, 0.500000, 0.500000, 1.000000);\n"
static const char* color_format_range_get_transform_matrix(gsr_destination_color color_format, gsr_color_range color_range) {
switch(color_format) {
@@ -325,22 +325,22 @@ void gsr_color_conversion_deinit(gsr_color_conversion *self) {
self->params.egl = NULL;
}
-static void gsr_color_conversion_swizzle_texture_source(gsr_color_conversion *self) {
- if(self->params.source_color == GSR_SOURCE_COLOR_BGR) {
+static void gsr_color_conversion_swizzle_texture_source(gsr_color_conversion *self, gsr_source_color source_color) {
+ if(source_color == GSR_SOURCE_COLOR_BGR) {
const int swizzle_mask[] = { GL_BLUE, GL_GREEN, GL_RED, 1 };
self->params.egl->glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle_mask);
}
}
-static void gsr_color_conversion_swizzle_reset(gsr_color_conversion *self) {
- if(self->params.source_color == GSR_SOURCE_COLOR_BGR) {
+static void gsr_color_conversion_swizzle_reset(gsr_color_conversion *self, gsr_source_color source_color) {
+ if(source_color == GSR_SOURCE_COLOR_BGR) {
const int swizzle_mask[] = { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA };
self->params.egl->glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle_mask);
}
}
/* |source_pos| is in pixel coordinates and |source_size| */
-void gsr_color_conversion_draw(gsr_color_conversion *self, unsigned int texture_id, vec2i source_pos, vec2i source_size, vec2i texture_pos, vec2i texture_size, float rotation, bool external_texture) {
+void gsr_color_conversion_draw(gsr_color_conversion *self, unsigned int texture_id, vec2i source_pos, vec2i source_size, vec2i texture_pos, vec2i texture_size, float rotation, bool external_texture, gsr_source_color source_color) {
// TODO: Remove this crap
rotation = M_PI*2.0f - rotation;
@@ -402,7 +402,7 @@ void gsr_color_conversion_draw(gsr_color_conversion *self, unsigned int texture_
-1.0f + 0.0f + size_norm.x, -1.0f + 0.0f + size_norm.y, texture_pos_norm.x + texture_size_norm.x, texture_pos_norm.y + texture_size_norm.y
};
- gsr_color_conversion_swizzle_texture_source(self);
+ gsr_color_conversion_swizzle_texture_source(self, source_color);
self->params.egl->glBindVertexArray(self->vertex_array_object_id);
self->params.egl->glViewport(0, 0, dest_texture_size.x, dest_texture_size.y);
@@ -438,7 +438,7 @@ void gsr_color_conversion_draw(gsr_color_conversion *self, unsigned int texture_
self->params.egl->glBindTexture(texture_target, 0);
self->params.egl->glBindFramebuffer(GL_FRAMEBUFFER, 0);
- gsr_color_conversion_swizzle_reset(self);
+ gsr_color_conversion_swizzle_reset(self, source_color);
}
void gsr_color_conversion_clear(gsr_color_conversion *self) {
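With this change the source color is passed per draw call instead of being fixed in gsr_color_conversion_params at init time. A minimal sketch of a caller of the new signature (the position/size values below are placeholders, not taken from the patch):

    void draw_example(gsr_color_conversion *conv, unsigned int texture_id) {
        const vec2i target_pos   = {0, 0};        /* where to draw in the destination */
        const vec2i output_size  = {1920, 1080};  /* scaled size of the drawn texture */
        const vec2i texture_pos  = {0, 0};        /* region of the source texture to read */
        const vec2i texture_size = {1920, 1080};
        /* no rotation, not an external (OES) texture, texture data stored as RGB */
        gsr_color_conversion_draw(conv, texture_id,
            target_pos, output_size,
            texture_pos, texture_size,
            0.0f, false, GSR_SOURCE_COLOR_RGB);
    }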
diff --git a/src/cursor.c b/src/cursor.c
index 3dca0c6..56b9694 100644
--- a/src/cursor.c
+++ b/src/cursor.c
@@ -43,10 +43,12 @@ static bool gsr_cursor_set_from_x11_cursor_image(gsr_cursor *self, XFixesCursorI
*visible = true;
}
- *out++ = (unsigned)*in++ * 255/alpha;
- *out++ = (unsigned)*in++ * 255/alpha;
- *out++ = (unsigned)*in++ * 255/alpha;
- *out++ = *in++;
+ out[0] = (float)in[2] * 255.0/(float)alpha;
+ out[1] = (float)in[1] * 255.0/(float)alpha;
+ out[2] = (float)in[0] * 255.0/(float)alpha;
+ out[3] = in[3];
+ out += 4;
+ in += 4;
}
}
diff --git a/src/dbus.c b/src/dbus.c
index 5757b8b..2087c35 100644
--- a/src/dbus.c
+++ b/src/dbus.c
@@ -1,11 +1,11 @@
#include "../include/dbus.h"
+#include "../include/utils.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
-#include <sys/random.h>
/* TODO: Make non-blocking when GPU Screen Recorder is turned into a library */
/* TODO: Make sure responses matches the requests */
@@ -37,27 +37,12 @@ static const char* dict_value_type_to_string(dict_value_type type) {
return "(unknown)";
}
-static bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
- /* TODO: Use other functions on other platforms than linux */
- if(getrandom(buffer, buffer_size, 0) < buffer_size) {
- fprintf(stderr, "gsr error: generate_random_characters: failed to get random bytes, error: %s\n", strerror(errno));
- return false;
- }
-
- for(int i = 0; i < buffer_size; ++i) {
- unsigned char c = *(unsigned char*)&buffer[i];
- buffer[i] = alphabet[c % alphabet_size];
- }
-
- return true;
-}
-
bool gsr_dbus_init(gsr_dbus *self, const char *screencast_restore_token) {
memset(self, 0, sizeof(*self));
dbus_error_init(&self->err);
self->random_str[DBUS_RANDOM_STR_SIZE] = '\0';
- if(!generate_random_characters(self->random_str, DBUS_RANDOM_STR_SIZE, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 62)) {
+ if(!generate_random_characters_standard_alphabet(self->random_str, DBUS_RANDOM_STR_SIZE)) {
fprintf(stderr, "gsr error: gsr_dbus_init: failed to generate random string\n");
return false;
}
diff --git a/src/main.cpp b/src/main.cpp
index 35b5119..f987f0e 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -6,6 +6,9 @@ extern "C" {
#include "../include/capture/portal.h"
#include "../include/dbus.h"
#endif
+#ifdef GSR_APP_AUDIO
+#include "../include/pipewire_audio.h"
+#endif
#include "../include/encoder/video/nvenc.h"
#include "../include/encoder/video/vaapi.h"
#include "../include/encoder/video/vulkan.h"
@@ -1064,7 +1067,12 @@ static void open_video_hardware(AVCodecContext *codec_context, VideoQuality vide
static void usage_header() {
const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
const char *program_name = inside_flatpak ? "flatpak run --command=gpu-screen-recorder com.dec05eba.gpu_screen_recorder" : "gpu-screen-recorder";
- fprintf(stderr, "usage: %s -w <window_id|monitor|focused|portal> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>] [-aa <application_name>] [-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|av1|vp8|vp9|hevc_hdr|av1_hdr|hevc_10bit|av1_10bit] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-bm auto|qp|vbr|cbr] [-cr limited|full] [-df yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>] [-restore-portal-session yes|no] [-portal-session-token-filepath filepath] [-encoder gpu|cpu] [-o <output_file>] [-v yes|no] [--version] [-h|--help]\n", program_name);
+#ifdef GSR_APP_AUDIO
+ const char *app_audio_options = " [-aa <application_name>] [-aai <application_name>] ";
+#else
+ const char *app_audio_options = " "; // single space so the usage line keeps a separator between [-a ...] and [-q ...] when app audio is disabled
+#endif
+ fprintf(stderr, "usage: %s -w <window_id|monitor|focused|portal> [-c <container_format>] [-s WxH] -f <fps> [-a <audio_input>]%s[-q <quality>] [-r <replay_buffer_size_sec>] [-k h264|hevc|av1|vp8|vp9|hevc_hdr|av1_hdr|hevc_10bit|av1_10bit] [-ac aac|opus|flac] [-ab <bitrate>] [-oc yes|no] [-fm cfr|vfr|content] [-bm auto|qp|vbr|cbr] [-cr limited|full] [-df yes|no] [-sc <script_path>] [-cursor yes|no] [-keyint <value>] [-restore-portal-session yes|no] [-portal-session-token-filepath filepath] [-encoder gpu|cpu] [-o <output_file>] [-v yes|no] [--version] [-h|--help]\n", program_name, app_audio_options);
}
// TODO: Update with portal info
@@ -1075,12 +1083,15 @@ static void usage_full() {
fprintf(stderr, "\n");
fprintf(stderr, "OPTIONS:\n");
fprintf(stderr, " -w Window id to record, a display (monitor name), \"screen\", \"screen-direct-force\", \"focused\" or \"portal\".\n");
- fprintf(stderr, " If this is \"portal\" then xdg desktop screencast portal with pipewire will be used. Portal option is only available on Wayland.\n");
+ fprintf(stderr, " If this is \"portal\" then xdg desktop screencast portal with PipeWire will be used. Portal option is only available on Wayland.\n");
fprintf(stderr, " If you select to save the session (token) in the desktop portal capture popup then the session will be saved for the next time you use \"portal\",\n");
fprintf(stderr, " but the session will be ignored unless you run GPU Screen Recorder with the '-restore-portal-session yes' option.\n");
- fprintf(stderr, " If this is \"screen\" or \"screen-direct-force\" then all monitors are recorded on Nvidia X11. On AMD/Intel or wayland \"screen\" will record the first monitor found.\n");
- fprintf(stderr, " \"screen-direct-force\" is not recommended unless you use a VRR (G-SYNC) monitor on Nvidia X11 and you are aware that using this option can cause games to freeze/crash or other issues because of Nvidia driver issues.\n");
+ fprintf(stderr, " If this is \"screen\" or \"screen-direct-force\" then all monitors are recorded on Nvidia X11.\n");
+ fprintf(stderr, " On AMD/Intel or wayland \"screen\" will record the first monitor found.\n");
+ fprintf(stderr, " \"screen-direct-force\" is not recommended unless you use a VRR (G-SYNC) monitor on Nvidia X11 and you are aware that using this option can cause\n");
+ fprintf(stderr, " games to freeze/crash or other issues because of Nvidia driver issues.\n");
fprintf(stderr, " \"screen-direct-force\" option is only available on Nvidia X11. VRR works without this option on other systems.\n");
+ fprintf(stderr, " Run GPU Screen Recorder with the --list-capture-options option to list valid values for this option.\n");
fprintf(stderr, "\n");
fprintf(stderr, " -c Container format for output file, for example mp4, or flv. Only required if no output file is specified or if recording in replay buffer mode.\n");
fprintf(stderr, " If an output file is specified and -c is not used then the container format is determined from the output filename extension.\n");
@@ -1099,16 +1110,27 @@ static void usage_full() {
fprintf(stderr, " A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\".\n");
fprintf(stderr, " Multiple audio devices can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"alsa_output1|alsa_output2\".\n");
fprintf(stderr, " The audio device can also be \"default_output\" in which case the default output device is used, or \"default_input\" in which case the default input device is used.\n");
- fprintf(stderr, " If the audio device is an empty string then the audio device is ignored.\n");
+ fprintf(stderr, " If the audio device is an empty string then the argument is ignored.\n");
fprintf(stderr, " Optional, no audio track is added by default.\n");
+ fprintf(stderr, " Run GPU Screen Recorder with the --list-audio-devices option to list valid audio devices to use with this -a option.\n");
fprintf(stderr, "\n");
#ifdef GSR_APP_AUDIO
- fprintf(stderr, " -aa Audio device to record from (pulse audio device). Can be specified multiple times. Each time this is specified a new audio track is added for the specified audio device.\n");
- fprintf(stderr, " A name can be given to the audio input device by prefixing the audio input with <name>/, for example \"dummy/alsa_output.pci-0000_00_1b.0.analog-stereo.monitor\".\n");
- fprintf(stderr, " Multiple audio devices can be merged into one audio track by using \"|\" as a separator into one -a argument, for example: -a \"alsa_output1|alsa_output2\".\n");
- fprintf(stderr, " The audio device can also be \"default_output\" in which case the default output device is used, or \"default_input\" in which case the default input device is used.\n");
- fprintf(stderr, " If the audio device is an empty string then the audio device is ignored.\n");
- fprintf(stderr, " Optional, no audio track is added by default.\n");
+ fprintf(stderr, " -aa Application to record audio from (case-insensitive). Can be specified multiple times. Each time this is specified a new audio track is added for the specified application audio.\n");
+ fprintf(stderr, " Audio from multiple applications can be merged into one audio track by using \"|\" as a separator in one -aa argument, for example: -aa \"firefox|csgo\".\n");
+ fprintf(stderr, " If the application name is an empty string then the argument is ignored.\n");
+ fprintf(stderr, " Optional, no application audio is added by default.\n");
+ fprintf(stderr, " Note: this option is only available when the sound server on the system is PipeWire.\n");
+ fprintf(stderr, " Run GPU Screen Recorder with the --list-application-audio option to list valid application names to use with this -aa option.\n");
+ fprintf(stderr, " It's possible to use an application name that is not listed in --list-application-audio, for example when trying to record audio from an application that hasn't started yet.\n");
+ fprintf(stderr, "\n");
+ fprintf(stderr, " -aai Record audio from all applications except the ones specified with this option (case-insensitive). Can be specified multiple times.\n");
+ fprintf(stderr, " Each time this is specified a new audio track is added that records all applications except the ones specified.\n");
+ fprintf(stderr, " Audio from multiple applications can be merged into one audio track by using \"|\" as a separator in one -aai argument, for example: -aai \"firefox|csgo\".\n");
+ fprintf(stderr, " If the application name is an empty string then the argument is ignored.\n");
+ fprintf(stderr, " Optional, no application audio is added by default.\n");
+ fprintf(stderr, " Note: this option is only available when the sound server on the system is PipeWire.\n");
+ fprintf(stderr, " Run GPU Screen Recorder with the --list-application-audio option to list valid application names to use with this -aai option.\n");
+ fprintf(stderr, " It's possible to use an application name that is not listed in --list-application-audio, for example when trying to record audio and the target application hasn't started yet.\n");
fprintf(stderr, "\n");
#endif
fprintf(stderr, " -q Video quality. Should be either 'medium', 'high', 'very_high' or 'ultra' when using '-bm qp' or '-bm vbr' options, and '-bm qp' is the default option used.\n");
@@ -1181,18 +1203,36 @@ static void usage_full() {
fprintf(stderr, " Optional, set to 'gpu' by default.\n");
fprintf(stderr, "\n");
fprintf(stderr, " --info\n");
- fprintf(stderr, " List info about the system (for use by GPU Screen Recorder UI). Lists the following information (prints them to stdout and exits):\n");
+ fprintf(stderr, " List info about the system. Lists the following information (prints them to stdout and exits):\n");
fprintf(stderr, " Supported video codecs (h264, h264_software, hevc, hevc_hdr, hevc_10bit, av1, av1_hdr, av1_10bit, vp8, vp9 (if supported)).\n");
fprintf(stderr, " Supported capture options (window, focused, screen, monitors and portal, if supported by the system).\n");
fprintf(stderr, " If opengl initialization fails then the program exits with 22, if no usable drm device is found then it exits with 23. On success it exits with 0.\n");
fprintf(stderr, "\n");
+ fprintf(stderr, " --list-capture-options\n");
+ fprintf(stderr, " List available capture options. Lists capture options in the following format (prints them to stdout and exits):\n");
+ fprintf(stderr, " <option>\n");
+ fprintf(stderr, " <monitor_name>|<resolution>\n");
+ fprintf(stderr, " For example:\n");
+ fprintf(stderr, " window\n");
+ fprintf(stderr, " DP-1|1920x1080\n");
+ fprintf(stderr, " The <option> and <monitor_name> are the names that can be passed to GPU Screen Recorder with the -w option.\n");
+ fprintf(stderr, "\n");
fprintf(stderr, " --list-audio-devices\n");
- fprintf(stderr, " List audio devices (for use by GPU Screen Recorder UI). Lists audio devices in the following format (prints them to stdout and exits):\n");
+ fprintf(stderr, " List audio devices. Lists audio devices in the following format (prints them to stdout and exits):\n");
fprintf(stderr, " <audio_device_name>|<audio_device_name_in_human_readable_format>\n");
fprintf(stderr, " For example:\n");
fprintf(stderr, " bluez_input.88:C9:E8:66:A2:27|WH-1000XM4\n");
- fprintf(stderr, " The <audio_device_name> is the name to pass to GPU Screen Recorder in a -a option.\n");
+ fprintf(stderr, " alsa_output.pci-0000_0c_00.4.iec958-stereo|Monitor of Starship/Matisse HD Audio Controller Digital Stereo (IEC958)\n");
+ fprintf(stderr, " The <audio_device_name> is the name that can be passed to GPU Screen Recorder with the -a option.\n");
+ fprintf(stderr, "\n");
+#ifdef GSR_APP_AUDIO
+ fprintf(stderr, " --list-application-audio\n");
+ fprintf(stderr, " Lists applications that you can record from (with the -aa or -aai option) (prints them to stdout and exits), for example:\n");
+ fprintf(stderr, " firefox\n");
+ fprintf(stderr, " csgo\n");
+ fprintf(stderr, " These names are the application audio names that can be passed to GPU Screen Recorder with the -aa or -aai option.\n");
fprintf(stderr, "\n");
+#endif
fprintf(stderr, " --version\n");
fprintf(stderr, " Print version (%s) and exit\n", GSR_VERSION);
fprintf(stderr, "\n");
@@ -1219,6 +1259,10 @@ static void usage_full() {
fprintf(stderr, " %s -w screen -f 60 -a default_output -c mkv -sc script.sh -r 60 -o \"$HOME/Videos\"\n", program_name);
fprintf(stderr, " %s -w portal -f 60 -a default_output -restore-portal-session yes -o \"$HOME/Videos/video.mp4\"\n", program_name);
fprintf(stderr, " %s -w screen -f 60 -a default_output -bm cbr -q 15000 -o \"$HOME/Videos/video.mp4\"\n", program_name);
+#ifdef GSR_APP_AUDIO
+ fprintf(stderr, " %s -w screen -f 60 -aa \"firefox|csgo\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+ fprintf(stderr, " %s -w screen -f 60 -aai \"firefox|csgo\" -o \"$HOME/Videos/video.mp4\"\n", program_name);
+#endif
//fprintf(stderr, " gpu-screen-recorder -w screen -f 60 -q ultra -pixfmt yuv444 -o video.mp4\n");
_exit(1);
}
@@ -1375,6 +1419,7 @@ struct AudioDevice {
AVFilterContext *src_filter_ctx = nullptr;
AVFrame *frame = nullptr;
std::thread thread; // TODO: Instead of having a thread for each track, have one thread for all threads and read the data with non-blocking read
+ std::string combined_sink_name;
};
// TODO: Cleanup
@@ -1624,6 +1669,18 @@ static std::vector<AudioInput> parse_audio_input_arg(const char *str, const Audi
return audio_inputs;
}
+static std::vector<AudioInput> parse_app_audio_input_arg(const char *str) {
+ std::vector<AudioInput> audio_inputs;
+ split_string(str, '|', [&](const char *sub, size_t size) {
+ AudioInput audio_input;
+ audio_input.name.assign(sub, size);
+ audio_input.description = audio_input.name;
+ audio_inputs.push_back(std::move(audio_input));
+ return true;
+ });
+ return audio_inputs;
+}
+
// TODO: Does this match all livestreaming cases?
static bool is_livestream_path(const char *str) {
const int len = strlen(str);
@@ -2091,6 +2148,74 @@ static void list_audio_devices_command() {
_exit(0);
}
+static bool app_audio_query_callback(const char *app_name, void*) {
+ puts(app_name);
+ return true;
+}
+
+static void list_application_audio_command() {
+#ifdef GSR_APP_AUDIO
+ gsr_pipewire_audio audio;
+ if(gsr_pipewire_audio_init(&audio)) {
+ gsr_pipewire_audio_for_each_app(&audio, app_audio_query_callback, NULL);
+ gsr_pipewire_audio_deinit(&audio);
+ }
+#endif
+
+ fflush(stdout);
+ _exit(0);
+}
+
+static void list_capture_options_command() {
+ bool wayland = false;
+ Display *dpy = XOpenDisplay(nullptr);
+ if (!dpy) {
+ wayland = true;
+ fprintf(stderr, "Warning: failed to connect to the X server. Assuming wayland is running without Xwayland\n");
+ }
+
+ XSetErrorHandler(x11_error_handler);
+ XSetIOErrorHandler(x11_io_error_handler);
+
+ if(!wayland)
+ wayland = is_xwayland(dpy);
+
+ if(!wayland && is_using_prime_run()) {
+ // Disable prime-run and similar options as it doesn't work, the monitor to capture has to be run on the same device.
+ // This is fine on wayland since nvidia uses drm interface there and the monitor query checks the monitors connected
+ // to the drm device.
+ fprintf(stderr, "Warning: use of prime-run on X11 is not supported. Disabling prime-run\n");
+ disable_prime_run();
+ }
+
+ gsr_egl egl;
+ if(!gsr_egl_load(&egl, dpy, wayland, false)) {
+ fprintf(stderr, "gsr error: failed to load opengl\n");
+ _exit(1);
+ }
+
+ egl.card_path[0] = '\0';
+ if(monitor_capture_use_drm(&egl, wayland)) {
+ // TODO: Allow specifying another card, and in other places
+ if(!gsr_get_valid_card_path(&egl, egl.card_path, false)) {
+ fprintf(stderr, "Error: no /dev/dri/cardX device found. Make sure that you have at least one monitor connected\n");
+ _exit(23);
+ }
+ }
+
+ av_log_set_level(AV_LOG_FATAL);
+ list_supported_capture_options(&egl, wayland);
+
+ fflush(stdout);
+
+ // Not needed as this will just slow down shutdown
+ //gsr_egl_unload(&egl);
+ //if(dpy)
+ // XCloseDisplay(dpy);
+
+ _exit(0);
+}
+
static gsr_capture* create_capture_impl(std::string &window_str, vec2i output_resolution, bool wayland, gsr_egl *egl, int fps, VideoCodec video_codec, gsr_color_range color_range,
bool record_cursor, bool use_software_video_encoder, bool restore_portal_session, const char *portal_session_token_filepath,
gsr_color_depth color_depth)
@@ -2273,10 +2398,30 @@ struct Arg {
}
};
+static void match_app_audio_input_to_available_apps(const std::vector<AudioInput> &requested_audio_inputs, const std::vector<std::string> &app_audio_names) {
+ for(const AudioInput &request_audio_input : requested_audio_inputs) {
+ bool match = false;
+ for(const std::string &app_name : app_audio_names) {
+ if(strcasecmp(app_name.c_str(), request_audio_input.name.c_str()) == 0) {
+ match = true;
+ break;
+ }
+ }
+
+ if(!match) {
+ fprintf(stderr, "gsr warning: no audio application with the name \"%s\" was found, expected one of the following:\n", request_audio_input.name.c_str());
+ for(const std::string &app_name : app_audio_names) {
+ fprintf(stderr, " * %s\n", app_name.c_str());
+ }
+ fprintf(stderr, " assuming this is intentional (if you are trying to record audio for applications that haven't started yet).\n");
+ }
+ }
+}
+
// Manually check if the audio inputs we give exist. This is only needed for pipewire, not pulseaudio.
// Pipewire instead DEFAULTS TO THE DEFAULT AUDIO INPUT. THAT'S RETARDED.
// OH, YOU MISSPELLED THE AUDIO INPUT? FUCK YOU
-static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &audio_devices, const Arg &audio_input_arg, bool &uses_amix) {
+static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &audio_devices, const Arg &audio_input_arg, const Arg &app_audio_input_arg, const Arg &app_audio_input_inverted_arg, bool &uses_amix, const std::vector<std::string> &app_audio_names) {
std::vector<MergedAudioInputs> requested_audio_inputs;
uses_amix = false;
@@ -2284,7 +2429,7 @@ static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &aud
if(!audio_input || audio_input[0] == '\0')
continue;
- requested_audio_inputs.push_back({parse_audio_input_arg(audio_input, audio_devices)});
+ requested_audio_inputs.push_back({parse_audio_input_arg(audio_input, audio_devices), AudioInputType::DEVICE, false});
if(requested_audio_inputs.back().audio_inputs.size() > 1)
uses_amix = true;
@@ -2326,6 +2471,22 @@ static std::vector<MergedAudioInputs> parse_audio_inputs(const AudioDevices &aud
}
}
+ for(const char *app_audio_input : app_audio_input_arg.values) {
+ if(!app_audio_input || app_audio_input[0] == '\0')
+ continue;
+
+ requested_audio_inputs.push_back({parse_app_audio_input_arg(app_audio_input), AudioInputType::APPLICATION, false});
+ match_app_audio_input_to_available_apps(requested_audio_inputs.back().audio_inputs, app_audio_names);
+ }
+
+ for(const char *app_audio_input : app_audio_input_inverted_arg.values) {
+ if(!app_audio_input || app_audio_input[0] == '\0')
+ continue;
+
+ requested_audio_inputs.push_back({parse_app_audio_input_arg(app_audio_input), AudioInputType::APPLICATION, true});
+ match_app_audio_input_to_available_apps(requested_audio_inputs.back().audio_inputs, app_audio_names);
+ }
+
return requested_audio_inputs;
}
@@ -2614,6 +2775,77 @@ static const AVCodec* select_video_codec_with_fallback(VideoCodec *video_codec,
return pick_video_codec(video_codec, egl, use_software_video_encoder, video_codec_auto, video_codec_to_use, is_flv, low_power);
}
+static std::vector<AudioDevice> create_device_audio_inputs(const std::vector<AudioInput> &audio_inputs, AVCodecContext *audio_codec_context, int num_channels, double num_audio_frames_shift, std::vector<AVFilterContext*> &src_filter_ctx, bool use_amix) {
+ std::vector<AudioDevice> audio_track_audio_devices;
+ for(size_t i = 0; i < audio_inputs.size(); ++i) {
+ const auto &audio_input = audio_inputs[i];
+ AVFilterContext *src_ctx = nullptr;
+ if(use_amix)
+ src_ctx = src_filter_ctx[i];
+
+ AudioDevice audio_device;
+ audio_device.audio_input = audio_input;
+ audio_device.src_filter_ctx = src_ctx;
+
+ if(audio_input.name.empty()) {
+ audio_device.sound_device.handle = NULL;
+ audio_device.sound_device.frames = 0;
+ } else {
+ if(sound_device_get_by_name(&audio_device.sound_device, audio_input.name.c_str(), audio_input.description.c_str(), num_channels, audio_codec_context->frame_size, audio_codec_context_get_audio_format(audio_codec_context)) != 0) {
+ fprintf(stderr, "Error: failed to get \"%s\" audio device\n", audio_input.name.c_str());
+ _exit(1);
+ }
+ }
+
+ audio_device.frame = create_audio_frame(audio_codec_context);
+ audio_device.frame->pts = -audio_codec_context->frame_size * num_audio_frames_shift;
+
+ audio_track_audio_devices.push_back(std::move(audio_device));
+ }
+ return audio_track_audio_devices;
+}
+
+#ifdef GSR_APP_AUDIO
+static AudioDevice create_application_audio_audio_input(const MergedAudioInputs &merged_audio_inputs, AVCodecContext *audio_codec_context, int num_channels, double num_audio_frames_shift, gsr_pipewire_audio *pipewire_audio) {
+ AudioDevice audio_device;
+ audio_device.frame = create_audio_frame(audio_codec_context);
+ audio_device.frame->pts = -audio_codec_context->frame_size * num_audio_frames_shift;
+
+ char random_str[8];
+ if(!generate_random_characters_standard_alphabet(random_str, sizeof(random_str))) {
+ fprintf(stderr, "gsr error: failed to generate random string\n");
+ _exit(1);
+ }
+ audio_device.combined_sink_name = "gsr-combined-";
+ audio_device.combined_sink_name.append(random_str, sizeof(random_str));
+
+ if(sound_device_create_combined_sink_connect(&audio_device.sound_device, audio_device.combined_sink_name.c_str(), num_channels, audio_codec_context->frame_size, audio_codec_context_get_audio_format(audio_codec_context)) != 0) {
+ fprintf(stderr, "Error: failed to setup audio recording to combined sink\n");
+ _exit(1);
+ }
+
+ std::vector<const char*> app_names;
+ app_names.reserve(merged_audio_inputs.audio_inputs.size());
+ for(const auto &audio_input : merged_audio_inputs.audio_inputs) {
+ app_names.push_back(audio_input.name.c_str());
+ }
+
+ if(merged_audio_inputs.inverted) {
+ if(!gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(pipewire_audio, app_names.data(), app_names.size(), audio_device.combined_sink_name.c_str())) {
+ fprintf(stderr, "gsr error: failed to add application audio link\n");
+ _exit(1);
+ }
+ } else {
+ if(!gsr_pipewire_audio_add_link_from_apps_to_sink(pipewire_audio, app_names.data(), app_names.size(), audio_device.combined_sink_name.c_str())) {
+ fprintf(stderr, "gsr error: failed to add application audio link\n");
+ _exit(1);
+ }
+ }
+
+ return audio_device;
+}
+#endif
+
int main(int argc, char **argv) {
setlocale(LC_ALL, "C"); // Sigh... stupid C
@@ -2658,6 +2890,16 @@ int main(int argc, char **argv) {
_exit(0);
}
+ if(argc == 2 && strcmp(argv[1], "--list-application-audio") == 0) {
+ list_application_audio_command();
+ _exit(0);
+ }
+
+ if(argc == 2 && strcmp(argv[1], "--list-capture-options") == 0) {
+ list_capture_options_command();
+ _exit(0);
+ }
+
if(argc == 2 && strcmp(argv[1], "--version") == 0) {
puts(GSR_VERSION);
_exit(0);
@@ -2671,6 +2913,10 @@ int main(int argc, char **argv) {
{ "-f", Arg { {}, false, false } },
{ "-s", Arg { {}, true, false } },
{ "-a", Arg { {}, true, true } },
+#ifdef GSR_APP_AUDIO
+ { "-aa", Arg { {}, true, true } },
+ { "-aai", Arg { {}, true, true } },
+#endif
{ "-q", Arg { {}, true, false } },
{ "-o", Arg { {}, true, false } },
{ "-r", Arg { {}, true, false } },
@@ -2682,7 +2928,6 @@ int main(int argc, char **argv) {
{ "-bm", Arg { {}, true, false } },
{ "-pixfmt", Arg { {}, true, false } },
{ "-v", Arg { {}, true, false } },
- { "-mf", Arg { {}, true, false } }, // TODO: Remove, this exists for backwards compatibility. -df should be used instead
{ "-df", Arg { {}, true, false } },
{ "-sc", Arg { {}, true, false } },
{ "-cr", Arg { {}, true, false } },
@@ -2866,11 +3111,6 @@ int main(int argc, char **argv) {
bool date_folders = false;
const char *date_folders_str = args["-df"].value();
- if(!date_folders_str) {
- date_folders_str = args["-mf"].value();
- if(date_folders_str)
- fprintf(stderr, "Warning: -mf is deprecated, use -df instead\n");
- }
if(!date_folders_str)
date_folders_str = "no";
@@ -2935,12 +3175,37 @@ int main(int argc, char **argv) {
}
const Arg &audio_input_arg = args["-a"];
+ const Arg &app_audio_input_arg = args["-aa"];
+ const Arg &app_audio_input_inverted_arg = args["-aai"];
+
AudioDevices audio_devices;
if(!audio_input_arg.values.empty())
audio_devices = get_pulseaudio_inputs();
+ bool uses_app_audio = false;
+ if(!app_audio_input_arg.values.empty() || !app_audio_input_inverted_arg.values.empty())
+ uses_app_audio = true;
+
+ std::vector<std::string> app_audio_names;
+#ifdef GSR_APP_AUDIO
+ gsr_pipewire_audio pipewire_audio;
+ memset(&pipewire_audio, 0, sizeof(pipewire_audio));
+ if(uses_app_audio) {
+ if(!gsr_pipewire_audio_init(&pipewire_audio)) {
+ fprintf(stderr, "gsr error: failed to setup PipeWire audio for application audio capture. The likely reason for this failure is that your sound server is not PipeWire\n");
+ _exit(2);
+ }
+
+ gsr_pipewire_audio_for_each_app(&pipewire_audio, [](const char *app_name, void *userdata) {
+ std::vector<std::string> *app_audio_names = (std::vector<std::string>*)userdata;
+ app_audio_names->push_back(app_name);
+ return true;
+ }, &app_audio_names);
+ }
+#endif
+
bool uses_amix = false;
- std::vector<MergedAudioInputs> requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg, uses_amix);
+ std::vector<MergedAudioInputs> requested_audio_inputs = parse_audio_inputs(audio_devices, audio_input_arg, app_audio_input_arg, app_audio_input_inverted_arg, uses_amix, app_audio_names);
const char *container_format = args["-c"].value();
if(container_format && strcmp(container_format, "mkv") == 0)
@@ -3248,7 +3513,7 @@ int main(int argc, char **argv) {
const double target_fps = 1.0 / (double)fps;
if(video_codec_is_hdr(video_codec) && is_portal_capture) {
- fprintf(stderr, "Warning: portal capture option doesn't support hdr yet (pipewire doesn't support hdr), the video will be tonemapped from hdr to sdr\n");
+ fprintf(stderr, "Warning: portal capture option doesn't support hdr yet (PipeWire doesn't support hdr), the video will be tonemapped from hdr to sdr\n");
video_codec = hdr_video_codec_to_sdr_video_codec(video_codec);
}
@@ -3318,7 +3583,6 @@ int main(int argc, char **argv) {
memset(&color_conversion_params, 0, sizeof(color_conversion_params));
color_conversion_params.color_range = color_range;
color_conversion_params.egl = &egl;
- color_conversion_params.source_color = gsr_capture_get_source_color(capture);
color_conversion_params.load_external_image_shader = gsr_capture_uses_external_image(capture);
gsr_video_encoder_get_textures(video_encoder, color_conversion_params.destination_textures, &color_conversion_params.num_destination_textures, &color_conversion_params.destination_color);
@@ -3363,7 +3627,7 @@ int main(int argc, char **argv) {
std::vector<AVFilterContext*> src_filter_ctx;
AVFilterGraph *graph = nullptr;
AVFilterContext *sink = nullptr;
- if(use_amix) {
+ if(use_amix && merged_audio_inputs.type == AudioInputType::DEVICE) {
int err = init_filter_graph(audio_codec_context, &graph, &sink, src_filter_ctx, merged_audio_inputs.audio_inputs.size());
if(err < 0) {
fprintf(stderr, "Error: failed to create audio filter\n");
@@ -3380,30 +3644,15 @@ int main(int argc, char **argv) {
const double num_audio_frames_shift = audio_startup_time_seconds / timeout_sec;
std::vector<AudioDevice> audio_track_audio_devices;
- for(size_t i = 0; i < merged_audio_inputs.audio_inputs.size(); ++i) {
- auto &audio_input = merged_audio_inputs.audio_inputs[i];
- AVFilterContext *src_ctx = nullptr;
- if(use_amix)
- src_ctx = src_filter_ctx[i];
-
- AudioDevice audio_device;
- audio_device.audio_input = audio_input;
- audio_device.src_filter_ctx = src_ctx;
-
- if(audio_input.name.empty()) {
- audio_device.sound_device.handle = NULL;
- audio_device.sound_device.frames = 0;
- } else {
- if(sound_device_get_by_name(&audio_device.sound_device, audio_input.name.c_str(), audio_input.description.c_str(), num_channels, audio_codec_context->frame_size, audio_codec_context_get_audio_format(audio_codec_context)) != 0) {
- fprintf(stderr, "Error: failed to get \"%s\" sound device\n", audio_input.name.c_str());
- _exit(1);
- }
- }
-
- audio_device.frame = create_audio_frame(audio_codec_context);
- audio_device.frame->pts = -audio_codec_context->frame_size * num_audio_frames_shift;
-
- audio_track_audio_devices.push_back(std::move(audio_device));
+ switch(merged_audio_inputs.type) {
+ case AudioInputType::DEVICE:
+ audio_track_audio_devices = create_device_audio_inputs(merged_audio_inputs.audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, src_filter_ctx, use_amix);
+ break;
+ case AudioInputType::APPLICATION:
+#ifdef GSR_APP_AUDIO
+ audio_track_audio_devices.push_back(create_application_audio_audio_input(merged_audio_inputs, audio_codec_context, num_channels, num_audio_frames_shift, &pipewire_audio));
+#endif
+ break;
}
AudioTrack audio_track;
@@ -3884,6 +4133,9 @@ int main(int argc, char **argv) {
gsr_color_conversion_deinit(&color_conversion);
gsr_video_encoder_destroy(video_encoder, video_codec_context);
gsr_capture_destroy(capture, video_codec_context);
+#ifdef GSR_APP_AUDIO
+ gsr_pipewire_audio_deinit(&pipewire_audio);
+#endif
if(replay_buffer_size_secs == -1 && recording_saved_script)
run_recording_saved_script_async(recording_saved_script, filename, "regular");
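The brace-initializers used in parse_audio_inputs ({..., AudioInputType::DEVICE, false}) and the merged_audio_inputs.type / merged_audio_inputs.inverted accesses above imply that MergedAudioInputs gained two fields; the definitions themselves are outside the hunks shown. A rough sketch of the shape assumed by this diff (the default member initializers are an assumption):

    enum class AudioInputType {
        DEVICE,      // -a: a PulseAudio/PipeWire device
        APPLICATION  // -aa / -aai: audio of specific applications
    };

    struct MergedAudioInputs {
        std::vector<AudioInput> audio_inputs;          // one entry per "|"-separated name
        AudioInputType type = AudioInputType::DEVICE;
        bool inverted = false;                         // true for -aai (record everything except the named apps)
    };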
diff --git a/src/pipewire_audio.c b/src/pipewire_audio.c
index 2c18432..122895a 100644
--- a/src/pipewire_audio.c
+++ b/src/pipewire_audio.c
@@ -1 +1,405 @@
-#include "../include/pipewire_audio.h"
\ No newline at end of file
+#include "../include/pipewire_audio.h"
+
+#include <pipewire/pipewire.h>
+
+static void on_core_info_cb(void *user_data, const struct pw_core_info *info) {
+ gsr_pipewire_audio *self = user_data;
+ //fprintf(stderr, "server name: %s\n", info->name);
+}
+
+static void on_core_error_cb(void *user_data, uint32_t id, int seq, int res, const char *message) {
+ gsr_pipewire_audio *self = user_data;
+ //fprintf(stderr, "gsr error: pipewire: error id:%u seq:%d res:%d: %s\n", id, seq, res, message);
+ pw_thread_loop_signal(self->thread_loop, false);
+}
+
+static void on_core_done_cb(void *user_data, uint32_t id, int seq) {
+ gsr_pipewire_audio *self = user_data;
+ if(id == PW_ID_CORE && self->server_version_sync == seq)
+ pw_thread_loop_signal(self->thread_loop, false);
+}
+
+static const struct pw_core_events core_events = {
+ PW_VERSION_CORE_EVENTS,
+ .info = on_core_info_cb,
+ .done = on_core_done_cb,
+ .error = on_core_error_cb,
+};
+
+static gsr_pipewire_audio_node* gsr_pipewire_audio_get_node_by_name_case_insensitive(gsr_pipewire_audio *self, const char *node_name, gsr_pipewire_audio_node_type node_type) {
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ const gsr_pipewire_audio_node *node = &self->stream_nodes[i];
+ if(node->type == node_type && strcasecmp(node->name, node_name) == 0)
+ return &self->stream_nodes[i];
+ }
+ return NULL;
+}
+
+static gsr_pipewire_audio_port* gsr_pipewire_audio_get_node_port_by_name(gsr_pipewire_audio *self, uint32_t node_id, const char *port_name) {
+ for(int i = 0; i < self->num_ports; ++i) {
+ if(self->ports[i].node_id == node_id && strcmp(self->ports[i].name, port_name) == 0)
+ return &self->ports[i];
+ }
+ return NULL;
+}
+
+static bool requested_link_matches_name_case_insensitive(const gsr_pipewire_audio_requested_link *requested_link, const char *name) {
+ for(int i = 0; i < requested_link->num_app_names; ++i) {
+ if(strcasecmp(requested_link->app_names[i], name) == 0)
+ return true;
+ }
+ return false;
+}
+
+static void gsr_pipewire_audio_create_link(gsr_pipewire_audio *self, const gsr_pipewire_audio_requested_link *requested_link) {
+ const gsr_pipewire_audio_node_type requested_link_node_type = requested_link->output_type == GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM ? GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT : GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK;
+ const gsr_pipewire_audio_node *stream_input_node = gsr_pipewire_audio_get_node_by_name_case_insensitive(self, requested_link->output_name, requested_link_node_type);
+ if(!stream_input_node)
+ return;
+
+ const gsr_pipewire_audio_port *stream_input_fl_port = NULL;
+ const gsr_pipewire_audio_port *stream_input_fr_port = NULL;
+
+ switch(requested_link->output_type) {
+ case GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM: {
+ stream_input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "input_FL");
+ stream_input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "input_FR");
+ break;
+ }
+ case GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK: {
+ stream_input_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "playback_FL");
+ stream_input_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, stream_input_node->id, "playback_FR");
+ break;
+ }
+ }
+
+ if(!stream_input_fl_port || !stream_input_fr_port)
+ return;
+
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ const gsr_pipewire_audio_node *app_node = &self->stream_nodes[i];
+ if(app_node->type != GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT)
+ continue;
+
+ const bool requested_link_matches_app = requested_link_matches_name_case_insensitive(requested_link, app_node->name);
+ if(requested_link->inverted) {
+ if(requested_link_matches_app)
+ continue;
+ } else {
+ if(!requested_link_matches_app)
+ continue;
+ }
+
+ const gsr_pipewire_audio_port *app_output_fl_port = gsr_pipewire_audio_get_node_port_by_name(self, app_node->id, "output_FL");
+ const gsr_pipewire_audio_port *app_output_fr_port = gsr_pipewire_audio_get_node_port_by_name(self, app_node->id, "output_FR");
+ if(!app_output_fl_port || !app_output_fr_port)
+ continue;
+
+ // TODO: Detect if link already exists before so we dont create these proxies when not needed
+
+ //fprintf(stderr, "linking!\n");
+ // TODO: error check and cleanup
+ {
+ struct pw_properties *props = pw_properties_new(NULL, NULL);
+ pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%u", app_output_fl_port->id);
+ pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%u", stream_input_fl_port->id);
+ // TODO: Clean this up when removing node
+ struct pw_proxy *proxy = pw_core_create_object(self->core, "link-factory", PW_TYPE_INTERFACE_Link, PW_VERSION_LINK, &props->dict, 0);
+ //self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
+ pw_properties_free(props);
+ }
+
+ {
+ struct pw_properties *props = pw_properties_new(NULL, NULL);
+ pw_properties_setf(props, PW_KEY_LINK_OUTPUT_PORT, "%u", app_output_fr_port->id);
+ pw_properties_setf(props, PW_KEY_LINK_INPUT_PORT, "%u", stream_input_fr_port->id);
+ // TODO: Clean this up when removing node
+ struct pw_proxy *proxy = pw_core_create_object(self->core, "link-factory", PW_TYPE_INTERFACE_Link, PW_VERSION_LINK, &props->dict, 0);
+ //self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, self->server_version_sync);
+ pw_properties_free(props);
+ }
+ }
+}
+
+static void gsr_pipewire_audio_create_links(gsr_pipewire_audio *self) {
+ for(int j = 0; j < self->num_requested_links; ++j) {
+ gsr_pipewire_audio_create_link(self, &self->requested_links[j]);
+ }
+}
+
+static void registry_event_global(void *data, uint32_t id, uint32_t permissions,
+ const char *type, uint32_t version,
+ const struct spa_dict *props)
+{
+ //fprintf(stderr, "add: id: %d, type: %s\n", (int)id, type);
+ if (props == NULL)
+ return;
+
+ //pw_properties_new_dict(props);
+
+ gsr_pipewire_audio *self = (gsr_pipewire_audio*)data;
+ if(strcmp(type, PW_TYPE_INTERFACE_Node) == 0) {
+ const char *node_name = spa_dict_lookup(props, PW_KEY_NODE_NAME);
+ const char *media_class = spa_dict_lookup(props, PW_KEY_MEDIA_CLASS);
+ //fprintf(stderr, " node name: %s, media class: %s\n", node_name, media_class);
+ const bool is_stream_output = media_class && strcmp(media_class, "Stream/Output/Audio") == 0;
+ const bool is_stream_input = media_class && strcmp(media_class, "Stream/Input/Audio") == 0;
+ const bool is_sink = media_class && strcmp(media_class, "Audio/Sink") == 0;
+ if(self->num_stream_nodes < GSR_PIPEWIRE_AUDIO_MAX_STREAM_NODES && node_name && (is_stream_output || is_stream_input || is_sink)) {
+ //const char *application_binary = spa_dict_lookup(props, PW_KEY_APP_PROCESS_BINARY);
+ //const char *application_name = spa_dict_lookup(props, PW_KEY_APP_NAME);
+ //fprintf(stderr, " node name: %s, app binary: %s, app name: %s\n", node_name, application_binary, application_name);
+
+ char *node_name_copy = strdup(node_name);
+ if(node_name_copy) {
+ self->stream_nodes[self->num_stream_nodes].id = id;
+ self->stream_nodes[self->num_stream_nodes].name = node_name_copy;
+ if(is_stream_output)
+ self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT;
+ else if(is_stream_input)
+ self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_INPUT;
+ else if(is_sink)
+ self->stream_nodes[self->num_stream_nodes].type = GSR_PIPEWIRE_AUDIO_NODE_TYPE_SINK;
+ ++self->num_stream_nodes;
+
+ gsr_pipewire_audio_create_links(self);
+ }
+ }
+ } else if(strcmp(type, PW_TYPE_INTERFACE_Port) == 0) {
+ const char *port_name = spa_dict_lookup(props, PW_KEY_PORT_NAME);
+
+ const char *port_direction = spa_dict_lookup(props, PW_KEY_PORT_DIRECTION);
+ gsr_pipewire_audio_port_direction direction = -1;
+ if(port_direction && strcmp(port_direction, "in") == 0)
+ direction = GSR_PIPEWIRE_AUDIO_PORT_DIRECTION_INPUT;
+ else if(port_direction && strcmp(port_direction, "out") == 0)
+ direction = GSR_PIPEWIRE_AUDIO_PORT_DIRECTION_OUTPUT;
+
+ const char *node_id = spa_dict_lookup(props, PW_KEY_NODE_ID);
+ const int node_id_num = node_id ? atoi(node_id) : 0;
+
+ if(self->num_ports < GSR_PIPEWIRE_AUDIO_MAX_PORTS && port_name && direction >= 0 && node_id_num > 0) {
+ //fprintf(stderr, " port name: %s, node id: %d, direction: %s\n", port_name, node_id_num, port_direction);
+ char *port_name_copy = strdup(port_name);
+ if(port_name_copy) {
+ self->ports[self->num_ports].id = id;
+ self->ports[self->num_ports].node_id = node_id_num;
+ self->ports[self->num_ports].direction = direction;
+ self->ports[self->num_ports].name = port_name_copy;
+ ++self->num_ports;
+
+ gsr_pipewire_audio_create_links(self);
+ }
+ }
+ }
+}
+
+static bool gsr_pipewire_audio_remove_node_by_id(gsr_pipewire_audio *self, uint32_t node_id) {
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ if(self->stream_nodes[i].id != node_id)
+ continue;
+
+ free(self->stream_nodes[i].name);
+ for(int j = i + 1; j < self->num_stream_nodes; ++j) {
+ self->stream_nodes[j - 1] = self->stream_nodes[j];
+ }
+ --self->num_stream_nodes;
+ return true;
+ }
+ return false;
+}
+
+static bool gsr_pipewire_audio_remove_port_by_id(gsr_pipewire_audio *self, uint32_t port_id) {
+ for(int i = 0; i < self->num_ports; ++i) {
+ if(self->ports[i].id != port_id)
+ continue;
+
+ free(self->ports[i].name);
+ for(int j = i + 1; j < self->num_ports; ++j) {
+ self->ports[j - 1] = self->ports[j];
+ }
+ --self->num_ports;
+ return true;
+ }
+ return false;
+}
+
+static void registry_event_global_remove(void *data, uint32_t id) {
+ //fprintf(stderr, "remove: %d\n", (int)id);
+ gsr_pipewire_audio *self = (gsr_pipewire_audio*)data;
+ if(gsr_pipewire_audio_remove_node_by_id(self, id)) {
+ //fprintf(stderr, "removed node\n");
+ return;
+ }
+
+ if(gsr_pipewire_audio_remove_port_by_id(self, id)) {
+ //fprintf(stderr, "removed port\n");
+ return;
+ }
+}
+
+static const struct pw_registry_events registry_events = {
+ PW_VERSION_REGISTRY_EVENTS,
+ .global = registry_event_global,
+ .global_remove = registry_event_global_remove,
+};
+
+bool gsr_pipewire_audio_init(gsr_pipewire_audio *self) {
+ memset(self, 0, sizeof(*self));
+
+ pw_init(NULL, NULL);
+
+ self->thread_loop = pw_thread_loop_new("gsr screen capture", NULL);
+ if(!self->thread_loop) {
+ fprintf(stderr, "gsr error: gsr_pipewire_audio_init: failed to create pipewire thread\n");
+ gsr_pipewire_audio_deinit(self);
+ return false;
+ }
+
+ self->context = pw_context_new(pw_thread_loop_get_loop(self->thread_loop), NULL, 0);
+ if(!self->context) {
+ fprintf(stderr, "gsr error: gsr_pipewire_audio_init: failed to create pipewire context\n");
+ gsr_pipewire_audio_deinit(self);
+ return false;
+ }
+
+ if(pw_thread_loop_start(self->thread_loop) < 0) {
+ fprintf(stderr, "gsr error: gsr_pipewire_audio_init: failed to start thread\n");
+ gsr_pipewire_audio_deinit(self);
+ return false;
+ }
+
+ pw_thread_loop_lock(self->thread_loop);
+
+ self->core = pw_context_connect(self->context, pw_properties_new(PW_KEY_REMOTE_NAME, NULL, NULL), 0);
+ if(!self->core) {
+ pw_thread_loop_unlock(self->thread_loop);
+ gsr_pipewire_audio_deinit(self);
+ return false;
+ }
+
+ // TODO: Error check
+ pw_core_add_listener(self->core, &self->core_listener, &core_events, self);
+
+ self->registry = pw_core_get_registry(self->core, PW_VERSION_REGISTRY, 0);
+ pw_registry_add_listener(self->registry, &self->registry_listener, &registry_events, self);
+
+ self->server_version_sync = pw_core_sync(self->core, PW_ID_CORE, 0);
+ pw_thread_loop_wait(self->thread_loop);
+
+ pw_thread_loop_unlock(self->thread_loop);
+ return true;
+}
+
+void gsr_pipewire_audio_deinit(gsr_pipewire_audio *self) {
+ if(self->thread_loop) {
+ //pw_thread_loop_wait(self->thread_loop);
+ pw_thread_loop_stop(self->thread_loop);
+ }
+
+ if(self->core) {
+ pw_core_disconnect(self->core);
+ self->core = NULL;
+ }
+
+ if(self->context) {
+ pw_context_destroy(self->context);
+ self->context = NULL;
+ }
+
+ if(self->thread_loop) {
+ pw_thread_loop_destroy(self->thread_loop);
+ self->thread_loop = NULL;
+ }
+
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ free(self->stream_nodes[i].name);
+ }
+ self->num_stream_nodes = 0;
+
+ for(int i = 0; i < self->num_ports; ++i) {
+ free(self->ports[i].name);
+ }
+ self->num_ports = 0;
+
+ for(int i = 0; i < self->num_requested_links; ++i) {
+ for(int j = 0; j < self->requested_links[i].num_app_names; ++j) {
+ free(self->requested_links[i].app_names[j]);
+ }
+ free(self->requested_links[i].app_names);
+ free(self->requested_links[i].output_name);
+ }
+ self->num_requested_links = 0;
+
+#if PW_CHECK_VERSION(0, 3, 49)
+ pw_deinit();
+#endif
+}
+
+static bool gsr_pipewire_audio_add_link_from_apps_to_output(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *output_name, gsr_pipewire_audio_link_output_type output_type, bool inverted) {
+ if(self->num_requested_links >= GSR_PIPEWIRE_AUDIO_MAX_REQUESTED_LINKS)
+ return false;
+
+ char **app_names_output_copy = calloc(num_app_names_output, sizeof(char*));
+ if(!app_names_output_copy)
+ return false;
+
+ char *output_name_copy = strdup(output_name);
+ if(!output_name_copy)
+ goto error;
+
+ for(int i = 0; i < num_app_names_output; ++i) {
+ app_names_output_copy[i] = strdup(app_names_output[i]);
+ if(!app_names_output_copy[i])
+ goto error;
+ }
+
+ pw_thread_loop_lock(self->thread_loop);
+ self->requested_links[self->num_requested_links].app_names = app_names_output_copy;
+ self->requested_links[self->num_requested_links].num_app_names = num_app_names_output;
+ self->requested_links[self->num_requested_links].output_name = output_name_copy;
+ self->requested_links[self->num_requested_links].output_type = output_type;
+ self->requested_links[self->num_requested_links].inverted = inverted;
+ ++self->num_requested_links;
+ gsr_pipewire_audio_create_link(self, &self->requested_links[self->num_requested_links - 1]);
+ pw_thread_loop_unlock(self->thread_loop);
+
+ return true;
+
+ error:
+ free(output_name_copy);
+ for(int i = 0; i < num_app_names_output; ++i) {
+ free(app_names_output_copy[i]);
+ }
+ free(app_names_output_copy);
+ return false;
+}
+
+bool gsr_pipewire_audio_add_link_from_apps_to_stream(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, stream_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM, false);
+}
+
+bool gsr_pipewire_audio_add_link_from_apps_to_stream_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *stream_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, stream_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_STREAM, true);
+}
+
+bool gsr_pipewire_audio_add_link_from_apps_to_sink(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, sink_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK, false);
+}
+
+bool gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(gsr_pipewire_audio *self, const char **app_names_output, int num_app_names_output, const char *sink_name_input) {
+ return gsr_pipewire_audio_add_link_from_apps_to_output(self, app_names_output, num_app_names_output, sink_name_input, GSR_PIPEWIRE_AUDIO_LINK_OUTPUT_TYPE_SINK, true);
+}
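+// Hypothetical usage sketch (the sink name and app list below are made-up examples; the
+// "inverted" variants are assumed to link every application except the listed ones):
+//   const char *apps[] = { "firefox" };
+//   gsr_pipewire_audio_add_link_from_apps_to_sink_inverted(&audio, apps, 1, "gsr-app-sink");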
+
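+// Runs the callback for every known application (stream output) node while holding the
+// thread loop lock. The callback returns false to stop iterating early.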
+void gsr_pipewire_audio_for_each_app(gsr_pipewire_audio *self, gsr_pipewire_audio_app_query_callback callback, void *userdata) {
+ pw_thread_loop_lock(self->thread_loop);
+ for(int i = 0; i < self->num_stream_nodes; ++i) {
+ const gsr_pipewire_audio_node *node = &self->stream_nodes[i];
+ if(node->type != GSR_PIPEWIRE_AUDIO_NODE_TYPE_STREAM_OUTPUT)
+ continue;
+
+ if(!callback(node->name, userdata))
+ break;
+ }
+ pw_thread_loop_unlock(self->thread_loop);
+}
diff --git a/src/pipewire_video.c b/src/pipewire_video.c
index b5f1562..3c6965e 100644
--- a/src/pipewire_video.c
+++ b/src/pipewire_video.c
@@ -446,7 +446,7 @@ static bool gsr_pipewire_video_setup_stream(gsr_pipewire_video *self) {
uint8_t params_buffer[2048];
struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
- self->thread_loop = pw_thread_loop_new("PipeWire thread loop", NULL);
+ self->thread_loop = pw_thread_loop_new("gsr screen capture", NULL);
if(!self->thread_loop) {
fprintf(stderr, "gsr error: gsr_pipewire_video_setup_stream: failed to create pipewire thread\n");
goto error;
diff --git a/src/sound.cpp b/src/sound.cpp
index aea5b91..bd26d89 100644
--- a/src/sound.cpp
+++ b/src/sound.cpp
@@ -42,34 +42,79 @@ struct pa_handle {
int operation_success;
double latency_seconds;
+
+ uint32_t combined_sink_module_index;
};
-static void pa_sound_device_free(pa_handle *s) {
- assert(s);
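+// Unloads the sink module created by create_null_sink(), iterating the mainloop until the
+// unload operation completes.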
+static void destroy_combined_sink(pa_handle *p) {
+ // TODO: error handling
+ pa_operation *module_pa = pa_context_unload_module(p->context, p->combined_sink_module_index, NULL, NULL);
+ for(;;) {
+ if(pa_operation_get_state(module_pa) == PA_OPERATION_DONE) {
+ pa_operation_unref(module_pa);
+ break;
+ }
+ pa_mainloop_iterate(p->mainloop, 1, NULL);
+ }
+}
- if (s->stream)
- pa_stream_unref(s->stream);
+static void pa_sound_device_free(pa_handle *p) {
+ assert(p);
- if (s->context) {
- pa_context_disconnect(s->context);
- pa_context_unref(s->context);
+ if(p->combined_sink_module_index != PA_INVALID_INDEX) {
+ destroy_combined_sink(p);
+ p->combined_sink_module_index = PA_INVALID_INDEX;
}
- if (s->mainloop)
- pa_mainloop_free(s->mainloop);
+ if (p->stream) {
+ pa_stream_unref(p->stream);
+ p->stream = NULL;
+ }
- if (s->output_data) {
- free(s->output_data);
- s->output_data = NULL;
+ if (p->context) {
+ pa_context_disconnect(p->context);
+ pa_context_unref(p->context);
+ p->context = NULL;
}
- pa_xfree(s);
+ if (p->mainloop) {
+ pa_mainloop_free(p->mainloop);
+ p->mainloop = NULL;
+ }
+
+ if (p->output_data) {
+ free(p->output_data);
+ p->output_data = NULL;
+ }
+
+ pa_xfree(p);
+}
+
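+// pa_context_load_module() callback: records the loaded module index, or PA_INVALID_INDEX
+// if loading the module failed.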
+static void module_index_callback(pa_context*, uint32_t idx, void *userdata) {
+ pa_handle *p = (pa_handle*)userdata;
+ p->combined_sink_module_index = idx;
+}
+
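+// Loads the sink module backing the combined sink and iterates the mainloop until PulseAudio
+// reports the module index. Returns false if the module could not be loaded.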
+static bool create_null_sink(pa_handle *p, const char *null_sink_name) {
+ // TODO: Error handling
+ char module_argument[256];
+ snprintf(module_argument, sizeof(module_argument), "sink_name=\"%s\" slaves= adjust_time=0", null_sink_name);
+ pa_operation *module_pa = pa_context_load_module(p->context, "module-null-sink", module_argument, module_index_callback, p);
+ for(;;) {
+ if(pa_operation_get_state(module_pa) == PA_OPERATION_DONE) {
+ pa_operation_unref(module_pa);
+ break;
+ }
+ pa_mainloop_iterate(p->mainloop, 1, NULL);
+ }
+ return p->combined_sink_module_index != PA_INVALID_INDEX;
}
static pa_handle* pa_sound_device_new(const char *server,
const char *name,
const char *dev,
const char *stream_name,
+ const char *combined_sink_name,
const pa_sample_spec *ss,
const pa_buffer_attr *attr,
int *rerror) {
@@ -77,10 +122,7 @@ static pa_handle* pa_sound_device_new(const char *server,
int error = PA_ERR_INTERNAL, r;
p = pa_xnew0(pa_handle, 1);
- p->read_data = NULL;
- p->read_length = 0;
- p->read_index = 0;
- p->latency_seconds = 0.0;
+ p->combined_sink_module_index = PA_INVALID_INDEX;
const int buffer_size = attr->fragsize;
void *buffer = malloc(buffer_size);
@@ -119,12 +161,23 @@ static pa_handle* pa_sound_device_new(const char *server,
pa_mainloop_iterate(p->mainloop, 1, NULL);
}
+ char device_to_record[256];
+ if(combined_sink_name) {
+ if(!create_null_sink(p, combined_sink_name)) {
+ fprintf(stderr, "gsr error: pa_sound_device_new: failed to create module-combine-sink\n");
+ goto fail;
+ }
+ snprintf(device_to_record, sizeof(device_to_record), "%s.monitor", combined_sink_name);
+ } else {
+ snprintf(device_to_record, sizeof(device_to_record), "%s", dev);
+ }
+
if (!(p->stream = pa_stream_new(p->context, stream_name, ss, NULL))) {
error = pa_context_errno(p->context);
goto fail;
}
- r = pa_stream_connect_record(p->stream, dev, attr,
+ r = pa_stream_connect_record(p->stream, device_to_record, attr,
(pa_stream_flags_t)(PA_STREAM_INTERPOLATE_TIMING|PA_STREAM_ADJUST_LATENCY|PA_STREAM_AUTO_TIMING_UPDATE));
if (r < 0) {
@@ -259,7 +312,7 @@ static int audio_format_to_get_bytes_per_sample(AudioFormat audio_format) {
return 2;
}
-int sound_device_get_by_name(SoundDevice *device, const char *device_name, const char *description, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format) {
+static int sound_device_setup_record(SoundDevice *device, const char *device_name, const char *description, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format, const char *combined_sink_name) {
pa_sample_spec ss;
ss.format = audio_format_to_pulse_audio_format(audio_format);
ss.rate = 48000;
@@ -273,7 +326,7 @@ int sound_device_get_by_name(SoundDevice *device, const char *device_name, const
buffer_attr.maxlength = buffer_attr.fragsize;
int error = 0;
- pa_handle *handle = pa_sound_device_new(nullptr, description, device_name, description, &ss, &buffer_attr, &error);
+ pa_handle *handle = pa_sound_device_new(nullptr, description, device_name, description, combined_sink_name, &ss, &buffer_attr, &error);
if(!handle) {
fprintf(stderr, "pa_sound_device_new() failed: %s. Audio input device %s might not be valid\n", pa_strerror(error), description);
return -1;
@@ -284,6 +337,14 @@ int sound_device_get_by_name(SoundDevice *device, const char *device_name, const
return 0;
}
+int sound_device_get_by_name(SoundDevice *device, const char *device_name, const char *description, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format) {
+ return sound_device_setup_record(device, device_name, description, num_channels, period_frame_size, audio_format, NULL);
+}
+
+int sound_device_create_combined_sink_connect(SoundDevice *device, const char *combined_sink_name, unsigned int num_channels, unsigned int period_frame_size, AudioFormat audio_format) {
+ return sound_device_setup_record(device, "gpu-screen-recorder", "gpu-screen-recorder", num_channels, period_frame_size, audio_format, combined_sink_name);
+}
+
void sound_device_close(SoundDevice *device) {
if(device->handle)
pa_sound_device_free((pa_handle*)device->handle);
@@ -322,8 +383,7 @@ static void pa_state_cb(pa_context *c, void *userdata) {
}
}
-static void pa_sourcelist_cb(pa_context *ctx, const pa_source_info *source_info, int eol, void *userdata) {
- (void)ctx;
+static void pa_sourcelist_cb(pa_context*, const pa_source_info *source_info, int eol, void *userdata) {
if(eol > 0)
return;
@@ -345,6 +405,8 @@ static void get_pulseaudio_default_inputs(AudioDevices &audio_devices) {
pa_operation *pa_op = NULL;
pa_mainloop *main_loop = pa_mainloop_new();
+ if(!main_loop)
+ return;
pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder");
if(pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL) < 0)
@@ -392,6 +454,8 @@ AudioDevices get_pulseaudio_inputs() {
get_pulseaudio_default_inputs(audio_devices);
pa_mainloop *main_loop = pa_mainloop_new();
+ if(!main_loop)
+ return audio_devices;
pa_context *ctx = pa_context_new(pa_mainloop_get_api(main_loop), "gpu-screen-recorder");
if(pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL) < 0)
diff --git a/src/utils.c b/src/utils.c
index a8e4dc2..e871126 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -7,6 +7,7 @@
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
+#include <sys/random.h>
#include <errno.h>
#include <assert.h>
@@ -27,6 +28,25 @@ double clock_get_monotonic_seconds(void) {
return (double)ts.tv_sec + (double)ts.tv_nsec * 0.000000001;
}
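+// Fills buffer with random bytes from getrandom() and maps each byte onto the given alphabet.
+// The modulo mapping is slightly biased unless 256 is a multiple of alphabet_size, which is
+// acceptable for generating identifiers but not for cryptographic keys.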
+bool generate_random_characters(char *buffer, int buffer_size, const char *alphabet, size_t alphabet_size) {
+ /* TODO: Use other functions on other platforms than linux */
+ if(getrandom(buffer, buffer_size, 0) < buffer_size) {
+ fprintf(stderr, "Failed to get random bytes, error: %s\n", strerror(errno));
+ return false;
+ }
+
+ for(int i = 0; i < buffer_size; ++i) {
+ unsigned char c = *(unsigned char*)&buffer[i];
+ buffer[i] = alphabet[c % alphabet_size];
+ }
+
+ return true;
+}
+
+bool generate_random_characters_standard_alphabet(char *buffer, int buffer_size) {
+ return generate_random_characters(buffer, buffer_size, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", 62);
+}
+
static gsr_monitor_rotation wayland_transform_to_gsr_rotation(int32_t rot) {
switch(rot) {
case 0: return GSR_MONITOR_ROT_0;