Diffstat (limited to 'src')
-rw-r--r--  src/capture/xcomposite.c    2
-rw-r--r--  src/main.cpp               63
-rw-r--r--  src/pipewire_video.c       69
3 files changed, 93 insertions, 41 deletions
diff --git a/src/capture/xcomposite.c b/src/capture/xcomposite.c
index d10807e..5cef71d 100644
--- a/src/capture/xcomposite.c
+++ b/src/capture/xcomposite.c
@@ -98,7 +98,7 @@ static int gsr_capture_xcomposite_start(gsr_capture *cap, AVCodecContext *video_
/* Disable vsync */
self->params.egl->eglSwapInterval(self->params.egl->egl_display, 0);
if(window_texture_init(&self->window_texture, self->display, self->window, self->params.egl) != 0 && !self->params.follow_focused) {
- fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed to get window texture for window %ld\n", self->window);
+ fprintf(stderr, "gsr error: gsr_capture_xcomposite_start: failed to get window texture for window %ld\n", (long)self->window);
return -1;
}
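Note on the cast above: Xlib's Window is an XID, typically an unsigned long, so passing it straight to a "%ld" conversion can trip -Wformat depending on the platform's typedefs; the patch casts to (long) to match the existing format string. A minimal sketch of the same idea (the helper name is illustrative, not project code):

#include <stdio.h>
#include <X11/Xlib.h>

/* Window is an XID (unsigned long in Xlib); cast explicitly to the integer
 * type named in the format string so the compiler's format check is happy.
 * The patch above achieves the same by casting to (long) for "%ld". */
static void log_window_id(Window window) {
    fprintf(stderr, "window id: %lu\n", (unsigned long)window);
}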
diff --git a/src/main.cpp b/src/main.cpp
index 983684b..4dade05 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1716,29 +1716,44 @@ static bool is_livestream_path(const char *str) {
return false;
}
-// TODO: Proper cleanup
-static int init_filter_graph(AVCodecContext *audio_codec_context, AVFilterGraph **graph, AVFilterContext **sink, std::vector<AVFilterContext*> &src_filter_ctx, size_t num_sources) {
+static int init_filter_graph(AVCodecContext* audio_codec_context, AVFilterGraph** graph, AVFilterContext** sink, std::vector<AVFilterContext*>& src_filter_ctx, size_t num_sources) {
char ch_layout[64];
int err = 0;
ch_layout[0] = '\0';
- AVFilterGraph *filter_graph = avfilter_graph_alloc();
+ // Declare all variables up front (C89 style) so the
+ // goto fail paths below don't jump over initializations
+ AVFilterGraph* filter_graph = nullptr;
+ AVFilterContext* mix_ctx = nullptr;
+
+ const AVFilter* mix_filter = nullptr;
+ const AVFilter* abuffersink = nullptr;
+ AVFilterContext* abuffersink_ctx = nullptr;
+ char args[512] = { 0 };
+#if LIBAVFILTER_VERSION_INT >= AV_VERSION_INT(7, 107, 100)
+ bool normalize = false;
+#endif
+
+ filter_graph = avfilter_graph_alloc();
if (!filter_graph) {
fprintf(stderr, "Unable to create filter graph.\n");
- return AVERROR(ENOMEM);
+ err = AVERROR(ENOMEM);
+ goto fail;
}
for(size_t i = 0; i < num_sources; ++i) {
const AVFilter *abuffer = avfilter_get_by_name("abuffer");
if (!abuffer) {
fprintf(stderr, "Could not find the abuffer filter.\n");
- return AVERROR_FILTER_NOT_FOUND;
+ err = AVERROR_FILTER_NOT_FOUND;
+ goto fail;
}
AVFilterContext *abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, NULL);
if (!abuffer_ctx) {
fprintf(stderr, "Could not allocate the abuffer instance.\n");
- return AVERROR(ENOMEM);
+ err = AVERROR(ENOMEM);
+ goto fail;
}
#if LIBAVCODEC_VERSION_MAJOR < 60
@@ -1755,51 +1770,50 @@ static int init_filter_graph(AVCodecContext *audio_codec_context, AVFilterGraph
err = avfilter_init_str(abuffer_ctx, NULL);
if (err < 0) {
fprintf(stderr, "Could not initialize the abuffer filter.\n");
- return err;
+ goto fail;
}
src_filter_ctx.push_back(abuffer_ctx);
}
- const AVFilter *mix_filter = avfilter_get_by_name("amix");
+ mix_filter = avfilter_get_by_name("amix");
if (!mix_filter) {
av_log(NULL, AV_LOG_ERROR, "Could not find the mix filter.\n");
- return AVERROR_FILTER_NOT_FOUND;
+ err = AVERROR_FILTER_NOT_FOUND;
+ goto fail;
}
#if LIBAVFILTER_VERSION_INT >= AV_VERSION_INT(7, 107, 100)
- bool normalize = false;
- char args[512];
snprintf(args, sizeof(args), "inputs=%d:normalize=%s", (int)num_sources, normalize ? "true" : "false");
#else
- char args[512];
snprintf(args, sizeof(args), "inputs=%d", (int)num_sources);
fprintf(stderr, "Warning: your ffmpeg version doesn't support disabling normalizing of mixed audio. Volume might be lower than expected\n");
#endif
- AVFilterContext *mix_ctx;
err = avfilter_graph_create_filter(&mix_ctx, mix_filter, "amix", args, NULL, filter_graph);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio amix filter\n");
- return err;
+ goto fail;
}
- const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
+ abuffersink = avfilter_get_by_name("abuffersink");
if (!abuffersink) {
fprintf(stderr, "Could not find the abuffersink filter.\n");
- return AVERROR_FILTER_NOT_FOUND;
+ err = AVERROR_FILTER_NOT_FOUND;
+ goto fail;
}
- AVFilterContext *abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
+ abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
if (!abuffersink_ctx) {
fprintf(stderr, "Could not allocate the abuffersink instance.\n");
- return AVERROR(ENOMEM);
+ err = AVERROR(ENOMEM);
+ goto fail;
}
err = avfilter_init_str(abuffersink_ctx, NULL);
if (err < 0) {
fprintf(stderr, "Could not initialize the abuffersink instance.\n");
- return err;
+ goto fail;
}
err = 0;
@@ -1812,19 +1826,24 @@ static int init_filter_graph(AVCodecContext *audio_codec_context, AVFilterGraph
err = avfilter_link(mix_ctx, 0, abuffersink_ctx, 0);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR, "Error connecting filters\n");
- return err;
+ goto fail;
}
err = avfilter_graph_config(filter_graph, NULL);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
- return err;
+ goto fail;
}
*graph = filter_graph;
- *sink = abuffersink_ctx;
+ *sink = abuffersink_ctx;
return 0;
+
+fail:
+ avfilter_graph_free(&filter_graph);
+ src_filter_ctx.clear(); // the abuffer contexts were owned by filter_graph and freed with it; drop the now-dangling pointers
+ return err;
}
static gsr_video_encoder* create_video_encoder(gsr_egl *egl, bool overclock, gsr_color_depth color_depth, bool use_software_video_encoder, VideoCodec video_codec) {
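The rewrite above funnels every error path through a single fail: label so a partially built graph is always released with avfilter_graph_free(), and it hoists all declarations above the first goto so the jumps never cross an initialization (which C++ rejects). A condensed sketch of the same pattern, using anullsrc/anullsink in place of the real abuffer/amix/abuffersink chain (names and structure are illustrative only, not the project's code):

#include <libavfilter/avfilter.h>
#include <libavutil/error.h>

static int build_graph_sketch(AVFilterGraph **out_graph) {
    int err = 0;
    AVFilterGraph *graph = NULL;       /* declared before any goto */
    AVFilterContext *src_ctx = NULL;
    AVFilterContext *sink_ctx = NULL;
    const AVFilter *src = NULL;
    const AVFilter *sink = NULL;

    graph = avfilter_graph_alloc();
    if (!graph) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    src = avfilter_get_by_name("anullsrc");
    sink = avfilter_get_by_name("anullsink");
    if (!src || !sink) {
        err = AVERROR_FILTER_NOT_FOUND;
        goto fail;
    }

    err = avfilter_graph_create_filter(&src_ctx, src, "src", NULL, NULL, graph);
    if (err < 0)
        goto fail;

    err = avfilter_graph_create_filter(&sink_ctx, sink, "sink", NULL, NULL, graph);
    if (err < 0)
        goto fail;

    err = avfilter_link(src_ctx, 0, sink_ctx, 0);
    if (err < 0)
        goto fail;

    err = avfilter_graph_config(graph, NULL);
    if (err < 0)
        goto fail;

    *out_graph = graph;
    return 0;

fail:
    avfilter_graph_free(&graph);       /* frees every filter context owned by the graph */
    return err;
}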
diff --git a/src/pipewire_video.c b/src/pipewire_video.c
index 3c6965e..7c76300 100644
--- a/src/pipewire_video.c
+++ b/src/pipewire_video.c
@@ -65,14 +65,20 @@ static void on_core_done_cb(void *user_data, uint32_t id, int seq) {
static bool is_cursor_format_supported(const enum spa_video_format format) {
switch(format) {
- case SPA_VIDEO_FORMAT_RGBx: return true;
- case SPA_VIDEO_FORMAT_BGRx: return true;
- case SPA_VIDEO_FORMAT_xRGB: return true;
- case SPA_VIDEO_FORMAT_xBGR: return true;
- case SPA_VIDEO_FORMAT_RGBA: return true;
- case SPA_VIDEO_FORMAT_BGRA: return true;
- case SPA_VIDEO_FORMAT_ARGB: return true;
- case SPA_VIDEO_FORMAT_ABGR: return true;
+ case SPA_VIDEO_FORMAT_RGBx: return true;
+ case SPA_VIDEO_FORMAT_BGRx: return true;
+ case SPA_VIDEO_FORMAT_RGBA: return true;
+ case SPA_VIDEO_FORMAT_BGRA: return true;
+ case SPA_VIDEO_FORMAT_RGB: return true;
+ case SPA_VIDEO_FORMAT_BGR: return true;
+ case SPA_VIDEO_FORMAT_ARGB: return true;
+ case SPA_VIDEO_FORMAT_ABGR: return true;
+#if PW_CHECK_VERSION(0, 3, 41)
+ case SPA_VIDEO_FORMAT_xRGB_210LE: return true;
+ case SPA_VIDEO_FORMAT_xBGR_210LE: return true;
+ case SPA_VIDEO_FORMAT_ARGB_210LE: return true;
+ case SPA_VIDEO_FORMAT_ABGR_210LE: return true;
+#endif
default: break;
}
return false;
@@ -338,24 +344,46 @@ static inline struct spa_pod *build_format(struct spa_pod_builder *b,
/* For some reason gstreamer formats are in opposite order to drm formats */
static int64_t spa_video_format_to_drm_format(const enum spa_video_format format) {
switch(format) {
- case SPA_VIDEO_FORMAT_RGBx: return DRM_FORMAT_XBGR8888;
- case SPA_VIDEO_FORMAT_BGRx: return DRM_FORMAT_XRGB8888;
- case SPA_VIDEO_FORMAT_RGBA: return DRM_FORMAT_ABGR8888;
- case SPA_VIDEO_FORMAT_BGRA: return DRM_FORMAT_ARGB8888;
- case SPA_VIDEO_FORMAT_RGB: return DRM_FORMAT_XBGR8888;
- case SPA_VIDEO_FORMAT_BGR: return DRM_FORMAT_XRGB8888;
+ case SPA_VIDEO_FORMAT_RGBx: return DRM_FORMAT_XBGR8888;
+ case SPA_VIDEO_FORMAT_BGRx: return DRM_FORMAT_XRGB8888;
+ case SPA_VIDEO_FORMAT_RGBA: return DRM_FORMAT_ABGR8888;
+ case SPA_VIDEO_FORMAT_BGRA: return DRM_FORMAT_ARGB8888;
+ case SPA_VIDEO_FORMAT_RGB: return DRM_FORMAT_XBGR8888;
+ case SPA_VIDEO_FORMAT_BGR: return DRM_FORMAT_XRGB8888;
+ case SPA_VIDEO_FORMAT_ARGB: return DRM_FORMAT_XRGB8888;
+ case SPA_VIDEO_FORMAT_ABGR: return DRM_FORMAT_XRGB8888;
+#if PW_CHECK_VERSION(0, 3, 41)
+ case SPA_VIDEO_FORMAT_xRGB_210LE: return DRM_FORMAT_XRGB2101010;
+ case SPA_VIDEO_FORMAT_xBGR_210LE: return DRM_FORMAT_XBGR2101010;
+ case SPA_VIDEO_FORMAT_ARGB_210LE: return DRM_FORMAT_ARGB2101010;
+ case SPA_VIDEO_FORMAT_ABGR_210LE: return DRM_FORMAT_ABGR2101010;
+#endif
default: break;
}
return DRM_FORMAT_INVALID;
}
-static const enum spa_video_format video_formats[] = {
+#if PW_CHECK_VERSION(0, 3, 41)
+#define GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS GSR_PIPEWIRE_VIDEO_MAX_VIDEO_FORMATS
+#else
+#define GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS 8
+#endif
+
+static const enum spa_video_format video_formats[GSR_PIPEWIRE_VIDEO_MAX_VIDEO_FORMATS] = {
SPA_VIDEO_FORMAT_BGRA,
SPA_VIDEO_FORMAT_BGRx,
SPA_VIDEO_FORMAT_BGR,
SPA_VIDEO_FORMAT_RGBx,
SPA_VIDEO_FORMAT_RGBA,
SPA_VIDEO_FORMAT_RGB,
+ SPA_VIDEO_FORMAT_ARGB,
+ SPA_VIDEO_FORMAT_ABGR,
+#if PW_CHECK_VERSION(0, 3, 41)
+ SPA_VIDEO_FORMAT_xRGB_210LE,
+ SPA_VIDEO_FORMAT_xBGR_210LE,
+ SPA_VIDEO_FORMAT_ARGB_210LE,
+ SPA_VIDEO_FORMAT_ABGR_210LE
+#endif
};
static bool gsr_pipewire_video_build_format_params(gsr_pipewire_video *self, struct spa_pod_builder *pod_builder, struct spa_pod **params, uint32_t *num_params) {
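Background for the mapping above: SPA/GStreamer format names describe the byte order of the components in memory, while DRM fourccs are defined as little-endian packed values, which is why e.g. SPA_VIDEO_FORMAT_BGRx corresponds to DRM_FORMAT_XRGB8888. The 10-bit variants are guarded with PW_CHECK_VERSION because those enum values only exist in PipeWire headers from 0.3.41 on. A small self-contained sketch of walking such a table and skipping unmapped entries (to_drm() is a trimmed stand-in for spa_video_format_to_drm_format above, not project code):

#include <stdio.h>
#include <inttypes.h>
#include <pipewire/pipewire.h>      /* PW_CHECK_VERSION */
#include <spa/param/video/raw.h>    /* enum spa_video_format */
#include <libdrm/drm_fourcc.h>      /* DRM_FORMAT_*; include path may differ per distro */

static int64_t to_drm(enum spa_video_format f) {
    switch (f) {
    case SPA_VIDEO_FORMAT_BGRx: return DRM_FORMAT_XRGB8888;
    case SPA_VIDEO_FORMAT_RGBx: return DRM_FORMAT_XBGR8888;
#if PW_CHECK_VERSION(0, 3, 41)
    case SPA_VIDEO_FORMAT_xRGB_210LE: return DRM_FORMAT_XRGB2101010;
#endif
    default: return DRM_FORMAT_INVALID;
    }
}

int main(void) {
    static const enum spa_video_format formats[] = {
        SPA_VIDEO_FORMAT_BGRx,
        SPA_VIDEO_FORMAT_RGBx,
#if PW_CHECK_VERSION(0, 3, 41)
        SPA_VIDEO_FORMAT_xRGB_210LE,
#endif
    };
    for (size_t i = 0; i < sizeof(formats) / sizeof(formats[0]); ++i) {
        const int64_t drm = to_drm(formats[i]);
        if (drm == DRM_FORMAT_INVALID)
            continue;               /* same guard a later hunk adds before the EGL query */
        printf("spa format %d -> drm fourcc 0x%08" PRIx64 "\n", (int)formats[i], drm);
    }
    return 0;
}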
@@ -367,7 +395,7 @@ static bool gsr_pipewire_video_build_format_params(gsr_pipewire_video *self, str
for(size_t i = 0; i < GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS; i++) {
if(self->supported_video_formats[i].modifiers_size == 0)
continue;
- params[i] = build_format(pod_builder, &self->video_info, self->supported_video_formats[i].format, self->modifiers + self->supported_video_formats[i].modifiers_index, self->supported_video_formats[i].modifiers_size);
+ params[*num_params] = build_format(pod_builder, &self->video_info, self->supported_video_formats[i].format, self->modifiers + self->supported_video_formats[i].modifiers_index, self->supported_video_formats[i].modifiers_size);
++(*num_params);
}
@@ -382,7 +410,7 @@ static void renegotiate_format(void *data, uint64_t expirations) {
struct spa_pod *params[GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS];
uint32_t num_video_formats = 0;
- uint8_t params_buffer[2048];
+ uint8_t params_buffer[4096];
struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
if (!gsr_pipewire_video_build_format_params(self, &pod_builder, params, &num_video_formats)) {
pw_thread_loop_unlock(self->thread_loop);
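The params_buffer bump from 2048 to 4096 bytes follows from the larger format table: every entry produces an SPA_PARAM_EnumFormat pod, and each pod also carries the modifier choice list for that format, so the pod builder needs more scratch space. For reference, a minimal sketch of building one such pod over a caller-provided buffer, roughly as in the PipeWire examples (the values are placeholders, not the project's exact parameters, and the real build_format() additionally appends a SPA_FORMAT_VIDEO_modifier choice):

#include <stdint.h>
#include <stddef.h>
#include <spa/pod/builder.h>
#include <spa/param/video/format-utils.h>

static const struct spa_pod *build_enum_format_sketch(uint8_t *buffer, size_t size) {
    /* Builder writes into the caller's buffer, e.g. a 4096-byte stack array */
    struct spa_pod_builder b = SPA_POD_BUILDER_INIT(buffer, size);
    return spa_pod_builder_add_object(&b,
        SPA_TYPE_OBJECT_Format, SPA_PARAM_EnumFormat,
        SPA_FORMAT_mediaType,    SPA_POD_Id(SPA_MEDIA_TYPE_video),
        SPA_FORMAT_mediaSubtype, SPA_POD_Id(SPA_MEDIA_SUBTYPE_raw),
        SPA_FORMAT_VIDEO_format, SPA_POD_Id(SPA_VIDEO_FORMAT_BGRx),
        SPA_FORMAT_VIDEO_size,   SPA_POD_CHOICE_RANGE_Rectangle(
            &SPA_RECTANGLE(1920, 1080),
            &SPA_RECTANGLE(1, 1),
            &SPA_RECTANGLE(8192, 8192)));
}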
@@ -413,6 +441,11 @@ static bool spa_video_format_get_modifiers(gsr_pipewire_video *self, const enum
}
const int64_t drm_format = spa_video_format_to_drm_format(format);
+ if(drm_format == DRM_FORMAT_INVALID) {
+ fprintf(stderr, "gsr error: spa_video_format_get_modifiers: unsupported format: %d\n", (int)format);
+ return false;
+ }
+
if(!self->egl->eglQueryDmaBufModifiersEXT(self->egl->egl_display, drm_format, max_modifiers, modifiers, NULL, num_modifiers)) {
fprintf(stderr, "gsr error: spa_video_format_get_modifiers: eglQueryDmaBufModifiersEXT failed with drm format %d, %" PRIi64 "\n", (int)format, drm_format);
//modifiers[0] = DRM_FORMAT_MOD_LINEAR;
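The new DRM_FORMAT_INVALID check above avoids handing an unmapped format to eglQueryDmaBufModifiersEXT. For context, EGL_EXT_image_dma_buf_import_modifiers is typically queried in two steps, first for the count and then for the list; below is a hedged sketch of that, assuming the extension function pointer has already been loaded via eglGetProcAddress (as the gsr_egl wrapper appears to do), not the project's exact single-step call:

#include <stdbool.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>

/* Two-step modifier query from EGL_EXT_image_dma_buf_import_modifiers.
 * "query" is the eglGetProcAddress()-loaded PFNEGLQUERYDMABUFMODIFIERSEXTPROC. */
static bool query_dmabuf_modifiers(EGLDisplay dpy,
                                   PFNEGLQUERYDMABUFMODIFIERSEXTPROC query,
                                   EGLint drm_format,
                                   EGLuint64KHR *modifiers, EGLint max_modifiers,
                                   EGLint *num_modifiers) {
    EGLint available = 0;
    if (!query(dpy, drm_format, 0, NULL, NULL, &available) || available <= 0)
        return false;                  /* format not importable on this display */
    if (available > max_modifiers)
        available = max_modifiers;     /* clamp to the caller's buffer */
    return query(dpy, drm_format, available, modifiers, NULL, num_modifiers) == EGL_TRUE;
}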
@@ -443,7 +476,7 @@ static void gsr_pipewire_video_init_modifiers(gsr_pipewire_video *self) {
static bool gsr_pipewire_video_setup_stream(gsr_pipewire_video *self) {
struct spa_pod *params[GSR_PIPEWIRE_VIDEO_NUM_VIDEO_FORMATS];
uint32_t num_video_formats = 0;
- uint8_t params_buffer[2048];
+ uint8_t params_buffer[4096];
struct spa_pod_builder pod_builder = SPA_POD_BUILDER_INIT(params_buffer, sizeof(params_buffer));
self->thread_loop = pw_thread_loop_new("gsr screen capture", NULL);