Diffstat (limited to 'src/encoder')
-rw-r--r--  src/encoder/encoder.c         155
-rw-r--r--  src/encoder/video/software.c   11
-rw-r--r--  src/encoder/video/vaapi.c       7
-rw-r--r--  src/encoder/video/video.c     151
-rw-r--r--  src/encoder/video/vulkan.c      7
5 files changed, 167 insertions, 164 deletions
diff --git a/src/encoder/encoder.c b/src/encoder/encoder.c
new file mode 100644
index 0000000..0f8eda5
--- /dev/null
+++ b/src/encoder/encoder.c
@@ -0,0 +1,155 @@
+#include "../../include/encoder/encoder.h"
+#include "../../include/utils.h"
+
+#include <string.h>
+#include <stdio.h>
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+
+bool gsr_encoder_init(gsr_encoder *self, gsr_replay_storage replay_storage, size_t replay_buffer_num_packets, double replay_buffer_time, const char *replay_directory) {
+ memset(self, 0, sizeof(*self));
+ self->num_recording_destinations = 0;
+ self->recording_destination_id_counter = 0;
+
+ if(pthread_mutex_init(&self->file_write_mutex, NULL) != 0) {
+ fprintf(stderr, "gsr error: gsr_encoder_init: failed to create mutex\n");
+ return false;
+ }
+ self->mutex_created = true;
+
+ if(replay_buffer_num_packets > 0) {
+ self->replay_buffer = gsr_replay_buffer_create(replay_storage, replay_directory, replay_buffer_time, replay_buffer_num_packets);
+ if(!self->replay_buffer) {
+ fprintf(stderr, "gsr error: gsr_encoder_init: failed to create replay buffer\n");
+ gsr_encoder_deinit(self);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void gsr_encoder_deinit(gsr_encoder *self) {
+ if(self->mutex_created) {
+ self->mutex_created = false;
+ pthread_mutex_destroy(&self->file_write_mutex);
+ }
+
+ if(self->replay_buffer) {
+ gsr_replay_buffer_destroy(self->replay_buffer);
+ self->replay_buffer = NULL;
+ }
+
+ self->num_recording_destinations = 0;
+ self->recording_destination_id_counter = 0;
+}
+
+void gsr_encoder_receive_packets(gsr_encoder *self, AVCodecContext *codec_context, int64_t pts, int stream_index) {
+ for(;;) {
+ AVPacket *av_packet = av_packet_alloc();
+ if(!av_packet)
+ break;
+
+ av_packet->data = NULL;
+ av_packet->size = 0;
+ int res = avcodec_receive_packet(codec_context, av_packet);
+ if(res == 0) { // we have a packet, send the packet to the muxer
+ av_packet->stream_index = stream_index;
+ av_packet->pts = pts;
+ av_packet->dts = pts;
+
+ if(self->replay_buffer) {
+ const double time_now = clock_get_monotonic_seconds();
+ if(!gsr_replay_buffer_append(self->replay_buffer, av_packet, time_now))
+ fprintf(stderr, "gsr error: gsr_encoder_receive_packets: failed to add replay buffer data\n");
+ }
+
+ pthread_mutex_lock(&self->file_write_mutex);
+ const bool is_keyframe = av_packet->flags & AV_PKT_FLAG_KEY;
+ for(size_t i = 0; i < self->num_recording_destinations; ++i) {
+ gsr_encoder_recording_destination *recording_destination = &self->recording_destinations[i];
+ if(recording_destination->codec_context != codec_context)
+ continue;
+
+ if(is_keyframe)
+ recording_destination->has_received_keyframe = true;
+ else if(!recording_destination->has_received_keyframe)
+ continue;
+
+ av_packet->pts = pts - recording_destination->start_pts;
+ av_packet->dts = pts - recording_destination->start_pts;
+
+ av_packet_rescale_ts(av_packet, codec_context->time_base, recording_destination->stream->time_base);
+                // TODO: Is av_interleaved_write_frame needed? Answer: it might be needed for mkv, but don't use it! It causes inconsistent frames (frames get skipped and duplicated).
+                // TODO: av_interleaved_write_frame might be needed for cfr, or always for flv
+                const int ret = av_write_frame(recording_destination->format_context, av_packet);
+                if(ret < 0) {
+                    char error_buffer[AV_ERROR_MAX_STRING_SIZE];
+                    if(av_strerror(ret, error_buffer, sizeof(error_buffer)) < 0)
+                        snprintf(error_buffer, sizeof(error_buffer), "Unknown error");
+                    fprintf(stderr, "gsr error: gsr_encoder_receive_packets: failed to write frame index %d to muxer, reason: %s (%d)\n", av_packet->stream_index, error_buffer, ret);
+                }
+            }
+            pthread_mutex_unlock(&self->file_write_mutex);
+
+            av_packet_free(&av_packet);
+        } else if (res == AVERROR(EAGAIN)) { // we have no packet
+            // fprintf(stderr, "No packet!\n");
+            av_packet_free(&av_packet);
+            break;
+        } else if (res == AVERROR_EOF) { // this is the end of the stream
+            av_packet_free(&av_packet);
+            fprintf(stderr, "End of stream!\n");
+            break;
+        } else {
+            av_packet_free(&av_packet);
+            fprintf(stderr, "Unexpected error: %d\n", res);
+            break;
+        }
+    }
+}
+
+size_t gsr_encoder_add_recording_destination(gsr_encoder *self, AVCodecContext *codec_context, AVFormatContext *format_context, AVStream *stream, int64_t start_pts) {
+    if(self->num_recording_destinations >= GSR_MAX_RECORDING_DESTINATIONS) {
+        fprintf(stderr, "gsr error: gsr_encoder_add_recording_destination: failed to add destination, reached the max amount of recording destinations (%d)\n", GSR_MAX_RECORDING_DESTINATIONS);
+        return (size_t)-1;
+    }
+
+    for(size_t i = 0; i < self->num_recording_destinations; ++i) {
+        if(self->recording_destinations[i].stream == stream) {
+            fprintf(stderr, "gsr error: gsr_encoder_add_recording_destination: failed to add destination, the stream %p already exists as an output\n", (void*)stream);
+            return (size_t)-1;
+        }
+    }
+
+    pthread_mutex_lock(&self->file_write_mutex);
+    gsr_encoder_recording_destination *recording_destination = &self->recording_destinations[self->num_recording_destinations];
+    recording_destination->id = self->recording_destination_id_counter;
+    recording_destination->codec_context = codec_context;
+    recording_destination->format_context = format_context;
+    recording_destination->stream = stream;
+    recording_destination->start_pts = start_pts;
+    recording_destination->has_received_keyframe = false;
+
+    ++self->recording_destination_id_counter;
+    ++self->num_recording_destinations;
+    pthread_mutex_unlock(&self->file_write_mutex);
+
+    return recording_destination->id;
+}
+
+bool gsr_encoder_remove_recording_destination(gsr_encoder *self, size_t id) {
+    bool found = false;
+    pthread_mutex_lock(&self->file_write_mutex);
+    for(size_t i = 0; i < self->num_recording_destinations; ++i) {
+        if(self->recording_destinations[i].id == id) {
+            self->recording_destinations[i] = self->recording_destinations[self->num_recording_destinations - 1];
+            --self->num_recording_destinations;
+            found = true;
+            break;
+        }
+    }
+    pthread_mutex_unlock(&self->file_write_mutex);
+    return found;
+}
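
A minimal usage sketch of the gsr_encoder API added above — not part of the commit. It assumes the usual FFmpeg headers plus include/encoder/encoder.h, and an AVCodecContext, AVFormatContext, AVStream and AVFrame that the caller has already configured. It passes 0 for replay_buffer_num_packets so no replay buffer is created; the gsr_replay_storage argument is written as a placeholder because its enum values are not shown in this diff.

    gsr_encoder encoder;
    // No replay buffer: with replay_buffer_num_packets == 0 the storage/time/directory arguments are unused.
    if(!gsr_encoder_init(&encoder, (gsr_replay_storage)0 /* placeholder */, 0, 0.0, NULL))
        return -1;

    // Register one muxer output (codec_context, format_context and stream are assumed to exist).
    const size_t dest_id = gsr_encoder_add_recording_destination(&encoder, codec_context, format_context, stream, 0);
    if(dest_id == (size_t)-1)
        return -1;

    // Per encoded frame: feed the encoder, then drain its packets into every registered destination.
    avcodec_send_frame(codec_context, frame);
    gsr_encoder_receive_packets(&encoder, codec_context, frame->pts, stream->index);

    // Teardown.
    gsr_encoder_remove_recording_destination(&encoder, dest_id);
    gsr_encoder_deinit(&encoder);
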
diff --git a/src/encoder/video/software.c b/src/encoder/video/software.c
index 627cdea..d8d9828 100644
--- a/src/encoder/video/software.c
+++ b/src/encoder/video/software.c
@@ -71,16 +71,15 @@ void gsr_video_encoder_software_stop(gsr_video_encoder_software *self, AVCodecCo
 }
 static void gsr_video_encoder_software_copy_textures_to_frame(gsr_video_encoder *encoder, AVFrame *frame, gsr_color_conversion *color_conversion) {
-    gsr_video_encoder_software *self = encoder->priv;
+    (void)encoder;
+    //gsr_video_encoder_software *self = encoder->priv;
     // TODO: hdr support
     const unsigned int formats[2] = { GL_RED, GL_RG };
+    const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
     for(int i = 0; i < 2; ++i) {
-        self->params.egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[i]);
-        // We could use glGetTexSubImage and then we wouldn't have to use a specific linesize (LINESIZE_ALIGNMENT) that adds padding,
-        // but glGetTexSubImage is only available starting from opengl 4.5.
-        self->params.egl->glGetTexImage(GL_TEXTURE_2D, 0, formats[i], GL_UNSIGNED_BYTE, frame->data[i]);
+        // TODO: Use glPixelStore?
+        gsr_color_conversion_read_destination_texture(color_conversion, i, 0, 0, frame->width / div[i], frame->height / div[i], formats[i], GL_UNSIGNED_BYTE, frame->data[i]);
     }
-    self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
     // cap_kms->kms.base.egl->eglSwapBuffers(cap_kms->kms.base.egl->egl_display, cap_kms->kms.base.egl->egl_surface);
     //self->params.egl->glFlush();
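
An aside, not from the commit: the div[] = {1, 2} divisor matches the NV12-style layout being read back — plane 0 is full-resolution luma, plane 1 is half-width/half-height interleaved UV. Illustrative byte counts, ignoring any linesize padding:

    // Plane 0 (GL_RED): one byte per pixel.
    const int luma_bytes   = frame->width * frame->height;
    // Plane 1 (GL_RG): two bytes per chroma sample at half resolution in both dimensions.
    const int chroma_bytes = (frame->width / 2) * (frame->height / 2) * 2;
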
diff --git a/src/encoder/video/vaapi.c b/src/encoder/video/vaapi.c
index c7ccd26..0daf4d8 100644
--- a/src/encoder/video/vaapi.c
+++ b/src/encoder/video/vaapi.c
@@ -92,10 +92,6 @@ static bool gsr_video_encoder_vaapi_setup_textures(gsr_video_encoder_vaapi *self
     if(self->prime.fourcc == VA_FOURCC_NV12 || self->prime.fourcc == VA_FOURCC_P010) {
         const uint32_t *formats = self->prime.fourcc == VA_FOURCC_NV12 ? formats_nv12 : formats_p010;
         const int div[2] = {1, 2}; // divide UV texture size by 2 because chroma is half size
-        const float border_colors[2][4] = {
-            {0.0f, 0.0f, 0.0f, 1.0f},
-            {0.5f, 0.5f, 0.0f, 1.0f}
-        };
         self->params.egl->glGenTextures(2, self->target_textures);
         for(int i = 0; i < 2; ++i) {
@@ -125,9 +121,6 @@ static bool gsr_video_encoder_vaapi_setup_textures(gsr_video_encoder_vaapi *self
             }
             self->params.egl->glBindTexture(GL_TEXTURE_2D, self->target_textures[i]);
-            self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
-            self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
-            self->params.egl->glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, border_colors[i]);
             self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
             self->params.egl->glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
diff --git a/src/encoder/video/video.c b/src/encoder/video/video.c
index 82711ce..ce3b61b 100644
--- a/src/encoder/video/video.c
+++ b/src/encoder/video/video.c
@@ -1,54 +1,18 @@
#include "../../../include/encoder/video/video.h"
-#include "../../../include/utils.h"
-#include <string.h>
-#include <stdio.h>
#include <assert.h>
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-
-bool gsr_video_encoder_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame, size_t replay_buffer_num_packets) {
+bool gsr_video_encoder_start(gsr_video_encoder *encoder, AVCodecContext *video_codec_context, AVFrame *frame) {
assert(!encoder->started);
- encoder->num_recording_destinations = 0;
- encoder->recording_destination_id = 0;
-
- if(pthread_mutex_init(&encoder->file_write_mutex, NULL) != 0) {
- fprintf(stderr, "gsr error: gsr_video_encoder_start: failed to create mutex\n");
- return false;
- }
-
- memset(&encoder->replay_buffer, 0, sizeof(encoder->replay_buffer));
- if(replay_buffer_num_packets > 0) {
- if(!gsr_replay_buffer_init(&encoder->replay_buffer, replay_buffer_num_packets)) {
- fprintf(stderr, "gsr error: gsr_video_encoder_start: failed to create replay buffer\n");
- goto error;
- }
- encoder->has_replay_buffer = true;
- }
-
bool res = encoder->start(encoder, video_codec_context, frame);
- if(res) {
+ if(res)
encoder->started = true;
- return true;
- } else {
- goto error;
- }
-
- error:
- pthread_mutex_destroy(&encoder->file_write_mutex);
- gsr_replay_buffer_deinit(&encoder->replay_buffer);
- return false;
+ return res;
}
void gsr_video_encoder_destroy(gsr_video_encoder *encoder, AVCodecContext *video_codec_context) {
assert(encoder->started);
encoder->started = false;
- pthread_mutex_destroy(&encoder->file_write_mutex);
- gsr_replay_buffer_deinit(&encoder->replay_buffer);
- encoder->has_replay_buffer = false;
- encoder->num_recording_destinations = 0;
- encoder->recording_destination_id = 0;
encoder->destroy(encoder, video_codec_context);
}
@@ -62,112 +26,3 @@ void gsr_video_encoder_get_textures(gsr_video_encoder *encoder, unsigned int *te
     assert(encoder->started);
     encoder->get_textures(encoder, textures, num_textures, destination_color);
 }
-
-void gsr_video_encoder_receive_packets(gsr_video_encoder *encoder, AVCodecContext *codec_context, int64_t pts, int stream_index) {
-    for (;;) {
-        AVPacket *av_packet = av_packet_alloc();
-        if(!av_packet)
-            break;
-
-        av_packet->data = NULL;
-        av_packet->size = 0;
-        int res = avcodec_receive_packet(codec_context, av_packet);
-        if(res == 0) { // we have a packet, send the packet to the muxer
-            av_packet->stream_index = stream_index;
-            av_packet->pts = pts;
-            av_packet->dts = pts;
-
-            if(encoder->has_replay_buffer) {
-                const double time_now = clock_get_monotonic_seconds();
-                if(!gsr_replay_buffer_append(&encoder->replay_buffer, av_packet, time_now))
-                    fprintf(stderr, "gsr error: gsr_video_encoder_receive_packets: failed to add replay buffer data\n");
-            }
-
-            pthread_mutex_lock(&encoder->file_write_mutex);
-            const bool is_keyframe = av_packet->flags & AV_PKT_FLAG_KEY;
-            for(size_t i = 0; i < encoder->num_recording_destinations; ++i) {
-                gsr_video_encoder_recording_destination *recording_destination = &encoder->recording_destinations[i];
-                if(recording_destination->codec_context != codec_context)
-                    continue;
-
-                if(is_keyframe)
-                    recording_destination->has_received_keyframe = true;
-                else if(!recording_destination->has_received_keyframe)
-                    continue;
-
-                av_packet->pts = pts - recording_destination->start_pts;
-                av_packet->dts = pts - recording_destination->start_pts;
-
-                av_packet_rescale_ts(av_packet, codec_context->time_base, recording_destination->stream->time_base);
-                // TODO: Is av_interleaved_write_frame needed?. Answer: might be needed for mkv but dont use it! it causes frames to be inconsistent, skipping frames and duplicating frames.
-                // TODO: av_interleaved_write_frame might be needed for cfr, or always for flv
-                const int ret = av_write_frame(recording_destination->format_context, av_packet);
-                if(ret < 0) {
-                    char error_buffer[AV_ERROR_MAX_STRING_SIZE];
-                    if(av_strerror(ret, error_buffer, sizeof(error_buffer)) < 0)
-                        snprintf(error_buffer, sizeof(error_buffer), "Unknown error");
-                    fprintf(stderr, "Error: Failed to write frame index %d to muxer, reason: %s (%d)\n", av_packet->stream_index, error_buffer, ret);
-                }
-            }
-            pthread_mutex_unlock(&encoder->file_write_mutex);
-
-            av_packet_free(&av_packet);
-        } else if (res == AVERROR(EAGAIN)) { // we have no packet
-            // fprintf(stderr, "No packet!\n");
-            av_packet_free(&av_packet);
-            break;
-        } else if (res == AVERROR_EOF) { // this is the end of the stream
-            av_packet_free(&av_packet);
-            fprintf(stderr, "End of stream!\n");
-            break;
-        } else {
-            av_packet_free(&av_packet);
-            fprintf(stderr, "Unexpected error: %d\n", res);
-            break;
-        }
-    }
-}
-
-size_t gsr_video_encoder_add_recording_destination(gsr_video_encoder *encoder, AVCodecContext *codec_context, AVFormatContext *format_context, AVStream *stream, int64_t start_pts) {
-    if(encoder->num_recording_destinations >= GSR_MAX_RECORDING_DESTINATIONS) {
-        fprintf(stderr, "gsr error: gsr_video_encoder_add_recording_destination: failed to add destination, reached the max amount of recording destinations (%d)\n", GSR_MAX_RECORDING_DESTINATIONS);
-        return (size_t)-1;
-    }
-
-    for(size_t i = 0; i < encoder->num_recording_destinations; ++i) {
-        if(encoder->recording_destinations[i].stream == stream) {
-            fprintf(stderr, "gsr error: gsr_video_encoder_add_recording_destination: failed to add destination, the stream %p already exists as an output\n", (void*)stream);
-            return (size_t)-1;
-        }
-    }
-
-    pthread_mutex_lock(&encoder->file_write_mutex);
-    gsr_video_encoder_recording_destination *recording_destination = &encoder->recording_destinations[encoder->num_recording_destinations];
-    recording_destination->id = encoder->recording_destination_id;
-    recording_destination->codec_context = codec_context;
-    recording_destination->format_context = format_context;
-    recording_destination->stream = stream;
-    recording_destination->start_pts = start_pts;
-    recording_destination->has_received_keyframe = false;
-
-    ++encoder->recording_destination_id;
-    ++encoder->num_recording_destinations;
-    pthread_mutex_unlock(&encoder->file_write_mutex);
-
-    return recording_destination->id;
-}
-
-bool gsr_video_encoder_remove_recording_destination(gsr_video_encoder *encoder, size_t id) {
-    bool found = false;
-    pthread_mutex_lock(&encoder->file_write_mutex);
-    for(size_t i = 0; i < encoder->num_recording_destinations; ++i) {
-        if(encoder->recording_destinations[i].id == id) {
-            encoder->recording_destinations[i] = encoder->recording_destinations[encoder->num_recording_destinations - 1];
-            --encoder->num_recording_destinations;
-            found = true;
-            break;
-        }
-    }
-    pthread_mutex_unlock(&encoder->file_write_mutex);
-    return found;
-}
diff --git a/src/encoder/video/vulkan.c b/src/encoder/video/vulkan.c
index 7643ada..802934d 100644
--- a/src/encoder/video/vulkan.c
+++ b/src/encoder/video/vulkan.c
@@ -23,7 +23,7 @@ static bool gsr_video_encoder_vulkan_setup_context(gsr_video_encoder_vulkan *sel
     AVDictionary *options = NULL;
     //av_dict_set(&options, "linear_images", "1", 0);
     //av_dict_set(&options, "disable_multiplane", "1", 0);
-
+#if 0
     // TODO: Use correct device
     if(av_hwdevice_ctx_create(&self->device_ctx, AV_HWDEVICE_TYPE_VULKAN, NULL, options, 0) < 0) {
         fprintf(stderr, "gsr error: gsr_video_encoder_vulkan_setup_context: failed to create hardware device context\n");
@@ -57,6 +57,7 @@ static bool gsr_video_encoder_vulkan_setup_context(gsr_video_encoder_vulkan *sel
     video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
     av_buffer_unref(&frame_context);
+#endif
     return true;
 }
@@ -99,7 +100,7 @@ static bool gsr_video_encoder_vulkan_setup_textures(gsr_video_encoder_vulkan *se
     }
     while(self->params.egl->glGetError()) {}
-
+#if 0
     AVVkFrame *target_surface_id = (AVVkFrame*)frame->data[0];
     AVVulkanDeviceContext* vv = video_codec_context_get_vulkan_data(video_codec_context);
     const size_t luma_size = frame->width * frame->height;
@@ -224,7 +225,7 @@ static bool gsr_video_encoder_vulkan_setup_textures(gsr_video_encoder_vulkan *se
fprintf(stderr, "3 gl error: %d\n", self->params.egl->glGetError());
self->params.egl->glBindTexture(GL_TEXTURE_2D, 0);
}
-
+#endif
return true;
}
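
For context, and not part of the commit: the code disabled by the #if 0 blocks above follows the standard FFmpeg hardware-context pattern. A condensed sketch of that pattern, assuming libavutil/hwcontext.h; the pixel formats and error handling are illustrative rather than copied from this file, and device_ctx would normally be kept by the caller (self->device_ctx in this encoder).

    AVBufferRef *device_ctx = NULL;
    if(av_hwdevice_ctx_create(&device_ctx, AV_HWDEVICE_TYPE_VULKAN, NULL, NULL, 0) < 0)
        return false;

    AVBufferRef *frame_context = av_hwframe_ctx_alloc(device_ctx);
    if(!frame_context) {
        av_buffer_unref(&device_ctx);
        return false;
    }

    AVHWFramesContext *hw_frame_context = (AVHWFramesContext*)frame_context->data;
    hw_frame_context->width = video_codec_context->width;
    hw_frame_context->height = video_codec_context->height;
    hw_frame_context->sw_format = AV_PIX_FMT_NV12; // illustrative software format
    hw_frame_context->format = AV_PIX_FMT_VULKAN;

    if(av_hwframe_ctx_init(frame_context) < 0) {
        av_buffer_unref(&frame_context);
        av_buffer_unref(&device_ctx);
        return false;
    }

    video_codec_context->hw_frames_ctx = av_buffer_ref(frame_context);
    av_buffer_unref(&frame_context); // the codec context now holds its own reference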