Diffstat (limited to 'src/egl.c')
-rw-r--r-- | src/egl.c | 46
1 file changed, 8 insertions(+), 38 deletions(-)
diff --git a/src/egl.c b/src/egl.c
--- a/src/egl.c
+++ b/src/egl.c
@@ -9,7 +9,6 @@
 #include <dlfcn.h>
 #include <assert.h>
 #include <unistd.h>
-#include <sys/capability.h>
 
 // TODO: rename gsr_egl to something else since this includes both egl and glx and in the future maybe vulkan too
 
@@ -29,43 +28,23 @@
 #define GLX_DEPTH_SIZE 12
 #define GLX_RGBA_TYPE 0x8014
 
-#define GLX_CONTEXT_PRIORITY_LEVEL_EXT 0x3100
-#define GLX_CONTEXT_PRIORITY_HIGH_EXT 0x3101
-#define GLX_CONTEXT_PRIORITY_MEDIUM_EXT 0x3102
-#define GLX_CONTEXT_PRIORITY_LOW_EXT 0x3103
-
-static void reset_cap_nice(void) {
-    cap_t caps = cap_get_proc();
-    if(!caps)
-        return;
-
-    const cap_value_t cap_to_remove = CAP_SYS_NICE;
-    cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_to_remove, CAP_CLEAR);
-    cap_set_flag(caps, CAP_PERMITTED, 1, &cap_to_remove, CAP_CLEAR);
-    cap_set_proc(caps);
-    cap_free(caps);
-}
-
 // TODO: Create egl context without surface (in other words, x11/wayland agnostic, doesn't require x11/wayland dependency)
 static bool gsr_egl_create_window(gsr_egl *self) {
     EGLConfig ecfg;
     int32_t num_config = 0;
 
-    // TODO: Use EGL_OPENGL_ES_BIT as amd requires that for external texture, but that breaks software encoding
     const int32_t attr[] = {
         EGL_BUFFER_SIZE, 24,
-        EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
+        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES3_BIT,
         EGL_NONE, EGL_NONE
     };
 
     const int32_t ctxattr[] = {
         EGL_CONTEXT_CLIENT_VERSION, 2,
-        //EGL_CONTEXT_PRIORITY_LEVEL_IMG, EGL_CONTEXT_PRIORITY_HIGH_IMG, /* requires cap_sys_nice, ignored otherwise */
         EGL_NONE, EGL_NONE
     };
 
-    // TODO: Use EGL_OPENGL_ES_API as amd requires that for external texture, but that breaks software encoding
-    self->eglBindAPI(EGL_OPENGL_API);
+    self->eglBindAPI(EGL_OPENGL_ES_API);
 
     self->egl_display = self->eglGetDisplay((EGLNativeDisplayType)gsr_window_get_display(self->window));
     if(!self->egl_display) {
@@ -100,11 +79,9 @@ static bool gsr_egl_create_window(gsr_egl *self) {
         goto fail;
     }
 
-    reset_cap_nice();
     return true;
 
     fail:
-    reset_cap_nice();
     gsr_egl_unload(self);
     return false;
 }
@@ -300,6 +277,7 @@ static bool gsr_egl_load_gl(gsr_egl *self, void *library) {
         { (void**)&self->glGetTexLevelParameteriv, "glGetTexLevelParameteriv" },
         { (void**)&self->glTexImage2D, "glTexImage2D" },
         { (void**)&self->glTexSubImage2D, "glTexSubImage2D" },
+        { (void**)&self->glTexStorage2D, "glTexStorage2D" },
         { (void**)&self->glGetTexImage, "glGetTexImage" },
         { (void**)&self->glGenFramebuffers, "glGenFramebuffers" },
         { (void**)&self->glBindFramebuffer, "glBindFramebuffer" },
@@ -481,9 +459,9 @@ bool gsr_egl_load(gsr_egl *self, gsr_window *window, bool is_monitor_capture, bo
         /* This fixes nvenc codecs unable to load on openSUSE tumbleweed because of a cuda error.
            Don't ask me why */
         const bool inside_flatpak = getenv("FLATPAK_ID") != NULL;
         if(inside_flatpak)
-            system("flatpak-spawn --host -- nvidia-smi -f /dev/null");
+            system("flatpak-spawn --host -- sh -c 'grep -q openSUSE /etc/os-release && nvidia-smi -f /dev/null'");
         else
-            system("nvidia-smi -f /dev/null");
+            system("sh -c 'grep -q openSUSE /etc/os-release && nvidia-smi -f /dev/null'");
     }
 
     return true;
@@ -538,15 +516,7 @@ void gsr_egl_unload(gsr_egl *self) {
 }
 
 void gsr_egl_swap_buffers(gsr_egl *self) {
-    /* This uses less cpu than swap buffer on nvidia */
-    // TODO: Do these and remove swap
-    //self->glFlush();
-    //self->glFinish();
-    if(self->egl_display) {
-        self->eglSwapBuffers(self->egl_display, self->egl_surface);
-    } else if(gsr_window_get_display_server(self->window) == GSR_DISPLAY_SERVER_X11) {
-        Display *display = gsr_window_get_display(self->window);
-        const Window window = (Window)gsr_window_get_window(self->window);
-        self->glXSwapBuffers(display, window);
-    }
+    self->glFlush();
+    // TODO: Use the minimal barrier required
+    self->glMemoryBarrier(GL_ALL_BARRIER_BITS); // GL_SHADER_IMAGE_ACCESS_BARRIER_BIT
 }
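Note on the GLES switch: the config now requests EGL_OPENGL_ES3_BIT and the code binds EGL_OPENGL_ES_API, which the removed TODOs said AMD requires for external textures. The file still carries the TODO about creating the context without a surface; a minimal sketch of what that could look like using the standard EGL_KHR_surfaceless_context extension (an illustration, not part of this commit; error handling elided):

    /* Sketch only: surfaceless GLES context, assuming the driver exposes
       EGL_KHR_surfaceless_context. Not part of this commit. */
    #include <EGL/egl.h>

    static EGLContext create_surfaceless_context(EGLDisplay *out_dpy) {
        EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
        eglInitialize(dpy, NULL, NULL);
        eglBindAPI(EGL_OPENGL_ES_API);

        const EGLint cfg_attr[] = { EGL_RENDERABLE_TYPE, EGL_OPENGL_ES3_BIT, EGL_NONE };
        EGLConfig cfg;
        EGLint num = 0;
        eglChooseConfig(dpy, cfg_attr, &cfg, 1, &num);

        const EGLint ctx_attr[] = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL_NONE };
        EGLContext ctx = eglCreateContext(dpy, cfg, EGL_NO_CONTEXT, ctx_attr);

        /* Made current against EGL_NO_SURFACE, so no X11/Wayland
           window is required. */
        eglMakeCurrent(dpy, EGL_NO_SURFACE, EGL_NO_SURFACE, ctx);
        *out_dpy = dpy;
        return ctx;
    }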
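The loader now also resolves glTexStorage2D. Unlike glTexImage2D, it allocates immutable storage: size, format and mip count are fixed at creation, and later uploads go through glTexSubImage2D (which this file already loads). A usage sketch, where width, height and pixels are placeholder variables rather than names from this file:

    /* Sketch: immutable allocation with glTexStorage2D. */
    GLuint tex;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    /* One-shot allocation: 1 mip level, RGBA8, width x height */
    glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8, width, height);
    /* Uploads must use glTexSubImage2D; calling glTexImage2D on an
       immutable texture is a GL error */
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                    GL_RGBA, GL_UNSIGNED_BYTE, pixels);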
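Finally, gsr_egl_swap_buffers no longer presents at all: with no visible surface to swap, glFlush plus a memory barrier is enough to order the GPU work, and GL_ALL_BARRIER_BITS is the conservative choice. The inline TODO asks for the minimal barrier; if frames are written with imageStore(), as the commented GL_SHADER_IMAGE_ACCESS_BARRIER_BIT hints, the narrower version could look like the sketch below (an assumption about the write path, not something this commit establishes):

    /* Sketch: minimal barrier, assuming frames are written via
       imageStore() and then read by texture fetches in later passes. */
    void gsr_egl_swap_buffers(gsr_egl *self) {
        self->glFlush();
        self->glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT |
                              GL_TEXTURE_FETCH_BARRIER_BIT);
    }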