+#ifndef _MSC_VER
#include <stdint.h>
+#endif
#include <sys/types.h>
#include "quakedef.h"
#include "cap_ogg.h"
// video capture cvars
-static cvar_t cl_capturevideo_ogg_theora_quality = {CVAR_SAVE, "cl_capturevideo_ogg_theora_quality", "32", "video quality factor (0 to 63), or -1 to use bitrate only; higher is better"};
-static cvar_t cl_capturevideo_ogg_theora_bitrate = {CVAR_SAVE, "cl_capturevideo_ogg_theora_bitrate", "-1", "video bitrate (45 to 2000 kbps), or -1 to use quality only; higher is better"};
+static cvar_t cl_capturevideo_ogg_theora_vp3compat = {CVAR_SAVE, "cl_capturevideo_ogg_theora_vp3compat", "1", "make VP3 compatible theora streams"};
+static cvar_t cl_capturevideo_ogg_theora_quality = {CVAR_SAVE, "cl_capturevideo_ogg_theora_quality", "48", "video quality factor (0 to 63), or -1 to use bitrate only; higher is better; setting both to -1 achieves unlimited quality"};
+static cvar_t cl_capturevideo_ogg_theora_bitrate = {CVAR_SAVE, "cl_capturevideo_ogg_theora_bitrate", "-1", "video bitrate (45 to 2000 kbps), or -1 to use quality only; higher is better; setting both to -1 achieves unlimited quality"};
static cvar_t cl_capturevideo_ogg_theora_keyframe_bitrate_multiplier = {CVAR_SAVE, "cl_capturevideo_ogg_theora_keyframe_bitrate_multiplier", "1.5", "how much more bit rate to use for keyframes, specified as a factor of at least 1"};
-static cvar_t cl_capturevideo_ogg_theora_keyframe_frequency = {CVAR_SAVE, "cl_capturevideo_ogg_theora_keyframe_frequency", "64", "maximum number of frames between two key frames (1 to 1000)"};
-static cvar_t cl_capturevideo_ogg_theora_keyframe_mindistance = {CVAR_SAVE, "cl_capturevideo_ogg_theora_keyframe_mindistance", "8", "minimum number of frames between two key frames (1 to 1000)"};
+static cvar_t cl_capturevideo_ogg_theora_keyframe_maxinterval = {CVAR_SAVE, "cl_capturevideo_ogg_theora_keyframe_maxinterval", "64", "maximum keyframe interval (1 to 1000)"};
+static cvar_t cl_capturevideo_ogg_theora_keyframe_mininterval = {CVAR_SAVE, "cl_capturevideo_ogg_theora_keyframe_mininterval", "8", "minimum keyframe interval (1 to 1000)"};
static cvar_t cl_capturevideo_ogg_theora_keyframe_auto_threshold = {CVAR_SAVE, "cl_capturevideo_ogg_theora_keyframe_auto_threshold", "80", "threshold for key frame decision (0 to 100)"};
static cvar_t cl_capturevideo_ogg_theora_noise_sensitivity = {CVAR_SAVE, "cl_capturevideo_ogg_theora_noise_sensitivity", "1", "video noise sensitivity (0 to 6); lower is better"};
static cvar_t cl_capturevideo_ogg_theora_sharpness = {CVAR_SAVE, "cl_capturevideo_ogg_theora_sharpness", "0", "sharpness (0 to 2); lower is sharper"};
-static cvar_t cl_capturevideo_ogg_vorbis_quality = {CVAR_SAVE, "cl_capturevideo_ogg_vorbis_quality", "1", "audio quality (-1 to 10); higher is better"};
+static cvar_t cl_capturevideo_ogg_vorbis_quality = {CVAR_SAVE, "cl_capturevideo_ogg_vorbis_quality", "3", "audio quality (-1 to 10); higher is better"};
// ogg.h stuff
#ifdef _MSC_VER
// end of vorbisenc.h stuff
// theora.h stuff
+
+#define TH_ENCCTL_SET_VP3_COMPATIBLE (10)
+
typedef struct {
int y_width; /**< Width of the Y' luminance plane */
int y_height; /**< Height of the luminance plane */
OC_PF_420, /**< Chroma subsampling by 2 in each direction (4:2:0) */
OC_PF_RSVD, /**< Reserved value */
OC_PF_422, /**< Horizonatal chroma subsampling by 2 (4:2:2) */
- OC_PF_444, /**< No chroma subsampling at all (4:4:4) */
+ OC_PF_444 /**< No chroma subsampling at all (4:4:4) */
} theora_pixelformat;
/**
* Theora bitstream info.
static void (*qtheora_comment_init) (theora_comment *tc);
static void (*qtheora_comment_clear) (theora_comment *tc);
static double (*qtheora_granule_time) (theora_state *th,ogg_int64_t granulepos);
+static int (*qtheora_control) (theora_state *th,int req,void *buf,size_t buf_sz);
// end of theora.h stuff
static dllfunction_t oggfuncs[] =
{"theora_encode_tables", (void **) &qtheora_encode_tables},
{"theora_clear", (void **) &qtheora_clear},
{"theora_granule_time", (void **) &qtheora_granule_time},
+ {"theora_control", (void **) &qtheora_control},
{NULL, NULL}
};
static dllhandle_t og_dll = NULL, vo_dll = NULL, ve_dll = NULL, th_dll = NULL;
-qboolean SCR_CaptureVideo_Ogg_OpenLibrary()
+qboolean SCR_CaptureVideo_Ogg_OpenLibrary(void)
{
const char* dllnames_og [] =
{
-#if defined(WIN64)
- "libogg64.dll",
-#elif defined(WIN32)
+#if defined(WIN32)
+ "libogg-0.dll",
"libogg.dll",
"ogg.dll",
#elif defined(MACOSX)
};
const char* dllnames_vo [] =
{
-#if defined(WIN64)
- "libvorbis64.dll",
-#elif defined(WIN32)
+#if defined(WIN32)
+ "libvorbis-0.dll",
"libvorbis.dll",
"vorbis.dll",
#elif defined(MACOSX)
};
const char* dllnames_ve [] =
{
-#if defined(WIN64)
- "libvorbisenc64.dll",
-#elif defined(WIN32)
+#if defined(WIN32)
+ "libvorbisenc-2.dll",
"libvorbisenc.dll",
"vorbisenc.dll",
#elif defined(MACOSX)
};
const char* dllnames_th [] =
{
-#if defined(WIN64)
- "libtheora64.dll",
-#elif defined(WIN32)
+#if defined(WIN32)
+ "libtheora-0.dll",
"libtheora.dll",
"theora.dll",
#elif defined(MACOSX)
Sys_LoadLibrary (dllnames_ve, &ve_dll, vorbisencfuncs);
}
-void SCR_CaptureVideo_Ogg_Init()
+void SCR_CaptureVideo_Ogg_Init(void)
{
SCR_CaptureVideo_Ogg_OpenLibrary();
+ Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_vp3compat);
Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_quality);
Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_bitrate);
Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_keyframe_bitrate_multiplier);
- Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_keyframe_frequency);
- Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_keyframe_mindistance);
+ Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_keyframe_maxinterval);
+ Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_keyframe_mininterval);
Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_keyframe_auto_threshold);
Cvar_RegisterVariable(&cl_capturevideo_ogg_theora_noise_sensitivity);
Cvar_RegisterVariable(&cl_capturevideo_ogg_vorbis_quality);
}
-qboolean SCR_CaptureVideo_Ogg_Available()
+qboolean SCR_CaptureVideo_Ogg_Available(void)
{
return og_dll && th_dll && vo_dll && ve_dll;
}
-void SCR_CaptureVideo_Ogg_CloseDLL()
+void SCR_CaptureVideo_Ogg_CloseDLL(void)
{
Sys_UnloadLibrary (&ve_dll);
Sys_UnloadLibrary (&vo_dll);
Sys_UnloadLibrary (&og_dll);
}
+// this struct should not be needed
+// however, libogg appears to pull the ogg_page's data element out from under
+// us before we get to write the data, due to interleaving
+// so this struct is used to keep the page data around until it actually gets
+// written
+typedef struct allocatedoggpage_s
+{
+ size_t len;
+ double time;
+ unsigned char data[65307];
+ // this number is from RFC 3533. In case libogg writes more, we'll have to increase this
+ // but we'll get a Host_Error in this case so we can track it down
+}
+allocatedoggpage_t;
+
typedef struct capturevideostate_ogg_formatspecific_s
{
ogg_stream_state to, vo;
vorbis_dsp_state vd;
vorbis_block vb;
vorbis_info vi;
- yuv_buffer yuv;
+ yuv_buffer yuv[2];
+ int yuvi;
+ int lastnum;
int channels;
- // for interleaving
- ogg_page videopage;
- ogg_page audiopage;
- qboolean have_videopage;
- qboolean have_audiopage;
+ allocatedoggpage_t videopage, audiopage;
}
capturevideostate_ogg_formatspecific_t;
#define LOAD_FORMATSPECIFIC_OGG() capturevideostate_ogg_formatspecific_t *format = (capturevideostate_ogg_formatspecific_t *) cls.capturevideo.formatspecific
-static void SCR_CaptureVideo_Ogg_Interleave()
+static void SCR_CaptureVideo_Ogg_Interleave(void)
{
LOAD_FORMATSPECIFIC_OGG();
-
- //fprintf(stderr, "<");
+ ogg_page pg;
if(!cls.capturevideo.soundrate)
{
- for(;;)
+ while(qogg_stream_pageout(&format->to, &pg) > 0)
{
- // first: make sure we have a page of both types
- if(!format->have_videopage)
- if(qogg_stream_pageout(&format->to, &format->videopage) > 0)
- format->have_videopage = true;
- if(format->have_videopage)
- {
- FS_Write(cls.capturevideo.videofile, format->videopage.header, format->videopage.header_len);
- FS_Write(cls.capturevideo.videofile, format->videopage.body, format->videopage.body_len);
- format->have_videopage = false;
- }
+ FS_Write(cls.capturevideo.videofile, pg.header, pg.header_len);
+ FS_Write(cls.capturevideo.videofile, pg.body, pg.body_len);
}
return;
}
for(;;)
{
// first: make sure we have a page of both types
- if(!format->have_videopage)
- if(qogg_stream_pageout(&format->to, &format->videopage) > 0)
+ if(!format->videopage.len)
+ if(qogg_stream_pageout(&format->to, &pg) > 0)
{
- //fprintf(stderr, "V");
- format->have_videopage = true;
-
- // why do I have to do this? the code should work without the
- // following three lines, which turn this attempt at correct
- // interleaving back into the old stupid one that oggz-validate
- // hates
- FS_Write(cls.capturevideo.videofile, format->videopage.header, format->videopage.header_len);
- FS_Write(cls.capturevideo.videofile, format->videopage.body, format->videopage.body_len);
- format->have_videopage = false;
+ format->videopage.len = pg.header_len + pg.body_len;
+ format->videopage.time = qtheora_granule_time(&format->ts, qogg_page_granulepos(&pg));
+ if(format->videopage.len > sizeof(format->videopage.data))
+ Host_Error("video page too long");
+ memcpy(format->videopage.data, pg.header, pg.header_len);
+ memcpy(format->videopage.data + pg.header_len, pg.body, pg.body_len);
}
- if(!format->have_audiopage)
- if(qogg_stream_pageout(&format->vo, &format->audiopage) > 0)
+ if(!format->audiopage.len)
+ if(qogg_stream_pageout(&format->vo, &pg) > 0)
{
- //fprintf(stderr, "A");
- format->have_audiopage = true;
-
- // why do I have to do this? the code should work without the
- // following three lines, which turn this attempt at correct
- // interleaving back into the old stupid one that oggz-validate
- // hates
- FS_Write(cls.capturevideo.videofile, format->audiopage.header, format->audiopage.header_len);
- FS_Write(cls.capturevideo.videofile, format->audiopage.body, format->audiopage.body_len);
- format->have_audiopage = false;
+ format->audiopage.len = pg.header_len + pg.body_len;
+ format->audiopage.time = qvorbis_granule_time(&format->vd, qogg_page_granulepos(&pg));
+ if(format->audiopage.len > sizeof(format->audiopage.data))
+ Host_Error("audio page too long");
+ memcpy(format->audiopage.data, pg.header, pg.header_len);
+ memcpy(format->audiopage.data + pg.header_len, pg.body, pg.body_len);
}
- if(format->have_videopage && format->have_audiopage)
+ if(format->videopage.len && format->audiopage.len)
{
// output the page that ends first
- double audiotime = qvorbis_granule_time(&format->vd, qogg_page_granulepos(&format->audiopage));
- double videotime = qtheora_granule_time(&format->ts, qogg_page_granulepos(&format->videopage));
- if(audiotime < videotime)
+ if(format->videopage.time < format->audiopage.time)
{
- FS_Write(cls.capturevideo.videofile, format->audiopage.header, format->audiopage.header_len);
- FS_Write(cls.capturevideo.videofile, format->audiopage.body, format->audiopage.body_len);
- format->have_audiopage = false;
-
- //fprintf(stderr, "a");
+ FS_Write(cls.capturevideo.videofile, format->videopage.data, format->videopage.len);
+ format->videopage.len = 0;
}
else
{
- FS_Write(cls.capturevideo.videofile, format->videopage.header, format->videopage.header_len);
- FS_Write(cls.capturevideo.videofile, format->videopage.body, format->videopage.body_len);
- format->have_videopage = false;
-
- //fprintf(stderr, "v");
+ FS_Write(cls.capturevideo.videofile, format->audiopage.data, format->audiopage.len);
+ format->audiopage.len = 0;
}
}
else
break;
}
-
- //fprintf(stderr, ">");
}
-static void SCR_CaptureVideo_Ogg_FlushInterleaving()
+static void SCR_CaptureVideo_Ogg_FlushInterleaving(void)
{
LOAD_FORMATSPECIFIC_OGG();
if(cls.capturevideo.soundrate)
- if(format->have_audiopage)
+ if(format->audiopage.len)
{
- FS_Write(cls.capturevideo.videofile, format->audiopage.header, format->audiopage.header_len);
- FS_Write(cls.capturevideo.videofile, format->audiopage.body, format->audiopage.body_len);
- format->have_audiopage = false;
+ FS_Write(cls.capturevideo.videofile, format->audiopage.data, format->audiopage.len);
+ format->audiopage.len = 0;
}
- if(format->have_videopage)
+ if(format->videopage.len)
{
- FS_Write(cls.capturevideo.videofile, format->videopage.header, format->videopage.header_len);
- FS_Write(cls.capturevideo.videofile, format->videopage.body, format->videopage.body_len);
- format->have_videopage = false;
+ FS_Write(cls.capturevideo.videofile, format->videopage.data, format->videopage.len);
+ format->videopage.len = 0;
}
}
-static void SCR_CaptureVideo_Ogg_EndVideo()
+static void SCR_CaptureVideo_Ogg_EndVideo(void)
{
LOAD_FORMATSPECIFIC_OGG();
ogg_page pg;
ogg_packet pt;
- // repeat the last frame so we can set the end-of-stream flag
- qtheora_encode_YUVin(&format->ts, &format->yuv);
- qtheora_encode_packetout(&format->ts, true, &pt);
- qogg_stream_packetin(&format->to, &pt);
- SCR_CaptureVideo_Ogg_Interleave();
+ if(format->yuvi >= 0)
+ {
+ // send the previous (and last) frame
+ while(format->lastnum-- > 0)
+ {
+ qtheora_encode_YUVin(&format->ts, &format->yuv[format->yuvi]);
+
+ while(qtheora_encode_packetout(&format->ts, !format->lastnum, &pt))
+ qogg_stream_packetin(&format->to, &pt);
+
+ SCR_CaptureVideo_Ogg_Interleave();
+ }
+ }
if(cls.capturevideo.soundrate)
{
qtheora_clear(&format->ts);
qvorbis_info_clear(&format->vi);
- Mem_Free(format->yuv.y);
- Mem_Free(format->yuv.u);
- Mem_Free(format->yuv.v);
+ Mem_Free(format->yuv[0].y);
+ Mem_Free(format->yuv[0].u);
+ Mem_Free(format->yuv[0].v);
+ Mem_Free(format->yuv[1].y);
+ Mem_Free(format->yuv[1].u);
+ Mem_Free(format->yuv[1].v);
Mem_Free(format);
FS_Close(cls.capturevideo.videofile);
cls.capturevideo.videofile = NULL;
}
-static void SCR_CaptureVideo_Ogg_ConvertFrame_BGRA_to_YUV()
+static void SCR_CaptureVideo_Ogg_ConvertFrame_BGRA_to_YUV(void)
{
LOAD_FORMATSPECIFIC_OGG();
+ yuv_buffer *yuv;
int x, y;
int blockr, blockg, blockb;
unsigned char *b = cls.capturevideo.outbuffer;
int h = cls.capturevideo.height;
int inpitch = w*4;
+ yuv = &format->yuv[format->yuvi];
+
for(y = 0; y < h; ++y)
{
for(b = cls.capturevideo.outbuffer + (h-1-y)*w*4, x = 0; x < w; ++x)
blockr = b[2];
blockg = b[1];
blockb = b[0];
- format->yuv.y[x + format->yuv.y_stride * y] =
+ yuv->y[x + yuv->y_stride * y] =
cls.capturevideo.yuvnormalizetable[0][cls.capturevideo.rgbtoyuvscaletable[0][0][blockr] + cls.capturevideo.rgbtoyuvscaletable[0][1][blockg] + cls.capturevideo.rgbtoyuvscaletable[0][2][blockb]];
b += 4;
}
- if((y & 1) == 0)
+ if ((y & 1) == 0 && y/2 < h/2) // if h is odd, this skips the last row
{
for(b = cls.capturevideo.outbuffer + (h-2-y)*w*4, x = 0; x < w/2; ++x)
{
blockr = (b[2] + b[6] + b[inpitch+2] + b[inpitch+6]) >> 2;
blockg = (b[1] + b[5] + b[inpitch+1] + b[inpitch+5]) >> 2;
blockb = (b[0] + b[4] + b[inpitch+0] + b[inpitch+4]) >> 2;
- format->yuv.u[x + format->yuv.uv_stride * (y/2)] =
+ yuv->u[x + yuv->uv_stride * (y/2)] =
cls.capturevideo.yuvnormalizetable[1][cls.capturevideo.rgbtoyuvscaletable[1][0][blockr] + cls.capturevideo.rgbtoyuvscaletable[1][1][blockg] + cls.capturevideo.rgbtoyuvscaletable[1][2][blockb] + 128];
- format->yuv.v[x + format->yuv.uv_stride * (y/2)] =
+ yuv->v[x + yuv->uv_stride * (y/2)] =
cls.capturevideo.yuvnormalizetable[2][cls.capturevideo.rgbtoyuvscaletable[2][0][blockr] + cls.capturevideo.rgbtoyuvscaletable[2][1][blockg] + cls.capturevideo.rgbtoyuvscaletable[2][2][blockb] + 128];
b += 8;
}
// data is in cls.capturevideo.outbuffer as BGRA and has size width*height
- SCR_CaptureVideo_Ogg_ConvertFrame_BGRA_to_YUV();
-
- while(num-- > 0)
+ if(format->yuvi >= 0)
{
- qtheora_encode_YUVin(&format->ts, &format->yuv);
+ // send the previous frame
+ while(format->lastnum-- > 0)
+ {
+ qtheora_encode_YUVin(&format->ts, &format->yuv[format->yuvi]);
- while(qtheora_encode_packetout(&format->ts, false, &pt))
- qogg_stream_packetin(&format->to, &pt);
+ while(qtheora_encode_packetout(&format->ts, false, &pt))
+ qogg_stream_packetin(&format->to, &pt);
- SCR_CaptureVideo_Ogg_Interleave();
+ SCR_CaptureVideo_Ogg_Interleave();
+ }
}
+
+ format->yuvi = (format->yuvi + 1) % 2;
+ SCR_CaptureVideo_Ogg_ConvertFrame_BGRA_to_YUV();
+ format->lastnum = num;
+
+ // TODO maybe send num-1 frames from here already
}
+typedef int channelmapping_t[8];
+channelmapping_t mapping[8] =
+{
+ { 0, -1, -1, -1, -1, -1, -1, -1 }, // mono
+ { 0, 1, -1, -1, -1, -1, -1, -1 }, // stereo
+ { 0, 1, 2, -1, -1, -1, -1, -1 }, // L C R
+ { 0, 1, 2, 3, -1, -1, -1, -1 }, // surround40
+ { 0, 2, 3, 4, 1, -1, -1, -1 }, // FL FC FR RL RR
+ { 0, 2, 3, 4, 1, 5, -1, -1 }, // surround51
+ { 0, 2, 3, 4, 1, 5, 6, -1 }, // (not defined by vorbis spec)
+ { 0, 2, 3, 4, 1, 5, 6, 7 } // surround71 (not defined by vorbis spec)
+};
+
static void SCR_CaptureVideo_Ogg_SoundFrame(const portable_sampleframe_t *paintbuffer, size_t length)
{
LOAD_FORMATSPECIFIC_OGG();
size_t i;
int j;
ogg_packet pt;
+ int *map = mapping[bound(1, cls.capturevideo.soundchannels, 8) - 1];
vorbis_buffer = qvorbis_analysis_buffer(&format->vd, length);
- for(i = 0; i < length; ++i)
+ for(j = 0; j < cls.capturevideo.soundchannels; ++j)
{
- for(j = 0; j < cls.capturevideo.soundchannels; ++j)
- vorbis_buffer[j][i] = paintbuffer[i].sample[j] / 32768.0f;
+ float *b = vorbis_buffer[map[j]];
+ for(i = 0; i < length; ++i)
+ b[i] = paintbuffer[i].sample[j];
}
qvorbis_analysis_wrote(&format->vd, length);
SCR_CaptureVideo_Ogg_Interleave();
}
-void SCR_CaptureVideo_Ogg_BeginVideo()
+void SCR_CaptureVideo_Ogg_BeginVideo(void)
{
cls.capturevideo.format = CAPTUREVIDEOFORMAT_OGG_VORBIS_THEORA;
- cls.capturevideo.videofile = FS_OpenRealFile(va("%s.ogv", cls.capturevideo.basename), "wb", false);
+ cls.capturevideo.formatextension = "ogv";
+ cls.capturevideo.videofile = FS_OpenRealFile(va("%s.%s", cls.capturevideo.basename, cls.capturevideo.formatextension), "wb", false);
cls.capturevideo.endvideo = SCR_CaptureVideo_Ogg_EndVideo;
cls.capturevideo.videoframes = SCR_CaptureVideo_Ogg_VideoFrames;
cls.capturevideo.soundframe = SCR_CaptureVideo_Ogg_SoundFrame;
cls.capturevideo.formatspecific = Mem_Alloc(tempmempool, sizeof(capturevideostate_ogg_formatspecific_t));
{
LOAD_FORMATSPECIFIC_OGG();
- int num, denom;
+ int num, denom, i;
ogg_page pg;
ogg_packet pt, pt2, pt3;
theora_comment tc;
vorbis_comment vc;
theora_info ti;
+ int vp3compat;
format->serial1 = rand();
qogg_stream_init(&format->to, format->serial1);
qogg_stream_init(&format->vo, format->serial2);
}
- format->have_videopage = format->have_audiopage = false;
+ format->videopage.len = format->audiopage.len = 0;
qtheora_info_init(&ti);
ti.frame_width = cls.capturevideo.width;
//ti.offset_x = ((ti.width - ti.frame_width) / 2) & ~1;
//ti.offset_y = ((ti.height - ti.frame_height) / 2) & ~1;
- format->yuv.y_width = ti.width;
- format->yuv.y_height = ti.height;
- format->yuv.y_stride = ti.width;
-
- format->yuv.uv_width = ti.width / 2;
- format->yuv.uv_height = ti.height / 2;
- format->yuv.uv_stride = ti.width / 2;
-
- format->yuv.y = Mem_Alloc(tempmempool, format->yuv.y_stride * format->yuv.y_height);
- format->yuv.u = Mem_Alloc(tempmempool, format->yuv.uv_stride * format->yuv.uv_height);
- format->yuv.v = Mem_Alloc(tempmempool, format->yuv.uv_stride * format->yuv.uv_height);
+ for(i = 0; i < 2; ++i)
+ {
+ format->yuv[i].y_width = ti.width;
+ format->yuv[i].y_height = ti.height;
+ format->yuv[i].y_stride = ti.width;
+ format->yuv[i].uv_width = ti.width / 2;
+ format->yuv[i].uv_height = ti.height / 2;
+ format->yuv[i].uv_stride = ti.width / 2;
+ format->yuv[i].y = (unsigned char *) Mem_Alloc(tempmempool, format->yuv[i].y_stride * format->yuv[i].y_height);
+ format->yuv[i].u = (unsigned char *) Mem_Alloc(tempmempool, format->yuv[i].uv_stride * format->yuv[i].uv_height);
+ format->yuv[i].v = (unsigned char *) Mem_Alloc(tempmempool, format->yuv[i].uv_stride * format->yuv[i].uv_height);
+ }
+ format->yuvi = -1; // -1: no frame valid yet, write into 0
- FindFraction(cls.capturevideo.framerate, &num, &denom, 1001);
+ FindFraction(cls.capturevideo.framerate / cls.capturevideo.framestep, &num, &denom, 1001);
ti.fps_numerator = num;
ti.fps_denominator = denom;
if(ti.target_bitrate <= 0)
{
- if(ti.quality < 0)
- {
- ti.target_bitrate = -1;
- ti.keyframe_data_target_bitrate = -1;
- ti.quality = 63;
- }
- else
- {
- ti.target_bitrate = -1;
- ti.keyframe_data_target_bitrate = -1;
- ti.quality = bound(0, ti.quality, 63);
- }
+ ti.target_bitrate = -1;
+ ti.keyframe_data_target_bitrate = (unsigned int)-1;
}
else
{
- if(ti.quality < 0)
- {
- ti.target_bitrate = bound(45000, ti.target_bitrate, 2000000);
- ti.keyframe_data_target_bitrate = ti.target_bitrate * max(1, cl_capturevideo_ogg_theora_keyframe_bitrate_multiplier.value);
- ti.quality = -1;
- }
- else
+ ti.keyframe_data_target_bitrate = (int) (ti.target_bitrate * max(1, cl_capturevideo_ogg_theora_keyframe_bitrate_multiplier.value));
+
+ if(ti.target_bitrate < 45000 || ti.target_bitrate > 2000000)
+ Con_DPrintf("WARNING: requesting an odd bitrate for theora (sensible values range from 45 to 2000 kbps)\n");
+ }
+
+ if(ti.quality < 0 || ti.quality > 63)
+ {
+ ti.quality = 63;
+ if(ti.target_bitrate <= 0)
{
- ti.target_bitrate = bound(45000, ti.target_bitrate, 2000000);
- ti.keyframe_data_target_bitrate = ti.target_bitrate * max(1, cl_capturevideo_ogg_theora_keyframe_bitrate_multiplier.value);
- ti.quality = -1;
+ ti.target_bitrate = 0x7FFFFFFF;
+ ti.keyframe_data_target_bitrate = 0x7FFFFFFF;
}
}
- ti.keyframe_frequency = bound(1, cl_capturevideo_ogg_theora_keyframe_frequency.integer, 1000);
- ti.keyframe_mindistance = bound(1, cl_capturevideo_ogg_theora_keyframe_mindistance.integer, (int) ti.keyframe_frequency);
+ // this -1 magic is because ti.keyframe_frequency and ti.keyframe_mindistance use different metrics
+ ti.keyframe_frequency = bound(1, cl_capturevideo_ogg_theora_keyframe_maxinterval.integer, 1000);
+ ti.keyframe_mindistance = bound(1, cl_capturevideo_ogg_theora_keyframe_mininterval.integer, (int) ti.keyframe_frequency) - 1;
ti.noise_sensitivity = bound(0, cl_capturevideo_ogg_theora_noise_sensitivity.integer, 6);
ti.sharpness = bound(0, cl_capturevideo_ogg_theora_sharpness.integer, 2);
ti.keyframe_auto_threshold = bound(0, cl_capturevideo_ogg_theora_keyframe_auto_threshold.integer, 100);
ti.keyframe_frequency_force = ti.keyframe_frequency;
- ti.keyframe_auto_p = (ti.keyframe_frequency != ti.keyframe_mindistance);
+ ti.keyframe_auto_p = (ti.keyframe_frequency != ti.keyframe_mindistance + 1);
qtheora_encode_init(&format->ts, &ti);
qtheora_info_clear(&ti);
+ if(cl_capturevideo_ogg_theora_vp3compat.integer)
+ {
+ vp3compat = 1;
+ qtheora_control(&format->ts, TH_ENCCTL_SET_VP3_COMPATIBLE, &vp3compat, sizeof(vp3compat));
+ if(!vp3compat)
+ Con_DPrintf("Warning: theora stream is not fully VP3 compatible\n");
+ }
+
// vorbis?
if(cls.capturevideo.soundrate)
{