[xiph-cvs] cvs commit: theora/lib compglobals.c dct_encode.c encode.c encoder_internal.h encoder_lookup.h frarray.c mcomp.c mcomp.h misc_common.c toplevel.c
Monty
xiphmont at xiph.org
Mon Sep 16 00:10:03 PDT 2002
xiphmont 02/09/16 03:10:02
Added: lib compglobals.c dct_encode.c encode.c
encoder_internal.h encoder_lookup.h frarray.c
mcomp.c mcomp.h misc_common.c toplevel.c
Log:
None of this runs or builds; this commit is to avoid losing work
Revision Changes Path
1.1 theora/lib/compglobals.c
Index: compglobals.c
===================================================================
/********************************************************************
* *
* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function:
last mod: $Id: compglobals.c,v 1.1 2002/09/16 07:10:02 xiphmont Exp $
********************************************************************/
#include "ogg/ogg.h"
#include "encoder_internal.h"
/* the Roundup32 silliness is dangerous on non-Intel processors and
will also choke some C compilers. Find a non-dangerous way later.
Disabled for now.
#define ROUNDUP32(X) ( ( ( (unsigned long) X ) + 31 )&( 0xFFFFFFE0 ) ) */
void EDeleteFragmentInfo(CP_INSTANCE * cpi){
if(cpi->extra_fragmentsAlloc)
_ogg_free(cpi->extra_fragmentsAlloc);
if(cpi->FragmentLastQAlloc)
_ogg_free(cpi->FragmentLastQAlloc);
if(cpi->FragTokensAlloc)
_ogg_free(cpi->FragTokensAlloc);
if(cpi->FragTokenCountsAlloc)
_ogg_free(cpi->FragTokenCountsAlloc);
if(cpi->RunHuffIndicesAlloc)
_ogg_free(cpi->RunHuffIndicesAlloc);
if(cpi->LastCodedErrorScoreAlloc)
_ogg_free(cpi->LastCodedErrorScoreAlloc);
if(cpi->ModeListAlloc)
_ogg_free(cpi->ModeListAlloc);
if(cpi->MVListAlloc)
_ogg_free(cpi->MVListAlloc);
if(cpi->DCT_codesAlloc )
_ogg_free( cpi->DCT_codesAlloc );
if(cpi->DCTDataBufferAlloc )
_ogg_free( cpi->DCTDataBufferAlloc);
if(cpi->quantized_listAlloc)
_ogg_free( cpi->quantized_listAlloc);
if(cpi->OriginalDCAlloc)
_ogg_free( cpi->OriginalDCAlloc);
if(cpi->PartiallyCodedFlags)
_ogg_free(cpi->PartiallyCodedFlags);
if(cpi->PartiallyCodedMbPatterns)
_ogg_free(cpi->PartiallyCodedMbPatterns);
if(cpi->UncodedMbFlags)
_ogg_free(cpi->UncodedMbFlags);
if(cpi->BlockCodedFlagsAlloc)
_ogg_free(cpi->BlockCodedFlagsAlloc);
cpi->extra_fragmentsAlloc = 0;
cpi->FragmentLastQAlloc = 0;
cpi->FragTokensAlloc = 0;
cpi->FragTokenCountsAlloc = 0;
cpi->RunHuffIndicesAlloc = 0;
cpi->LastCodedErrorScoreAlloc = 0;
cpi->ModeListAlloc = 0;
cpi->MVListAlloc = 0;
cpi->DCT_codesAlloc = 0;
cpi->DCTDataBufferAlloc = 0;
cpi->quantized_listAlloc = 0;
cpi->OriginalDCAlloc = 0;
cpi->extra_fragments = 0;
cpi->FragmentLastQ = 0;
cpi->FragTokens = 0;
cpi->FragTokenCounts = 0;
cpi->RunHuffIndices = 0;
cpi->LastCodedErrorScore = 0;
cpi->ModeList = 0;
cpi->MVList = 0;
cpi->DCT_codes = 0;
cpi->DCTDataBuffer = 0;
cpi->quantized_list = 0;
cpi->OriginalDC = 0;
cpi->FixedQ = 0;
cpi->BlockCodedFlagsAlloc = 0;
cpi->BlockCodedFlags = 0;
}
void EAllocateFragmentInfo(CP_INSTANCE * cpi){
/* clear any existing info */
EDeleteFragmentInfo(cpi);
/* Perform Fragment Allocations */
cpi->extra_fragments =
_ogg_malloc(32+cpi->pb.UnitFragments*sizeof(unsigned char));
/* A note to people reading and wondering why malloc returns aren't
checked:
lines like the following that implement a general strategy of
'check the return of malloc; a zero pointer means we're out of
memory!'...:
if(!cpi->extra_fragmentsAlloc) { EDeleteFragmentInfo(cpi); return FALSE; }
...are not useful. It's true that many platforms follow this
malloc behavior, but many do not. The more modern malloc
strategy is only to allocate virtual pages, which are not mapped
until the memory on that page is touched. At *that* point, if
the machine is out of heap, the page fails to be mapped and a
SEGV is generated.
That means that if we want to deal with out of memory conditions,
we *must* be prepared to process a SEGV. If we implement the
SEGV handler, there's no reason to check malloc return; it is
a waste of code. */
cpi->FragmentLastQ =
_ogg_malloc(cpi->pb.UnitFragments*
sizeof(*cpi->FragmentLastQAlloc));
cpi->FragTokens =
_ogg_malloc(cpi->pb.UnitFragments*
sizeof(*cpi->FragTokensAlloc));
cpi->OriginalDC =
_ogg_malloc(cpi->pb.UnitFragments*
sizeof(*cpi->OriginalDCAlloc));
cpi->FragTokenCounts =
_ogg_malloc(cpi->pb.UnitFragments*
sizeof(*cpi->FragTokenCountsAlloc));
cpi->RunHuffIndices =
_ogg_malloc(cpi->pb.UnitFragments*
sizeof(*cpi->RunHuffIndicesAlloc));
cpi->LastCodedErrorScore =
_ogg_malloc(cpi->pb.UnitFragments*
sizeof(*cpi->LastCodedErrorScoreAlloc));
cpi->BlockCodedFlags =
_ogg_malloc(cpi->pb.UnitFragments*
sizeof(*cpi->BlockCodedFlagsAlloc));
cpi->ModeList =
_ogg_malloc(cpi->pb.UnitFragments*
sizeof(*cpi->ModeListAlloc));
cpi->MVList =
_ogg_malloc(cpi->pb.UnitFragments*
sizeof(*cpi->MVListAlloc));
cpi->DCT_codes =
_ogg_malloc(64*
sizeof(*cpi->DCT_codesAlloc));
cpi->DCTDataBuffer =
_ogg_malloc(64*
sizeof(*cpi->DCTDataBufferAlloc));
cpi->quantized_list =
_ogg_malloc(64*
sizeof(*cpi->quantized_listAlloc));
cpi->PartiallyCodedFlags =
_ogg_malloc(cpi->pb.MacroBlocks*
sizeof(*cpi->PartiallyCodedFlags));
cpi->PartiallyCodedMbPatterns =
_ogg_malloc(cpi->pb.MacroBlocks*
sizeof(*cpi->PartiallyCodedMbPatterns));
cpi->UncodedMbFlags =
_ogg_malloc(cpi->pb.MacroBlocks*
sizeof(*cpi->UncodedMbFlags));
}
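/* A minimal sketch of the SEGV-based out-of-memory strategy described in
   the malloc note above, assuming a POSIX platform; the handler and
   installer names, and the policy of simply reporting and exiting, are
   illustrative assumptions rather than anything this encoder does: */
#include <signal.h>
#include <unistd.h>
static void ExampleSegvHandler(int sig){
  (void)sig;
  /* Only async-signal-safe calls are legal here: report and bail out.
     A touched-but-unmappable heap page arrives as a SIGSEGV rather than
     as a zero return from malloc, so this is where an out-of-memory
     policy would live. */
  write(2, "out of memory\n", 14);
  _exit(1);
}
static void ExampleInstallSegvHandler(void){
  signal(SIGSEGV, ExampleSegvHandler);
}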
void EDeleteFrameInfo(CP_INSTANCE * cpi) {
if(cpi->ConvDestBufferAlloc )
_ogg_free(cpi->ConvDestBufferAlloc );
cpi->ConvDestBufferAlloc = 0;
cpi->ConvDestBuffer = 0;
if(cpi->yuv0ptrAlloc)
_ogg_free(cpi->yuv0ptrAlloc);
cpi->yuv0ptrAlloc = 0;
cpi->yuv0ptr = 0;
if(cpi->yuv1ptrAlloc)
_ogg_free(cpi->yuv1ptrAlloc);
cpi->yuv1ptrAlloc = 0;
cpi->yuv1ptr = 0;
if(cpi->OptimisedTokenListEbAlloc )
_ogg_free(cpi->OptimisedTokenListEbAlloc);
cpi->OptimisedTokenListEbAlloc = 0;
cpi->OptimisedTokenListEb = 0;
if(cpi->OptimisedTokenListAlloc )
_ogg_free(cpi->OptimisedTokenListAlloc);
cpi->OptimisedTokenListAlloc = 0;
cpi->OptimisedTokenList = 0;
if(cpi->OptimisedTokenListHiAlloc )
_ogg_free(cpi->OptimisedTokenListHiAlloc);
cpi->OptimisedTokenListHiAlloc = 0;
cpi->OptimisedTokenListHi = 0;
if(cpi->OptimisedTokenListPlAlloc )
_ogg_free(cpi->OptimisedTokenListPlAlloc);
cpi->OptimisedTokenListPlAlloc = 0;
cpi->OptimisedTokenListPl = 0;
}
void EAllocateFrameInfo(CP_INSTANCE * cpi){
int FrameSize = cpi->pb.ReconYPlaneSize + 2 * cpi->pb.ReconUVPlaneSize;
/* clear any existing info */
EDeleteFrameInfo(cpi);
/* allocate frames */
cpi->ConvDestBuffer =
_ogg_malloc(FrameSize*
sizeof(*cpi->ConvDestBuffer));
cpi->yuv0ptr =
_ogg_malloc(FrameSize*
sizeof(*cpi->yuv0ptr));
cpi->yuv1ptr =
_ogg_malloc(FrameSize*
sizeof(*cpi->yuv1ptr));
cpi->OptimisedTokenListEb =
_ogg_malloc(FrameSize*
sizeof(*cpi->OptimisedTokenListEb));
cpi->OptimisedTokenList =
_ogg_malloc(FrameSize*
sizeof(*cpi->OptimisedTokenList));
cpi->OptimisedTokenListHi =
_ogg_malloc(FrameSize*
sizeof(*cpi->OptimisedTokenListHi));
cpi->OptimisedTokenListPl =
_ogg_malloc(FrameSize*
sizeof(*cpi->OptimisedTokenListPl));
}
void ClearCPInstance(CP_INSTANCE *cpi){
if(cpi){
DeleteTmpBuffers(&cpi->pb);
DeletePPInstance(cpi->pp);
}
}
void DeleteCPInstance(CP_INSTANCE *cpi){
if(cpi){
ClearCPInstance(cpi);
_ogg_free(cpi);
}
}
void InitCPInstance(CP_INSTANCE *cpi){
ogg_uint32_t i;
memset((unsigned char *) cpi, 0, sizeof(*cpi));
AllocateTmpBuffers(&cpi->pb);
cpi->pp = CreatePPInstance();
/* Initialise Configuration structure to legal values */
cpi->Configuration.BaseQ = 32;
cpi->Configuration.FirstFrameQ = 32;
cpi->Configuration.MaxQ = 32;
cpi->Configuration.ActiveMaxQ = 32;
cpi->Configuration.OutputFrameRate = 30;
cpi->Configuration.TargetBandwidth = 3000;
cpi->MVChangeFactor = 14;
cpi->FourMvChangeFactor = 8;
cpi->MinImprovementForNewMV = 25;
cpi->ExhaustiveSearchThresh = 2500;
cpi->MinImprovementForFourMV = 100;
cpi->FourMVThreshold = 10000;
cpi->BitRateCapFactor = 1.50;
cpi->InterTripOutThresh = 5000;
cpi->MVEnabled = TRUE;
cpi->InterCodeCount = 127;
cpi->BpbCorrectionFactor = 1.0;
cpi->GoldenFrameEnabled = TRUE;
cpi->InterPrediction = TRUE;
cpi->MotionCompensation = TRUE;
cpi->ThreshMapThreshold = 5;
cpi->QuickCompress = TRUE;
cpi->MaxConsDroppedFrames = 1;
cpi->Sharpness = 2;
cpi->PreProcFilterLevel = 2;
/* Set up default values for QTargetModifier[Q_TABLE_SIZE] table */
for ( i = 0; i < Q_TABLE_SIZE; i++ )
cpi->QTargetModifier[i] = 1.0;
}
1.1 theora/lib/dct_encode.c
Index: dct_encode.c
===================================================================
/********************************************************************
* *
* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function:
last mod: $Id: dct_encode.c,v 1.1 2002/09/16 07:10:02 xiphmont Exp $
********************************************************************/
#include "ogg/ogg.h"
#include "encoder_internal.h"
void Sub8 (unsigned char *FiltPtr, unsigned char *ReconPtr,
ogg_int16_t *DctInputPtr, unsigned char *old_ptr1,
unsigned char *new_ptr1, ogg_uint32_t PixelsPerLine,
ogg_uint32_t ReconPixelsPerLine ) {
int i;
/* For each block row */
for ( i=0; i<BLOCK_HEIGHT_WIDTH; i++ ){
DctInputPtr[0] = (ogg_int16_t)((int)(FiltPtr[0]) - ((int)ReconPtr[0]) );
DctInputPtr[1] = (ogg_int16_t)((int)(FiltPtr[1]) - ((int)ReconPtr[1]) );
DctInputPtr[2] = (ogg_int16_t)((int)(FiltPtr[2]) - ((int)ReconPtr[2]) );
DctInputPtr[3] = (ogg_int16_t)((int)(FiltPtr[3]) - ((int)ReconPtr[3]) );
DctInputPtr[4] = (ogg_int16_t)((int)(FiltPtr[4]) - ((int)ReconPtr[4]) );
DctInputPtr[5] = (ogg_int16_t)((int)(FiltPtr[5]) - ((int)ReconPtr[5]) );
DctInputPtr[6] = (ogg_int16_t)((int)(FiltPtr[6]) - ((int)ReconPtr[6]) );
DctInputPtr[7] = (ogg_int16_t)((int)(FiltPtr[7]) - ((int)ReconPtr[7]) );
/* Update the screen canvas in one step*/
((ogg_uint32_t*)old_ptr1)[0] = ((ogg_uint32_t*)new_ptr1)[0];
((ogg_uint32_t*)old_ptr1)[1] = ((ogg_uint32_t*)new_ptr1)[1];
/* Start next row */
new_ptr1 += PixelsPerLine;
old_ptr1 += PixelsPerLine;
FiltPtr += PixelsPerLine;
ReconPtr += ReconPixelsPerLine;
DctInputPtr += BLOCK_HEIGHT_WIDTH;
}
}
void Sub8_128 (unsigned char *FiltPtr, ogg_int16_t *DctInputPtr,
unsigned char *old_ptr1, unsigned char *new_ptr1,
ogg_uint32_t PixelsPerLine ) {
int i;
/* For each block row */
for ( i=0; i<BLOCK_HEIGHT_WIDTH; i++ ){
/* INTRA mode so code raw image data */
/* We convert the data to 8 bit signed (by subtracting 128) as
this reduces the internal precision requirements in the DCT
transform. */
DctInputPtr[0] = (ogg_int16_t)((int)(FiltPtr[0]) - 128);
DctInputPtr[1] = (ogg_int16_t)((int)(FiltPtr[1]) - 128);
DctInputPtr[2] = (ogg_int16_t)((int)(FiltPtr[2]) - 128);
DctInputPtr[3] = (ogg_int16_t)((int)(FiltPtr[3]) - 128);
DctInputPtr[4] = (ogg_int16_t)((int)(FiltPtr[4]) - 128);
DctInputPtr[5] = (ogg_int16_t)((int)(FiltPtr[5]) - 128);
DctInputPtr[6] = (ogg_int16_t)((int)(FiltPtr[6]) - 128);
DctInputPtr[7] = (ogg_int16_t)((int)(FiltPtr[7]) - 128);
/* Update the screen canvas in one step */
((ogg_uint32_t*)old_ptr1)[0] = ((ogg_uint32_t*)new_ptr1)[0];
((ogg_uint32_t*)old_ptr1)[1] = ((ogg_uint32_t*)new_ptr1)[1];
/* Start next row */
new_ptr1 += PixelsPerLine;
old_ptr1 += PixelsPerLine;
FiltPtr += PixelsPerLine;
DctInputPtr += BLOCK_HEIGHT_WIDTH;
}
}
void Sub8Av2 (unsigned char *FiltPtr, unsigned char *ReconPtr1,
unsigned char *ReconPtr2, ogg_int16_t *DctInputPtr,
unsigned char *old_ptr1, unsigned char *new_ptr1,
ogg_uint32_t PixelsPerLine, ogg_uint32_t ReconPixelsPerLine ) {
int i;
/* For each block row */
for ( i=0; i<BLOCK_HEIGHT_WIDTH; i++ ) {
DctInputPtr[0] = (ogg_int16_t)
((int)(FiltPtr[0]) - (((int)ReconPtr1[0] + (int)ReconPtr2[0]) / 2) );
DctInputPtr[1] = (ogg_int16_t)
((int)(FiltPtr[1]) - (((int)ReconPtr1[1] + (int)ReconPtr2[1]) / 2) );
DctInputPtr[2] = (ogg_int16_t)
((int)(FiltPtr[2]) - (((int)ReconPtr1[2] + (int)ReconPtr2[2]) / 2) );
DctInputPtr[3] = (ogg_int16_t)
((int)(FiltPtr[3]) - (((int)ReconPtr1[3] + (int)ReconPtr2[3]) / 2) );
DctInputPtr[4] = (ogg_int16_t)
((int)(FiltPtr[4]) - (((int)ReconPtr1[4] + (int)ReconPtr2[4]) / 2) );
DctInputPtr[5] = (ogg_int16_t)
((int)(FiltPtr[5]) - (((int)ReconPtr1[5] + (int)ReconPtr2[5]) / 2) );
DctInputPtr[6] = (ogg_int16_t)
((int)(FiltPtr[6]) - (((int)ReconPtr1[6] + (int)ReconPtr2[6]) / 2) );
DctInputPtr[7] = (ogg_int16_t)
((int)(FiltPtr[7]) - (((int)ReconPtr1[7] + (int)ReconPtr2[7]) / 2) );
/* Update the screen canvas in one step */
((ogg_uint32_t*)old_ptr1)[0] = ((ogg_uint32_t*)new_ptr1)[0];
((ogg_uint32_t*)old_ptr1)[1] = ((ogg_uint32_t*)new_ptr1)[1];
/* Start next row */
new_ptr1 += PixelsPerLine;
old_ptr1 += PixelsPerLine;
FiltPtr += PixelsPerLine;
ReconPtr1 += ReconPixelsPerLine;
ReconPtr2 += ReconPixelsPerLine;
DctInputPtr += BLOCK_HEIGHT_WIDTH;
}
}
ogg_uint32_t DPCMTokenizeBlock (CP_INSTANCE *cpi, ogg_int32_t FragIndex,
ogg_uint32_t PixelsPerLine ) {
ogg_uint32_t token_count;
Q_LIST_ENTRY TempLastDC = 0;
if ( GetFrameType(&cpi->pb) == BASE_FRAME ){
/* Key frame so code block in INTRA mode. */
cpi->pb.CodingMode = CODE_INTRA;
}else{
/* Get Motion vector and mode for this block. */
cpi->pb.CodingMode = cpi->pb.FragCodingMethod[FragIndex];
}
/* Tokenise the dct data. */
token_count = TokenizeDctBlock( cpi->pb.QFragData[FragIndex],
cpi->pb.TokenList[FragIndex] );
cpi->FragTokenCounts[FragIndex] = token_count;
cpi->TotTokenCount += token_count;
/* Return number of pixels coded (i.e. 8x8). */
return BLOCK_SIZE;
}
int AllZeroDctData( Q_LIST_ENTRY * QuantList ){
ogg_uint32_t i;
for ( i = 0; i < 64; i ++ )
if ( QuantList[i] != 0 )
return 0;
return 1;
}
unsigned char TokenizeDctBlock (ogg_int16_t * RawData,
ogg_uint32_t * TokenListPtr ) {
ogg_uint32_t i;
unsigned char run_count;
unsigned char token_count = 0; /* Number of tokens created. */
ogg_uint32_t AbsData;
/* Tokenize the block */
for( i = 0; i < BLOCK_SIZE; i++ ){
run_count = 0;
/* Look for a zero run. */
/* NOTE the use of & instead of && which is faster (and
equivalent) in this instance. */
while( (i < BLOCK_SIZE) & (!RawData[i]) ){
run_count++;
i++;
}
/* If we have reached the end of the block then code EOB */
if ( i == BLOCK_SIZE ){
TokenListPtr[token_count] = DCT_EOB_TOKEN;
token_count++;
}else{
/* If we have a short zero run followed by a low data value code
the two as a composite token. */
if ( run_count ){
AbsData = abs(RawData[i]);
if ( ((AbsData == 1) && (run_count <= 17)) ||
((AbsData <= 3) && (run_count <= 3)) ) {
/* Tokenise the run and the subsequent value as a single combined token */
token_count += TokenizeDctRunValue( run_count,
RawData[i],
&TokenListPtr[token_count] );
}else{
/* Else if we have a long non-EOB run or a run followed by a
value token > MAX_RUN_VAL then code the run and token
separately */
if ( run_count <= 8 )
TokenListPtr[token_count] = DCT_SHORT_ZRL_TOKEN;
else
TokenListPtr[token_count] = DCT_ZRL_TOKEN;
token_count++;
TokenListPtr[token_count] = run_count - 1;
token_count++;
/* Now tokenize the value */
token_count += TokenizeDctValue( RawData[i],
&TokenListPtr[token_count] );
}
}else{
/* Else there was NO zero run. */
/* Tokenise the value */
token_count += TokenizeDctValue( RawData[i],
&TokenListPtr[token_count] );
}
}
}
/* Return the total number of tokens (including additional bits
tokens) used. */
return token_count;
}
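/* Worked example of the tokenizer above.  Suppose the zig-zag ordered
   coefficients of a block are { 5, 0, 0, -1, 0, 0, ..., 0 }:
     - index 0: a value of 5 with no preceding zero run, so
       TokenizeDctValue() emits a value token plus an extra-bits entry;
     - indices 1-3: a zero run of length 2 followed by -1, which satisfies
       (AbsData == 1 && run_count <= 17), so TokenizeDctRunValue() emits a
       single combined run/value token, DCT_RUN_CATEGORY1 + (2 - 1), with
       an extra-bits entry of 1 marking the negative sign;
     - the remaining coefficients are all zero, so the scan runs off the
       end of the block and a single DCT_EOB_TOKEN closes it. */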
unsigned char TokenizeDctValue (ogg_int16_t DataValue,
ogg_uint32_t * TokenListPtr ){
unsigned char tokens_added = 0;
ogg_uint32_t AbsDataVal = abs( (ogg_int32_t)DataValue );
/* Values are tokenised as category value and a number of additional
bits that define the position within the category. */
if ( DataValue == 0 ) {
IssueWarning( "Bad Input to TokenizeDctValue" );
} else if ( AbsDataVal == 1 ){
if ( DataValue == 1 )
TokenListPtr[0] = ONE_TOKEN;
else
TokenListPtr[0] = MINUS_ONE_TOKEN;
tokens_added = 1;
} else if ( AbsDataVal == 2 ) {
if ( DataValue == 2 )
TokenListPtr[0] = TWO_TOKEN;
else
TokenListPtr[0] = MINUS_TWO_TOKEN;
tokens_added = 1;
} else if ( AbsDataVal <= MAX_SINGLE_TOKEN_VALUE ) {
TokenListPtr[0] = LOW_VAL_TOKENS + (AbsDataVal - DCT_VAL_CAT2_MIN);
if ( DataValue > 0 )
TokenListPtr[1] = 0;
else
TokenListPtr[1] = 1;
tokens_added = 2;
} else if ( AbsDataVal <= 8 ) {
/* Bit 1 determines sign, Bit 0 the value */
TokenListPtr[0] = DCT_VAL_CATEGORY3;
if ( DataValue > 0 )
TokenListPtr[1] = (AbsDataVal - DCT_VAL_CAT3_MIN);
else
TokenListPtr[1] = (0x02) + (AbsDataVal - DCT_VAL_CAT3_MIN);
tokens_added = 2;
} else if ( AbsDataVal <= 12 ) {
/* Bit 2 determines sign, Bit 0-1 the value */
TokenListPtr[0] = DCT_VAL_CATEGORY4;
if ( DataValue > 0 )
TokenListPtr[1] = (AbsDataVal - DCT_VAL_CAT4_MIN);
else
TokenListPtr[1] = (0x04) + (AbsDataVal - DCT_VAL_CAT4_MIN);
tokens_added = 2;
} else if ( AbsDataVal <= 20 ) {
/* Bit 3 determines sign, Bit 0-2 the value */
TokenListPtr[0] = DCT_VAL_CATEGORY5;
if ( DataValue > 0 )
TokenListPtr[1] = (AbsDataVal - DCT_VAL_CAT5_MIN);
else
TokenListPtr[1] = (0x08) + (AbsDataVal - DCT_VAL_CAT5_MIN);
tokens_added = 2;
} else if ( AbsDataVal <= 36 ) {
/* Bit 4 determines sign, Bit 0-3 the value */
TokenListPtr[0] = DCT_VAL_CATEGORY6;
if ( DataValue > 0 )
TokenListPtr[1] = (AbsDataVal - DCT_VAL_CAT6_MIN);
else
TokenListPtr[1] = (0x010) + (AbsDataVal - DCT_VAL_CAT6_MIN);
tokens_added = 2;
} else if ( AbsDataVal <= 68 ) {
/* Bit 5 determines sign, Bit 0-4 the value */
TokenListPtr[0] = DCT_VAL_CATEGORY7;
if ( DataValue > 0 )
TokenListPtr[1] = (AbsDataVal - DCT_VAL_CAT7_MIN);
else
TokenListPtr[1] = (0x20) + (AbsDataVal - DCT_VAL_CAT7_MIN);
tokens_added = 2;
} else if ( AbsDataVal <= 511 ) {
/* Bit 9 determines sign, Bit 0-8 the value */
TokenListPtr[0] = DCT_VAL_CATEGORY8;
if ( DataValue > 0 )
TokenListPtr[1] = (AbsDataVal - DCT_VAL_CAT8_MIN);
else
TokenListPtr[1] = (0x200) + (AbsDataVal - DCT_VAL_CAT8_MIN);
tokens_added = 2;
} else {
TokenListPtr[0] = DCT_VAL_CATEGORY8;
if ( DataValue > 0 )
TokenListPtr[1] = (511 - DCT_VAL_CAT8_MIN);
else
TokenListPtr[1] = (0x200) + (511 - DCT_VAL_CAT8_MIN);
tokens_added = 2; /* ERROR */
IssueWarning( "Bad Input to TokenizeDctValue" );
}
/* Return the total number of tokens added */
return tokens_added;
}
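/* Summary of the value categories handled above (extra-bits widths are
   inferred from the sign-bit positions used in the code):
     |value| == 1                  -> ONE_TOKEN / MINUS_ONE_TOKEN
     |value| == 2                  -> TWO_TOKEN / MINUS_TWO_TOKEN
     3 .. MAX_SINGLE_TOKEN_VALUE   -> LOW_VAL_TOKENS + offset, 1 sign bit
     .. 8                          -> DCT_VAL_CATEGORY3, sign + 1 value bit
     .. 12                         -> DCT_VAL_CATEGORY4, sign + 2 value bits
     .. 20                         -> DCT_VAL_CATEGORY5, sign + 3 value bits
     .. 36                         -> DCT_VAL_CATEGORY6, sign + 4 value bits
     .. 68                         -> DCT_VAL_CATEGORY7, sign + 5 value bits
     .. 511                        -> DCT_VAL_CATEGORY8, sign + 9 value bits
   Larger magnitudes are clamped to 511 and reported as an error. */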
unsigned char TokenizeDctRunValue (unsigned char RunLength,
ogg_int16_t DataValue,
ogg_uint32_t * TokenListPtr ){
unsigned char tokens_added = 0;
ogg_uint32_t AbsDataVal = abs( (ogg_int32_t)DataValue );
/* Values are tokenised as category value and a number of additional
bits that define the position within the category. */
if ( DataValue == 0 ) {
IssueWarning( "Bad Input to TokenizeDctRunValue" );
} else if ( AbsDataVal == 1 ) {
/* Zero runs of 1-5 */
if ( RunLength <= 5 ) {
TokenListPtr[0] = DCT_RUN_CATEGORY1 + (RunLength - 1);
if ( DataValue > 0 )
TokenListPtr[1] = 0;
else
TokenListPtr[1] = 1;
} else if ( RunLength <= 9 ) {
/* Zero runs of 6-9 */
TokenListPtr[0] = DCT_RUN_CATEGORY1B;
if ( DataValue > 0 )
TokenListPtr[1] = (RunLength - 6);
else
TokenListPtr[1] = 0x04 + (RunLength - 6);
} else {
/* Zero runs of 10-17 */
TokenListPtr[0] = DCT_RUN_CATEGORY1C;
if ( DataValue > 0 )
TokenListPtr[1] = (RunLength - 10);
else
TokenListPtr[1] = 0x08 + (RunLength - 10);
}
tokens_added = 2;
} else if ( AbsDataVal <= 3 ) {
if ( RunLength == 1 ) {
TokenListPtr[0] = DCT_RUN_CATEGORY2;
/* Extra bits token bit 1 indicates sign, bit 0 indicates value */
if ( DataValue > 0 )
TokenListPtr[1] = (AbsDataVal - 2);
else
TokenListPtr[1] = (0x02) + (AbsDataVal - 2);
tokens_added = 2;
}else{
TokenListPtr[0] = DCT_RUN_CATEGORY2 + 1;
/* Extra bits token. */
/* bit 2 indicates sign, bit 1 indicates value, bit 0 indicates
run length */
if ( DataValue > 0 )
TokenListPtr[1] = ((AbsDataVal - 2) << 1) + (RunLength - 2);
else
TokenListPtr[1] = (0x04) + ((AbsDataVal - 2) << 1) + (RunLength - 2);
tokens_added = 2;
}
} else {
tokens_added = 2; /* ERROR */
IssueWarning( "Bad Input to TokenizeDctRunValue" );
}
/* Return the total number of tokens added */
return tokens_added;
}
void MotionBlockDifference (CP_INSTANCE * cpi, unsigned char * FiltPtr,
ogg_int16_t *DctInputPtr, ogg_int32_t MvDevisor,
unsigned char* old_ptr1, unsigned char* new_ptr1,
ogg_uint32_t FragIndex,ogg_uint32_t PixelsPerLine,
ogg_uint32_t ReconPixelsPerLine) {
ogg_int32_t MvShift;
ogg_int32_t MvModMask;
ogg_uint32_t ReconPixelIndex = GetFragIndex(cpi->pb.recon_pixel_index_table,
FragIndex);
ogg_int32_t AbsRefOffset;
ogg_int32_t AbsXOffset;
ogg_int32_t AbsYOffset;
ogg_int32_t MVOffset; /* Baseline motion vector offset */
ogg_int32_t ReconPtr2Offset; /* Offset for second reconstruction in
half pixel MC */
unsigned char *ReconPtr1; /* DCT reconstructed image pointers */
unsigned char *ReconPtr2; /* Pointer used in half pixel MC */
switch(MvDevisor) {
case 2:
MvShift = 1;
MvModMask = 1;
break;
case 4:
MvShift = 2;
MvModMask = 3;
break;
default:
break;
}
cpi->MVector.x = cpi->pb.FragMVect[FragIndex].x;
cpi->MVector.y = cpi->pb.FragMVect[FragIndex].y;
/* Set up the baseline offset for the motion vector. */
MVOffset = ((cpi->MVector.y / MvDevisor) * ReconPixelsPerLine) +
(cpi->MVector.x / MvDevisor);
/* Work out the offset of the second reference position for 1/2
pixel interpolation. For the U and V planes the MV specifies 1/4
pixel accuracy. This is adjusted to 1/2 pixel as follows ( 0->0,
1/4->1/2, 1/2->1/2, 3/4->1/2 ). */
ReconPtr2Offset = 0;
AbsXOffset = cpi->MVector.x % MvDevisor;
AbsYOffset = cpi->MVector.y % MvDevisor;
if ( AbsXOffset ) {
if ( cpi->MVector.x > 0 )
ReconPtr2Offset += 1;
else
ReconPtr2Offset -= 1;
}
if ( AbsYOffset ) {
if ( cpi->MVector.y > 0 )
ReconPtr2Offset += ReconPixelsPerLine;
else
ReconPtr2Offset -= ReconPixelsPerLine;
}
if ( cpi->pb.CodingMode==CODE_GOLDEN_MV ) {
ReconPtr1 = &cpi->
pb.GoldenFrame[GetFragIndex(cpi->pb.recon_pixel_index_table,
FragIndex)];
} else {
ReconPtr1 = &cpi->
pb.LastFrameRecon[GetFragIndex(cpi->pb.recon_pixel_index_table,
FragIndex)];
}
ReconPtr1 += MVOffset;
ReconPtr2 = ReconPtr1 + ReconPtr2Offset;
AbsRefOffset = abs((int)(ReconPtr1 - ReconPtr2));
/* Is the MV offset exactly pixel aligned? */
if ( AbsRefOffset == 0 ){
Sub8( FiltPtr, ReconPtr1, DctInputPtr, old_ptr1, new_ptr1,
PixelsPerLine, ReconPixelsPerLine );
} else {
/* Fractional pixel MVs. */
/* Note that we only use two pixel values even for the diagonal */
Sub8Av2(FiltPtr, ReconPtr1,ReconPtr2,DctInputPtr, old_ptr1,
new_ptr1, PixelsPerLine, ReconPixelsPerLine );
}
}
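/* Worked example of the offset logic above for a chroma block
   (MvDevisor == 4): a vector component of x = +3 quarter pels gives
   MVOffset x = 3/4 = 0 and AbsXOffset = 3, so ReconPtr2Offset is bumped
   by +1 and the prediction becomes the average, via Sub8Av2(), of the
   pixels at horizontal offsets 0 and +1, i.e. the 3/4 pel position is
   rounded to 1/2 pel as described above ( 0->0, 1/4->1/2, 1/2->1/2,
   3/4->1/2 ).  Only when both AbsXOffset and AbsYOffset are zero does
   AbsRefOffset come out as 0 and the single-reference Sub8() path get
   used. */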
void TransformQuantizeBlock (CP_INSTANCE *cpi, ogg_int32_t FragIndex,
ogg_uint32_t PixelsPerLine ) {
unsigned char *new_ptr1; /* Pointers into current frame */
unsigned char *old_ptr1; /* Pointers into old frame */
unsigned char *FiltPtr; /* Pointers to srf filtered pixels */
ogg_int16_t *DctInputPtr; /* Pointer into buffer containing input to DCT */
int LeftEdge; /* Flag if block at left edge of component */
ogg_uint32_t ReconPixelsPerLine; /* Line length for recon buffers. */
Q_LIST_ENTRY TempLastDC = 0;
double GF_Error = 0.0;
double InterNMV_Error = 0.0;
double IntraScore = 0.0;
double BestInterError = 0.0; /* Measure of the best error score
available by application of a
limited motion vector. */
unsigned char *ReconPtr1; /* DCT reconstructed image pointers */
ogg_int32_t MvDevisor; /* Defines MV resolution (2 = 1/2
pixel for Y or 4 = 1/4 for UV) */
new_ptr1 = &cpi->yuv1ptr[GetFragIndex(cpi->pb.pixel_index_table,FragIndex)];
old_ptr1 = &cpi->yuv0ptr[GetFragIndex(cpi->pb.pixel_index_table,FragIndex)];
DctInputPtr = cpi->DCTDataBuffer;
/* Set plane specific values */
if (FragIndex < (ogg_int32_t)cpi->pb.YPlaneFragments){
ReconPixelsPerLine = cpi->pb.Configuration.YStride;
MvDevisor = 2; /* 1/2 pixel accuracy in Y */
}else{
ReconPixelsPerLine = cpi->pb.Configuration.UVStride;
MvDevisor = 4; /* UV planes at 1/2 resolution of Y */
}
/* adjusted / filtered pointers */
FiltPtr = &cpi->ConvDestBuffer[GetFragIndex(cpi->pb.pixel_index_table,
FragIndex)];
if ( GetFrameType(&cpi->pb) == BASE_FRAME ) {
/* Key frame so code block in INTRA mode. */
cpi->pb.CodingMode = CODE_INTRA;
}else{
/* Get Motion vector and mode for this block. */
cpi->pb.CodingMode = cpi->pb.FragCodingMethod[FragIndex];
}
/* Selection of Quantiser matrix and set other plane related values. */
if ( FragIndex < (ogg_int32_t)cpi->pb.YPlaneFragments ){
LeftEdge = !(FragIndex%cpi->pb.HFragments);
/* Select the appropriate Y quantiser matrix */
if ( cpi->pb.CodingMode == CODE_INTRA )
select_Y_quantiser(&cpi->pb);
else
select_Inter_quantiser(&cpi->pb);
}else{
LeftEdge = !((FragIndex-cpi->pb.YPlaneFragments)%(cpi->pb.HFragments>>1));
/* Select the appropriate UV quantiser matrix */
if ( cpi->pb.CodingMode == CODE_INTRA )
select_UV_quantiser(&cpi->pb);
else
select_Inter_quantiser(&cpi->pb);
}
if ( ModeUsesMC[cpi->pb.CodingMode] ){
MotionBlockDifference(cpi, FiltPtr, DctInputPtr, MvDevisor,
old_ptr1, new_ptr1, FragIndex, PixelsPerLine,
ReconPixelsPerLine);
} else if ( (cpi->pb.CodingMode==CODE_INTER_NO_MV ) ||
( cpi->pb.CodingMode==CODE_USING_GOLDEN ) ) {
if ( cpi->pb.CodingMode==CODE_INTER_NO_MV ) {
ReconPtr1 = &cpi->
pb.LastFrameRecon[GetFragIndex(cpi->pb.recon_pixel_index_table,
FragIndex)];
} else {
ReconPtr1 = &cpi->
pb.GoldenFrame[GetFragIndex(cpi->pb.recon_pixel_index_table,
FragIndex)];
}
Sub8( FiltPtr, ReconPtr1, DctInputPtr, old_ptr1, new_ptr1,
PixelsPerLine, ReconPixelsPerLine );
} else if ( cpi->pb.CodingMode==CODE_INTRA ) {
Sub8_128(FiltPtr, DctInputPtr, old_ptr1, new_ptr1, PixelsPerLine);
} else {
IssueWarning( "Illegal coding mode" );
}
/* Proceed to encode the data into the encode buffer if the encoder
is enabled. */
/* Perform a 2D DCT transform on the data. */
fdct_short( cpi->DCTDataBuffer, cpi->DCT_codes );
/* Quantize that transform data. */
quantize ( &cpi->pb, cpi->DCT_codes, cpi->pb.QFragData[FragIndex] );
if ( (cpi->pb.CodingMode == CODE_INTER_NO_MV) &&
( AllZeroDctData(cpi->pb.QFragData[FragIndex]) ) ) {
cpi->pb.display_fragments[FragIndex] = 0;
}
}
1.1 theora/lib/encode.c
Index: encode.c
===================================================================
/********************************************************************
* *
* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function:
last mod: $Id: encode.c,v 1.1 2002/09/16 07:10:02 xiphmont Exp $
********************************************************************/
#include "ogg/ogg.h"
#include "encoder_lookup.h"
#include "encoder_internal.h"
ogg_uint32_t EncodeData(CP_INSTANCE *cpi){
ogg_uint32_t coded_pixels = 0;
/* Zero the count of tokens so far this frame. */
cpi->TotTokenCount = 0;
/* Zero the mode and MV list indices. */
cpi->ModeListCount = 0;
/* Zero Decoder EOB run count */
cpi->pb.EOB_Run = 0;
/* Encode any fragments coded using DCT. */
coded_pixels += QuadCodeDisplayFragments (cpi);
return coded_pixels;
}
#define PUL 8
#define PU 4
#define PUR 2
#define PL 1
#define HIGHBITDUPPED(X) (((ogg_int16_t) X) >> 15)
ogg_int32_t CalculateMotionErrorforFragments(CP_INSTANCE *cpi,
ogg_int32_t CountUsingMV,
ogg_int32_t *FragsUsing,
MOTION_VECTOR MVect,
ogg_int32_t PixelsPerLine){
int i;
ogg_int32_t NewError = 0;
/* for now the four motion vector case is too hard to recalculate so
return a huge error!! */
if( cpi->pb.FragCodingMethod[0] == CODE_INTER_FOURMV)
return HUGE_ERROR;
for(i = 0 ; i < CountUsingMV ; i++) {
ogg_int32_t FragIndex = FragsUsing[i];
ogg_int32_t ThisError = GetMBInterError( cpi,
cpi->ConvDestBuffer,
cpi->pb.LastFrameRecon,
FragIndex, MVect.x,
MVect.y, PixelsPerLine );
NewError += ThisError;
}
return NewError;
}
ogg_uint32_t PickIntra( CP_INSTANCE *cpi, ogg_uint32_t SBRows,
ogg_uint32_t SBCols, ogg_uint32_t HExtra,
ogg_uint32_t VExtra, ogg_uint32_t PixelsPerLine){
ogg_int32_t FragIndex; /* Fragment number */
ogg_uint32_t MB, B; /* Macro-Block, Block indices */
ogg_uint32_t SBrow; /* Super-Block row number */
ogg_uint32_t SBcol; /* Super-Block column number */
ogg_uint32_t SB=0; /* Super-Block index, initialised to first of
this component */
ogg_uint32_t UVRow;
ogg_uint32_t UVColumn;
ogg_uint32_t UVFragOffset;
/* decide what block type and motion vectors to use on all of the frames */
for ( SBrow=0; SBrow<SBRows; SBrow++ ) {
for ( SBcol=0; SBcol<SBCols; SBcol++ ) {
/* Check its four Macro-Blocks */
for ( MB=0; MB<4; MB++ ) {
/* There may be MBs lying out of frame which must be
ignored. For these MBs the top left block will have a negative
Fragment Index. */
if ( QuadMapToMBTopLeft(cpi->pb.BlockMap,SB,MB) >= 0 ) {
cpi->MBCodingMode = CODE_INTRA;
/* Now actually code the blocks. */
for ( B=0; B<4; B++ ) {
FragIndex = QuadMapToIndex1( cpi->pb.BlockMap, SB, MB, B );
cpi->pb.FragCodingMethod[FragIndex] = cpi->MBCodingMode;
}
/* Matching fragments in the U and V planes */
UVRow = (FragIndex / (cpi->pb.HFragments * 2));
UVColumn = (FragIndex % cpi->pb.HFragments) / 2;
UVFragOffset = (UVRow * (cpi->pb.HFragments / 2)) + UVColumn;
cpi->pb.FragCodingMethod[cpi->pb.YPlaneFragments + UVFragOffset] =
cpi->MBCodingMode;
cpi->pb.FragCodingMethod[cpi->pb.YPlaneFragments +
cpi->pb.UVPlaneFragments + UVFragOffset] =
cpi->MBCodingMode;
}
}
/* Next Super-Block */
SB++;
}
}
return 0;
}
ogg_uint32_t QuadCodeComponent ( CP_INSTANCE *cpi,
ogg_uint32_t FirstSB,
ogg_uint32_t SBRows,
ogg_uint32_t SBCols,
ogg_uint32_t HExtra,
ogg_uint32_t VExtra,
ogg_uint32_t PixelsPerLine ){
ogg_int32_t FragIndex; /* Fragment number */
ogg_uint32_t MB, B; /* Macro-Block, Block indices */
ogg_uint32_t SBrow; /* Super-Block row number */
ogg_uint32_t SBcol; /* Super-Block column number */
ogg_uint32_t SB=FirstSB; /* Super-Block index, initialised to first
of this component */
ogg_uint32_t coded_pixels=0; /* Number of pixels coded */
int MBCodedFlag;
/* actually transform and quantize the image now that we've decided
on the modes Parse in quad-tree ordering */
SB=FirstSB;
for ( SBrow=0; SBrow<SBRows; SBrow++ ) {
for ( SBcol=0; SBcol<SBCols; SBcol++ ) {
/* Check its four Macro-Blocks */
for ( MB=0; MB<4; MB++ ) {
if ( QuadMapToMBTopLeft(cpi->pb.BlockMap,SB,MB) >= 0 ) {
MBCodedFlag = 0;
/* Now actually code the blocks */
for ( B=0; B<4; B++ ) {
FragIndex = QuadMapToIndex1( cpi->pb.BlockMap, SB, MB, B );
/* Does Block lie in frame: */
if ( FragIndex >= 0 ) {
/* In Frame: Is it coded: */
if ( cpi->pb.display_fragments[FragIndex] ) {
/* transform and quantize block */
TransformQuantizeBlock( cpi, FragIndex, PixelsPerLine );
/* Has the block been struck off (no MV and no data
generated after DCT)? If not then mark it and the
associated MB as coded. */
if ( cpi->pb.display_fragments[FragIndex] ) {
/* Create linear list of coded block indices */
cpi->pb.CodedBlockList[cpi->pb.CodedBlockIndex] = FragIndex;
cpi->pb.CodedBlockIndex++;
/* MB is still coded */
MBCodedFlag = 1;
cpi->MBCodingMode = cpi->pb.FragCodingMethod[FragIndex];
}
}
}
}
/* If the MB is marked as coded and we are in the Y plane then
the mode list needs to be updated. */
if ( MBCodedFlag && (FirstSB == 0) ){
/* Make a note of the selected mode in the mode list */
cpi->ModeList[cpi->ModeListCount] = cpi->MBCodingMode;
cpi->ModeListCount++;
}
}
}
SB++;
}
}
/* system state should be cleared here.... */
cpi->pb.ClearSysState();
/* Return number of pixels coded */
return coded_pixels;
}
void PackCodedVideo (CP_INSTANCE *cpi) {
ogg_int32_t i;
ogg_int32_t EncodedCoeffs = 1;
ogg_int32_t TotalTokens = cpi->TotTokenCount;
ogg_int32_t FragIndex;
ogg_uint32_t HuffIndex; /* Index to group of tables used to code a token */
cpi->pb.ClearSysState();
/* Reset the count of second order optimised tokens */
cpi->OptimisedTokenCount = 0;
cpi->TokensToBeCoded = cpi->TotTokenCount;
cpi->TokensCoded = 0;
/* Calculate the bit rate at which this frame should be capped. */
cpi->MaxBitTarget = (ogg_uint32_t)((double)(cpi->ThisFrameTargetBytes * 8) *
cpi->BitRateCapFactor);
/* Blank the various fragment data structures before we start. */
memset(cpi->pb.FragCoeffs, 0, cpi->pb.UnitFragments);
memset(cpi->FragTokens, 0, cpi->pb.UnitFragments);
/* Clear down the QFragData structure for all coded blocks. */
cpi->pb.ClearDownQFrag(&cpi->pb);
/* The tree is not needed (implicit) for key frames */
if ( GetFrameType(&cpi->pb) != BASE_FRAME ){
/* Pack the quad tree fragment mapping. */
PackAndWriteDFArray( cpi );
}
/* Note the number of bits used to code the tree itself. */
cpi->FrameBitCount = cpi->ThisFrameSize << 3;
/* Mode and MV data not needed for key frames. */
if ( GetFrameType(&cpi->pb) != BASE_FRAME ){
/* Pack and code the mode list. */
PackModes(cpi);
/* Pack the motion vectors */
PackMotionVectors (cpi);
}
cpi->FrameBitCount = cpi->ThisFrameSize << 3;
/* Optimise the DC tokens */
for ( i = 0; i < cpi->pb.CodedBlockIndex; i++ ) {
/* Get the linear index for the current fragment. */
FragIndex = cpi->pb.CodedBlockList[i];
cpi->pb.FragCoefEOB[FragIndex]=EncodedCoeffs;
PackToken(cpi, FragIndex, DC_HUFF_OFFSET );
}
/* Pack any outstanding EOB tokens */
PackEOBRun(cpi);
/* Now output the optimised DC token list using the appropriate
entropy tables. */
EncodeDcTokenList(cpi);
/* Work out the number of DC bits coded */
/* Optimise the AC tokens */
while ( EncodedCoeffs < 64 ) {
/* Huffman table adjustment based upon coefficient number. */
if ( EncodedCoeffs <= AC_TABLE_2_THRESH )
HuffIndex = AC_HUFF_OFFSET;
else if ( EncodedCoeffs <= AC_TABLE_3_THRESH )
HuffIndex = AC_HUFF_OFFSET + AC_HUFF_CHOICES;
else if ( EncodedCoeffs <= AC_TABLE_4_THRESH )
HuffIndex = AC_HUFF_OFFSET + (AC_HUFF_CHOICES * 2);
else
HuffIndex = AC_HUFF_OFFSET + (AC_HUFF_CHOICES * 3);
/* Repeatedly scan through the list of blocks. */
for ( i = 0; i < cpi->pb.CodedBlockIndex; i++ ) {
/* Get the linear index for the current fragment. */
FragIndex = cpi->pb.CodedBlockList[i];
/* Should we code a token for this block on this pass. */
if ( cpi->FragTokens[FragIndex] < cpi->FragTokenCounts[FragIndex]
&& cpi->pb.FragCoeffs[FragIndex] <= EncodedCoeffs ) {
/* Bit pack a token for this block */
cpi->pb.FragCoefEOB[FragIndex]=EncodedCoeffs;
PackToken( cpi, FragIndex, HuffIndex );
}
}
EncodedCoeffs ++;
}
/* Pack any outstanding EOB tokens */
PackEOBRun(cpi);
/* Now output the optimised AC token list using the appropriate
entropy tables. */
EncodeAcTokenList(cpi);
}
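/* Summary of the packing order implemented above: the quad-tree display
   fragment array (inter frames only), then the mode list and motion
   vectors (inter frames only), then a single pass over the coded block
   list emitting DC tokens, then repeated passes emitting AC tokens in
   coefficient order, switching Huffman groups at AC_TABLE_2_THRESH,
   AC_TABLE_3_THRESH and AC_TABLE_4_THRESH.  Outstanding EOB runs are
   flushed before EncodeDcTokenList() and EncodeAcTokenList() so that
   they land in the correct optimised token list. */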
void EncodeDcTokenList (CP_INSTANCE *cpi) {
ogg_int32_t i,j;
ogg_uint32_t Token;
ogg_uint32_t ExtraBitsToken;
ogg_uint32_t HuffIndex;
ogg_uint32_t BestDcBits;
ogg_uint32_t DcHuffChoice[2];
ogg_uint32_t EntropyTableBits[2][DC_HUFF_CHOICES];
oggpack_buffer *opb=cpi->oggbuffer;
/* Clear table data structure */
memset ( EntropyTableBits, 0, sizeof(ogg_uint32_t)*DC_HUFF_CHOICES*2 );
/* Analyse token list to see which is the best entropy table to use */
for ( i = 0; i < cpi->OptimisedTokenCount; i++ ) {
/* Count number of bits for each table option */
Token = (ogg_uint32_t)cpi->OptimisedTokenList[i];
for ( j = 0; j < DC_HUFF_CHOICES; j++ ){
EntropyTableBits[cpi->OptimisedTokenListPl[i]][j] +=
cpi->pb.HuffCodeLengthArray_VP3x[DC_HUFF_OFFSET + j][Token];
}
}
/* Work out which table option is best for Y */
BestDcBits = EntropyTableBits[0][0];
DcHuffChoice[0] = 0;
for ( j = 1; j < DC_HUFF_CHOICES; j++ ) {
if ( EntropyTableBits[0][j] < BestDcBits ) {
BestDcBits = EntropyTableBits[0][j];
DcHuffChoice[0] = j;
}
}
/* Add the DC huffman table choice to the bitstream */
oggpackB_write( opb, DcHuffChoice[0], DC_HUFF_CHOICE_BITS );
/* Work out which table option is best for UV */
BestDcBits = EntropyTableBits[1][0];
DcHuffChoice[1] = 0;
for ( j = 1; j < DC_HUFF_CHOICES; j++ ) {
if ( EntropyTableBits[1][j] < BestDcBits ) {
BestDcBits = EntropyTableBits[1][j];
DcHuffChoice[1] = j;
}
}
/* Add the DC huffman table choice to the bitstream */
oggpackB_write( opb, DcHuffChoice[1], DC_HUFF_CHOICE_BITS );
/* Encode the token list */
for ( i = 0; i < cpi->OptimisedTokenCount; i++ ) {
/* Get the token and extra bits */
Token = (ogg_uint32_t)cpi->OptimisedTokenList[i];
ExtraBitsToken = (ogg_uint32_t)cpi->OptimisedTokenListEb[i];
/* Select the huffman table */
if ( cpi->OptimisedTokenListPl[i] == 0)
HuffIndex = (ogg_uint32_t)DC_HUFF_OFFSET + (ogg_uint32_t)DcHuffChoice[0];
else
HuffIndex = (ogg_uint32_t)DC_HUFF_OFFSET + (ogg_uint32_t)DcHuffChoice[1];
/* Add the bits to the encode holding buffer. */
cpi->FrameBitCount += cpi->pb.HuffCodeLengthArray_VP3x[HuffIndex][Token];
oggpackB_write( opb, cpi->pb.HuffCodeArray_VP3x[HuffIndex][Token],
(ogg_uint32_t)cpi->
pb.HuffCodeLengthArray_VP3x[HuffIndex][Token] );
/* If the token is followed by an extra bits token then code it */
if ( ExtraBitLengths_VP31[Token] > 0 ) {
/* Add the bits to the encode holding buffer. */
cpi->FrameBitCount += ExtraBitLengths_VP31[Token];
oggpackB_write( opb, ExtraBitsToken,
(ogg_uint32_t)ExtraBitLengths_VP31[Token] );
}
}
/* Reset the count of second order optimised tokens */
cpi->OptimisedTokenCount = 0;
}
void EncodeAcTokenList (CP_INSTANCE *cpi) {
ogg_int32_t i,j;
ogg_uint32_t Token;
ogg_uint32_t ExtraBitsToken;
ogg_uint32_t HuffIndex;
ogg_uint32_t BestAcBits;
ogg_uint32_t AcHuffChoice[2];
ogg_uint32_t EntropyTableBits[2][AC_HUFF_CHOICES];
oggpack_buffer *opb=cpi->oggbuffer;
memset ( EntropyTableBits, 0, sizeof(ogg_uint32_t)*AC_HUFF_CHOICES*2 );
/* Analyse token list to see which is the best entropy table to use */
for ( i = 0; i < cpi->OptimisedTokenCount; i++ ) {
/* Count number of bits for each table option */
Token = (ogg_uint32_t)cpi->OptimisedTokenList[i];
HuffIndex = cpi->OptimisedTokenListHi[i];
for ( j = 0; j < AC_HUFF_CHOICES; j++ ) {
EntropyTableBits[cpi->OptimisedTokenListPl[i]][j] +=
cpi->pb.HuffCodeLengthArray_VP3x[HuffIndex + j][Token];
}
}
/* Select the best set of AC tables for Y */
BestAcBits = EntropyTableBits[0][0];
AcHuffChoice[0] = 0;
for ( j = 1; j < AC_HUFF_CHOICES; j++ ) {
if ( EntropyTableBits[0][j] < BestAcBits ) {
BestAcBits = EntropyTableBits[0][j];
AcHuffChoice[0] = j;
}
}
/* Add the AC-Y huffman table choice to the bitstream */
oggpackB_write( opb, AcHuffChoice[0], AC_HUFF_CHOICE_BITS );
/* Select the best set of AC tables for UV */
BestAcBits = EntropyTableBits[1][0];
AcHuffChoice[1] = 0;
for ( j = 1; j < AC_HUFF_CHOICES; j++ ) {
if ( EntropyTableBits[1][j] < BestAcBits ) {
BestAcBits = EntropyTableBits[1][j];
AcHuffChoice[1] = j;
}
}
/* Add the AC-UV huffman table choice to the bitstream */
oggpackB_write( opb, AcHuffChoice[1], AC_HUFF_CHOICE_BITS );
/* Encode the token list */
for ( i = 0; i < cpi->OptimisedTokenCount; i++ ) {
/* Get the token and extra bits */
Token = (ogg_uint32_t)cpi->OptimisedTokenList[i];
ExtraBitsToken = (ogg_uint32_t)cpi->OptimisedTokenListEb[i];
/* Select the huffman table */
HuffIndex = (ogg_uint32_t)cpi->OptimisedTokenListHi[i] +
AcHuffChoice[cpi->OptimisedTokenListPl[i]];
/* Add the bits to the encode holding buffer. */
cpi->FrameBitCount += cpi->pb.HuffCodeLengthArray_VP3x[HuffIndex][Token];
oggpackB_write( opb, cpi->pb.HuffCodeArray_VP3x[HuffIndex][Token],
(ogg_uint32_t)cpi->
pb.HuffCodeLengthArray_VP3x[HuffIndex][Token] );
/* If the token is followed by an extra bits token then code it */
if ( ExtraBitLengths_VP31[Token] > 0 ) {
/* Add the bits to the encode holding buffer. */
cpi->FrameBitCount += ExtraBitLengths_VP31[Token];
oggpackB_write( opb, ExtraBitsToken,
(ogg_uint32_t)ExtraBitLengths_VP31[Token] );
}
}
/* Reset the count of second order optimised tokens */
cpi->OptimisedTokenCount = 0;
}
void PackModes (CP_INSTANCE *cpi) {
ogg_uint32_t i,j;
ogg_uint16_t ModeIndex;
ogg_int32_t ModeCount[MAX_MODES];
ogg_int32_t TmpFreq;
ogg_int32_t TmpIndex;
ogg_uint16_t BestScheme;
ogg_uint32_t BestSchemeScore;
ogg_uint32_t SchemeScore;
oggpack_buffer *opb=cpi->oggbuffer;
/* Build a frequency map for the modes in this frame */
memset( ModeCount, 0, MAX_MODES*sizeof(ogg_int32_t) );
for ( i = 0; i < cpi->ModeListCount; i++ )
ModeCount[cpi->ModeList[i]] ++;
/* Order the modes from most to least frequent. Store result as
scheme 0 */
for ( j = 0; j < MAX_MODES; j++ ) {
/* Find the most frequent */
TmpFreq = -1;
for ( i = 0; i < MAX_MODES; i++ ) {
/* Is this the best scheme so far ??? */
if ( ModeCount[i] > TmpFreq ) {
TmpFreq = ModeCount[i];
TmpIndex = i;
}
}
ModeCount[TmpIndex] = -1;
ModeSchemes[0][TmpIndex] = j;
}
/* Default/ fallback scheme uses MODE_BITS bits per mode entry */
BestScheme = (MODE_METHODS - 1);
BestSchemeScore = cpi->ModeListCount * 3;
/* Get a bit score for the available schemes. */
for ( j = 0; j < (MODE_METHODS - 1); j++ ) {
/* Reset the scheme score */
if ( j == 0 )
SchemeScore = 24; /* Scheme 0 additional cost of sending
frequency order */
else
SchemeScore = 0;
/* Find the total bits to code using each available scheme */
for ( i = 0; i < cpi->ModeListCount; i++ )
SchemeScore += ModeBitLengths[ModeSchemes[j][cpi->ModeList[i]]];
/* Is this the best scheme so far ??? */
if ( SchemeScore < BestSchemeScore ) {
BestSchemeScore = SchemeScore;
BestScheme = j;
}
}
/* Encode the best scheme. */
oggpackB_write( opb, BestScheme, (ogg_uint32_t)MODE_METHOD_BITS );
/* If the chosen scheme is scheme 0 send details of the mode
frequency order */
if ( BestScheme == 0 ) {
for ( j = 0; j < MAX_MODES; j++ )
/* Note that the last two entries are implicit */
oggpackB_write( opb, ModeSchemes[0][j], (ogg_uint32_t)MODE_BITS );
}
/* Are we using one of the alphabet based schemes or the fallback scheme */
if ( BestScheme < (MODE_METHODS - 1)) {
/* Pack and encode the Mode list */
for ( i = 0; i < cpi->ModeListCount; i++ ) {
/* Add the appropriate mode entropy token. */
ModeIndex = ModeSchemes[BestScheme][cpi->ModeList[i]];
oggpackB_write( opb, ModeBitPatterns[ModeIndex],
(ogg_uint32_t)ModeBitLengths[ModeIndex] );
}
}else{
/* Fall back to MODE_BITS per entry */
for ( i = 0; i < cpi->ModeListCount; i++ ) {
/* Add the appropriate mode entropy token. */
oggpackB_write( opb, cpi->ModeList[i], MODE_BITS );
}
}
}
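/* Cost model used above: scheme 0 orders the modes by frequency in this
   frame and pays 24 bits up front to transmit that ordering; the
   intermediate schemes use the fixed orderings in ModeSchemes[]; the
   final scheme (MODE_METHODS - 1) is the fallback at MODE_BITS bits
   (three, per the cost estimate above) for every mode entry.  Whichever
   gives the lowest total bit count for this frame's mode list is selected
   and signalled in MODE_METHOD_BITS bits. */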
void PackMotionVectors (CP_INSTANCE *cpi) {
ogg_int32_t i;
ogg_uint32_t MethodBits[2] = {0,0};
ogg_uint32_t * MvBitsPtr;
ogg_uint32_t * MvPatternPtr;
ogg_int32_t LastXMVComponent = 0;
ogg_int32_t LastYMVComponent = 0;
oggpack_buffer *opb=cpi->oggbuffer;
/* Choose the coding method */
MvBitsPtr = &MvBits[MAX_MV_EXTENT];
for ( i = 0; i < (ogg_int32_t)cpi->MvListCount; i++ ) {
MethodBits[0] += MvBitsPtr[cpi->MVList[i].x];
MethodBits[0] += MvBitsPtr[cpi->MVList[i].y];
MethodBits[1] += 12; /* Simple six bits per mv component fallback
mechanism */
}
/* Select entropy table */
if ( MethodBits[0] < MethodBits[1] ) {
oggpackB_write( opb, 0, 1 );
MvBitsPtr = &MvBits[MAX_MV_EXTENT];
MvPatternPtr = &MvPattern[MAX_MV_EXTENT];
}else{
oggpackB_write( opb, 1, 1 );
MvBitsPtr = &MvBits2[MAX_MV_EXTENT];
MvPatternPtr = &MvPattern2[MAX_MV_EXTENT];
}
/* Pack and encode the motion vectors */
for ( i = 0; i < (ogg_int32_t)cpi->MvListCount; i++ ) {
oggpackB_write( opb, MvPatternPtr[cpi->MVList[i].x],
(ogg_uint32_t)MvBitsPtr[cpi->MVList[i].x] );
oggpackB_write( opb, MvPatternPtr[cpi->MVList[i].y],
(ogg_uint32_t)MvBitsPtr[cpi->MVList[i].y] );
}
}
void PackEOBRun( CP_INSTANCE *cpi) {
if(cpi->RunLength == 0)
return;
/* Note the appropriate EOB or EOB run token and any extra bits in
the optimised token list. Use the huffman index associated with
the first token in the run */
/* Mark out which plane the block belonged to */
cpi->OptimisedTokenListPl[cpi->OptimisedTokenCount] =
cpi->RunPlaneIndex;
/* Note the huffman index to be used */
cpi->OptimisedTokenListHi[cpi->OptimisedTokenCount] =
(ogg_uint16_t)cpi->RunHuffIndex;
if ( cpi->RunLength <= 3 ) {
if ( cpi->RunLength == 1 ) {
cpi->OptimisedTokenList[cpi->OptimisedTokenCount] = DCT_EOB_TOKEN;
} else if ( cpi->RunLength == 2 ) {
cpi->OptimisedTokenList[cpi->OptimisedTokenCount] = DCT_EOB_PAIR_TOKEN;
} else {
cpi->OptimisedTokenList[cpi->OptimisedTokenCount] = DCT_EOB_TRIPLE_TOKEN;
}
cpi->RunLength = 0;
} else {
/* Choose a token appropriate to the run length. */
if ( cpi->RunLength < 8 ) {
cpi->OptimisedTokenList[cpi->OptimisedTokenCount] =
DCT_REPEAT_RUN_TOKEN;
cpi->OptimisedTokenListEb[cpi->OptimisedTokenCount] =
cpi->RunLength - 4;
cpi->RunLength = 0;
} else if ( cpi->RunLength < 16 ) {
cpi->OptimisedTokenList[cpi->OptimisedTokenCount] =
DCT_REPEAT_RUN2_TOKEN;
cpi->OptimisedTokenListEb[cpi->OptimisedTokenCount] =
cpi->RunLength - 8;
cpi->RunLength = 0;
} else if ( cpi->RunLength < 32 ) {
cpi->OptimisedTokenList[cpi->OptimisedTokenCount] =
DCT_REPEAT_RUN3_TOKEN;
cpi->OptimisedTokenListEb[cpi->OptimisedTokenCount] =
cpi->RunLength - 16;
cpi->RunLength = 0;
} else if ( cpi->RunLength < 4096) {
cpi->OptimisedTokenList[cpi->OptimisedTokenCount] =
DCT_REPEAT_RUN4_TOKEN;
cpi->OptimisedTokenListEb[cpi->OptimisedTokenCount] =
cpi->RunLength;
cpi->RunLength = 0;
} else {
IssueWarning("PackEOBRun : RunLength > 4095");
}
}
cpi->OptimisedTokenCount++;
/* Reset run EOB length */
cpi->RunLength = 0;
}
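/* Summary of the EOB run coding above:
     run length 1         -> DCT_EOB_TOKEN
     run length 2         -> DCT_EOB_PAIR_TOKEN
     run length 3         -> DCT_EOB_TRIPLE_TOKEN
     run length 4 - 7     -> DCT_REPEAT_RUN_TOKEN,  extra-bits value = length - 4
     run length 8 - 15    -> DCT_REPEAT_RUN2_TOKEN, extra-bits value = length - 8
     run length 16 - 31   -> DCT_REPEAT_RUN3_TOKEN, extra-bits value = length - 16
     run length 32 - 4095 -> DCT_REPEAT_RUN4_TOKEN, extra-bits value = length
   PackToken() flushes a pending run either when a non-EOB token is met or
   when the run reaches the 4095 limit. */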
ogg_uint32_t GetBlockReconErrorSlow( CP_INSTANCE *cpi,
ogg_int32_t BlockIndex ) {
ogg_uint32_t i;
ogg_uint32_t ErrorVal = 0;
ogg_uint16_t * SrcDataPtr =
&cpi->ConvDestBuffer[GetFragIndex(cpi->pb.pixel_index_table,
BlockIndex)];
ogg_uint16_t * RecDataPtr =
&cpi->pb.LastFrameRecon[GetFragIndex(cpi->pb.recon_pixel_index_table,
BlockIndex)];
ogg_int32_t SrcStride;
ogg_int32_t RecStride;
/* Is the block a Y block or a UV block. */
if ( BlockIndex < (ogg_int32_t)cpi->pb.YPlaneFragments ) {
SrcStride = cpi->pb.Configuration.VideoFrameWidth;
RecStride = cpi->pb.Configuration.YStride;
}else{
SrcStride = cpi->pb.Configuration.VideoFrameWidth >> 1;
RecStride = cpi->pb.Configuration.UVStride;
}
/* Decide on standard or MMX implementation */
for ( i=0; i < BLOCK_HEIGHT_WIDTH; i++ ) {
ErrorVal += AbsX_LUT[ ((int)SrcDataPtr[0]) - ((int)RecDataPtr[0]) ];
ErrorVal += AbsX_LUT[ ((int)SrcDataPtr[1]) - ((int)RecDataPtr[1]) ];
ErrorVal += AbsX_LUT[ ((int)SrcDataPtr[2]) - ((int)RecDataPtr[2]) ];
ErrorVal += AbsX_LUT[ ((int)SrcDataPtr[3]) - ((int)RecDataPtr[3]) ];
ErrorVal += AbsX_LUT[ ((int)SrcDataPtr[4]) - ((int)RecDataPtr[4]) ];
ErrorVal += AbsX_LUT[ ((int)SrcDataPtr[5]) - ((int)RecDataPtr[5]) ];
ErrorVal += AbsX_LUT[ ((int)SrcDataPtr[6]) - ((int)RecDataPtr[6]) ];
ErrorVal += AbsX_LUT[ ((int)SrcDataPtr[7]) - ((int)RecDataPtr[7]) ];
/* Step to next row of block. */
SrcDataPtr += SrcStride;
RecDataPtr += RecStride;
}
return ErrorVal;
}
void PackToken ( CP_INSTANCE *cpi, ogg_int32_t FragmentNumber,
ogg_uint32_t HuffIndex ) {
ogg_uint32_t Token =
cpi->pb.TokenList[FragmentNumber][cpi->FragTokens[FragmentNumber]];
ogg_uint32_t ExtraBitsToken =
cpi->pb.TokenList[FragmentNumber][cpi->FragTokens[FragmentNumber] + 1];
ogg_uint32_t OneOrTwo;
ogg_uint32_t OneOrZero;
/* Update the record of what coefficient we have got up to for this
block and unpack the encoded token back into the quantised data
array. */
if ( Token == DCT_EOB_TOKEN )
cpi->pb.FragCoeffs[FragmentNumber] = BLOCK_SIZE;
else
ExpandToken( &cpi->pb, cpi->pb.QFragData[FragmentNumber],
&cpi->pb.FragCoeffs[FragmentNumber], Token, ExtraBitsToken );
/* Update record of tokens coded and where we are in this fragment. */
/* Is there an extra bits token */
OneOrTwo= 1 + ( ExtraBitLengths_VP31[Token] > 0 );
/* Advance to the next real token. */
cpi->FragTokens[FragmentNumber] += OneOrTwo;
/* Update the counts of tokens coded */
cpi->TokensCoded += OneOrTwo;
cpi->TokensToBeCoded -= OneOrTwo;
OneOrZero = ( FragmentNumber < (ogg_int32_t)cpi->pb.YPlaneFragments );
if ( Token == DCT_EOB_TOKEN ) {
if ( cpi->RunLength == 0 ) {
cpi->RunHuffIndex = HuffIndex;
cpi->RunPlaneIndex = 1 - OneOrZero;
}
cpi->RunLength++;
/* we have exceeded our longest run length so transmit an EOB run token */
if ( cpi->RunLength == 4095 ) PackEOBRun(cpi);
}else{
/* If we have an EOB run then code it up first */
if ( cpi->RunLength > 0 ) PackEOBRun( cpi);
/* Mark out which plane the block belonged to */
cpi->OptimisedTokenListPl[cpi->OptimisedTokenCount] = 1 - OneOrZero;
/* Note the token, extra bits and huffman table in the optimised
token list */
cpi->OptimisedTokenList[cpi->OptimisedTokenCount] =
(ogg_uint16_t)Token;
cpi->OptimisedTokenListEb[cpi->OptimisedTokenCount] =
ExtraBitsToken;
cpi->OptimisedTokenListHi[cpi->OptimisedTokenCount] =
(ogg_uint16_t)HuffIndex;
cpi->OptimisedTokenCount++;
}
}
void AddMotionVector(CP_INSTANCE *cpi,
MOTION_VECTOR *ThisMotionVector) {
cpi->MVList[cpi->MvListCount].x = ThisMotionVector->x;
cpi->MVList[cpi->MvListCount].y = ThisMotionVector->y;
cpi->MvListCount++;
}
void SetFragMotionVectorAndMode(CP_INSTANCE *cpi,
ogg_int32_t FragIndex,
MOTION_VECTOR *ThisMotionVector){
/* Note the coding mode and vector for each block */
cpi->pb.FragMVect[FragIndex].x = ThisMotionVector->x;
cpi->pb.FragMVect[FragIndex].y = ThisMotionVector->y;
cpi->pb.FragCodingMethod[FragIndex] = cpi->MBCodingMode;
}
void SetMBMotionVectorsAndMode(CP_INSTANCE *cpi,
ogg_int32_t YFragIndex,
ogg_int32_t UFragIndex,
ogg_int32_t VFragIndex,
MOTION_VECTOR *ThisMotionVector){
SetFragMotionVectorAndMode(cpi, YFragIndex, ThisMotionVector);
SetFragMotionVectorAndMode(cpi, YFragIndex + 1, ThisMotionVector);
SetFragMotionVectorAndMode(cpi, YFragIndex + cpi->pb.HFragments,
ThisMotionVector);
SetFragMotionVectorAndMode(cpi, YFragIndex + cpi->pb.HFragments + 1,
ThisMotionVector);
SetFragMotionVectorAndMode(cpi, UFragIndex, ThisMotionVector);
SetFragMotionVectorAndMode(cpi, VFragIndex, ThisMotionVector);
}
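/* Note on the fragment indices used above: a macro block maps onto four
   luma fragments (YFragIndex, YFragIndex + 1, and the two directly below
   at + HFragments and + HFragments + 1) plus one fragment in each of the
   U and V planes, all of which share the macro block's motion vector and
   coding mode. */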
ogg_uint32_t PickModes(CP_INSTANCE *cpi,
ogg_uint32_t SBRows, ogg_uint32_t SBCols,
ogg_uint32_t HExtra, ogg_uint32_t VExtra,
ogg_uint32_t PixelsPerLine,
ogg_uint32_t *InterError, ogg_uint32_t *IntraError) {
ogg_int32_t YFragIndex;
ogg_int32_t UFragIndex;
ogg_int32_t VFragIndex;
ogg_uint32_t MB, B; /* Macro-Block, Block indices */
ogg_uint32_t SBrow; /* Super-Block row number */
ogg_uint32_t SBcol; /* Super-Block column number */
ogg_uint32_t SB=0; /* Super-Block index, initialised to first
of this component */
ogg_uint32_t MBIntraError; /* Intra error for macro block */
ogg_uint32_t MBGFError; /* Golden frame macro block error */
ogg_uint32_t MBGF_MVError; /* Golden frame plus MV error */
ogg_uint32_t LastMBGF_MVError; /* Golden frame error with
last used GF motion
vector. */
ogg_uint32_t MBInterError; /* Inter no MV macro block error */
ogg_uint32_t MBLastInterError; /* Inter with last used MV */
ogg_uint32_t MBPriorLastInterError; /* Inter with prior last MV */
ogg_uint32_t MBInterMVError; /* Inter MV macro block error */
ogg_uint32_t MBInterMVExError; /* Inter MV (exhaustive
search) macro block error */
ogg_uint32_t MBInterFOURMVError; /* Inter MV error when using 4
motion vectors per macro
block */
ogg_uint32_t BestError; /* Best error so far. */
MOTION_VECTOR FourMVect[6]; /* storage for last used vectors (one
entry for each block in MB) */
MOTION_VECTOR LastInterMVect; /* storage for last used Inter frame
MB motion vector */
MOTION_VECTOR PriorLastInterMVect; /* storage for prior last used
Inter frame MB motion vector */
MOTION_VECTOR TmpMVect; /* Temporary MV storage */
MOTION_VECTOR LastGFMVect; /* storage for last used Golden
Frame MB motion vector */
MOTION_VECTOR InterMVect; /* storage for motion vector */
MOTION_VECTOR InterMVectEx; /* storage for motion vector result
from exhaustive search */
MOTION_VECTOR GFMVect; /* storage for motion vector */
MOTION_VECTOR ZeroVect;
ogg_uint32_t UVRow;
ogg_uint32_t UVColumn;
ogg_uint32_t UVFragOffset;
int MBCodedFlag;
ogg_uint16_t QIndex;
/* initialize error scores */
*InterError = 0;
*IntraError = 0;
/* clear down the default motion vector. */
cpi->MvListCount = 0;
FourMVect[0].x = 0;
FourMVect[0].y = 0;
FourMVect[1].x = 0;
FourMVect[1].y = 0;
FourMVect[2].x = 0;
FourMVect[2].y = 0;
FourMVect[3].x = 0;
FourMVect[3].y = 0;
FourMVect[4].x = 0;
FourMVect[4].y = 0;
FourMVect[5].x = 0;
FourMVect[5].y = 0;
LastInterMVect.x = 0;
LastInterMVect.y = 0;
PriorLastInterMVect.x = 0;
PriorLastInterMVect.y = 0;
LastGFMVect.x = 0;
LastGFMVect.y = 0;
InterMVect.x = 0;
InterMVect.y = 0;
GFMVect.x = 0;
GFMVect.y = 0;
ZeroVect.x = 0;
ZeroVect.y = 0;
QIndex = cpi->pb.FrameQIndex;
/* change the quantization matrix to the one at best Q to compute the
new error score */
cpi->MinImprovementForNewMV = (MvThreshTable[QIndex] << 12);
cpi->InterTripOutThresh = (5000<<12);
cpi->MVChangeFactor = MVChangeFactorTable[QIndex]; /* 0.9 */
if ( cpi->QuickCompress ) {
cpi->ExhaustiveSearchThresh = (1000<<12);
cpi->FourMVThreshold = (2500<<12);
} else {
cpi->ExhaustiveSearchThresh = (250<<12);
cpi->FourMVThreshold = (500<<12);
}
cpi->MinImprovementForFourMV = cpi->MinImprovementForNewMV * 4;
if(cpi->MinImprovementForFourMV < (40<<12))
cpi->MinImprovementForFourMV = (40<<12);
cpi->FourMvChangeFactor = 8; /* cpi->MVChangeFactor - 0.05; */
/* decide what block type and motion vectors to use on all of the frames */
for ( SBrow=0; SBrow<SBRows; SBrow++ ) {
for ( SBcol=0; SBcol<SBCols; SBcol++ ) {
/* Check its four Macro-Blocks */
for ( MB=0; MB<4; MB++ ) {
/* There may be MBs lying out of frame which must be
ignored. For these MBs the top left block will have a negative
Fragment Index. */
if ( QuadMapToMBTopLeft(cpi->pb.BlockMap,SB,MB) < 0 ) continue;
/* Is the current macro block coded (in part or in whole) */
MBCodedFlag = 0;
for ( B=0; B<4; B++ ) {
YFragIndex = QuadMapToIndex1( cpi->pb.BlockMap, SB, MB, B );
/* Does Block lie in frame: */
if ( YFragIndex >= 0 ) {
/* In Frame: Is it coded: */
if ( cpi->pb.display_fragments[YFragIndex] ) {
MBCodedFlag = 1;
break;
}
} else
MBCodedFlag = 0;
}
/* This one isn't coded; go to the next one */
if(!MBCodedFlag) continue;
/* Calculate U and V FragIndex from YFragIndex */
YFragIndex = QuadMapToMBTopLeft(cpi->pb.BlockMap, SB,MB);
UVRow = (YFragIndex / (cpi->pb.HFragments * 2));
UVColumn = (YFragIndex % cpi->pb.HFragments) / 2;
UVFragOffset = (UVRow * (cpi->pb.HFragments / 2)) + UVColumn;
UFragIndex = cpi->pb.YPlaneFragments + UVFragOffset;
VFragIndex = cpi->pb.YPlaneFragments + cpi->pb.UVPlaneFragments +
UVFragOffset;
/**************************************************************
Find the block choice with the lowest error
NOTE THAT if U or V is coded but no Y from a macro block then
the mode will be CODE_INTER_NO_MV as this is the default
state to which the mode data structure is initialised in
encoder and decoder at the start of each frame. */
BestError = HUGE_ERROR;
/* Look at the intra coding error. */
MBIntraError = GetMBIntraError( cpi, YFragIndex, PixelsPerLine );
BestError = (BestError > MBIntraError) ? MBIntraError : BestError;
/* Get the golden frame error */
MBGFError = GetMBInterError( cpi, cpi->ConvDestBuffer,
cpi->pb.GoldenFrame, YFragIndex,
0, 0, PixelsPerLine );
BestError = (BestError > MBGFError) ? MBGFError : BestError;
/* Calculate the 0,0 case. */
MBInterError = GetMBInterError( cpi, cpi->ConvDestBuffer,
cpi->pb.LastFrameRecon,
YFragIndex, 0, 0, PixelsPerLine );
BestError = (BestError > MBInterError) ? MBInterError : BestError;
/* Measure error for last MV */
MBLastInterError = GetMBInterError( cpi, cpi->ConvDestBuffer,
cpi->pb.LastFrameRecon,
YFragIndex, LastInterMVect.x,
LastInterMVect.y, PixelsPerLine );
BestError = (BestError > MBLastInterError) ?
MBLastInterError : BestError;
/* Measure error for prior last MV */
MBPriorLastInterError = GetMBInterError( cpi, cpi->ConvDestBuffer,
cpi->pb.LastFrameRecon,
YFragIndex,
PriorLastInterMVect.x,
PriorLastInterMVect.y,
PixelsPerLine );
BestError = (BestError > MBPriorLastInterError) ?
MBPriorLastInterError : BestError;
/* Temporarily force usage of no motion vector blocks */
MBInterMVError = HUGE_ERROR;
InterMVect.x = 0; /* Set 0,0 motion vector */
InterMVect.y = 0;
/* If the best error is above the required threshold search
for a new inter MV */
if ( BestError > cpi->MinImprovementForNewMV ) {
/* Use a mix of hierarchical and exhaustive searches for
quick mode. */
if ( cpi->QuickCompress ) {
MBInterMVError = GetMBMVInterError( cpi, cpi->pb.LastFrameRecon,
YFragIndex, PixelsPerLine,
cpi->MVPixelOffsetY,
&InterMVect );
/* If we still do not have a good match try an exhaustive
MBMV search */
if ( (MBInterMVError > cpi->ExhaustiveSearchThresh) &&
(BestError > cpi->ExhaustiveSearchThresh) ) {
MBInterMVExError =
GetMBMVExhaustiveSearch( cpi, cpi->pb.LastFrameRecon,
YFragIndex, PixelsPerLine,
&InterMVectEx );
/* Is the Variance measure for the EX search
better... If so then use it. */
if ( MBInterMVExError < MBInterMVError ) {
MBInterMVError = MBInterMVExError;
InterMVect.x = InterMVectEx.x;
InterMVect.y = InterMVectEx.y;
}
}
}else{
/* Use an exhaustive search */
MBInterMVError =
GetMBMVExhaustiveSearch( cpi, cpi->pb.LastFrameRecon,
YFragIndex, PixelsPerLine,
&InterMVect );
}
/* Is the improvement, if any, good enough to justify a new MV */
if ( (16 * MBInterMVError < (BestError * cpi->MVChangeFactor)) &&
((MBInterMVError + cpi->MinImprovementForNewMV) < BestError) ){
BestError = MBInterMVError;
}
}
/* If the best error is still above the required threshold
search for a golden frame MV */
MBGF_MVError = HUGE_ERROR;
GFMVect.x = 0; /* Set 0,0 motion vector */
GFMVect.y = 0;
if ( BestError > cpi->MinImprovementForNewMV ) {
/* Do an MV search in the golden reference frame */
MBGF_MVError = GetMBMVInterError( cpi, cpi->pb.GoldenFrame,
YFragIndex, PixelsPerLine,
cpi->MVPixelOffsetY, &GFMVect );
/* Measure error for last GFMV */
LastMBGF_MVError = GetMBInterError( cpi, cpi->ConvDestBuffer,
cpi->pb.GoldenFrame,
YFragIndex, LastGFMVect.x,
LastGFMVect.y, PixelsPerLine );
/* Check against last GF motion vector and reset if the
search has thrown a worse result. */
if ( LastMBGF_MVError < MBGF_MVError ) {
GFMVect.x = LastGFMVect.x;
GFMVect.y = LastGFMVect.y;
MBGF_MVError = LastMBGF_MVError;
}else{
LastGFMVect.x = GFMVect.x;
LastGFMVect.y = GFMVect.y;
}
/* Is the improvement, if any, good enough to justify a new MV */
if ( (16 * MBGF_MVError < (BestError * cpi->MVChangeFactor)) &&
((MBGF_MVError + cpi->MinImprovementForNewMV) < BestError) ) {
BestError = MBGF_MVError;
}
}
/* Finally... If the best error is still too high then consider
the 4MV mode */
MBInterFOURMVError = HUGE_ERROR;
if ( BestError > cpi->FourMVThreshold ) {
/* Get the 4MV error. */
MBInterFOURMVError =
GetFOURMVExhaustiveSearch( cpi, cpi->pb.LastFrameRecon,
YFragIndex, PixelsPerLine, FourMVect );
/* If the improvement is great enough then use the four MV mode */
if ( ((MBInterFOURMVError + cpi->MinImprovementForFourMV) <
BestError) && (16 * MBInterFOURMVError <
(BestError * cpi->FourMvChangeFactor))) {
BestError = MBInterFOURMVError;
}
}
/********************************************************
end finding the best error
*******************************************************
Figure out what to do with the block we chose
Override and force intra coding if the error is high and the intra error is similar.
Now choose a mode based on lowest error (with bias towards no MV) */
if ( (BestError > cpi->InterTripOutThresh) &&
(10 * BestError > MBIntraError * 7 ) ) {
cpi->MBCodingMode = CODE_INTRA;
SetMBMotionVectorsAndMode(cpi,YFragIndex,UFragIndex,
VFragIndex,&ZeroVect);
} else if ( BestError == MBInterError ) {
cpi->MBCodingMode = CODE_INTER_NO_MV;
SetMBMotionVectorsAndMode(cpi,YFragIndex,UFragIndex,
VFragIndex,&ZeroVect);
} else if ( BestError == MBGFError ) {
cpi->MBCodingMode = CODE_USING_GOLDEN;
SetMBMotionVectorsAndMode(cpi,YFragIndex,UFragIndex,
VFragIndex,&ZeroVect);
} else if ( BestError == MBLastInterError ) {
cpi->MBCodingMode = CODE_INTER_LAST_MV;
SetMBMotionVectorsAndMode(cpi,YFragIndex,UFragIndex,
VFragIndex,&LastInterMVect);
} else if ( BestError == MBPriorLastInterError ) {
cpi->MBCodingMode = CODE_INTER_PRIOR_LAST;
SetMBMotionVectorsAndMode(cpi,YFragIndex,UFragIndex,
VFragIndex,&PriorLastInterMVect);
/* Swap the prior and last MV cases over */
TmpMVect.x = PriorLastInterMVect.x;
TmpMVect.y = PriorLastInterMVect.y;
PriorLastInterMVect.x = LastInterMVect.x;
PriorLastInterMVect.y = LastInterMVect.y;
LastInterMVect.x = TmpMVect.x;
LastInterMVect.y = TmpMVect.y;
} else if ( BestError == MBInterMVError ) {
cpi->MBCodingMode = CODE_INTER_PLUS_MV;
SetMBMotionVectorsAndMode(cpi,YFragIndex,UFragIndex,
VFragIndex,&InterMVect);
/* Update Prior last mv with last mv */
PriorLastInterMVect.x = LastInterMVect.x;
PriorLastInterMVect.y = LastInterMVect.y;
/* Note last inter MV for future use */
LastInterMVect.x = InterMVect.x;
LastInterMVect.y = InterMVect.y;
AddMotionVector( cpi, &InterMVect);
} else if ( BestError == MBGF_MVError ) {
cpi->MBCodingMode = CODE_GOLDEN_MV;
SetMBMotionVectorsAndMode(cpi,YFragIndex,UFragIndex,
VFragIndex,&GFMVect);
/* Note last inter GF MV for future use */
LastGFMVect.x = GFMVect.x;
LastGFMVect.y = GFMVect.y;
AddMotionVector( cpi, &GFMVect);
} else if ( BestError == MBInterFOURMVError ) {
cpi->MBCodingMode = CODE_INTER_FOURMV;
/* Calculate the UV vectors as the average of the Y plane ones. */
/* First .x component */
FourMVect[4].x = FourMVect[0].x + FourMVect[1].x +
FourMVect[2].x + FourMVect[3].x;
if ( FourMVect[4].x >= 0 )
FourMVect[4].x = (FourMVect[4].x + 2) / 4;
else
FourMVect[4].x = (FourMVect[4].x - 2) / 4;
FourMVect[5].x = FourMVect[4].x;
/* Then .y component */
FourMVect[4].y = FourMVect[0].y + FourMVect[1].y +
FourMVect[2].y + FourMVect[3].y;
if ( FourMVect[4].y >= 0 )
FourMVect[4].y = (FourMVect[4].y + 2) / 4;
else
FourMVect[4].y = (FourMVect[4].y - 2) / 4;
FourMVect[5].y = FourMVect[4].y;
SetFragMotionVectorAndMode(cpi, YFragIndex, &FourMVect[0]);
SetFragMotionVectorAndMode(cpi, YFragIndex + 1, &FourMVect[1]);
SetFragMotionVectorAndMode(cpi, YFragIndex + cpi->pb.HFragments,
&FourMVect[2]);
SetFragMotionVectorAndMode(cpi, YFragIndex + cpi->pb.HFragments + 1,
&FourMVect[3]);
SetFragMotionVectorAndMode(cpi, UFragIndex, &FourMVect[4]);
SetFragMotionVectorAndMode(cpi, VFragIndex, &FourMVect[5]);
/* Note the four MVs values for current macro-block. */
AddMotionVector( cpi, &FourMVect[0]);
AddMotionVector( cpi, &FourMVect[1]);
AddMotionVector( cpi, &FourMVect[2]);
AddMotionVector( cpi, &FourMVect[3]);
/* Update Prior last mv with last mv */
PriorLastInterMVect.x = LastInterMVect.x;
PriorLastInterMVect.y = LastInterMVect.y;
/* Note last inter MV for future use */
LastInterMVect.x = FourMVect[3].x;
LastInterMVect.y = FourMVect[3].y;
} else {
cpi->MBCodingMode = CODE_INTRA;
SetMBMotionVectorsAndMode(cpi,YFragIndex,UFragIndex,
VFragIndex,&ZeroVect);
}
/* setting up mode specific block types
*******************************************************/
*InterError += (BestError>>8);
*IntraError += (MBIntraError>>8);
}
SB++;
}
}
cpi->pb.ClearSysState();
/* Return number of pixels coded */
return 0;
}
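/* The two motion-vector acceptance tests above (for the new inter MV and
   for the golden-frame MV) share the same form.  A minimal sketch of that
   test, factored into a hypothetical helper purely for illustration; it
   is not part of the committed source.  The MVChangeFactor and
   FourMvChangeFactor values appear to act as multiples of 1/16, hence
   the factor of 16. */
static int ExampleNewMVIsWorthIt( ogg_uint32_t NewError,
                                  ogg_uint32_t BestError,
                                  ogg_uint32_t ChangeFactor,
                                  ogg_uint32_t MinImprovement ){
  /* Candidate must beat the current best by a proportional margin... */
  return ( 16 * NewError < BestError * ChangeFactor ) &&
         /* ...and by at least a fixed absolute improvement. */
         ( (NewError + MinImprovement) < BestError );
}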
ogg_uint32_t QuadCodeDisplayFragments (CP_INSTANCE *cpi) {
ogg_int32_t i,j;
ogg_uint32_t coded_pixels=0;
ogg_uint16_t QIndex;
int k,m,n;
/* predictor multiplier up-left, up, up-right,left, shift */
ogg_int16_t pc[16][6]={
{0,0,0,0,0,0},
{0,0,0,1,0,0}, /* PL */
{0,0,1,0,0,0}, /* PUR */
{0,0,53,75,7,127}, /* PUR|PL */
{0,1,0,0,0,0}, /* PU */
{0,1,0,1,1,1}, /* PU|PL */
{0,1,0,0,0,0}, /* PU|PUR */
{0,0,53,75,7,127}, /* PU|PUR|PL */
{1,0,0,0,0,0}, /* PUL| */
{0,0,0,1,0,0}, /* PUL|PL */
{1,0,1,0,1,1}, /* PUL|PUR */
{0,0,53,75,7,127}, /* PUL|PUR|PL */
{0,1,0,0,0,0}, /* PUL|PU */
{-26,29,0,29,5,31}, /* PUL|PU|PL */
{3,10,3,0,4,15}, /* PUL|PU|PUR */
{-26,29,0,29,5,31} /* PUL|PU|PUR|PL */
};
struct SearchPoints {
int RowOffset;
int ColOffset;
} DCSearchPoints[]= {
{0,-2},{-2,0},{-1,-2},{-2,-1},{-2,1},{-1,2},{-2,-2},{-2,2},{0,-3},
{-3,0},{-1,-3},{-3,-1},{-3,1},{-1,3},{-2,-3},{-3,-2},{-3,2},{-2,3},
{0,-4},{-4,0},{-1,-4},{-4,-1},{-4,1},{-1,4},{-3,-3},{-3,3}
};
int DCSearchPointCount = 0;
/* fragment left fragment up-left, fragment up, fragment up-right */
int fl,ful,fu,fur;
/* value left value up-left, value up, value up-right */
int vl,vul,vu,vur;
/* fragment number left, up-left, up, up-right */
int l,ul,u,ur;
/*which predictor constants to use */
ogg_int16_t wpc;
/* last used inter predictor (Raster Order) */
ogg_int16_t Last[3]; /* last value used for given frame */
ogg_int16_t TempInter = 0;
int FragsAcross=cpi->pb.HFragments;
int FragsDown = cpi->pb.VFragments;
int FromFragment,ToFragment;
ogg_int32_t FragIndex;
int WhichFrame;
int WhichCase;
ogg_int16_t Mode2Frame[] = {
1, /* CODE_INTER_NO_MV 0 => Encoded diff from same MB last frame */
0, /* CODE_INTRA 1 => DCT Encoded Block */
1, /* CODE_INTER_PLUS_MV 2 => Encoded diff from included MV MB last frame */
1, /* CODE_INTER_LAST_MV 3 => Encoded diff from MRU MV MB last frame */
1, /* CODE_INTER_PRIOR_MV 4 => Encoded diff from prior last MV MB last frame */
2, /* CODE_USING_GOLDEN 5 => Encoded diff from same MB golden frame */
2, /* CODE_GOLDEN_MV 6 => Encoded diff from included MV MB golden frame */
1 /* CODE_INTER_FOUR_MV 7 => Encoded diff from included 4 separate MV blocks */
};
ogg_int16_t PredictedDC;
/* Initialise the coded block indices variables. These allow
subsequent linear access to the quad tree ordered list of coded
blocks */
cpi->pb.CodedBlockIndex = 0;
/* Set the inter/intra decision control variables. */
QIndex = Q_TABLE_SIZE - 1;
while ( (ogg_int32_t) QIndex >= 0 ) {
if ( (QIndex == 0) ||
( cpi->pb.QThreshTable[QIndex] >= cpi->pb.ThisFrameQualityValue) )
break;
QIndex --;
}
/* Encode and tokenise the Y, U and V components */
coded_pixels = QuadCodeComponent(cpi, 0, cpi->pb.YSBRows, cpi->pb.YSBCols,
cpi->pb.HFragments%4,
cpi->pb.VFragments%4,
cpi->pb.Configuration.VideoFrameWidth );
coded_pixels += QuadCodeComponent(cpi, cpi->pb.YSuperBlocks,
cpi->pb.UVSBRows,
cpi->pb.UVSBCols,
(cpi->pb.HFragments/2)%4,
(cpi->pb.VFragments/2)%4,
cpi->pb.Configuration.VideoFrameWidth>>1 );
coded_pixels += QuadCodeComponent(cpi,
cpi->pb.YSuperBlocks+cpi->pb.UVSuperBlocks,
cpi->pb.UVSBRows, cpi->pb.UVSBCols,
(cpi->pb.HFragments/2)%4,
(cpi->pb.VFragments/2)%4,
cpi->pb.Configuration.VideoFrameWidth>>1 );
/* for y,u,v */
for ( j = 0; j < 3 ; j++) {
/* pick which fragments based on Y, U, V */
switch(j){
case 0: /* y */
FromFragment = 0;
ToFragment = cpi->pb.YPlaneFragments;
FragsAcross = cpi->pb.HFragments;
FragsDown = cpi->pb.VFragments;
break;
case 1: /* u */
FromFragment = cpi->pb.YPlaneFragments;
ToFragment = cpi->pb.YPlaneFragments + cpi->pb.UVPlaneFragments ;
FragsAcross = cpi->pb.HFragments >> 1;
FragsDown = cpi->pb.VFragments >> 1;
break;
case 2: /* v */
FromFragment = cpi->pb.YPlaneFragments + cpi->pb.UVPlaneFragments;
ToFragment = cpi->pb.YPlaneFragments + (2 * cpi->pb.UVPlaneFragments) ;
FragsAcross = cpi->pb.HFragments >> 1;
FragsDown = cpi->pb.VFragments >> 1;
break;
}
/* initialize our array of last used DC Components */
for(k=0;k<3;k++)Last[k]=0;
i=FromFragment;
/* do prediction on all of Y, U or V */
for ( m = 0 ; m < FragsDown ; m++) {
for ( n = 0 ; n < FragsAcross ; n++, i++) {
cpi->OriginalDC[i] = cpi->pb.QFragData[i][0];
/* only do DC prediction if the fragment is coded, or if this is a
key frame (all fragments intra) */
if( cpi->pb.display_fragments[i] ||
(GetFrameType(&cpi->pb) == BASE_FRAME) ) {
/* Type of Fragment */
WhichFrame = Mode2Frame[cpi->pb.FragCodingMethod[i]];
/* Check Borderline Cases */
WhichCase = (n==0) + ((m==0) << 1) + ((n+1 == FragsAcross) << 2);
switch(WhichCase) {
case 0: /* normal case no border condition */
/* calculate values left, up, up-right and up-left */
l = i-1;
u = i - FragsAcross;
ur = i - FragsAcross + 1;
ul = i - FragsAcross - 1;
/* calculate values */
vl = cpi->OriginalDC[l];
vu = cpi->OriginalDC[u];
vur = cpi->OriginalDC[ur];
vul = cpi->OriginalDC[ul];
/* fragment valid for prediction use if coded and it comes
from same frame as the one we are predicting */
fl = cpi->pb.display_fragments[l] &&
(Mode2Frame[cpi->pb.FragCodingMethod[l]] == WhichFrame);
fu = cpi->pb.display_fragments[u] &&
(Mode2Frame[cpi->pb.FragCodingMethod[u]] == WhichFrame);
fur = cpi->pb.display_fragments[ur] &&
(Mode2Frame[cpi->pb.FragCodingMethod[ur]] == WhichFrame);
ful = cpi->pb.display_fragments[ul] &&
(Mode2Frame[cpi->pb.FragCodingMethod[ul]] == WhichFrame);
/* calculate which predictor to use */
wpc = (fl*PL) | (fu*PU) | (ful*PUL) | (fur*PUR);
break;
case 1: /* n == 0 Left Column */
/* calculate values left, up, up-right and up-left */
u = i - FragsAcross;
ur = i - FragsAcross + 1;
/* calculate values */
vu = cpi->OriginalDC[u];
vur = cpi->OriginalDC[ur];
/* fragment valid for prediction if coded and it comes
from same frame as the one we are predicting */
fu = cpi->pb.display_fragments[u] &&
(Mode2Frame[cpi->pb.FragCodingMethod[u]] == WhichFrame);
fur = cpi->pb.display_fragments[ur] &&
(Mode2Frame[cpi->pb.FragCodingMethod[ur]] == WhichFrame);
/* calculate which predictor to use */
wpc = (fu*PU) | (fur*PUR);
break;
case 2: /* m == 0 Top Row */
case 6: /* m == 0 and n+1 == FragsAcross or Top Row Right Column */
/* calculate values left, up, up-right and up-left */
l = i-1;
/* calculate values */
vl = cpi->OriginalDC[l];
/* fragment valid for prediction if coded and it comes
from same frame as the one we are predicting */
fl = cpi->pb.display_fragments[l] &&
(Mode2Frame[cpi->pb.FragCodingMethod[l]] == WhichFrame);
/* calculate which predictor to use */
wpc = (fl*PL) ;
break;
case 3: /* n == 0 & m == 0 Top Row Left Column */
wpc = 0;
break;
case 4: /* n+1 == FragsAcross : Right Column */
/* calculate values left, up, up-right and up-left */
l = i-1;
u = i - FragsAcross;
ul = i - FragsAcross - 1;
/* calculate values */
vl = cpi->OriginalDC[l];
vu = cpi->OriginalDC[u];
vul = cpi->OriginalDC[ul];
/* fragment valid for prediction if coded and it comes
from same frame as the one we are predicting */
fl = cpi->pb.display_fragments[l] &&
(Mode2Frame[cpi->pb.FragCodingMethod[l]] == WhichFrame);
fu = cpi->pb.display_fragments[u] &&
(Mode2Frame[cpi->pb.FragCodingMethod[u]] == WhichFrame);
ful = cpi->pb.display_fragments[ul] &&
(Mode2Frame[cpi->pb.FragCodingMethod[ul]] == WhichFrame);
/* calculate which predictor to use */
wpc = (fl*PL) | (fu*PU) | (ful*PUL) ;
break;
}
if(wpc==0) {
FragIndex = 1;
/* find the nearest one that is coded */
for( k = 0; k < DCSearchPointCount ; k++) {
FragIndex = i + DCSearchPoints[k].RowOffset *
FragsAcross + DCSearchPoints[k].ColOffset;
if( FragIndex - FromFragment > 0 ) {
if(cpi->pb.display_fragments[FragIndex] &&
(Mode2Frame[cpi->pb.FragCodingMethod[FragIndex]] ==
WhichFrame)) {
cpi->pb.QFragData[i][0] -= cpi->OriginalDC[FragIndex];
FragIndex = 0;
break;
}
}
}
/* if none matched, fall back to the last DC value coded for this frame type */
if(FragIndex) cpi->pb.QFragData[i][0] -= Last[WhichFrame];
} else {
/* don't do divide if divisor is 1 or 0 */
PredictedDC = (pc[wpc][0]*vul + pc[wpc][1] * vu +
pc[wpc][2] * vur + pc[wpc][3] * vl );
/* if we need to do a shift */
if(pc[wpc][4] != 0 ) {
/* If negative add in the negative correction factor */
PredictedDC += (HIGHBITDUPPED(PredictedDC) & pc[wpc][5]);
/* Shift in lieu of a divide */
PredictedDC >>= pc[wpc][4];
}
/* check for outranging on the two predictors that can outrange */
switch(wpc) {
case 13: /* pul pu pl */
case 15: /* pul pu pur pl */
if( abs(PredictedDC - vu) > 128)
PredictedDC = vu;
else if( abs(PredictedDC - vl) > 128)
PredictedDC = vl;
else if( abs(PredictedDC - vul) > 128)
PredictedDC = vul;
break;
}
cpi->pb.QFragData[i][0] -= PredictedDC;
}
/* Save the last fragment coded for whatever frame we are
predicting from */
Last[WhichFrame] = cpi->OriginalDC[i];
}
}
}
}
/* Pack DC tokens and adjust the ones we couldn't predict 2d */
for ( i = 0; i < cpi->pb.CodedBlockIndex; i++ ) {
/* Get the linear index for the current coded fragment. */
FragIndex = cpi->pb.CodedBlockList[i];
coded_pixels += DPCMTokenizeBlock ( cpi, FragIndex,
cpi->pb.Configuration.VideoFrameWidth);
}
/* Bit pack the video data */
PackCodedVideo(cpi);
/* End the bit packing run. */
/* EndAddBitsToBuffer(cpi); */
/* Reconstruct the reference frames */
ReconRefFrames(&cpi->pb);
UpdateFragQIndex(&cpi->pb);
/* Measure the inter reconstruction error for all the blocks that
were coded, for use as part of the recovery monitoring process in
subsequent frames. */
for ( i = 0; i < cpi->pb.CodedBlockIndex; i++ ) {
cpi->LastCodedErrorScore[ cpi->pb.CodedBlockList[i] ] =
cpi->GetBlockReconError( cpi, cpi->pb.CodedBlockList[i] );
}
cpi->pb.ClearSysState();
/* Return total number of coded pixels */
return coded_pixels;
}
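/* Worked example (illustration only, not part of the committed source):
   predicting the DC value for the PUL|PU|PL case, i.e. wpc == 13 with
   the row { -26, 29, 0, 29, 5, 31 } from the pc[][] table above.  The
   weights sum to 32, so the shift of 5 stands in for a divide by 32,
   and 31 is the rounding correction added to negative sums. */
static ogg_int16_t ExamplePredictDC_PUL_PU_PL( ogg_int16_t vul,
                                               ogg_int16_t vu,
                                               ogg_int16_t vl ){
  ogg_int32_t Predicted = -26*vul + 29*vu + 29*vl;  /* PUR weight is 0 here */
  if ( Predicted < 0 )
    Predicted += 31;                    /* negative correction (pc[13][5]) */
  return (ogg_int16_t)(Predicted >> 5); /* shift in lieu of divide (pc[13][4]) */
}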
void WriteFrameHeader( CP_INSTANCE *cpi) {
ogg_uint32_t i;
oggpack_buffer *opb = &cpi->oggbuffer;
/* Output the frame type (base/key frame or inter frame) */
oggpackB_write( opb, (UINT32)cpi->pb.FrameType, 1 );
/* unused, set to 0 always */
oggpackB_write( opb, 0, 1 );
/* Write out details of the current value of Q... variable resolution. */
for ( i = 0; i < Q_TABLE_SIZE; i++ ) {
if ( cpi->pb.ThisFrameQualityValue == cpi->pb.QThreshTable[i] ) {
oggpackB_write( opb, i, 6 );
break;
}
}
if ( i == Q_TABLE_SIZE ) {
/* An invalid Q multiplier was specified. */
IssueWarning( "Invalid Q Multiplier" );
oggpackB_write( opb, 31, 6 );
}
/* If the frame was a base frame then write out the frame dimensions. */
if ( cpi->pb.FrameType == BASE_FRAME ) {
oggpackB_write( opb, (UINT32)0, 8 );
oggpackB_write( opb, (UINT32)cpi->pb.Vp3VersionNo, 5 );
/* Key frame type / method */
oggpackB_write( opb, (UINT32)cpi->pb.KeyFrameType, 1 );
/* Spare configuration bits */
oggpackB_write( opb, (UINT32)0, 2 );
}
}
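/* Decoder-side sketch (an assumption, not code from this commit): reading
   back the header bits that WriteFrameHeader() above emits, using
   libogg's oggpackB_read().  BASE_FRAME is assumed to be written as 0. */
static long ExampleReadFrameHeader( oggpack_buffer *opb ){
  long FrameType;
  long QIndex;
  FrameType = oggpackB_read( opb, 1 );  /* base/key frame or inter frame */
  (void) oggpackB_read( opb, 1 );       /* unused bit, always written as 0 */
  QIndex = oggpackB_read( opb, 6 );     /* index into QThreshTable */
  if ( FrameType == 0 ) {
    (void) oggpackB_read( opb, 8 );     /* spare byte */
    (void) oggpackB_read( opb, 5 );     /* Vp3VersionNo */
    (void) oggpackB_read( opb, 1 );     /* key frame type / method */
    (void) oggpackB_read( opb, 2 );     /* spare configuration bits */
  }
  return QIndex;
}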
1.1 theora/lib/encoder_internal.h
Index: encoder_internal.h
===================================================================
/********************************************************************
* *
* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function:
last mod: $Id: encoder_internal.h,v 1.1 2002/09/16 07:10:02 xiphmont Exp $
********************************************************************/
#include <ogg/ogg.h>
typedef struct CONFIG_TYPE2{
UINT32 TargetBandwidth;
UINT32 OutputFrameRate;
UINT32 FirstFrameQ;
UINT32 BaseQ;
UINT32 MaxQ; /* Absolute Max Q allowed. */
UINT32 ActiveMaxQ; /* Currently active Max Q */
} CONFIG_TYPE2;
typedef struct CP_INSTANCE {
/* Compressor Configuration */
SCAN_CONFIG_DATA ScanConfig;
CONFIG_TYPE2 Configuration;
BOOL QuickCompress;
BOOL GoldenFrameEnabled;
BOOL InterPrediction;
BOOL MotionCompensation;
BOOL AutoKeyFrameEnabled ;
INT32 ForceKeyFrameEvery ;
INT32 AutoKeyFrameThreshold ;
UINT32 LastKeyFrame ;
UINT32 MinimumDistanceToKeyFrame ;
UINT32 KeyFrameDataTarget ; /* Data rate target for key frames */
UINT32 KeyFrameFrequency ;
BOOL DropFramesAllowed ;
INT32 DropCount ;
INT32 MaxConsDroppedFrames ;
INT32 DropFrameTriggerBytes;
BOOL DropFrameCandidate;
UINT32 QualitySetting;
UINT32 Sharpness;
UINT32 PreProcFilterLevel;
BOOL NoDrops;
/* Compressor Statistics */
double TotErrScore;
INT64 KeyFrameCount; /* Count of key frames. */
INT64 TotKeyFrameBytes;
UINT32 LastKeyFrameSize;
UINT32 PriorKeyFrameSize[KEY_FRAME_CONTEXT];
UINT32 PriorKeyFrameDistance[KEY_FRAME_CONTEXT];
INT32 FrameQuality[6];
int DecoderErrorCode; /* Decoder error flag. */
INT32 ThreshMapThreshold;
INT32 TotalMotionScore;
INT64 TotalByteCount;
INT32 FixedQ;
/* Frame Statistics */
INT8 InterCodeCount;
INT64 CurrentFrame;
INT64 CarryOver ;
UINT32 LastFrameSize;
/*UINT32 ThisFrameSize; use the oggpackB primitive */
/*UINT32 BufferedOutputBytes; */
UINT32 FrameBitCount;
BOOL ThisIsFirstFrame;
BOOL ThisIsKeyFrame;
INT32 MotionScore;
UINT32 RegulationBlocks;
INT32 RecoveryMotionScore;
BOOL RecoveryBlocksAdded ;
double ProportionRecBlocks;
double MaxRecFactor ;
/* Rate Targeting variables. */
UINT32 ThisFrameTargetBytes;
double BpbCorrectionFactor;
/* Up regulation variables */
UINT32 FinalPassLastPos; /* Used to regulate a final unrestricted
high quality pass. */
UINT32 LastEndSB; /* Where we were in the loop last time. */
UINT32 ResidueLastEndSB; /* Where we were in the residue update
loop last time. */
/* Controlling Block Selection */
UINT32 MVChangeFactor;
UINT32 FourMvChangeFactor;
UINT32 MinImprovementForNewMV;
UINT32 ExhaustiveSearchThresh;
UINT32 MinImprovementForFourMV;
UINT32 FourMVThreshold;
/* Module shared data structures. */
INT32 frame_target_rate;
INT32 BaseLineFrameTargetRate;
INT32 min_blocks_per_frame;
UINT32 tot_bytes_old;
/*********************************************************************/
/* Frames used in the selective convolution filtering of the Y plane. */
UINT8 *ConvDestBuffer;
YUV_BUFFER_ENTRY *yuv0ptr;
YUV_BUFFER_ENTRY *yuv1ptr;
UINT8 *ConvDestBufferAlloc;
YUV_BUFFER_ENTRY *yuv0ptrAlloc;
YUV_BUFFER_ENTRY *yuv1ptrAlloc;
/*********************************************************************/
/*********************************************************************/
/* Token Buffers */
UINT32 *OptimisedTokenListEb; /* Optimised token list extra bits */
UINT8 *OptimisedTokenList; /* Optimised token list. */
UINT8 *OptimisedTokenListHi; /* Optimised token list huffman
table index */
UINT32 *OptimisedTokenListEbAlloc; /* Optimised token list extra bits */
UINT8 *OptimisedTokenListAlloc; /* Optimised token list. */
UINT8 *OptimisedTokenListHiAlloc; /* Optimised token list
huffman table index */
UINT8 *OptimisedTokenListPlAlloc; /* Optimised token list plane
(Y = 0 or UV = 1) */
UINT8 *OptimisedTokenListPl; /* Plane to which the token belongs
Y = 0 or UV = 1 */
INT32 OptimisedTokenCount; /* Count of Optimized tokens */
UINT32 RunHuffIndex; /* Huffman table in force at the
start of a run */
UINT32 RunPlaneIndex; /* The plane (Y=0 UV=1) to which
the first token in an EOB run
belonged. */
UINT32 TotTokenCount;
INT32 TokensToBeCoded;
INT32 TokensCoded;
/********************************************************************/
/* SuperBlock, MacroBLock and Fragment Information */
/* Coded flag arrays and counters for them */
UINT8 *PartiallyCodedFlags;
UINT8 *PartiallyCodedMbPatterns;
UINT8 *UncodedMbFlags;
UINT8 *extra_fragments; /* extra updates not recommended by
pre-processor */
INT16 *OriginalDC;
UINT32 *FragmentLastQ; /* Array used to keep track of quality at
which each fragment was last
updated. */
UINT8 *FragTokens;
UINT32 *FragTokenCounts; /* Number of tokens per fragment */
UINT32 *RunHuffIndices;
UINT32 *LastCodedErrorScore;
UINT32 *ModeList;
MOTION_VECTOR *MVList;
UINT8 *BlockCodedFlags;
UINT32 MvListCount;
UINT32 ModeListCount;
UINT8 *DataOutputBuffer;
/*********************************************************************/
UINT32 RunLength;
UINT32 MaxBitTarget; /* Cut off target for rate capping */
double BitRateCapFactor; /* Factor relating normal frame target
to cut off target. */
UINT8 MBCodingMode; /* Coding mode flags */
INT32 MVPixelOffsetY[MAX_SEARCH_SITES];
UINT32 InterTripOutThresh;
UINT8 MVEnabled;
UINT32 MotionVectorSearchCount;
UINT32 FrameMVSearcOunt;
INT32 MVSearchSteps;
INT32 MVOffsetX[MAX_SEARCH_SITES];
INT32 MVOffsetY[MAX_SEARCH_SITES];
INT32 HalfPixelRef2Offset[9]; /* Offsets for half pixel compensation */
INT8 HalfPixelXOffset[9]; /* Half pixel MV offsets for X */
INT8 HalfPixelYOffset[9]; /* Half pixel MV offsets for Y */
UINT32 bit_pattern ;
UINT8 bits_so_far ;
UINT32 lastval ;
UINT32 lastrun ;
Q_LIST_ENTRY *quantized_list;
Q_LIST_ENTRY *quantized_listAlloc;
MOTION_VECTOR MVector;
UINT32 TempBitCount;
INT16 *DCT_codes; /* Buffer that stores the result of Forward DCT */
INT16 *DCTDataBuffer; /* Input data buffer for Forward DCT */
/* Motion compensation related variables */
UINT32 MvMaxExtent;
/* copied from cbitman.c */
INT32 byte_bit_offset;
UINT32 DataBlock;
UINT32 mybits;
UINT32 ByteBitsLeft;
double QTargetModifier[Q_TABLE_SIZE];
/* instances (used for reconstructing buffers and to hold tokens etc.) */
PP_INSTANCE *pp; /* preprocessor */
PB_INSTANCE pb; /* playback */
/* ogg bitpacker for use in packet coding */
oggpack_buffer oggbuffer;
} CP_INSTANCE;
1.1 theora/lib/encoder_lookup.h
Index: encoder_lookup.h
===================================================================
/********************************************************************
* *
* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function: simple static lookups for VP3 frame encoder
last mod: $Id: encoder_lookup.h,v 1.1 2002/09/16 07:10:02 xiphmont Exp $
********************************************************************/
ogg_uint32_t MvPattern[(MAX_MV_EXTENT * 2) + 1] = {
0x000000ff, 0x000000fd, 0x000000fb, 0x000000f9,
0x000000f7, 0x000000f5, 0x000000f3, 0x000000f1,
0x000000ef, 0x000000ed, 0x000000eb, 0x000000e9,
0x000000e7, 0x000000e5, 0x000000e3, 0x000000e1,
0x0000006f, 0x0000006d, 0x0000006b, 0x00000069,
0x00000067, 0x00000065, 0x00000063, 0x00000061,
0x0000002f, 0x0000002d, 0x0000002b, 0x00000029,
0x00000009, 0x00000007, 0x00000002, 0x00000000,
0x00000001, 0x00000006, 0x00000008, 0x00000028,
0x0000002a, 0x0000002c, 0x0000002e, 0x00000060,
0x00000062, 0x00000064, 0x00000066, 0x00000068,
0x0000006a, 0x0000006c, 0x0000006e, 0x000000e0,
0x000000e2, 0x000000e4, 0x000000e6, 0x000000e8,
0x000000ea, 0x000000ec, 0x000000ee, 0x000000f0,
0x000000f2, 0x000000f4, 0x000000f6, 0x000000f8,
0x000000fa, 0x000000fc, 0x000000fe,
};
ogg_uint32_t MvBits[(MAX_MV_EXTENT * 2) + 1] = {
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
7, 7, 7, 7, 7, 7, 7, 7,
6, 6, 6, 6, 4, 4, 3, 3,
3, 4, 4, 6, 6, 6, 6, 7,
7, 7, 7, 7, 7, 7, 7, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8,
};
ogg_uint32_t MvPattern2[(MAX_MV_EXTENT * 2) + 1] = {
0x0000003f, 0x0000003d, 0x0000003b, 0x00000039,
0x00000037, 0x00000035, 0x00000033, 0x00000031,
0x0000002f, 0x0000002d, 0x0000002b, 0x00000029,
0x00000027, 0x00000025, 0x00000023, 0x00000021,
0x0000001f, 0x0000001d, 0x0000001b, 0x00000019,
0x00000017, 0x00000015, 0x00000013, 0x00000011,
0x0000000f, 0x0000000d, 0x0000000b, 0x00000009,
0x00000007, 0x00000005, 0x00000003, 0x00000000,
0x00000002, 0x00000004, 0x00000006, 0x00000008,
0x0000000a, 0x0000000c, 0x0000000e, 0x00000010,
0x00000012, 0x00000014, 0x00000016, 0x00000018,
0x0000001a, 0x0000001c, 0x0000001e, 0x00000020,
0x00000022, 0x00000024, 0x00000026, 0x00000028,
0x0000002a, 0x0000002c, 0x0000002e, 0x00000030,
0x00000032, 0x00000034, 0x00000036, 0x00000038,
0x0000003a, 0x0000003c, 0x0000003e,
};
ogg_uint32_t MvBits2[(MAX_MV_EXTENT * 2) + 1] = {
6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6,
};
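/* Illustrative sketch (the indexing here is an assumption, not code from
   this commit): writing one motion-vector component using the
   MvPattern/MvBits pair above.  A component in the range
   [-MAX_MV_EXTENT, MAX_MV_EXTENT] is offset to a non-negative table
   index; MvPattern2/MvBits2 would be used the same way for the
   fixed-length (6 bits per component) scheme. */
static void ExampleWriteMVComponent( oggpack_buffer *opb,
                                     ogg_int32_t MVComponent ){
  ogg_int32_t Index = MVComponent + MAX_MV_EXTENT; /* centre entry is MV of 0 */
  oggpackB_write( opb, MvPattern[Index], (int)MvBits[Index] );
}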
ogg_uint32_t ModeBitPatterns[MAX_MODES] = {
0x00, 0x02, 0x06, 0x0E, 0x1E, 0x3E, 0x7E, 0x7F };
ogg_int32_t ModeBitLengths[MAX_MODES] = {
1, 2, 3, 4, 5, 6, 7, 7 };
unsigned char ModeSchemes[MODE_METHODS-1][MAX_MODES] = {
/* Reserved for optimal */
{ 0, 0, 0, 0, 0, 0, 0, 0 },
/* Last Mv dominates */
{ 3, 4, 2, 0, 1, 5, 6, 7 }, /* L P M N I G GM 4 */
{ 2, 4, 3, 0, 1, 5, 6, 7 }, /* L P N M I G GM 4 */
{ 3, 4, 1, 0, 2, 5, 6, 7 }, /* L M P N I G GM 4 */
{ 2, 4, 1, 0, 3, 5, 6, 7 }, /* L M N P I G GM 4 */
/* No MV dominates */
{ 0, 4, 3, 1, 2, 5, 6, 7 }, /* N L P M I G GM 4 */
{ 0, 5, 4, 2, 3, 1, 6, 7 }, /* N G L P M I GM 4 */
};
ogg_uint32_t MvThreshTable[Q_TABLE_SIZE] = {
65, 65, 65, 65, 50, 50, 50, 50,
40, 40, 40, 40, 40, 40, 40, 40,
30, 30, 30, 30, 30, 30, 30, 30,
20, 20, 20, 20, 20, 20, 20, 20,
15, 15, 15, 15, 15, 15, 15, 15,
10, 10, 10, 10, 10, 10, 10, 10,
5, 5, 5, 5, 5, 5, 5, 5,
0, 0, 0, 0, 0, 0, 0, 0
};
ogg_uint32_t MVChangeFactorTable[Q_TABLE_SIZE] = {
11, 11, 11, 11, 12, 12, 12, 12,
13, 13, 13, 13, 13, 13, 13, 13,
14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14,
15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15
};
ogg_uint32_t PriorKeyFrameWeight[KEY_FRAME_CONTEXT] = { 1,2,3,4,5 };
/* Data structures controlling addition of residue blocks */
ogg_uint32_t ResidueErrorThresh[Q_TABLE_SIZE] = {
750, 700, 650, 600, 590, 580, 570, 560,
550, 540, 530, 520, 510, 500, 490, 480,
470, 460, 450, 440, 430, 420, 410, 400,
390, 380, 370, 360, 350, 340, 330, 320,
310, 300, 290, 280, 270, 260, 250, 245,
240, 235, 230, 225, 220, 215, 210, 205,
200, 195, 190, 185, 180, 175, 170, 165,
160, 155, 150, 145, 140, 135, 130, 130 };
ogg_uint32_t ResidueBlockFactor[Q_TABLE_SIZE] = {
3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2 };
1.1 theora/lib/frarray.c
Index: frarray.c
===================================================================
/********************************************************************
* *
* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function:
last mod: $Id: frarray.c,v 1.1 2002/09/16 07:10:02 xiphmont Exp $
********************************************************************/
#include "ogg/ogg.h"
#include "encoder_internal.h"
void PackAndWriteDFArray( CP_INSTANCE *cpi ){
ogg_uint32_t i;
unsigned char val;
ogg_uint32_t run_count;
ogg_uint32_t SB, MB, B; /* Block, MB and SB loop variables */
ogg_uint32_t LastSbMbIndex = 0;
ogg_uint32_t BListIndex = 0;
ogg_uint32_t LastSbBIndex = 0;
ogg_int32_t DfBlockIndex; /* Block index in display_fragments */
/* Initialise workspaces */
memset( cpi->pb.SBFullyFlags, 1, cpi->pb.SuperBlocks);
memset( cpi->pb.SBCodedFlags, 0, cpi->pb.SuperBlocks );
memset( cpi->PartiallyCodedFlags, 0, cpi->pb.SuperBlocks );
memset( cpi->BlockCodedFlags, 0, cpi->pb.UnitFragments);
for( SB = 0; SB < cpi->pb.SuperBlocks; SB++ ) {
/* Check for coded blocks and macro-blocks */
for ( MB=0; MB<4; MB++ ) {
/* If MB in frame */
if ( QuadMapToMBTopLeft(cpi->pb.BlockMap,SB,MB) >= 0 ) {
for ( B=0; B<4; B++ ) {
DfBlockIndex = QuadMapToIndex1( cpi->pb.BlockMap,SB, MB, B );
/* Does Block lie in frame: */
if ( DfBlockIndex >= 0 ) {
/* In Frame: If it is not coded then this SB is only
partly coded. */
if ( cpi->pb.display_fragments[DfBlockIndex] ) {
cpi->pb.SBCodedFlags[SB] = 1; /* SB at least partly coded */
cpi->BlockCodedFlags[BListIndex] = 1; /* Block is coded */
}else{
cpi->pb.SBFullyFlags[SB] = 0; /* SB not fully coded */
cpi->BlockCodedFlags[BListIndex] = 0; /* Block is not coded */
}
BListIndex++;
}
}
}
}
/* Is the SB fully coded or uncoded.
If so then backup BListIndex and MBListIndex */
if ( cpi->pb.SBFullyFlags[SB] || !cpi->pb.SBCodedFlags[SB] ) {
BListIndex = LastSbBIndex; /* Reset to values from previous SB */
}else{
cpi->PartiallyCodedFlags[SB] = 1; /* Set up list of partially
coded SBs */
LastSbBIndex = BListIndex;
}
}
/* Code list of partially coded Super-Block. */
val = cpi->PartiallyCodedFlags[0];
AddBitsToBuffer( cpi, (ogg_uint32_t)val, 1);
i = 0;
while ( i < cpi->pb.SuperBlocks ) {
run_count = 0;
while ( (i<cpi->pb.SuperBlocks) && (cpi->PartiallyCodedFlags[i]==val) ) {
i++;
run_count++;
}
/* Code the run */
FrArrayCodeSBRun( cpi, run_count );
val = ( val == 0 ) ? 1 : 0;
}
/* RLC Super-Block fully/not coded. */
i = 0;
/* Skip partially coded blocks */
while( (i < cpi->pb.SuperBlocks) && cpi->PartiallyCodedFlags[i] )
i++;
if ( i < cpi->pb.SuperBlocks ) {
val = cpi->pb.SBFullyFlags[i];
AddBitsToBuffer( cpi, (ogg_uint32_t)val, 1);
while ( i < cpi->pb.SuperBlocks ) {
run_count = 0;
while ( (i < cpi->pb.SuperBlocks) && (cpi->pb.SBFullyFlags[i] == val) ) {
i++;
/* Skip partially coded blocks */
while( (i < cpi->pb.SuperBlocks) && cpi->PartiallyCodedFlags[i] )
i++;
run_count++;
}
/* Code the run */
FrArrayCodeSBRun( cpi, run_count );
val = ( val == 0 ) ? 1 : 0;
}
}
/* Now code the block flags */
if ( BListIndex > 0 ) {
/* Code the block flags start value */
val = cpi->BlockCodedFlags[0];
AddBitsToBuffer( cpi, (ogg_uint32_t)val, 1);
/* Now code the block flags. */
for ( i = 0; i < BListIndex; ) {
run_count = 0;
while ( (cpi->BlockCodedFlags[i] == val) && (i < BListIndex) ) {
i++;
run_count++;
}
FrArrayCodeBlockRun( cpi, run_count );
val = ( val == 0 ) ? 1 : 0;
}
}
}
ogg_uint32_t FrArrayCodeSBRun( CP_INSTANCE *cpi, ogg_uint32_t value ){
ogg_uint32_t CodedVal = 0;
ogg_uint32_t CodedBits = 0;
/* Coding scheme:
   Codeword              RunLength
   0                     1
   10x                   2-3
   110x                  4-5
   1110xx                6-9
   11110xxx              10-17
   111110xxxx            18-33
   111111xxxxxxxxxxxx    34-4129 */
if ( value == 1 ){
CodedVal = 0;
CodedBits = 1;
} else if ( value <= 3 ) {
CodedVal = 0x0004 + (value - 2);
CodedBits = 3;
} else if ( value <= 5 ) {
CodedVal = 0x000C + (value - 4);
CodedBits = 4;
} else if ( value <= 9 ) {
CodedVal = 0x0038 + (value - 6);
CodedBits = 6;
} else if ( value <= 17 ) {
CodedVal = 0x00F0 + (value - 10);
CodedBits = 8;
} else if ( value <= 33 ) {
CodedVal = 0x03E0 + (value - 18);
CodedBits = 10;
} else {
CodedVal = 0x3F000 + (value - 34);
CodedBits = 18;
}
/* Add the bits to the encode holding buffer. */
AddBitsToBuffer( cpi, CodedVal, (ogg_uint32_t)CodedBits );
return CodedBits;
}
ogg_uint32_t FrArrayCodeBlockRun( CP_INSTANCE *cpi, ogg_uint32_t value ) {
ogg_uint32_t CodedVal = 0;
ogg_uint32_t CodedBits = 0;
/* Coding scheme:
   Codeword              RunLength
   0x                    1-2
   10x                   3-4
   110x                  5-6
   1110xx                7-10
   11110xx               11-14
   11111xxxx             15-30 */
if ( value <= 2 ) {
CodedVal = value - 1;
CodedBits = 2;
} else if ( value <= 4 ) {
CodedVal = 0x0004 + (value - 3);
CodedBits = 3;
} else if ( value <= 6 ) {
CodedVal = 0x000C + (value - 5);
CodedBits = 4;
} else if ( value <= 10 ) {
CodedVal = 0x0038 + (value - 7);
CodedBits = 6;
} else if ( value <= 14 ) {
CodedVal = 0x0078 + (value - 11);
CodedBits = 7;
} else {
CodedVal = 0x01F0 + (value - 15);
CodedBits = 9;
}
/* Add the bits to the encode holding buffer. */
AddBitsToBuffer( cpi, CodedVal, (ogg_uint32_t)CodedBits );
return CodedBits;
}
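/* Worked example (illustration only, not part of the commit): a run of 7
   partially coded super-blocks falls into the "1110xx" band of
   FrArrayCodeSBRun() above, giving CodedVal = 0x0038 + (7 - 6) = 0x39
   and CodedBits = 6, i.e. the bit pattern 1110 01. */
static ogg_uint32_t ExampleSBRunCodeword( void ){
  ogg_uint32_t value = 7;
  return 0x0038 + (value - 6);   /* == 0x39, to be written with 6 bits */
}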
1.1 theora/lib/mcomp.c
Index: mcomp.c
===================================================================
/********************************************************************
* *
* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function:
last mod: $Id: mcomp.c,v 1.1 2002/09/16 07:10:02 xiphmont Exp $
********************************************************************/
//#define NEW_ERROR_METRIC
#include "mcomp.h"
/* Initialises motion compensation. */
void InitMotionCompensation ( CP_INSTANCE *cpi ){
int i;
int SearchSite=0;
int Len;
int LineStepY = (ogg_int32_t)cpi->pb.Configuration.YStride;
Len=((MvMaxExtent/2)+1)/2;
/* How many search stages are there. */
cpi->MVSearchSteps = 0;
/* Set up offsets arrays used in half pixel correction. */
cpi->HalfPixelRef2Offset[0] = -LineStepY - 1;
cpi->HalfPixelRef2Offset[1] = -LineStepY;
cpi->HalfPixelRef2Offset[2] = -LineStepY + 1;
cpi->HalfPixelRef2Offset[3] = - 1;
cpi->HalfPixelRef2Offset[4] = 0;
cpi->HalfPixelRef2Offset[5] = 1;
cpi->HalfPixelRef2Offset[6] = LineStepY - 1;
cpi->HalfPixelRef2Offset[7] = LineStepY;
cpi->HalfPixelRef2Offset[8] = LineStepY + 1;
cpi->HalfPixelXOffset[0] = -1;
cpi->HalfPixelXOffset[1] = 0;
cpi->HalfPixelXOffset[2] = 1;
cpi->HalfPixelXOffset[3] = -1;
cpi->HalfPixelXOffset[4] = 0;
cpi->HalfPixelXOffset[5] = 1;
cpi->HalfPixelXOffset[6] = -1;
cpi->HalfPixelXOffset[7] = 0;
cpi->HalfPixelXOffset[8] = 1;
cpi->HalfPixelYOffset[0] = -1;
cpi->HalfPixelYOffset[1] = -1;
cpi->HalfPixelYOffset[2] = -1;
cpi->HalfPixelYOffset[3] = 0;
cpi->HalfPixelYOffset[4] = 0;
cpi->HalfPixelYOffset[5] = 0;
cpi->HalfPixelYOffset[6] = 1;
cpi->HalfPixelYOffset[7] = 1;
cpi->HalfPixelYOffset[8] = 1;
/* Generate offsets for 8 search sites per step. */
while ( Len>0 ) {
/* Another step. */
cpi->MVSearchSteps += 1;
/* Compute offsets for search sites. */
cpi->MVOffsetX[SearchSite] = -Len;
cpi->MVOffsetY[SearchSite++] = -Len;
cpi->MVOffsetX[SearchSite] = 0;
cpi->MVOffsetY[SearchSite++] = -Len;
cpi->MVOffsetX[SearchSite] = Len;
cpi->MVOffsetY[SearchSite++] = -Len;
cpi->MVOffsetX[SearchSite] = -Len;
cpi->MVOffsetY[SearchSite++] = 0;
cpi->MVOffsetX[SearchSite] = Len;
cpi->MVOffsetY[SearchSite++] = 0;
cpi->MVOffsetX[SearchSite] = -Len;
cpi->MVOffsetY[SearchSite++] = Len;
cpi->MVOffsetX[SearchSite] = 0;
cpi->MVOffsetY[SearchSite++] = Len;
cpi->MVOffsetX[SearchSite] = Len;
cpi->MVOffsetY[SearchSite++] = Len;
/* Contract. */
Len /= 2;
}
/* Compute pixel index offsets. */
for ( i=SearchSite-1; i>=0; i-- )
cpi->MVPixelOffsetY[i] = (cpi->MVOffsetY[i]*LineStepY) + cpi->MVOffsetX[i];
}
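/* Worked example (illustration only): if MvMaxExtent is 31 or 32 (the MV
   tables in encoder_lookup.h cover +/-31), Len starts at 8 above and is
   halved each pass, so the loop runs with Len = 8, 4, 2, 1.  That gives
   MVSearchSteps = 4 and 4 * 8 = 32 search sites, so MAX_SEARCH_SITES
   must be at least 32. */
static int ExampleCountSearchSites( int MaxExtent ){
  int Len = ((MaxExtent/2)+1)/2;
  int Sites = 0;
  while ( Len > 0 ) {       /* mirrors the loop in InitMotionCompensation() */
    Sites += 8;
    Len /= 2;
  }
  return Sites;             /* 32 when MaxExtent is 31 or 32 */
}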
ogg_uint32_t GetInterErr (unsigned char * NewDataPtr,
unsigned char * RefDataPtr1,
unsigned char * RefDataPtr2,
ogg_uint32_t PixelsPerLine ) {
ogg_uint32_t i;
ogg_int32_t XSum=0;
ogg_int32_t XXSum=0;
ogg_int32_t DiffVal;
ogg_int32_t AbsRefOffset = abs((int)(RefDataPtr1 - RefDataPtr2));
/* Mode of interpolation chosen based upon the offset of the
second reference pointer */
if ( AbsRefOffset == 0 ) {
for ( i=0; i<BLOCK_HEIGHT_WIDTH; i++ ) {
DiffVal = ((int)NewDataPtr[0]) - (int)RefDataPtr1[0];
XSum += DiffVal;
/* negative array indexes are strictly forbidden by ANSI C and C99 */
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[1]) - (int)RefDataPtr1[1];
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[2]) - (int)RefDataPtr1[2];
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[3]) - (int)RefDataPtr1[3];
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[4]) - (int)RefDataPtr1[4];
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[5]) - (int)RefDataPtr1[5];
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[6]) - (int)RefDataPtr1[6];
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[7]) - (int)RefDataPtr1[7];
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
/* Step to next row of block. */
NewDataPtr += PixelsPerLine;
RefDataPtr1 += STRIDE_EXTRA + PixelsPerLine;
}
}else{
/* Simple two reference interpolation */
for ( i=0; i<BLOCK_HEIGHT_WIDTH; i++ ) {
DiffVal = ((int)NewDataPtr[0]) -
(((int)RefDataPtr1[0] + (int)RefDataPtr2[0]) / 2);
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[1]) -
(((int)RefDataPtr1[1] + (int)RefDataPtr2[1]) / 2);
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[2]) -
(((int)RefDataPtr1[2] + (int)RefDataPtr2[2]) / 2);
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[3]) -
(((int)RefDataPtr1[3] + (int)RefDataPtr2[3]) / 2);
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[4]) -
(((int)RefDataPtr1[4] + (int)RefDataPtr2[4]) / 2);
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[5]) -
(((int)RefDataPtr1[5] + (int)RefDataPtr2[5]) / 2);
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[6]) -
(((int)RefDataPtr1[6] + (int)RefDataPtr2[6]) / 2);
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
DiffVal = ((int)NewDataPtr[7]) -
(((int)RefDataPtr1[7] + (int)RefDataPtr2[7]) / 2);
XSum += DiffVal;
XXSum += XX_LUT[DiffVal+255];
/* Step to next row of block. */
NewDataPtr += PixelsPerLine;
RefDataPtr1 += STRIDE_EXTRA+PixelsPerLine;
RefDataPtr2 += STRIDE_EXTRA+PixelsPerLine;
}
}
/* Compute and return population variance as mis-match metric. */
return (( (XXSum<<6) - XSum*XSum ));
}
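/* Note on the metric above (a derivation, not part of the committed
   source): with N = 64 pixels per 8x8 block, the population variance is
   sigma^2 = (1/N)*Sum(x^2) - ((1/N)*Sum(x))^2, so
   N*Sum(x^2) - (Sum(x))^2 = N^2 * sigma^2.  Since N = 64 = 2^6,
   (XXSum<<6) - XSum*XSum is exactly that scaled variance; the same
   expression appears again in GetIntraError() below. */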
ogg_uint32_t GetSumAbsDiffs (unsigned char * NewDataPtr,
unsigned char * RefDataPtr,
ogg_uint32_t PixelsPerLine,
ogg_uint32_t ErrorSoFar,
ogg_uint32_t BestSoFar ) {
ogg_uint32_t i;
ogg_uint32_t DiffVal = ErrorSoFar;
/* Decide on standard or MMX implementation */
for ( i=0; i < BLOCK_HEIGHT_WIDTH; i++ ) {
DiffVal += abs( ((int)NewDataPtr[0]) - ((int)RefDataPtr[0]) );
DiffVal += abs( ((int)NewDataPtr[1]) - ((int)RefDataPtr[1]) );
DiffVal += abs( ((int)NewDataPtr[2]) - ((int)RefDataPtr[2]) );
DiffVal += abs( ((int)NewDataPtr[3]) - ((int)RefDataPtr[3]) );
DiffVal += abs( ((int)NewDataPtr[4]) - ((int)RefDataPtr[4]) );
DiffVal += abs( ((int)NewDataPtr[5]) - ((int)RefDataPtr[5]) );
DiffVal += abs( ((int)NewDataPtr[6]) - ((int)RefDataPtr[6]) );
DiffVal += abs( ((int)NewDataPtr[7]) - ((int)RefDataPtr[7]) );
/* Step to next row of block. */
NewDataPtr += PixelsPerLine;
RefDataPtr += STRIDE_EXTRA+PixelsPerLine;
}
return DiffVal;
}
ogg_uint32_t GetNextSumAbsDiffs (unsigned char * NewDataPtr,
unsigned char * RefDataPtr,
ogg_uint32_t PixelsPerLine,
ogg_uint32_t ErrorSoFar,
ogg_uint32_t BestSoFar ) {
ogg_uint32_t i;
ogg_uint32_t DiffVal = ErrorSoFar;
for ( i=0; i < BLOCK_HEIGHT_WIDTH; i++ ) {
DiffVal += abs( ((int)NewDataPtr[0]) - ((int)RefDataPtr[0]) );
DiffVal += abs( ((int)NewDataPtr[1]) - ((int)RefDataPtr[1]) );
DiffVal += abs( ((int)NewDataPtr[2]) - ((int)RefDataPtr[2]) );
DiffVal += abs( ((int)NewDataPtr[3]) - ((int)RefDataPtr[3]) );
DiffVal += abs( ((int)NewDataPtr[4]) - ((int)RefDataPtr[4]) );
DiffVal += abs( ((int)NewDataPtr[5]) - ((int)RefDataPtr[5]) );
DiffVal += abs( ((int)NewDataPtr[6]) - ((int)RefDataPtr[6]) );
DiffVal += abs( ((int)NewDataPtr[7]) - ((int)RefDataPtr[7]) );
if ( DiffVal > BestSoFar )break;
/* Step to next row of block. */
NewDataPtr += PixelsPerLine;
RefDataPtr += STRIDE_EXTRA+PixelsPerLine;
}
return DiffVal;
}
ogg_uint32_t GetHalfPixelSumAbsDiffs (unsigned char * SrcData,
unsigned char * RefDataPtr1,
unsigned char * RefDataPtr2,
ogg_uint32_t PixelsPerLine,
ogg_uint32_t ErrorSoFar,
ogg_uint32_t BestSoFar ) {
ogg_uint32_t i;
ogg_uint32_t DiffVal = ErrorSoFar;
ogg_int32_t RefOffset = (int)(RefDataPtr1 - RefDataPtr2);
ogg_uint32_t RefPixelsPerLine = PixelsPerLine + STRIDE_EXTRA;
if ( RefOffset == 0 ) {
/* Simple case as for non 0.5 pixel */
DiffVal += GetSumAbsDiffs( SrcData, RefDataPtr1, PixelsPerLine,
ErrorSoFar, BestSoFar );
} else {
for ( i=0; i < BLOCK_HEIGHT_WIDTH; i++ ) {
DiffVal += abs( ((int)SrcData[0]) - (((int)RefDataPtr1[0] +
(int)RefDataPtr2[0]) / 2) );
DiffVal += abs( ((int)SrcData[1]) - (((int)RefDataPtr1[1] +
(int)RefDataPtr2[1]) / 2) );
DiffVal += abs( ((int)SrcData[2]) - (((int)RefDataPtr1[2] +
(int)RefDataPtr2[2]) / 2) );
DiffVal += abs( ((int)SrcData[3]) - (((int)RefDataPtr1[3] +
(int)RefDataPtr2[3]) / 2) );
DiffVal += abs( ((int)SrcData[4]) - (((int)RefDataPtr1[4] +
(int)RefDataPtr2[4]) / 2) );
DiffVal += abs( ((int)SrcData[5]) - (((int)RefDataPtr1[5] +
(int)RefDataPtr2[5]) / 2) );
DiffVal += abs( ((int)SrcData[6]) - (((int)RefDataPtr1[6] +
(int)RefDataPtr2[6]) / 2) );
DiffVal += abs( ((int)SrcData[7]) - (((int)RefDataPtr1[7] +
(int)RefDataPtr2[7]) / 2) );
if ( DiffVal > BestSoFar ) break;
/* Step to next row of block. */
SrcData += PixelsPerLine;
RefDataPtr1 += RefPixelsPerLine;
RefDataPtr2 += RefPixelsPerLine;
}
}
return DiffVal;
}
ogg_uint32_t GetIntraError (unsigned char * DataPtr,
ogg_uint32_t PixelsPerLine ) {
ogg_uint32_t i;
ogg_uint32_t XSum=0;
ogg_uint32_t XXSum=0;
unsigned char *DiffPtr;
/* Loop expanded out for speed. */
DiffPtr = DataPtr;
for ( i=0; i<BLOCK_HEIGHT_WIDTH; i++ ) {
/* Examine alternate pixel locations. */
XSum += DiffPtr[0];
XXSum += XX_LUT[DiffPtr[0]+255];
XSum += DiffPtr[1];
XXSum += XX_LUT[DiffPtr[1]+255];
XSum += DiffPtr[2];
XXSum += XX_LUT[DiffPtr[2]+255];
XSum += DiffPtr[3];
XXSum += XX_LUT[DiffPtr[3]+255];
XSum += DiffPtr[4];
XXSum += XX_LUT[DiffPtr[4]+255];
XSum += DiffPtr[5];
XXSum += XX_LUT[DiffPtr[5]+255];
XSum += DiffPtr[6];
XXSum += XX_LUT[DiffPtr[6]+255];
XSum += DiffPtr[7];
XXSum += XX_LUT[DiffPtr[7]+255];
/* Step to next row of block. */
DiffPtr += PixelsPerLine;
}
/* Compute population variance as mis-match metric. */
return (( (XXSum<<6) - XSum*XSum ) );
}
ogg_uint32_t GetMBIntraError (CP_INSTANCE *cpi, ogg_uint32_t FragIndex,
ogg_uint32_t PixelsPerLine ) {
ogg_uint32_t LocalFragIndex = FragIndex;
ogg_uint32_t IntraError = 0;
ogg_uint32_t TmpError = 0;
/* Add together the intra errors for those blocks in the macro block
that are coded (Y only) */
if ( cpi->pb.display_fragments[LocalFragIndex] )
IntraError +=
cpi->GetIntraError(&cpi->
ConvDestBuffer[GetFragIndex(cpi->
pb.pixel_index_table,
LocalFragIndex)],
PixelsPerLine );
LocalFragIndex++;
if ( cpi->pb.display_fragments[LocalFragIndex] )
IntraError +=
cpi->GetIntraError(&cpi->
ConvDestBuffer[GetFragIndex(cpi->pb.pixel_index_table,
LocalFragIndex)],
PixelsPerLine );
LocalFragIndex = FragIndex + cpi->pb.HFragments;
if ( cpi->pb.display_fragments[LocalFragIndex] )
IntraError +=
cpi->GetIntraError(&cpi->
ConvDestBuffer[GetFragIndex(cpi->pb.pixel_index_table,
LocalFragIndex)],
PixelsPerLine );
LocalFragIndex++;
if ( cpi->pb.display_fragments[LocalFragIndex] )
IntraError +=
cpi->GetIntraError(&cpi->
ConvDestBuffer[GetFragIndex(cpi->pb.pixel_index_table,
LocalFragIndex)],
PixelsPerLine );
return IntraError;
}
ogg_uint32_t GetMBInterError (CP_INSTANCE *cpi, unsigned char * SrcPtr,
unsigned char * RefPtr, ogg_uint32_t FragIndex,
ogg_int32_t LastXMV, ogg_int32_t LastYMV,
ogg_uint32_t PixelsPerLine ) {
ogg_uint32_t RefPixelsPerLine = cpi->pb.Configuration.YStride;
ogg_uint32_t LocalFragIndex = FragIndex;
ogg_int32_t PixelIndex;
ogg_int32_t RefPixelIndex;
ogg_int32_t RefPixelOffset;
ogg_int32_t RefPtr2Offset;
ogg_uint32_t TmpError = 0;
ogg_uint32_t InterError = 0;
unsigned char * SrcPtr1;
unsigned char * RefPtr1;
/* Work out pixel offset into source buffer. */
PixelIndex = GetFragIndex(cpi->pb.pixel_index_table,LocalFragIndex);
/* Work out the pixel offset in reference buffer for the default
motion vector */
RefPixelIndex = GetFragIndex(cpi->pb.recon_pixel_index_table,LocalFragIndex);
RefPixelOffset = ((LastYMV/2) * RefPixelsPerLine) + (LastXMV/2);
/* Work out the second reference pointer offset. */
RefPtr2Offset = 0;
if ( LastXMV % 2 ) {
if ( LastXMV > 0 )
RefPtr2Offset += 1;
else
RefPtr2Offset -= 1;
}
if ( LastYMV % 2 ) {
if ( LastYMV > 0 )
RefPtr2Offset += RefPixelsPerLine;
else
RefPtr2Offset -= RefPixelsPerLine;
}
/* Add together the errors for those blocks in the macro block that
are coded (Y only) */
if ( cpi->pb.display_fragments[LocalFragIndex] ) {
SrcPtr1 = &SrcPtr[PixelIndex];
RefPtr1 = &RefPtr[RefPixelIndex + RefPixelOffset];
InterError += cpi->GetInterError( SrcPtr1, RefPtr1,
&RefPtr1[RefPtr2Offset], PixelsPerLine );
}
LocalFragIndex++;
if ( cpi->pb.display_fragments[LocalFragIndex] ) {
PixelIndex = GetFragIndex(cpi->pb.pixel_index_table,LocalFragIndex);
RefPixelIndex = GetFragIndex(cpi->pb.recon_pixel_index_table,
LocalFragIndex);
SrcPtr1 = &SrcPtr[PixelIndex];
RefPtr1 = &RefPtr[RefPixelIndex + RefPixelOffset];
InterError += cpi->GetInterError( SrcPtr1, RefPtr1,
&RefPtr1[RefPtr2Offset], PixelsPerLine );
}
LocalFragIndex = FragIndex + cpi->pb.HFragments;
if ( cpi->pb.display_fragments[LocalFragIndex] ) {
PixelIndex = GetFragIndex(cpi->pb.pixel_index_table,LocalFragIndex);
RefPixelIndex = GetFragIndex(cpi->pb.recon_pixel_index_table,
LocalFragIndex);
SrcPtr1 = &SrcPtr[PixelIndex];
RefPtr1 = &RefPtr[RefPixelIndex + RefPixelOffset];
InterError += cpi->GetInterError( SrcPtr1, RefPtr1,
&RefPtr1[RefPtr2Offset], PixelsPerLine );
}
LocalFragIndex++;
if ( cpi->pb.display_fragments[LocalFragIndex] ) {
PixelIndex = GetFragIndex(cpi->pb.pixel_index_table,LocalFragIndex);
RefPixelIndex = GetFragIndex(cpi->pb.recon_pixel_index_table,
LocalFragIndex);
SrcPtr1 = &SrcPtr[PixelIndex];
RefPtr1 = &RefPtr[RefPixelIndex + RefPixelOffset];
InterError += cpi->GetInterError( SrcPtr1, RefPtr1,
&RefPtr1[RefPtr2Offset], PixelsPerLine );
}
return InterError;
}
ogg_uint32_t GetMBMVInterError (CP_INSTANCE *cpi,
unsigned char * RefFramePtr,
ogg_uint32_t FragIndex,
ogg_uint32_t PixelsPerLine,
ogg_int32_t *MVPixelOffset,
MOTION_VECTOR *MV ) {
ogg_uint32_t Error = 0;
ogg_uint32_t MinError;
ogg_uint32_t InterMVError = 0;
ogg_int32_t i;
ogg_int32_t x=0, y=0;
ogg_int32_t step;
ogg_int32_t SearchSite=0;
unsigned char *SrcPtr[4] = {NULL,NULL,NULL,NULL};
unsigned char *RefPtr=NULL;
unsigned char *CandidateBlockPtr=NULL;
unsigned char *BestBlockPtr=NULL;
ogg_uint32_t RefRow2Offset = cpi->pb.Configuration.YStride * 8;
int MBlockDispFrags[4];
/* Half pixel variables */
ogg_int32_t HalfPixelError;
ogg_int32_t BestHalfPixelError;
unsigned char BestHalfOffset;
unsigned char * RefDataPtr1;
unsigned char * RefDataPtr2;
/* Note which of the four blocks in the macro block are to be
included in the search. */
MBlockDispFrags[0] =
cpi->pb.display_fragments[FragIndex];
MBlockDispFrags[1] =
cpi->pb.display_fragments[FragIndex + 1];
MBlockDispFrags[2] =
cpi->pb.display_fragments[FragIndex + cpi->pb.HFragments];
MBlockDispFrags[3] =
cpi->pb.display_fragments[FragIndex + cpi->pb.HFragments + 1];
/* Set up the source pointers for the four source blocks. */
SrcPtr[0] = &cpi->
ConvDestBuffer[GetFragIndex(cpi->pb.pixel_index_table,FragIndex)];
SrcPtr[1] = SrcPtr[0] + 8;
SrcPtr[2] = SrcPtr[0] + (PixelsPerLine * 8);
SrcPtr[3] = SrcPtr[2] + 8;
/* Set starting reference point for search. */
RefPtr = &RefFramePtr[GetFragIndex(cpi->
pb.recon_pixel_index_table,FragIndex)];
/* Check the 0,0 candidate. */
if ( MBlockDispFrags[0] ) {
Error = cpi->GetSAD( SrcPtr[0], RefPtr,
PixelsPerLine, Error, HUGE_ERROR );
}
if ( MBlockDispFrags[1] ) {
Error = cpi->GetSAD( SrcPtr[1], RefPtr + 8,
PixelsPerLine, Error, HUGE_ERROR );
}
if ( MBlockDispFrags[2] ) {
Error = cpi->GetSAD( SrcPtr[2], RefPtr + RefRow2Offset,
PixelsPerLine, Error, HUGE_ERROR );
}
if ( MBlockDispFrags[3] ) {
Error = cpi->GetSAD( SrcPtr[3], RefPtr + RefRow2Offset + 8,
PixelsPerLine, Error, HUGE_ERROR );
}
/* Set starting values to results of 0, 0 vector. */
MinError = Error;
BestBlockPtr = RefPtr;
x = 0;
y = 0;
MV->x = 0;
MV->y = 0;
/* Proceed through N-steps. */
for ( step=0; step<cpi->MVSearchSteps; step++ ) {
/* Search the 8-neighbours at distance pertinent to current step.*/
for ( i=0; i<8; i++ ) {
/* Set pointer to next candidate matching block. */
CandidateBlockPtr = RefPtr + MVPixelOffset[SearchSite];
/* Reset error */
Error = 0;
/* Get the score for the current offset */
if ( MBlockDispFrags[0] ) {
Error = cpi->GetSAD( SrcPtr[0], CandidateBlockPtr,
PixelsPerLine, Error, MinError );
}
if ( MBlockDispFrags[1] && (Error < MinError) ) {
Error = cpi->GetNextSAD( SrcPtr[1], CandidateBlockPtr + 8,
PixelsPerLine, Error, MinError );
}
if ( MBlockDispFrags[2] && (Error < MinError) ) {
Error = cpi->GetNextSAD( SrcPtr[2], CandidateBlockPtr + RefRow2Offset,
PixelsPerLine, Error, MinError );
}
if ( MBlockDispFrags[3] && (Error < MinError) ) {
Error = cpi->GetNextSAD( SrcPtr[3],
CandidateBlockPtr + RefRow2Offset + 8,
PixelsPerLine, Error, MinError );
}
if ( Error < MinError ) {
/* Remember best match. */
MinError = Error;
BestBlockPtr = CandidateBlockPtr;
/* Where is it. */
x = MV->x + cpi->MVOffsetX[SearchSite];
y = MV->y + cpi->MVOffsetY[SearchSite];
}
/* Move to next search location. */
SearchSite += 1;
}
/* Move to best location this step. */
RefPtr = BestBlockPtr;
MV->x = x;
MV->y = y;
}
/* Factor vectors to 1/2 pixel resolution. */
MV->x = (MV->x * 2);
MV->y = (MV->y * 2);
/* Now do the half pixel pass */
BestHalfOffset = 4; /* Default to the no offset case. */
BestHalfPixelError = MinError;
/* Get the half pixel error for each half pixel offset */
for ( i=0; i < 9; i++ ) {
HalfPixelError = 0;
if ( MBlockDispFrags[0] ) {
RefDataPtr1 = BestBlockPtr;
RefDataPtr2 = RefDataPtr1 + cpi->HalfPixelRef2Offset[i];
HalfPixelError = cpi->
GetSadHalfPixel( SrcPtr[0], RefDataPtr1, RefDataPtr2,
PixelsPerLine, HalfPixelError, BestHalfPixelError );
}
if ( MBlockDispFrags[1] && (HalfPixelError < BestHalfPixelError) ) {
RefDataPtr1 = BestBlockPtr + 8;
RefDataPtr2 = RefDataPtr1 + cpi->HalfPixelRef2Offset[i];
HalfPixelError = cpi->
GetSadHalfPixel( SrcPtr[1], RefDataPtr1, RefDataPtr2,
PixelsPerLine, HalfPixelError, BestHalfPixelError );
}
if ( MBlockDispFrags[2] && (HalfPixelError < BestHalfPixelError) ) {
RefDataPtr1 = BestBlockPtr + RefRow2Offset;
RefDataPtr2 = RefDataPtr1 + cpi->HalfPixelRef2Offset[i];
HalfPixelError = cpi->
GetSadHalfPixel( SrcPtr[2], RefDataPtr1, RefDataPtr2,
PixelsPerLine, HalfPixelError, BestHalfPixelError );
}
if ( MBlockDispFrags[3] && (HalfPixelError < BestHalfPixelError) ) {
RefDataPtr1 = BestBlockPtr + RefRow2Offset + 8;
RefDataPtr2 = RefDataPtr1 + cpi->HalfPixelRef2Offset[i];
HalfPixelError = cpi->
GetSadHalfPixel( SrcPtr[3], RefDataPtr1, RefDataPtr2,
PixelsPerLine, HalfPixelError, BestHalfPixelError );
}
if ( HalfPixelError < BestHalfPixelError ) {
BestHalfOffset = (unsigned char)i;
BestHalfPixelError = HalfPixelError;
}
}
/* Half pixel adjust the MV */
MV->x += cpi->HalfPixelXOffset[BestHalfOffset];
MV->y += cpi->HalfPixelYOffset[BestHalfOffset];
cpi->pb.ClearSysState();
/* Get the error score for the chosen 1/2 pixel offset as a variance. */
InterMVError = GetMBInterError( cpi, cpi->ConvDestBuffer, RefFramePtr,
FragIndex, MV->x, MV->y, PixelsPerLine );
/* Return score of best matching block. */
return InterMVError;
}
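/* Illustrative sketch (an assumption about the conventions used above,
   not code from this commit): the search returns vectors in half-pixel
   units, so splitting a component back into its full-pixel part and a
   half-pixel flag mirrors what GetMBInterError() does with LastXMV/2
   and LastXMV%2. */
static void ExampleSplitHalfPelMV( ogg_int32_t MVComponent,
                                   ogg_int32_t *FullPel,
                                   int *HalfPel ){
  *FullPel = MVComponent / 2;  /* integer offset applied to the reference pointer */
  *HalfPel = MVComponent % 2;  /* nonzero selects the averaged second reference */
}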
ogg_uint32_t GetMBMVExhaustiveSearch (CP_INSTANCE *cpi,
unsigned char * RefFramePtr,
ogg_uint32_t FragIndex,
ogg_uint32_t PixelsPerLine,
MOTION_VECTOR *MV ) {
ogg_uint32_t Error = 0;
ogg_uint32_t MinError = HUGE_ERROR;
ogg_uint32_t InterMVError = 0;
ogg_int32_t i, j;
ogg_int32_t x=0, y=0;
unsigned char *SrcPtr[4] = {NULL,NULL,NULL,NULL};
unsigned char *RefPtr;
unsigned char *CandidateBlockPtr=NULL;
unsigned char *BestBlockPtr=NULL;
ogg_uint32_t RefRow2Offset = cpi->pb.Configuration.YStride * 8;
int MBlockDispFrags[4];
/* Half pixel variables */
ogg_int32_t HalfPixelError;
ogg_int32_t BestHalfPixelError;
unsigned char BestHalfOffset;
unsigned char * RefDataPtr1;
unsigned char * RefDataPtr2;
/* Note which of the four blocks in the macro block are to be
included in the search. */
MBlockDispFrags[0] = cpi->
pb.display_fragments[FragIndex];
MBlockDispFrags[1] = cpi->
pb.display_fragments[FragIndex + 1];
MBlockDispFrags[2] = cpi->
pb.display_fragments[FragIndex + cpi->pb.HFragments];
MBlockDispFrags[3] = cpi->
pb.display_fragments[FragIndex + cpi->pb.HFragments + 1];
/* Set up the source pointers for the four source blocks. */
SrcPtr[0] = &cpi->
ConvDestBuffer[GetFragIndex(cpi->pb.pixel_index_table,FragIndex)];
SrcPtr[1] = SrcPtr[0] + 8;
SrcPtr[2] = SrcPtr[0] + (PixelsPerLine * 8);
SrcPtr[3] = SrcPtr[2] + 8;
RefPtr = &RefFramePtr[GetFragIndex(cpi->
pb.recon_pixel_index_table,FragIndex)];
RefPtr = RefPtr - ((MvMaxExtent/2) * cpi->
pb.Configuration.YStride) - (MvMaxExtent/2);
/* Search each pixel aligned site */
for ( i = 0; i < (ogg_int32_t)MvMaxExtent; i ++ ) {
/* Starting position in row */
CandidateBlockPtr = RefPtr;
for ( j = 0; j < (ogg_int32_t)MvMaxExtent; j++ ) {
/* Reset error */
Error = 0;
/* Sum errors for each block. */
if ( MBlockDispFrags[0] ) {
Error = cpi->GetSAD( SrcPtr[0], CandidateBlockPtr,
PixelsPerLine, Error, HUGE_ERROR );
}
if ( MBlockDispFrags[1] ){
Error = cpi->GetSAD( SrcPtr[1], CandidateBlockPtr + 8,
PixelsPerLine, Error, HUGE_ERROR );
}
if ( MBlockDispFrags[2] ){
Error = cpi->GetSAD( SrcPtr[2], CandidateBlockPtr + RefRow2Offset,
PixelsPerLine, Error, HUGE_ERROR );
}
if ( MBlockDispFrags[3] ){
Error = cpi->GetSAD( SrcPtr[3], CandidateBlockPtr + RefRow2Offset + 8,
PixelsPerLine, Error, HUGE_ERROR );
}
/* Was this the best so far */
if ( Error < MinError ) {
MinError = Error;
BestBlockPtr = CandidateBlockPtr;
x = 16 + j - MvMaxExtent;
y = 16 + i - MvMaxExtent;
}
/* Move to the next site */
CandidateBlockPtr ++;
}
/* Move on to the next row. */
RefPtr += cpi->pb.Configuration.YStride;
}
/* Factor vectors to 1/2 pixel resolution. */
MV->x = (x * 2);
MV->y = (y * 2);
/* Now do the half pixel pass */
BestHalfOffset = 4; /* Default to the no offset case. */
BestHalfPixelError = MinError;
/* Get the half pixel error for each half pixel offset */
for ( i=0; i < 9; i++ ) {
HalfPixelError = 0;
if ( MBlockDispFrags[0] ) {
RefDataPtr1 = BestBlockPtr;
RefDataPtr2 = RefDataPtr1 + cpi->HalfPixelRef2Offset[i];
HalfPixelError = cpi->
GetSadHalfPixel( SrcPtr[0], RefDataPtr1, RefDataPtr2,
PixelsPerLine, HalfPixelError, BestHalfPixelError );
}
if ( MBlockDispFrags[1] && (HalfPixelError < BestHalfPixelError) ) {
RefDataPtr1 = BestBlockPtr + 8;
RefDataPtr2 = RefDataPtr1 + cpi->HalfPixelRef2Offset[i];
HalfPixelError = cpi->
GetSadHalfPixel( SrcPtr[1], RefDataPtr1, RefDataPtr2,
PixelsPerLine, HalfPixelError, BestHalfPixelError );
}
if ( MBlockDispFrags[2] && (HalfPixelError < BestHalfPixelError) ) {
RefDataPtr1 = BestBlockPtr + RefRow2Offset;
RefDataPtr2 = RefDataPtr1 + cpi->HalfPixelRef2Offset[i];
HalfPixelError = cpi->
GetSadHalfPixel( SrcPtr[2], RefDataPtr1, RefDataPtr2,
PixelsPerLine, HalfPixelError, BestHalfPixelError );
}
if ( MBlockDispFrags[3] && (HalfPixelError < BestHalfPixelError) ) {
RefDataPtr1 = BestBlockPtr + RefRow2Offset + 8;
RefDataPtr2 = RefDataPtr1 + cpi->HalfPixelRef2Offset[i];
HalfPixelError = cpi->
GetSadHalfPixel( SrcPtr[3], RefDataPtr1, RefDataPtr2,
PixelsPerLine, HalfPixelError, BestHalfPixelError );
}
if ( HalfPixelError < BestHalfPixelError ){
BestHalfOffset = (unsigned char)i;
BestHalfPixelError = HalfPixelError;
}
}
/* Half pixel adjust the MV */
MV->x += cpi->HalfPixelXOffset[BestHalfOffset];
MV->y += cpi->HalfPixelYOffset[BestHalfOffset];
/* Get the error score for the chosen 1/2 pixel offset as a variance. */
InterMVError = GetMBInterError( cpi, cpi->ConvDestBuffer, RefFramePtr,
FragIndex, MV->x, MV->y, PixelsPerLine );
/* Return score of best matching block. */
return InterMVError;
}
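/* Illustration only (not part of the original sources): the nine
   half-pixel candidates indexed by i in the loop above appear to form a
   3x3 grid around the best full-pixel site, with index 4 treated as the
   centre ("no offset") case.  The actual HalfPixelXOffset,
   HalfPixelYOffset and HalfPixelRef2Offset tables are defined elsewhere
   in the encoder and may be laid out differently; the row-major mapping
   assumed below is just a sketch of the idea. */
#include <stdio.h>
int main(void){
  int i;
  for(i = 0; i < 9; i++)
    printf("half-pel offset %d -> (dx=%d, dy=%d)\n",
           i, (i % 3) - 1, (i / 3) - 1);
  return 0;   /* index 4 prints (dx=0, dy=0), matching the default above */
}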
ogg_uint32_t GetFOURMVExhaustiveSearch (CP_INSTANCE *cpi,
unsigned char * RefFramePtr,
ogg_uint32_t FragIndex,
ogg_uint32_t PixelsPerLine,
MOTION_VECTOR *MV ) {
ogg_uint32_t InterMVError;
  /* For the moment the 4MV mode is only deemed to be valid if all four
     Y blocks are to be updated */
  /* This may be adapted later. */
if ( cpi->pb.display_fragments[FragIndex] &&
cpi->pb.display_fragments[FragIndex + 1] &&
cpi->pb.display_fragments[FragIndex + cpi->pb.HFragments] &&
cpi->pb.display_fragments[FragIndex + cpi->pb.HFragments + 1] ) {
/* Reset the error score. */
InterMVError = 0;
/* Get the error component from each coded block */
InterMVError +=
GetBMVExhaustiveSearch(cpi, RefFramePtr, FragIndex,
PixelsPerLine, &(MV[0]) );
InterMVError +=
GetBMVExhaustiveSearch(cpi, RefFramePtr, (FragIndex + 1),
PixelsPerLine, &(MV[1]) );
InterMVError +=
GetBMVExhaustiveSearch(cpi, RefFramePtr,
(FragIndex + cpi->pb.HFragments),
PixelsPerLine, &(MV[2]) );
InterMVError +=
GetBMVExhaustiveSearch(cpi, RefFramePtr,
(FragIndex + cpi->pb.HFragments + 1),
PixelsPerLine, &(MV[3]) );
}else{
InterMVError = HUGE_ERROR;
}
/* Return score of best matching block. */
return InterMVError;
}
ogg_uint32_t GetBMVExhaustiveSearch (CP_INSTANCE *cpi,
unsigned char * RefFramePtr,
ogg_uint32_t FragIndex,
ogg_uint32_t PixelsPerLine,
MOTION_VECTOR *MV ) {
ogg_uint32_t Error = 0;
ogg_uint32_t MinError = HUGE_ERROR;
ogg_uint32_t InterMVError = 0;
ogg_int32_t i, j;
ogg_int32_t x=0, y=0;
unsigned char *SrcPtr = NULL;
unsigned char *RefPtr;
unsigned char *CandidateBlockPtr=NULL;
unsigned char *BestBlockPtr=NULL;
/* Half pixel variables */
ogg_int32_t HalfPixelError;
ogg_int32_t BestHalfPixelError;
unsigned char BestHalfOffset;
unsigned char * RefDataPtr2;
/* Set up the source pointer for the block. */
  SrcPtr =
    &cpi->ConvDestBuffer[GetFragIndex(cpi->pb.pixel_index_table,FragIndex)];
  RefPtr =
    &RefFramePtr[GetFragIndex(cpi->pb.recon_pixel_index_table,FragIndex)];
  RefPtr = RefPtr - ((MvMaxExtent/2) * cpi->pb.Configuration.YStride) -
    (MvMaxExtent/2);
  /* Search each pixel-aligned site */
for ( i = 0; i < (ogg_int32_t)MvMaxExtent; i ++ ) {
/* Starting position in row */
CandidateBlockPtr = RefPtr;
for ( j = 0; j < (ogg_int32_t)MvMaxExtent; j++ ){
/* Get the block error score. */
Error = cpi->GetSAD( SrcPtr, CandidateBlockPtr,
PixelsPerLine, 0, HUGE_ERROR );
/* Was this the best so far */
if ( Error < MinError ) {
MinError = Error;
BestBlockPtr = CandidateBlockPtr;
x = 16 + j - MvMaxExtent;
y = 16 + i - MvMaxExtent;
}
      /* Move on to the next site */
CandidateBlockPtr ++;
}
/* Move on to the next row. */
RefPtr += cpi->pb.Configuration.YStride;
}
  /* Factor vectors to 1/2 pixel resolution. */
MV->x = (x * 2);
MV->y = (y * 2);
/* Now do the half pixel pass */
BestHalfOffset = 4; /* Default to the no offset case. */
BestHalfPixelError = MinError;
/* Get the half pixel error for each half pixel offset */
for ( i=0; i < 9; i++ ) {
RefDataPtr2 = BestBlockPtr + cpi->HalfPixelRef2Offset[i];
HalfPixelError =
cpi->GetSadHalfPixel( SrcPtr, BestBlockPtr, RefDataPtr2,
PixelsPerLine, 0, BestHalfPixelError );
if ( HalfPixelError < BestHalfPixelError ){
BestHalfOffset = (unsigned char)i;
BestHalfPixelError = HalfPixelError;
}
}
/* Half pixel adjust the MV */
MV->x += cpi->HalfPixelXOffset[BestHalfOffset];
MV->y += cpi->HalfPixelYOffset[BestHalfOffset];
/* Get the variance score at the chosen offset */
RefDataPtr2 = BestBlockPtr + cpi->HalfPixelRef2Offset[BestHalfOffset];
#ifndef NEW_ERROR_METRIC
InterMVError =
cpi->GetInterError( SrcPtr, BestBlockPtr, RefDataPtr2, PixelsPerLine );
#else
InterMVError =
GetInterDCTErr(cpi, SrcPtr, BestBlockPtr, RefDataPtr2, PixelsPerLine );
#endif
/* Return score of best matching block. */
return InterMVError;
}
1.1 theora/lib/mcomp.h
Index: mcomp.h
===================================================================
/********************************************************************
* *
* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function: simple static lookups for VP3 codec
last mod: $Id: mcomp.h,v 1.1 2002/09/16 07:10:02 xiphmont Exp $
********************************************************************/
ogg_int32_t XX_LUT[511]={
65025, 64516, 64009, 63504, 63001, 62500, 62001, 61504,
61009, 60516, 60025, 59536, 59049, 58564, 58081, 57600,
57121, 56644, 56169, 55696, 55225, 54756, 54289, 53824,
53361, 52900, 52441, 51984, 51529, 51076, 50625, 50176,
49729, 49284, 48841, 48400, 47961, 47524, 47089, 46656,
46225, 45796, 45369, 44944, 44521, 44100, 43681, 43264,
42849, 42436, 42025, 41616, 41209, 40804, 40401, 40000,
39601, 39204, 38809, 38416, 38025, 37636, 37249, 36864,
36481, 36100, 35721, 35344, 34969, 34596, 34225, 33856,
33489, 33124, 32761, 32400, 32041, 31684, 31329, 30976,
30625, 30276, 29929, 29584, 29241, 28900, 28561, 28224,
27889, 27556, 27225, 26896, 26569, 26244, 25921, 25600,
25281, 24964, 24649, 24336, 24025, 23716, 23409, 23104,
22801, 22500, 22201, 21904, 21609, 21316, 21025, 20736,
20449, 20164, 19881, 19600, 19321, 19044, 18769, 18496,
18225, 17956, 17689, 17424, 17161, 16900, 16641, 16384,
16129, 15876, 15625, 15376, 15129, 14884, 14641, 14400,
14161, 13924, 13689, 13456, 13225, 12996, 12769, 12544,
12321, 12100, 11881, 11664, 11449, 11236, 11025, 10816,
10609, 10404, 10201, 10000, 9801, 9604, 9409, 9216,
9025, 8836, 8649, 8464, 8281, 8100, 7921, 7744,
7569, 7396, 7225, 7056, 6889, 6724, 6561, 6400,
6241, 6084, 5929, 5776, 5625, 5476, 5329, 5184,
5041, 4900, 4761, 4624, 4489, 4356, 4225, 4096,
3969, 3844, 3721, 3600, 3481, 3364, 3249, 3136,
3025, 2916, 2809, 2704, 2601, 2500, 2401, 2304,
2209, 2116, 2025, 1936, 1849, 1764, 1681, 1600,
1521, 1444, 1369, 1296, 1225, 1156, 1089, 1024,
961, 900, 841, 784, 729, 676, 625, 576,
529, 484, 441, 400, 361, 324, 289, 256,
225, 196, 169, 144, 121, 100, 81, 64,
49, 36, 25, 16, 9, 4, 1, 0,
1, 4, 9, 16, 25, 36, 49, 64,
81, 100, 121, 144, 169, 196, 225, 256,
289, 324, 361, 400, 441, 484, 529, 576,
625, 676, 729, 784, 841, 900, 961, 1024,
1089, 1156, 1225, 1296, 1369, 1444, 1521, 1600,
1681, 1764, 1849, 1936, 2025, 2116, 2209, 2304,
2401, 2500, 2601, 2704, 2809, 2916, 3025, 3136,
3249, 3364, 3481, 3600, 3721, 3844, 3969, 4096,
4225, 4356, 4489, 4624, 4761, 4900, 5041, 5184,
5329, 5476, 5625, 5776, 5929, 6084, 6241, 6400,
6561, 6724, 6889, 7056, 7225, 7396, 7569, 7744,
7921, 8100, 8281, 8464, 8649, 8836, 9025, 9216,
9409, 9604, 9801, 10000, 10201, 10404, 10609, 10816,
11025, 11236, 11449, 11664, 11881, 12100, 12321, 12544,
12769, 12996, 13225, 13456, 13689, 13924, 14161, 14400,
14641, 14884, 15129, 15376, 15625, 15876, 16129, 16384,
16641, 16900, 17161, 17424, 17689, 17956, 18225, 18496,
18769, 19044, 19321, 19600, 19881, 20164, 20449, 20736,
21025, 21316, 21609, 21904, 22201, 22500, 22801, 23104,
23409, 23716, 24025, 24336, 24649, 24964, 25281, 25600,
25921, 26244, 26569, 26896, 27225, 27556, 27889, 28224,
28561, 28900, 29241, 29584, 29929, 30276, 30625, 30976,
31329, 31684, 32041, 32400, 32761, 33124, 33489, 33856,
34225, 34596, 34969, 35344, 35721, 36100, 36481, 36864,
37249, 37636, 38025, 38416, 38809, 39204, 39601, 40000,
40401, 40804, 41209, 41616, 42025, 42436, 42849, 43264,
43681, 44100, 44521, 44944, 45369, 45796, 46225, 46656,
47089, 47524, 47961, 48400, 48841, 49284, 49729, 50176,
50625, 51076, 51529, 51984, 52441, 52900, 53361, 53824,
54289, 54756, 55225, 55696, 56169, 56644, 57121, 57600,
58081, 58564, 59049, 59536, 60025, 60516, 61009, 61504,
62001, 62500, 63001, 63504, 64009, 64516, 65025};
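/* Illustration only (not part of the original sources): XX_LUT above
   appears to hold (i - 255)^2 for i in 0..510, so the squared difference
   of two 8-bit samples a and b can be looked up as XX_LUT[(a - b) + 255].
   The sketch below rebuilds an equivalent table locally and checks one
   hypothetical pair of pixel values. */
#include <stdio.h>
int main(void){
  long lut[511];
  int i;
  unsigned char a = 200, b = 57;             /* hypothetical pixel values */
  for(i = 0; i < 511; i++)
    lut[i] = (long)(i - 255) * (i - 255);    /* same contents as XX_LUT */
  printf("%ld == %ld\n",
         lut[(a - b) + 255],
         (long)(a - b) * (a - b));           /* both print 20449 */
  return 0;
}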
1.1 theora/lib/misc_common.c
Index: misc_common.c
===================================================================
/********************************************************************
* *
* THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. *
* USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
* THE OggVorbis SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
function:
last mod: $Id: misc_common.c,v 1.1 2002/09/16 07:10:02 xiphmont Exp $
********************************************************************/
#define FIXED_Q 150
#define MAX_UP_REG_LOOPS 2
double GetEstimatedBpb( CP_INSTANCE *cpi, ogg_uint32_t TargetQ ){
ogg_uint32_t i;
ogg_int32_t ThreshTableIndex = Q_TABLE_SIZE - 1;
double BytesPerBlock;
/* Search for the Q table index that matches the given Q. */
for ( i = 0; i < Q_TABLE_SIZE; i++ ) {
if ( TargetQ >= cpi->pb.QThreshTable[i] ) {
ThreshTableIndex = i;
break;
}
}
/* Adjust according to Q shift and type of frame */
if ( GetFrameType(&cpi->pb) == BASE_FRAME ) {
/* Get primary prediction */
BytesPerBlock = KfBpbTable[ThreshTableIndex];
} else {
/* Get primary prediction */
BytesPerBlock = BpbTable[ThreshTableIndex];
BytesPerBlock = BytesPerBlock * cpi->BpbCorrectionFactor;
}
return BytesPerBlock;
}
void UpRegulateMB( CP_INSTANCE *cpi, ogg_uint32_t RegulationQ,
ogg_uint32_t SB, ogg_uint32_t MB, BOOL NoCheck ) {
ogg_int32_t FragIndex;
ogg_uint32_t B;
/* Variables used in calculating corresponding row,col and index in
UV planes */
ogg_uint32_t UVRow;
ogg_uint32_t UVColumn;
ogg_uint32_t UVFragOffset;
  /* There may be MBs lying outside the frame which must be ignored. For
     these MBs the top-left block will have a negative fragment index. */
if ( QuadMapToMBTopLeft(cpi->pb.BlockMap, SB, MB ) >= 0 ) {
/* Up regulate the component blocks Y then UV. */
for ( B=0; B<4; B++ ){
FragIndex = QuadMapToIndex1( cpi->pb.BlockMap, SB, MB, B );
if ( ( !cpi->pb.display_fragments[FragIndex] ) &&
( (NoCheck) || (cpi->FragmentLastQ[FragIndex] > RegulationQ) ) ){
cpi->pb.display_fragments[FragIndex] = 1;
cpi->extra_fragments[FragIndex] = 1;
cpi->FragmentLastQ[FragIndex] = RegulationQ;
cpi->MotionScore++;
}
}
/* Check the two UV blocks */
FragIndex = QuadMapToMBTopLeft(cpi->pb.BlockMap, SB, MB );
UVRow = (FragIndex / (cpi->pb.HFragments * 2));
UVColumn = (FragIndex % cpi->pb.HFragments) / 2;
UVFragOffset = (UVRow * (cpi->pb.HFragments / 2)) + UVColumn;
FragIndex = cpi->pb.YPlaneFragments + UVFragOffset;
if ( ( !cpi->pb.display_fragments[FragIndex] ) &&
( (NoCheck) || (cpi->FragmentLastQ[FragIndex] > RegulationQ) ) ) {
cpi->pb.display_fragments[FragIndex] = 1;
cpi->extra_fragments[FragIndex] = 1;
cpi->FragmentLastQ[FragIndex] = RegulationQ;
cpi->MotionScore++;
}
FragIndex += cpi->pb.UVPlaneFragments;
if ( ( !cpi->pb.display_fragments[FragIndex] ) &&
( (NoCheck) || (cpi->FragmentLastQ[FragIndex] > RegulationQ) ) ) {
cpi->pb.display_fragments[FragIndex] = 1;
cpi->extra_fragments[FragIndex] = 1;
cpi->FragmentLastQ[FragIndex] = RegulationQ;
cpi->MotionScore++;
}
}
}
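/* Illustration only (not part of the original sources): a worked example
   of the Y-to-chroma fragment mapping used in UpRegulateMB() above, with
   hypothetical dimensions (a 352-pixel-wide luma plane gives
   HFragments = 44).  The top-left Y fragment of a macro block maps to one
   fragment in each chroma plane. */
#include <stdio.h>
int main(void){
  int HFragments = 44;                    /* hypothetical: 352 / 8 */
  int FragIndex  = 6 * HFragments + 10;   /* Y fragment at row 6, column 10 */
  int UVRow        = FragIndex / (HFragments * 2);
  int UVColumn     = (FragIndex % HFragments) / 2;
  int UVFragOffset = UVRow * (HFragments / 2) + UVColumn;
  printf("chroma fragment offset = %d\n", UVFragOffset);   /* prints 71 */
  return 0;
}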
void UpRegulateBlocks (CP_INSTANCE *cpi, ogg_uint32_t RegulationQ,
ogg_int32_t RecoveryBlocks,
ogg_uint32_t * LastSB, ogg_uint32_t * LastMB ) {
ogg_uint32_t LoopTimesRound = 0;
ogg_uint32_t MaxSB = cpi->pb.YSBRows *
cpi->pb.YSBCols; /* Tot super blocks in image */
ogg_uint32_t SB, MB; /* Super-Block and macro block indices. */
/* First scan for blocks for which a residue update is outstanding. */
while ( (cpi->MotionScore < RecoveryBlocks) &&
(LoopTimesRound < MAX_UP_REG_LOOPS) ) {
LoopTimesRound++;
for ( SB = (*LastSB); SB < MaxSB; SB++ ) {
/* Check its four Macro-Blocks */
for ( MB=(*LastMB); MB<4; MB++ ) {
/* Mark relevant blocks for update */
UpRegulateMB( cpi, RegulationQ, SB, MB, FALSE );
/* Keep track of the last refresh MB. */
(*LastMB) += 1;
if ( (*LastMB) == 4 )
(*LastMB) = 0;
/* Termination clause */
if (cpi->MotionScore >= RecoveryBlocks) {
/* Make sure we don't stall at SB level */
if ( *LastMB == 0 )
SB++;
break;
}
}
/* Termination clause */
if (cpi->MotionScore >= RecoveryBlocks)
break;
}
/* Update super block start index */
if ( SB >= MaxSB){
(*LastSB) = 0;
}else{
(*LastSB) = SB;
}
}
}
void UpRegulateDataStream (CP_INSTANCE *cpi, ogg_uint32_t RegulationQ,
ogg_int32_t RecoveryBlocks ) {
ogg_uint32_t LastPassMBPos = 0;
ogg_uint32_t StdLastMBPos = 0;
ogg_uint32_t i = 0;
ogg_uint32_t LoopTimesRound = 0;
ogg_uint32_t MaxSB = cpi->pb.YSBRows *
cpi->pb.YSBCols; /* Tot super blocks in image */
ogg_uint32_t SB=0; /* Super-Block index */
ogg_uint32_t MB; /* Macro-Block index */
  /* Deduct half the number of blocks in an MB from the recovery block count.
     This compensates for the fact that once we start checking an MB
     we test every block in that macro block */
if ( RecoveryBlocks > 3 )
RecoveryBlocks -= 3;
/* Up regulate blocks last coded at higher Q */
UpRegulateBlocks( cpi, RegulationQ, RecoveryBlocks,
&cpi->LastEndSB, &StdLastMBPos );
  /* If we have still not used up the minimum number of blocks and are
     at the minimum Q then run through a final pass of the data to
     ensure that each block gets a final refresh. */
if ( (RegulationQ == VERY_BEST_Q) &&
(cpi->MotionScore < RecoveryBlocks) ) {
if ( cpi->FinalPassLastPos < MaxSB ) {
for ( SB = cpi->FinalPassLastPos; SB < MaxSB; SB++ ) {
/* Check its four Macro-Blocks */
for ( MB=LastPassMBPos; MB<4; MB++ ) {
/* Mark relevant blocks for update */
UpRegulateMB( cpi, RegulationQ, SB, MB, TRUE );
/* Keep track of the last refresh MB. */
LastPassMBPos += 1;
if ( LastPassMBPos == 4 ) {
LastPassMBPos = 0;
/* Increment SB index */
cpi->FinalPassLastPos += 1;
}
/* Termination clause */
if (cpi->MotionScore >= RecoveryBlocks)
break;
}
/* Termination clause */
if (cpi->MotionScore >= RecoveryBlocks)
break;
}
}
}
}
void RegulateQ( CP_INSTANCE *cpi, ogg_int32_t UpdateScore ) {
double TargetUnitScoreBytes = (double)cpi->ThisFrameTargetBytes /
(double)UpdateScore;
double PredUnitScoreBytes;
double LastBitError = 10000.0; /* Silly high number */
ogg_uint32_t QIndex = Q_TABLE_SIZE - 1;
ogg_uint32_t i;
/* Search for the best Q for the target bitrate. */
for ( i = 0; i < Q_TABLE_SIZE; i++ ) {
PredUnitScoreBytes = GetEstimatedBpb( cpi, cpi->pb.QThreshTable[i] );
if ( PredUnitScoreBytes > TargetUnitScoreBytes ) {
if ( (PredUnitScoreBytes - TargetUnitScoreBytes) <= LastBitError ) {
QIndex = i;
} else {
QIndex = i - 1;
}
break;
} else {
LastBitError = TargetUnitScoreBytes - PredUnitScoreBytes;
}
}
/* QIndex should now indicate the optimal Q. */
cpi->pb.ThisFrameQualityValue = cpi->pb.QThreshTable[QIndex];
/* Apply range restrictions for key frames. */
if ( GetFrameType(&cpi->pb) == BASE_FRAME ) {
if ( cpi->pb.ThisFrameQualityValue > cpi->pb.QThreshTable[20] )
cpi->pb.ThisFrameQualityValue = cpi->pb.QThreshTable[20];
else if ( cpi->pb.ThisFrameQualityValue < cpi->pb.QThreshTable[50] )
cpi->pb.ThisFrameQualityValue = cpi->pb.QThreshTable[50];
}
/* Limit the Q value to the maximum available value */
if (cpi->pb.ThisFrameQualityValue >
cpi->pb.QThreshTable[cpi->Configuration.ActiveMaxQ]) {
cpi->pb.ThisFrameQualityValue =
(ogg_uint32_t)cpi->pb.QThreshTable[cpi->Configuration.ActiveMaxQ];
}
if(cpi->FixedQ) {
if ( GetFrameType(&cpi->pb) == BASE_FRAME ) {
cpi->pb.ThisFrameQualityValue = cpi->pb.QThreshTable[43];
cpi->pb.ThisFrameQualityValue = cpi->FixedQ;
} else {
cpi->pb.ThisFrameQualityValue = cpi->FixedQ;
}
}
  /* If the quantiser value has changed then re-initialise it */
if ( cpi->pb.ThisFrameQualityValue != cpi->pb.LastFrameQualityValue ) {
/* Initialise quality tables. */
UpdateQC( cpi, cpi->pb.ThisFrameQualityValue );
cpi->pb.LastFrameQualityValue = cpi->pb.ThisFrameQualityValue;
}
}
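/* Illustration only (not part of the original sources): a toy version of
   the Q selection loop in RegulateQ() above.  It walks a hypothetical
   bytes-per-block prediction table, ordered from coarsest to finest Q,
   and stops at the first entry exceeding the per-block byte target,
   stepping back one entry if the previous (undershooting) prediction was
   actually closer.  All numbers here are made up. */
#include <stdio.h>
int main(void){
  double Pred[6] = { 0.5, 0.9, 1.4, 2.0, 3.1, 4.5 };  /* hypothetical bpb */
  double Target  = 1.6;                               /* bytes per block */
  double LastErr = 10000.0;
  int    QIndex  = 5, i;
  for(i = 0; i < 6; i++){
    if(Pred[i] > Target){
      QIndex = ((Pred[i] - Target) <= LastErr) ? i : i - 1;
      break;
    }
    LastErr = Target - Pred[i];
  }
  printf("chosen index = %d\n", QIndex);   /* prints 2: 1.4 was closer */
  return 0;
}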
void ConfigureQuality( CP_INSTANCE *cpi, ogg_uint32_t QualityValue ) {
/* Default first frame quality */
/* Set the worst case quality value. */
/* Note that the actual quality is determined by lookup into the
quantiser table QThreshTable[] */
cpi->Configuration.MaxQ = 63 - QualityValue;
/* Set the default Active MaxQ. */
cpi->Configuration.ActiveMaxQ = cpi->Configuration.MaxQ;
}
void CopyBackExtraFrags(CP_INSTANCE *cpi){
ogg_uint32_t i,j;
unsigned char * SrcPtr;
unsigned char * DestPtr;
ogg_uint32_t PlaneLineStep;
ogg_uint32_t PixelIndex;
/* Copy back for Y plane. */
PlaneLineStep = cpi->pb.Configuration.VideoFrameWidth;
for ( i = 0; i < cpi->pb.YPlaneFragments; i++ ) {
/* We are only interested in updated fragments. */
if ( cpi->extra_fragments[i] ) {
/* Get the start index for the fragment. */
PixelIndex = GetFragIndex(cpi->pb.pixel_index_table, i);
SrcPtr = &cpi->yuv1ptr[PixelIndex];
DestPtr = &cpi->ConvDestBuffer[PixelIndex];
for ( j = 0; j < cpi->pb.Configuration.VFragPixels; j++ ) {
memcpy( DestPtr, SrcPtr, cpi->pb.Configuration.HFragPixels );
SrcPtr += PlaneLineStep;
DestPtr += PlaneLineStep;
}
}
}
/* Now the U and V planes */
PlaneLineStep = cpi->pb.Configuration.VideoFrameWidth / 2;
for ( i = cpi->pb.YPlaneFragments;
i < (cpi->pb.YPlaneFragments + (2 * cpi->pb.UVPlaneFragments)) ;
i++ ) {
/* We are only interested in updated fragments. */
if ( cpi->extra_fragments[i] ) {
/* Get the start index for the fragment. */
PixelIndex = GetFragIndex(cpi->pb.pixel_index_table, i);
SrcPtr = &cpi->yuv1ptr[PixelIndex];
DestPtr = &cpi->ConvDestBuffer[PixelIndex];
for ( j = 0; j < cpi->pb.Configuration.VFragPixels; j++ ) {
memcpy( DestPtr, SrcPtr, cpi->pb.Configuration.HFragPixels );
SrcPtr += PlaneLineStep;
DestPtr += PlaneLineStep;
}
}
}
}
1.1 theora/lib/toplevel.c
Index: toplevel.c
===================================================================
#define CommentString "Xiph.Org libTheora I 20020916"
#define A_TABLE_SIZE 29
#define DF_CANDIDATE_WINDOW 5
static void SetupKeyFrame(CP_INSTANCE *cpi) {
/* Make sure the "last frame" buffer contains the first frame data
as well. */
memcpy ( cpi->yuv0ptr, cpi->yuv1ptr,
cpi->pb.ReconYPlaneSize + 2 * cpi->pb.ReconUVPlaneSize );
/* Initialise the cpi->pb.display_fragments and other fragment
structures for the first frame. */
memset( cpi->pb.display_fragments, 1, cpi->pb.UnitFragments );
memset( cpi->extra_fragments, 1, cpi->pb.UnitFragments );
  /* Set up for a BASE/KEY FRAME */
SetFrameType( &cpi->pb,BASE_FRAME );
}
static void AdjustKeyFrameContext(CP_INSTANCE *cpi) {
ogg_uint32_t i;
ogg_uint32_t AvKeyFrameFrequency =
(ogg_uint32_t) (cpi->CurrentFrame / cpi->KeyFrameCount);
ogg_uint32_t AvKeyFrameBytes =
(ogg_uint32_t) (cpi->TotKeyFrameBytes / cpi->KeyFrameCount);
ogg_uint32_t TotalWeight=0;
ogg_int32_t AvKeyFramesPerSecond;
ogg_int32_t MinFrameTargetRate;
/* Update the frame carry over. */
cpi->TotKeyFrameBytes += oggpackB_bytes(&cpi->opb);
/* reset keyframe context and calculate weighted average of last
KEY_FRAME_CONTEXT keyframes */
for( i = 0 ; i < KEY_FRAME_CONTEXT ; i ++ ) {
if ( i < KEY_FRAME_CONTEXT -1) {
cpi->PriorKeyFrameSize[i] = cpi->PriorKeyFrameSize[i+1];
cpi->PriorKeyFrameDistance[i] = cpi->PriorKeyFrameDistance[i+1];
} else {
cpi->PriorKeyFrameSize[KEY_FRAME_CONTEXT - 1] =
oggpackB_bytes(&cpi->opb);
cpi->PriorKeyFrameDistance[KEY_FRAME_CONTEXT - 1] =
cpi->LastKeyFrame;
}
AvKeyFrameBytes += PriorKeyFrameWeight[i] *
cpi->PriorKeyFrameSize[i];
AvKeyFrameFrequency += PriorKeyFrameWeight[i] *
cpi->PriorKeyFrameDistance[i];
TotalWeight += PriorKeyFrameWeight[i];
}
AvKeyFrameBytes /= TotalWeight;
AvKeyFrameFrequency /= TotalWeight;
AvKeyFramesPerSecond = 100 * cpi->Configuration.OutputFrameRate /
AvKeyFrameFrequency ;
/* Calculate a new target rate per frame allowing for average key
     frame frequency over the most recent frames. */
if ( 100 * cpi->Configuration.TargetBandwidth >
AvKeyFrameBytes * AvKeyFramesPerSecond &&
(100 * cpi->Configuration.OutputFrameRate - AvKeyFramesPerSecond )){
cpi->frame_target_rate =
(ogg_int32_t)(100* cpi->Configuration.TargetBandwidth -
AvKeyFrameBytes * AvKeyFramesPerSecond ) /
( (100 * cpi->Configuration.OutputFrameRate - AvKeyFramesPerSecond ) );
} else {
/* don't let this number get too small!!! */
cpi->frame_target_rate = 1;
}
/* minimum allowable frame_target_rate */
MinFrameTargetRate = (cpi->Configuration.TargetBandwidth /
cpi->Configuration.OutputFrameRate) / 3;
if(cpi->frame_target_rate < MinFrameTargetRate ) {
cpi->frame_target_rate = MinFrameTargetRate;
}
cpi->LastKeyFrame = 1;
cpi->LastKeyFrameSize=oggpackB_bytes(&cpi->opb);
}
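/* Illustration only (not part of the original sources): a worked example,
   with hypothetical numbers, of the key-frame-adjusted per-frame target
   computed at the end of AdjustKeyFrameContext() above.  All rate figures
   are in bytes. */
#include <stdio.h>
int main(void){
  long TargetBandwidth     = 25000;  /* hypothetical: ~200 kbit/s */
  long OutputFrameRate     = 25;
  long AvKeyFrameBytes     = 10000;  /* hypothetical weighted average size */
  long AvKeyFrameFrequency = 100;    /* one key frame per 100 frames */
  long AvKeyFramesPerSecond = 100 * OutputFrameRate / AvKeyFrameFrequency;
  long frame_target_rate =
      (100 * TargetBandwidth - AvKeyFrameBytes * AvKeyFramesPerSecond) /
      (100 * OutputFrameRate - AvKeyFramesPerSecond);
  printf("%ld bytes per delta frame\n", frame_target_rate);  /* prints 909 */
  return 0;
}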
static void CompressFirstFrame(CP_INSTANCE *cpi) {
  register ogg_uint32_t i;
  /* If auto keyframing is disabled, force a key frame at the configured
     cpi->KeyFrameFrequency */
if(!cpi->AutoKeyFrameEnabled)
cpi->ForceKeyFrameEvery = cpi->KeyFrameFrequency;
/* set up context of key frame sizes and distances for more local
datarate control */
for( i = 0 ; i < KEY_FRAME_CONTEXT ; i ++ ) {
cpi->PriorKeyFrameSize[i] = cpi->KeyFrameDataTarget;
cpi->PriorKeyFrameDistance[i] = cpi->ForceKeyFrameEvery;
}
/* Keep track of the total number of Key Frames Coded. */
cpi->KeyFrameCount = 1;
cpi->LastKeyFrame = 1;
cpi->TotKeyFrameBytes = 0;
  /* A key frame is not a dropped frame, therefore reset the count of
     consecutive dropped frames. */
cpi->DropCount = 0;
SetupKeyFrame(cpi);
/* Calculate a new target rate per frame allowing for average key
frame frequency and size thus far. */
if ( cpi->Configuration.TargetBandwidth >
((cpi->KeyFrameDataTarget * cpi->Configuration.OutputFrameRate)/
cpi->KeyFrameFrequency) ) {
cpi->frame_target_rate =
(ogg_int32_t)((cpi->Configuration.TargetBandwidth -
((cpi->KeyFrameDataTarget *
cpi->Configuration.OutputFrameRate)/
cpi->KeyFrameFrequency)) /
cpi->Configuration.OutputFrameRate);
}else
cpi->frame_target_rate = 1;
/* Set baseline frame target rate. */
cpi->BaseLineFrameTargetRate = cpi->frame_target_rate;
  /* A key frame is not a dropped frame, therefore reset the count of
     consecutive dropped frames. */
cpi->DropCount = 0;
/* Initialise drop frame trigger to 5 frames worth of data. */
cpi->DropFrameTriggerBytes = cpi->frame_target_rate * DF_CANDIDATE_WINDOW;
/* Set a target size for this key frame based upon the baseline
target and frequency */
cpi->ThisFrameTargetBytes = cpi->KeyFrameDataTarget;
/* Get a DCT quantizer level for the key frame. */
cpi->MotionScore = cpi->pb.UnitFragments;
RegulateQ(cpi, cpi->pb.UnitFragments);
cpi->pb.LastFrameQualityValue = cpi->pb.ThisFrameQualityValue;
/* Initialise quantizer. */
UpdateQC(cpi, cpi->pb.ThisFrameQualityValue );
/* Initialise the cpi->pb.display_fragments and other fragment
structures for the first frame. */
for ( i = 0; i < cpi->pb.UnitFragments; i ++ )
cpi->FragmentLastQ[i] = cpi->pb.ThisFrameQualityValue;
  /* Compress and output the first frame. */
PickIntra( cpi, cpi->pb.YSBRows, cpi->pb.YSBCols,
cpi->pb.HFragments%4, cpi->pb.VFragments%4,
cpi->pb.Configuration.VideoFrameWidth);
UpdateFrame(cpi);
/* Initialise the carry over rate targeting variables. */
cpi->CarryOver = 0;
}
static void CompressKeyFrame(CP_INSTANCE *cpi){
ogg_uint32_t i;
/* Average key frame frequency and size */
ogg_int32_t AvKeyFrameFrequency =
(ogg_int32_t) (cpi->CurrentFrame / cpi->KeyFrameCount);
ogg_int32_t AvKeyFrameBytes =
(ogg_int32_t) (cpi->TotKeyFrameBytes / cpi->KeyFrameCount);
/* Before we compress reset the carry over to the actual frame carry over */
cpi->CarryOver = cpi->Configuration.TargetBandwidth * cpi->CurrentFrame /
cpi->Configuration.OutputFrameRate - cpi->TotalByteCount;
/* Keep track of the total number of Key Frames Coded */
cpi->KeyFrameCount += 1;
  /* A key frame is not a dropped frame, therefore reset the count of
     consecutive dropped frames. */
cpi->DropCount = 0;
SetupKeyFrame(cpi);
/* set a target size for this frame */
cpi->ThisFrameTargetBytes = (ogg_int32_t) cpi->frame_target_rate +
( (cpi->KeyFrameDataTarget - cpi->frame_target_rate) *
cpi->LastKeyFrame / cpi->ForceKeyFrameEvery );
if ( cpi->ThisFrameTargetBytes > cpi->KeyFrameDataTarget )
cpi->ThisFrameTargetBytes = cpi->KeyFrameDataTarget;
/* Get a DCT quantizer level for the key frame. */
cpi->MotionScore = cpi->pb.UnitFragments;
RegulateQ(cpi, cpi->pb.UnitFragments);
cpi->pb.LastFrameQualityValue = cpi->pb.ThisFrameQualityValue;
/* Initialise DCT tables. */
UpdateQC(cpi, cpi->pb.ThisFrameQualityValue );
  /* Initialise the cpi->pb.display_fragments and other fragment
     structures for this key frame. */
for ( i = 0; i < cpi->pb.UnitFragments; i ++ )
cpi->FragmentLastQ[i] = cpi->pb.ThisFrameQualityValue;
  /* Compress and output the key frame. */
PickIntra( cpi, cpi->pb.YSBRows, cpi->pb.YSBCols,
cpi->pb.HFragments%4, cpi->pb.VFragments%4,
cpi->pb.Configuration.VideoFrameWidth);
UpdateFrame(cpi);
}
static void CompressFrame( CP_INSTANCE *cpi, ogg_uint32_t FrameNumber ) {
ogg_int32_t min_blocks_per_frame;
ogg_uint32_t i;
int DropFrame = 0;
ogg_uint32_t ResidueBlocksAdded=0;
ogg_uint32_t KFIndicator = 0;
double QModStep;
double QModifier = 1.0;
/* Clear down the macro block level mode and MV arrays. */
for ( i = 0; i < cpi->pb.UnitFragments; i++ ) {
cpi->pb.FragCodingMethod[i] = CODE_INTER_NO_MV; /* Default coding mode */
cpi->pb.FragMVect[i].x = 0;
cpi->pb.FragMVect[i].y = 0;
}
/* Default to normal frames. */
SetFrameType( &cpi->pb, NORMAL_FRAME );
/* Clear down the difference arrays for the current frame. */
memset( cpi->pb.display_fragments, 0, cpi->pb.UnitFragments );
memset( cpi->extra_fragments, 0, cpi->pb.UnitFragments );
/* Calculate the target bytes for this frame. */
cpi->ThisFrameTargetBytes = cpi->frame_target_rate;
/* Correct target to try and compensate for any overall rate error
that is developing */
/* Set the max allowed Q for this frame based upon carry over
history. First set baseline worst Q for this frame */
cpi->Configuration.ActiveMaxQ = cpi->Configuration.MaxQ + 10;
if ( cpi->Configuration.ActiveMaxQ >= Q_TABLE_SIZE )
cpi->Configuration.ActiveMaxQ = Q_TABLE_SIZE - 1;
  /* Make a further adjustment based upon the carry over and recent
     history. cpi->Configuration.ActiveMaxQ is reduced by 1 for each 1/2
     second's worth of -ve carry over, up to a limit of 6. Also
     cpi->Configuration.ActiveMaxQ is reduced if the frame is a
     "DropFrameCandidate". Remember that if we are behind the bit
     target, carry over is -ve. */
if ( cpi->CarryOver < 0 ) {
if ( cpi->DropFrameCandidate ) {
cpi->Configuration.ActiveMaxQ -= 4;
}
if ( cpi->CarryOver <
-((ogg_int32_t)cpi->Configuration.TargetBandwidth*3) )
cpi->Configuration.ActiveMaxQ -= 6;
else
cpi->Configuration.ActiveMaxQ +=
(ogg_int32_t) ((cpi->CarryOver*2) /
(ogg_int32_t)cpi->Configuration.TargetBandwidth);
/* Check that we have not dropped quality too far */
if ( cpi->Configuration.ActiveMaxQ < cpi->Configuration.MaxQ )
cpi->Configuration.ActiveMaxQ = cpi->Configuration.MaxQ;
}
/* Calculate the Q Modifier step size required to cause a step down
from full target bandwidth to 40% of target between max Q and
best Q */
QModStep = 0.5 / (double)((Q_TABLE_SIZE - 1) -
cpi->Configuration.ActiveMaxQ);
/* Set up the cpi->QTargetModifier[] table. */
for ( i = 0; i < cpi->Configuration.ActiveMaxQ; i++ ) {
cpi->QTargetModifier[i] = QModifier;
}
for ( i = cpi->Configuration.ActiveMaxQ; i < Q_TABLE_SIZE; i++ ) {
cpi->QTargetModifier[i] = QModifier;
QModifier -= QModStep;
}
/* if we are allowed to drop frames and are falling behind (eg more
than x frames worth of bandwidth) */
if ( cpi->DropFramesAllowed &&
( cpi->DropCount < cpi->MaxConsDroppedFrames) &&
( cpi->CarryOver <
-((ogg_int32_t)cpi->Configuration.TargetBandwidth)) &&
( cpi->DropFrameCandidate) ) {
/* (we didn't do this frame so we should have some left over for
the next frame) */
cpi->CarryOver += cpi->frame_target_rate;
DropFrame = TRUE;
cpi->DropCount ++;
/* Adjust DropFrameTriggerBytes to account for the saving achieved. */
cpi->DropFrameTriggerBytes =
(cpi->DropFrameTriggerBytes *
(DF_CANDIDATE_WINDOW-1))/DF_CANDIDATE_WINDOW;
/* Even if we drop a frame we should account for it when
       considering key frame separation. */
cpi->LastKeyFrame++;
} else if ( cpi->CarryOver <
-((ogg_int32_t)cpi->Configuration.TargetBandwidth * 2) ) {
    /* Reduce the frame bit target by 1.75% for each 1/10th of a second's
       worth of -ve carry over, down to a minimum of 65% of its
       unmodified value. */
cpi->ThisFrameTargetBytes =
(ogg_uint32_t)(cpi->ThisFrameTargetBytes * 0.65);
} else if ( cpi->CarryOver < 0 ) {
/* Note that cpi->CarryOver is a -ve here hence 1.0 "+" ... */
cpi->ThisFrameTargetBytes =
(ogg_uint32_t)(cpi->ThisFrameTargetBytes *
(1.0 + ( ((cpi->CarryOver * 10)/
((ogg_int32_t)cpi->
Configuration.TargetBandwidth)) * 0.0175) ));
}
if ( !DropFrame ) {
/* pick all the macroblock modes and motion vectors */
ogg_uint32_t InterError;
ogg_uint32_t IntraError;
/* Set Baseline filter level. */
SetScanParam( cpi->pp, SCP_CONFIGURE_PP, cpi->PreProcFilterLevel );
/* Score / analyses the fragments. */
cpi->MotionScore = YUVAnalyseFrame(cpi->pp, &KFIndicator );
/* Get the baseline Q value */
RegulateQ( cpi, cpi->MotionScore );
/* Recode blocks if the error score in last frame was high. */
ResidueBlocksAdded = 0;
for ( i = 0; i < cpi->pb.UnitFragments; i++ ){
if ( !cpi->pb.display_fragments[i] ){
if ( cpi->LastCodedErrorScore[i] >=
ResidueErrorThresh[cpi->pb.FrameQIndex] ) {
cpi->pb.display_fragments[i] = 1; /* Force block update */
          cpi->extra_fragments[i] = 1;     /* Ensures up-to-date
                                              pixel data is used. */
ResidueBlocksAdded ++;
}
}
}
/* Adjust the motion score to allow for residue blocks
added. These are assumed to have below average impact on
bitrate (Hence ResidueBlockFactor). */
cpi->MotionScore = cpi->MotionScore +
(ResidueBlocksAdded / ResidueBlockFactor[cpi->pb.FrameQIndex]);
/* Estimate the min number of blocks at best Q */
min_blocks_per_frame =
(ogg_int32_t)(cpi->ThisFrameTargetBytes /
GetEstimatedBpb( cpi, VERY_BEST_Q ));
if ( min_blocks_per_frame == 0 )
min_blocks_per_frame = 1;
/* If we have less than this number then consider adding in some
extra blocks */
if ( cpi->MotionScore < min_blocks_per_frame ) {
min_blocks_per_frame =
cpi->MotionScore +
(ogg_int32_t)(((min_blocks_per_frame - cpi->MotionScore) * 4) / 3 );
UpRegulateDataStream( cpi, VERY_BEST_Q, min_blocks_per_frame );
}else{
/* Reset control variable for best quality final pass. */
cpi->FinalPassLastPos = 0;
}
/* Get the modified Q prediction taking into account extra blocks added. */
RegulateQ( cpi, cpi->MotionScore );
/* Unless we are already well ahead (4 seconds of data) of the
projected bitrate */
if ( cpi->CarryOver <
(ogg_int32_t)(cpi->Configuration.TargetBandwidth * 4) ){
/* Look at the predicted Q (pbi->FrameQIndex). Adjust the
target bits for this frame based upon projected Q and
re-calculate. The idea is that if the Q is better than a
given (good enough) level then we will try and save some bits
for use in more difficult segments. */
cpi->ThisFrameTargetBytes =
(ogg_int32_t) (cpi->ThisFrameTargetBytes *
cpi->QTargetModifier[cpi->pb.FrameQIndex]);
/* Recalculate Q again */
RegulateQ( cpi, cpi->MotionScore );
}
    /* Select modes and motion vectors for each of the blocks: return
       an error score for inter and intra */
PickModes( cpi, cpi->pb.YSBRows, cpi->pb.YSBCols, cpi->pb.HFragments%4,
cpi->pb.VFragments%4, cpi->pb.Configuration.VideoFrameWidth,
&InterError, &IntraError );
/* decide whether we really should have made this frame a key frame */
if( cpi->AutoKeyFrameEnabled ) {
if( ( ( 2* IntraError < 5 * InterError )
&& ( KFIndicator >= (ogg_uint32_t) cpi->AutoKeyFrameThreshold)
&& ( cpi->LastKeyFrame > cpi->MinimumDistanceToKeyFrame)
) ||
(cpi->LastKeyFrame >= (ogg_uint32_t)cpi->ForceKeyFrameEvery) ) {
        CompressKeyFrame(cpi);  /* Code a key frame */
return;
}
}
/* Increment the frames since last key frame count */
cpi->LastKeyFrame++;
if ( cpi->MotionScore > 0 ){
cpi->DropCount = 0;
/* Proceed with the frame update. */
UpdateFrame(cpi);
/* Note the Quantizer used for each block coded. */
for ( i = 0; i < cpi->pb.UnitFragments; i++ ){
if ( cpi->pb.display_fragments[i] ){
cpi->FragmentLastQ[i] = cpi->pb.ThisFrameQualityValue;
}
}
}
}else{
if (cpi->NoDrops == 1){
UpdateFrame(cpi);
}
}
}
void UpdateFrame(CP_INSTANCE *cpi){
ogg_int32_t AvKeyFrameFrequency =
(ogg_int32_t) (cpi->CurrentFrame / cpi->KeyFrameCount);
ogg_int32_t AvKeyFrameBytes =
(ogg_int32_t) (cpi->TotKeyFrameBytes / cpi->KeyFrameCount);
ogg_int32_t TotalWeight = 0;
double CorrectionFactor;
ogg_uint32_t fragment_count = 0;
ogg_uint32_t diff_tokens = 0;
ogg_uint32_t bits_per_token = 0;
/* Reset the DC predictors. */
cpi->pb.LastIntraDC = 0;
cpi->pb.InvLastIntraDC = 0;
cpi->pb.LastInterDC = 0;
cpi->pb.InvLastInterDC = 0;
/* Initialise bit packing mechanism. */
InitAddBitsToBuffer(cpi);
/* Write out the frame header information including size. */
WriteFrameHeader(cpi);
/* Copy back any extra frags that are to be updated by the codec
as part of the background cleanup task */
CopyBackExtraFrags(cpi);
/* Encode the data. */
EncodeData(cpi);
/* Adjust drop frame trigger. */
if ( GetFrameType(&cpi->pb) != BASE_FRAME ) {
/* Apply decay factor then add in the last frame size. */
cpi->DropFrameTriggerBytes =
((cpi->DropFrameTriggerBytes * (DF_CANDIDATE_WINDOW-1)) /
DF_CANDIDATE_WINDOW) + oggpackB_bytes(&cpi->opb);
}else{
/* Increase cpi->DropFrameTriggerBytes a little. Just after a key
frame may actually be a good time to drop a frame. */
cpi->DropFrameTriggerBytes =
(cpi->DropFrameTriggerBytes * DF_CANDIDATE_WINDOW) /
(DF_CANDIDATE_WINDOW-1);
}
/* Test for overshoot which may require a dropped frame next time
around. If we are already in a drop frame condition but the
previous frame was not dropped then the threshold for continuing
to allow dropped frames is reduced. */
if ( cpi->DropFrameCandidate ) {
if ( cpi->DropFrameTriggerBytes >
(cpi->frame_target_rate * (DF_CANDIDATE_WINDOW+1)) )
cpi->DropFrameCandidate = 1;
else
cpi->DropFrameCandidate = 0;
} else {
if ( cpi->DropFrameTriggerBytes >
(cpi->frame_target_rate * ((DF_CANDIDATE_WINDOW*2)-2)) )
cpi->DropFrameCandidate = 1;
else
cpi->DropFrameCandidate = 0;
}
/* Update the BpbCorrectionFactor variable according to whether or
not we were close enough with our selection of DCT quantiser. */
if ( GetFrameType(&cpi->pb) != BASE_FRAME ) {
/* Work out a size correction factor. */
CorrectionFactor = (double)oggpackB_bytes(&cpi->opb) /
(double)cpi->ThisFrameTargetBytes;
if ( (CorrectionFactor > 1.05) &&
(cpi->pb.ThisFrameQualityValue <
cpi->pb.QThreshTable[cpi->Configuration.ActiveMaxQ]) ) {
CorrectionFactor = 1.0 + ((CorrectionFactor - 1.0)/2);
if ( CorrectionFactor > 1.5 )
cpi->BpbCorrectionFactor *= 1.5;
else
cpi->BpbCorrectionFactor *= CorrectionFactor;
/* Keep BpbCorrectionFactor within limits */
if ( cpi->BpbCorrectionFactor > MAX_BPB_FACTOR )
cpi->BpbCorrectionFactor = MAX_BPB_FACTOR;
} else if ( (CorrectionFactor < 0.95) &&
(cpi->pb.ThisFrameQualityValue > VERY_BEST_Q) ){
CorrectionFactor = 1.0 - ((1.0 - CorrectionFactor)/2);
if ( CorrectionFactor < 0.75 )
cpi->BpbCorrectionFactor *= 0.75;
else
cpi->BpbCorrectionFactor *= CorrectionFactor;
/* Keep BpbCorrectionFactor within limits */
if ( cpi->BpbCorrectionFactor < MIN_BPB_FACTOR )
cpi->BpbCorrectionFactor = MIN_BPB_FACTOR;
}
}
/* Adjust carry over and or key frame context. */
if ( GetFrameType(&cpi->pb) == BASE_FRAME ) {
/* Adjust the key frame context unless the key frame was very small */
AdjustKeyFrameContext(cpi);
} else {
/* Update the frame carry over */
cpi->CarryOver += ((ogg_int32_t)cpi->frame_target_rate -
(ogg_int32_t)oggpackB_bytes(&cpi->opb));
}
cpi->TotalByteCount += oggpackB_bytes(&cpi->opb);
}
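/* Illustration only (not part of the original sources): a small worked
   example of the bytes-per-block correction applied in UpdateFrame()
   above when a delta frame overshoots its byte target, ignoring the
   additional quantiser-range check in the real code.  Sizes are
   hypothetical. */
#include <stdio.h>
int main(void){
  double ThisFrameTargetBytes = 1000.0;   /* hypothetical target */
  double ActualBytes          = 1300.0;   /* hypothetical overshoot */
  double BpbCorrectionFactor  = 1.0;
  double CorrectionFactor = ActualBytes / ThisFrameTargetBytes;   /* 1.30 */
  if(CorrectionFactor > 1.05){
    CorrectionFactor = 1.0 + ((CorrectionFactor - 1.0) / 2);      /* 1.15 */
    BpbCorrectionFactor *= (CorrectionFactor > 1.5) ? 1.5 : CorrectionFactor;
  }
  printf("new correction factor = %.2f\n", BpbCorrectionFactor);  /* 1.15 */
  return 0;
}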
/********************** The toplevel ***********************/
const char *theora_encode_version(void){
return CommentString;
}
int theora_encode_init( CP_INSTANCE *cpi , COMP_CONFIG *CompConfig ) {
int i;
InitCPInstance(cpi);
for(i=0;i<=64;i++) {
if(i<=1)cpi->pb.idct[i]=IDct1;
else if(i<=10)cpi->pb.idct[i]=IDct10;
else cpi->pb.idct[i]=IDctSlow;
}
cpi->pb.Configuration.HFragPixels = 8;
cpi->pb.Configuration.VFragPixels = 8;
/* set the version number */
cpi->pb.Vp3VersionNo = CURRENT_ENCODE_VERSION;
/* Set the video frame size. */
cpi->pb.YPlaneSize = 0xFFF;
cpi->pb.Configuration.VideoFrameHeight =0xFFF;
cpi->pb.Configuration.VideoFrameHeight =
(CompConfig->FrameSize & 0x0000FFFF);
cpi->pb.Configuration.VideoFrameWidth =
((CompConfig->FrameSize & 0xFFFF0000) >> 16);
/* Note the height and width in the pre-processor control structure. */
cpi->ScanConfig.VideoFrameHeight = cpi->pb.Configuration.VideoFrameHeight;
cpi->ScanConfig.VideoFrameWidth = cpi->pb.Configuration.VideoFrameWidth;
/* Set data rate related variables. */
cpi->Configuration.TargetBandwidth = (CompConfig->TargetBitRate * 1000) / 8;
/* Set the target minimum key frame frequency */
cpi->KeyFrameFrequency = CompConfig->KeyFrameFrequency;
/* Set key frame data rate target */
cpi->KeyFrameDataTarget = (CompConfig->KeyFrameDataTarget * 1000) / 8;
/* Set the quality settings. */
ConfigureQuality( cpi, CompConfig->Quality );
/* Set the frame rate variables. */
cpi->Configuration.OutputFrameRate = CompConfig->FrameRate;
if ( cpi->Configuration.OutputFrameRate < 1 )
cpi->Configuration.OutputFrameRate = 1;
else if ( cpi->Configuration.OutputFrameRate > 30 )
cpi->Configuration.OutputFrameRate = 30;
cpi->frame_target_rate =
(cpi->Configuration.TargetBandwidth / cpi->Configuration.OutputFrameRate);
/* Initialise image format details */
if(!InitFrameDetails(&cpi->pb)){
return -1;
}
if(!EAllocateFragmentInfo(cpi)){
DeleteFragmentInfo(&cpi->pb);
DeleteFrameInfo(&cpi->pb);
return -1;
}
if(!EAllocateFrameInfo(cpi)){
DeleteFragmentInfo(&cpi->pb);
DeleteFrameInfo(&cpi->pb);
EDeleteFragmentInfo(cpi);
return -1;
}
/* Set up pre-processor config pointers. */
cpi->ScanConfig.Yuv0ptr = cpi->yuv0ptr;
cpi->ScanConfig.Yuv1ptr = cpi->yuv1ptr;
cpi->ScanConfig.SrfWorkSpcPtr = cpi->ConvDestBuffer;
cpi->ScanConfig.disp_fragments = cpi->pb.display_fragments;
cpi->ScanConfig.RegionIndex = cpi->pb.pixel_index_table;
cpi->ScanConfig.HFragPixels =
(unsigned char)cpi->pb.Configuration.HFragPixels;
cpi->ScanConfig.VFragPixels =
(unsigned char)cpi->pb.Configuration.VFragPixels;
/* Initialise the pre-processor module. */
if(!ScanYUVInit(cpi->pp, &(cpi->ScanConfig))) {
DeleteFragmentInfo(&cpi->pb);
DeleteFrameInfo(&cpi->pb);
EDeleteFragmentInfo(cpi);
EDeleteFrameInfo(cpi);
return -1;
}
/* Set encoder flags. */
cpi->DropFramesAllowed = CompConfig->AllowDF;
cpi->QuickCompress = CompConfig->QuickCompress;
cpi->AutoKeyFrameEnabled = CompConfig->AutoKeyFrameEnabled;
cpi->MinimumDistanceToKeyFrame = CompConfig->MinimumDistanceToKeyFrame;
cpi->ForceKeyFrameEvery = CompConfig->ForceKeyFrameEvery;
cpi->PreProcFilterLevel = CompConfig->NoiseSensitivity;
cpi->AutoKeyFrameThreshold = CompConfig->AutoKeyFrameThreshold;
cpi->Sharpness = CompConfig->Sharpness;
/* Initialise Motion compensation */
InitMotionCompensation(cpi);
/* Initialise the compression process. */
/* We always start at frame 1 */
cpi->CurrentFrame = 1;
/* Reset the rate targeting correction factor. */
cpi->BpbCorrectionFactor = 1.0;
cpi->TotalByteCount = 0;
cpi->TotalMotionScore = 0;
/* Up regulation variables. */
cpi->FinalPassLastPos = 0; /* Used to regulate a final unrestricted pass. */
cpi->LastEndSB = 0; /* Where we were in the loop last time. */
cpi->ResidueLastEndSB = 0; /* Where we were in the residue update
loop last time. */
/* Select the appropriate huffman set. */
SelectHuffmanSet(&cpi->pb);
/* This makes sure encoder version specific tables are initialised */
InitQTables(&cpi->pb);
/* Indicate that the next frame to be compressed is the first in the
current clip. */
cpi->ThisIsFirstFrame = TRUE;
return 0;
}
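/* Illustration only (not part of the original sources): a caller-side
   sketch of how COMP_CONFIG.FrameSize appears to be packed, judging from
   the way theora_encode_init() unpacks it above: width in the high 16
   bits, height in the low 16 bits.  The dimensions are hypothetical. */
#include <stdio.h>
int main(void){
  unsigned int width = 352, height = 288;           /* hypothetical CIF */
  unsigned int FrameSize = (width << 16) | (height & 0xFFFFu);
  printf("w=%u h=%u\n",
         (FrameSize & 0xFFFF0000u) >> 16,           /* prints 352 */
         FrameSize & 0x0000FFFFu);                  /* prints 288 */
  return 0;
}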
int theora_encode_framein( CP_INSTANCE *cpi,
YUV_INPUT_BUFFER_CONFIG *YuvInputData){
ogg_int32_t i;
unsigned char *LocalDataPtr;
unsigned char *InputDataPtr;
/* If frame size has changed, abort out for now */
if (YuvInputData->YHeight != (INT32)cpi->pb.Configuration.VideoFrameHeight ||
YuvInputData->YWidth != (INT32)cpi->pb.Configuration.VideoFrameWidth )
return(-1);
  /* Copy over input YUV to internal YUV buffers. */
/* First copy over the Y data */
LocalDataPtr = cpi->yuv1ptr;
InputDataPtr = (unsigned char *)YuvInputData->YBuffer;
for ( i = 0; i < YuvInputData->YHeight; i++ ){
memcpy( LocalDataPtr, InputDataPtr, YuvInputData->YWidth );
LocalDataPtr += YuvInputData->YWidth;
InputDataPtr += YuvInputData->YStride;
}
/* Now copy over the U data */
LocalDataPtr = &cpi->yuv1ptr[(YuvInputData->YHeight * YuvInputData->YWidth)];
InputDataPtr = (unsigned char *)YuvInputData->UBuffer;
for ( i = 0; i < YuvInputData->UVHeight; i++ ){
memcpy( LocalDataPtr, InputDataPtr, YuvInputData->UVWidth );
LocalDataPtr += YuvInputData->UVWidth;
InputDataPtr += YuvInputData->UVStride;
}
/* Now copy over the V data */
LocalDataPtr =
&cpi->yuv1ptr[((YuvInputData->YHeight*YuvInputData->YWidth)*5)/4];
InputDataPtr = (unsigned char *)YuvInputData->VBuffer;
for ( i = 0; i < YuvInputData->UVHeight; i++ ){
memcpy( LocalDataPtr, InputDataPtr, YuvInputData->UVWidth );
LocalDataPtr += YuvInputData->UVWidth;
InputDataPtr += YuvInputData->UVStride;
}
/* Special case for first frame */
if ( cpi->ThisIsFirstFrame ){
CompressFirstFrame(cpi);
cpi->ThisIsFirstFrame = FALSE;
cpi->ThisIsKeyFrame = FALSE;
} else if ( cpi->ThisIsKeyFrame ) {
CompressKeyFrame(cpi);
cpi->ThisIsKeyFrame = FALSE;
} else {
/* Compress the frame. */
CompressFrame( cpi, (unsigned int) cpi->CurrentFrame );
}
/* Update stats variables. */
cpi->LastFrameSize = oggpackB_bytes(&cpi->opb);
cpi->CurrentFrame++;
return 0;
}
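/* Illustration only (not part of the original sources): a sketch of the
   internal yuv1ptr plane layout implied by the copy loops in
   theora_encode_framein() above for 4:2:0 input: the Y plane first, the
   U plane starting at YHeight*YWidth, and the V plane starting at
   (YHeight*YWidth*5)/4.  Dimensions are hypothetical. */
#include <stdio.h>
int main(void){
  long YWidth = 352, YHeight = 288;          /* hypothetical CIF frame */
  long UOffset = YHeight * YWidth;           /* prints 101376 */
  long VOffset = (YHeight * YWidth * 5) / 4; /* prints 126720 */
  printf("Y@0 U@%ld V@%ld\n", UOffset, VOffset);
  return 0;
}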
  /* return whether or not we are a key frame */
iskey=GetFrameType(&cpi->pb);
if ( iskey == 0 )
*is_key = 1;
else
*is_key = 0;
return ret_val;
}
/****************************************************************************
*
* ROUTINE : StopEncoder
*
* INPUTS : None
*
* OUTPUTS : None.
*
* RETURNS : None .
*
* FUNCTION : Stops the encoder and grabber
*
* SPECIAL NOTES : None.
*
*
* ERRORS : None.
*
****************************************************************************/
BOOL CCONV StopEncoder(CP_INSTANCE **cpi)
{
if(*cpi)
{
DeleteFragmentInfo(&(*cpi)->pb);
DeleteFrameInfo(&(*cpi)->pb);
EDeleteFragmentInfo((*cpi));
EDeleteFrameInfo((*cpi));
/* Re-set Output buffer. */
(*cpi)->BufferedOutputBytes = 0;
DeleteCPInstance(cpi);
}
return TRUE;
}