[xiph-commits] r15071 - branches/theora-thusnelda/lib/enc
xiphmont at svn.xiph.org
Thu Jun 26 04:28:07 PDT 2008
Author: xiphmont
Date: 2008-06-26 04:28:07 -0700 (Thu, 26 Jun 2008)
New Revision: 15071
Modified:
branches/theora-thusnelda/lib/enc/codec_internal.h
branches/theora-thusnelda/lib/enc/dct_encode.c
branches/theora-thusnelda/lib/enc/encode.c
branches/theora-thusnelda/lib/enc/encoder_huffman.h
branches/theora-thusnelda/lib/enc/frinit.c
Log:
Commit for distributed debugging. THIS VERSION IS BROKEN: DON'T USE IT
Modified: branches/theora-thusnelda/lib/enc/codec_internal.h
===================================================================
--- branches/theora-thusnelda/lib/enc/codec_internal.h 2008-06-25 03:10:22 UTC (rev 15070)
+++ branches/theora-thusnelda/lib/enc/codec_internal.h 2008-06-26 11:28:07 UTC (rev 15071)
@@ -205,7 +205,6 @@
/* SuperBlock, MacroBLock and Fragment Information */
unsigned char *frag_coded;
ogg_uint32_t *frag_buffer_index;
- unsigned char *frag_nonzero;
ogg_int16_t *frag_dc;
dct_t *frag_dct;
Modified: branches/theora-thusnelda/lib/enc/dct_encode.c
===================================================================
--- branches/theora-thusnelda/lib/enc/dct_encode.c 2008-06-25 03:10:22 UTC (rev 15070)
+++ branches/theora-thusnelda/lib/enc/dct_encode.c 2008-06-26 11:28:07 UTC (rev 15071)
@@ -30,6 +30,41 @@
64,64,64,64,64,64,64,64,
64,64,64,64,64,64,64,64};
+/* used for the DC encoding after AC encoding has finished; need to be
+ able to undo AC tokens when zero runs cause things to be moved into
+ the DC token stack from the AC coefficient 1 token stack */
+static void replace_AC1_token(CP_INSTANCE *cpi, int bump, int pos,
+ int token, int eb, int fi){
+ int oldchroma = (pos>=cpi->dct_token_ycount[1]);
+ int newchroma = (oldchroma || (bump && (pos+1 == cpi->dct_token_ycount[1])));
+ int oldtoken = cpi->dct_token[1][pos];
+ int i,offset = acoffset[1];
+
+ /* update huffman tree choice metrics */
+ for ( i = 0; i < AC_HUFF_CHOICES; i++)
+ cpi->ac_bits[oldchroma][i] -= cpi->HuffCodeLengthArray_VP3x[offset+i][oldtoken];
+ if(token < DCT_NOOP)
+ for ( i = 0; i < AC_HUFF_CHOICES; i++)
+ cpi->ac_bits[newchroma][i] -= cpi->HuffCodeLengthArray_VP3x[offset+i][token];
+
+ /* replace the token itself */
+ cpi->dct_token[1][pos] = token;
+ cpi->dct_token_eb[1][pos] = eb;
+#ifdef COLLECT_METRICS
+ cpi->dct_token_frag[1][pos] = fi;
+#endif
+
+ /* although we have the same token count after as before, the
+ preceding token may have been in the Y plane where the new one
+ is in a chroma plane. This could happen, e.g., when an EOB run
+ begins on the last fragment of the Y plane and continues into
+ chroma, but subsequent DC encoding moves the EOB start for that
+ last fragment into the DC stack. The new coeff 1 EOB token would
+ go into chroma. */
+ if(!oldchroma && newchroma) cpi->dct_token_ycount[1]--;
+
+}
+
static void add_token(CP_INSTANCE *cpi, int chroma, int coeff,
unsigned char token, ogg_uint16_t eb, int fi){
@@ -81,52 +116,45 @@
}
}
-static void emit_eob_run(CP_INSTANCE *cpi, int chroma, int pos, int run){
+static void tokenize_eob_run(int run, int *token, int *eb){
if ( run <= 3 ) {
if ( run == 1 ) {
- add_token(cpi, chroma, pos, DCT_EOB_TOKEN, 0, 0);
+ *token = DCT_EOB_TOKEN;
} else if ( run == 2 ) {
- add_token(cpi, chroma, pos, DCT_EOB_PAIR_TOKEN, 0, 0);
+ *token = DCT_EOB_PAIR_TOKEN;
} else {
- add_token(cpi, chroma, pos, DCT_EOB_TRIPLE_TOKEN, 0, 0);
+ *token = DCT_EOB_TRIPLE_TOKEN;
}
+ *eb=0;
} else {
if ( run < 8 ) {
- add_token(cpi, chroma, pos, DCT_REPEAT_RUN_TOKEN, run-4, 0);
+ *token = DCT_REPEAT_RUN_TOKEN;
+ *eb = run-4;
} else if ( run < 16 ) {
- add_token(cpi, chroma, pos, DCT_REPEAT_RUN2_TOKEN, run-8, 0);
+ *token = DCT_REPEAT_RUN2_TOKEN;
+ *eb = run-8;
} else if ( run < 32 ) {
- add_token(cpi, chroma, pos, DCT_REPEAT_RUN3_TOKEN, run-16, 0);
+ *token = DCT_REPEAT_RUN3_TOKEN;
+ *eb = run-16;
} else if ( run < 4096) {
- add_token(cpi, chroma, pos, DCT_REPEAT_RUN4_TOKEN, run, 0);
+ *token = DCT_REPEAT_RUN4_TOKEN;
+ *eb = run;
}
}
}
+static void emit_eob_run(CP_INSTANCE *cpi, int chroma, int pos, int run){
+ int token=0,eb=0;
+ tokenize_eob_run(run, &token, &eb);
+ add_token(cpi, chroma, pos, token, eb, 0);
+}
+
static void prepend_eob_run(CP_INSTANCE *cpi, int chroma, int pos, int run){
- if ( run <= 3 ) {
- if ( run == 1 ) {
- prepend_token(cpi, chroma, pos, DCT_EOB_TOKEN, 0, 0);
- } else if ( run == 2 ) {
- prepend_token(cpi, chroma, pos, DCT_EOB_PAIR_TOKEN, 0, 0);
- } else {
- prepend_token(cpi, chroma, pos, DCT_EOB_TRIPLE_TOKEN, 0, 0);
- }
-
- } else {
-
- if ( run < 8 ) {
- prepend_token(cpi, chroma, pos, DCT_REPEAT_RUN_TOKEN, run-4, 0);
- } else if ( run < 16 ) {
- prepend_token(cpi, chroma, pos, DCT_REPEAT_RUN2_TOKEN, run-8, 0);
- } else if ( run < 32 ) {
- prepend_token(cpi, chroma, pos, DCT_REPEAT_RUN3_TOKEN, run-16, 0);
- } else if ( run < 4096) {
- prepend_token(cpi, chroma, pos, DCT_REPEAT_RUN4_TOKEN, run, 0);
- }
- }
+ int token=0,eb=0;
+ tokenize_eob_run(run, &token, &eb);
+ prepend_token(cpi, chroma, pos, token, eb, 0);
}
static void TokenizeDctValue (CP_INSTANCE *cpi,
@@ -210,32 +238,32 @@
}
}
-static void tokenize_block(CP_INSTANCE *cpi, int fi, int chroma,
- int *eob_ypre, int *eob_pre,
- int *eob_yrun, int *eob_run){
-
+/* No final DC to encode yet (DC prediction hasn't been done), so
+ simply assume there will be a nonzero DC value and code it. That's
+ not a true assumption, but it can be fixed up as DC is tokenized
+ later */
+static void tokenize_AC(CP_INSTANCE *cpi, int fi, int chroma,
+ int *eob_ypre, int *eob_pre,
+ int *eob_yrun, int *eob_run){
+
unsigned char *cp=cpi->frag_coded;
if ( cp[fi] ) {
- int coeff = 0;
+ int coeff = 1; /* skip DC for now */
dct_t *dct = &cpi->frag_dct[fi];
- cpi->frag_nonzero[fi] = 0;
while(coeff < BLOCK_SIZE){
ogg_int16_t val = dct->data[coeff];
int zero_run;
int i = coeff;
- cpi->frag_nonzero[fi] = coeff;
-
while( !val && (++i < BLOCK_SIZE) )
val = dct->data[i];
if ( i == BLOCK_SIZE ){
/* if there are no other tokens in this group yet, set up to be
- prepended later. Group 0 is the exception (can't be
- prepended) */
- if(cpi->dct_token_count[coeff] == 0 && coeff){
+ prepended later. */
+ if(cpi->dct_token_count[coeff] == 0){
/* prepending requires space to do so-- save some at front of token stack */
if(eob_pre[coeff]==0 || (eob_pre[coeff]&0x8ff)==0x8ff){ /* 0xfff is a safe overallocation,
saves a mod 4095 */
@@ -245,11 +273,11 @@
cpi->dct_token_frag[coeff]++;
#endif
}
-
+
/* finally, track pre-run */
eob_pre[coeff]++;
if(!chroma)eob_ypre[coeff]++;
-
+
#ifdef COLLECT_METRICS
cpi->dct_eob_fi_stack[coeff][cpi->dct_eob_fi_count[coeff]++]=fi;
#endif
@@ -262,7 +290,7 @@
eob_run[coeff]++;
if(!chroma)eob_yrun[coeff]++;
-
+
#ifdef COLLECT_METRICS
cpi->dct_eob_fi_stack[coeff][cpi->dct_eob_fi_count[coeff]++]=fi;
#endif
@@ -279,8 +307,12 @@
zero_run = i-coeff;
if (zero_run){
ogg_uint32_t absval = abs(val);
- if ( ((absval == 1) && (zero_run <= 17)) ||
- ((absval <= 3) && (zero_run <= 3)) ) {
+ int adj = (coeff>1); /* implement a minor restriction on
+ stack 1 so that we know during DC
+ fixups that extending a dctrun token
+ in stack 1 will never overflow */
+ if ( ((absval == 1) && (zero_run < 17+adj)) ||
+ ((absval <= 3) && (zero_run < 3+adj))){
TokenizeDctRunValue( cpi, chroma, coeff, zero_run, val, fi);
coeff = i+1;
}else{
@@ -299,6 +331,244 @@
}
}
+static int decode_eob_token(int token, int eb){
+ switch(token){
+ case DCT_EOB_TOKEN:
+ return 1;
+ case DCT_EOB_PAIR_TOKEN:
+ return 2;
+ case DCT_EOB_TRIPLE_TOKEN:
+ return 3;
+ case DCT_REPEAT_RUN_TOKEN:
+ return eb+4;
+ case DCT_REPEAT_RUN2_TOKEN:
+ return eb+8;
+ case DCT_REPEAT_RUN3_TOKEN:
+ return eb+16;
+ case DCT_REPEAT_RUN4_TOKEN:
+ return eb;
+ default:
+ return 0;
+ }
+}
+
+static int decode_zrl_token(int token, int eb){
+ switch(token){
+ case DCT_SHORT_ZRL_TOKEN:
+ case DCT_ZRL_TOKEN:
+ return eb+1;
+ default:
+ return 0;
+ }
+}
+
+static int decode_dct_token(int token, int eb){
+ switch(token){
+ case ONE_TOKEN:
+ return 1;
+ case MINUS_ONE_TOKEN:
+ return -1;
+ case TWO_TOKEN:
+ return 2;
+ case MINUS_TWO_TOKEN:
+ return -2;
+ case LOW_VAL_TOKENS:
+ case LOW_VAL_TOKENS+1:
+ case LOW_VAL_TOKENS+2:
+ case LOW_VAL_TOKENS+3:
+ return (eb ? -(DCT_VAL_CAT2_MIN+token-LOW_VAL_TOKENS) : DCT_VAL_CAT2_MIN+token-LOW_VAL_TOKENS);
+ case DCT_VAL_CATEGORY3:
+ return ((eb & 0x2) ? -(DCT_VAL_CAT3_MIN+(eb&0x1)) : DCT_VAL_CAT3_MIN+(eb&0x1));
+ case DCT_VAL_CATEGORY4:
+ return ((eb & 0x4) ? -(DCT_VAL_CAT4_MIN+(eb&0x3)) : DCT_VAL_CAT4_MIN+(eb&0x3));
+ case DCT_VAL_CATEGORY5:
+ return ((eb & 0x8) ? -(DCT_VAL_CAT5_MIN+(eb&0x7)) : DCT_VAL_CAT5_MIN+(eb&0x7));
+ case DCT_VAL_CATEGORY6:
+ return ((eb & 0x10) ? -(DCT_VAL_CAT6_MIN+(eb&0xf)) : DCT_VAL_CAT6_MIN+(eb&0xf));
+ case DCT_VAL_CATEGORY7:
+ return ((eb & 0x20) ? -(DCT_VAL_CAT7_MIN+(eb&0x1f)) : DCT_VAL_CAT7_MIN+(eb&0x1f));
+ case DCT_VAL_CATEGORY8:
+ return ((eb & 0x200) ? -(DCT_VAL_CAT8_MIN+(eb&0x1ff)) : DCT_VAL_CAT8_MIN+(eb&0x1ff));
+ default:
+ return 0;
+ }
+}
+
+static int decode_dctrun_token(int token, int eb, int *val){
+ switch(token){
+ case DCT_RUN_CATEGORY1:
+ case DCT_RUN_CATEGORY1+1:
+ case DCT_RUN_CATEGORY1+2:
+ case DCT_RUN_CATEGORY1+3:
+ case DCT_RUN_CATEGORY1+4:
+ *val = (eb ? -1 : 1);
+ return token - DCT_RUN_CATEGORY1 + 1;
+ case DCT_RUN_CATEGORY1B:
+ *val = ((eb&0x4) ? -1 : 1);
+ return (eb&0x3)+6;
+ case DCT_RUN_CATEGORY1C:
+ *val = ((eb&0x8) ? -1 : 1);
+ return (eb&0x7)+10;
+ case DCT_RUN_CATEGORY2:
+ *val = ( (eb&0x1) ? -((eb&0x1)+2) : (eb&0x1)+2 );
+ return 1;
+ case DCT_RUN_CATEGORY2B:
+ *val = ( (eb&0x4) ? -(((eb&0x2)>>1)+2) : ((eb&0x2)>>1)+2);
+ return (eb&0x1)+2;
+ default:
+ *val=0;
+ return 0;
+ }
+}
+
+#include<stdio.h>
+/* called after AC tokenization is complete, because DC coding has to
+ happen after DC predict, which has to happen after the
+ Hilbert-ordered TQT loop */
+static void tokenize_DC(CP_INSTANCE *cpi, int fi, int chroma,
+ int *eob_yrun, int *eob_run,
+ int *coeff1_idx, int *coeff1_run){
+
+ unsigned char *cp=cpi->frag_coded;
+ if ( cp[fi] ) {
+ dct_t *dct = &cpi->frag_dct[fi];
+ int val = dct->data[0];
+ int token1 = cpi->dct_token[1][*coeff1_idx];
+ int eb1 = cpi->dct_token_eb[1][*coeff1_idx];
+
+ /* track the coeff 1 token stack: do we need to start a new EOB run? */
+ if (*coeff1_run==0) *coeff1_run = decode_eob_token(token1, eb1);
+
+ if(val){
+ /* nonzero val, we're not going to need any fixup */
+ /* Emit DC EOB run if any in pending */
+ if(eob_run[0]){
+ emit_eob_run(cpi,(eob_yrun[0]==0),0,eob_run[0]);
+ eob_run[0]=0;
+ eob_yrun[0]=0;
+ }
+
+ /* Emit value token */
+ TokenizeDctValue(cpi, chroma, 0, val, fi);
+
+ }else{
+
+ /* zero DC value; that means the entry in coeff position 1
+ should have been a zero-run or eob token located in the DC coeff
+ position */
+
+ if(*coeff1_run > 0){
+
+ /* emit zero short run */
+ add_token(cpi, chroma, 0, DCT_SHORT_ZRL_TOKEN, 0, fi);
+
+#if 0
+ /* coeff1 stack is in the midst of an EOB run; subtract one from its EOB run */
+ int run = decode_eob_token(token1, eb1);
+
+ if(run==1){
+ /* the EOB 'run' was only one EOB; the token gets nopped */
+ replace_AC1_token(cpi, 0, *coeff1_idx, DCT_NOOP, 0, 0);
+ }else{
+ /* if the coeff 1 EOB run started at this fragment, we're
+ not just reducing the length of the EOB, we're also
+ 'bumping' the start to the next fragment. It's important
+ to know whether the token is bumped out of luma into
+ chroma */
+ int bump = (run == *coeff1_run);
+ int newtoken,neweb;
+ tokenize_eob_run(run-1, &newtoken, &neweb);
+ replace_AC1_token(cpi, bump, *coeff1_idx, newtoken, neweb, 0);
+ }
+
+ /* start/extend the stack zero eob run */
+ if(eob_run[0] == 4095){
+ emit_eob_run(cpi,(eob_yrun[0]==0),0,4095);
+ eob_run[0] = 0;
+ eob_yrun[0] = 0;
+ }
+
+ eob_run[0]++;
+ if(!chroma)eob_yrun[0]++;
+
+#ifdef COLLECT_METRICS
+ cpi->dct_eob_fi_stack[0][cpi->dct_eob_fi_count[0]++]=fi;
+#endif
+#endif
+
+ }else{
+
+ /* coeff 1 is one of: zerorun, dctrun or dctval */
+ /* A zero-run token is expanded and moved to token stack 0 (stack 1 entry noopped) */
+ /* A dctval may be transformed into a single dctrun that is moved to stack 0,
+ or if it does not fit in a dctrun, we leave the stack 1 entry alone and emit
+ a single length-1 zerorun token for stack 0 */
+ /* A dctrun is extended and moved to stack 0. During AC
+ coding, we restrict the run lengths on dctruns for stack 1
+ so we know there's no chance of overrunning the
+ representable range */
+
+ /* Emit DC EOB run if any in pending */
+ if(eob_run[0]){
+ emit_eob_run(cpi,(eob_yrun[0]==0),0,eob_run[0]);
+ eob_run[0]=0;
+ eob_yrun[0]=0;
+ }
+
+
+ if(token1 <= DCT_ZRL_TOKEN){
+
+ /* emit zero short run */
+ add_token(cpi, chroma, 0, DCT_SHORT_ZRL_TOKEN, 0, fi);
+
+#if 0
+ /* zero-run. Extend and move it */
+
+ int run = decode_zrl_token(token1,eb1);
+ replace_AC1_token(cpi, 0, *coeff1_idx, DCT_NOOP, 0, 0);
+
+ /* Emit zerorun token */
+ if ( run+1 <= 8 )
+ add_token(cpi, chroma, 0, DCT_SHORT_ZRL_TOKEN, run, fi);
+ else
+ add_token(cpi, chroma, 0, DCT_ZRL_TOKEN, run, fi);
+#endif
+ } else if(token1 <= DCT_VAL_CATEGORY8){
+
+ /* DCT value token; will it fit into a dctrun? */
+ int val = decode_dct_token(token1,eb1);
+
+ if(abs(val)<=3){
+ /* emit a dctrun in stack 0, replace the dct token in stack 1*/
+ TokenizeDctRunValue( cpi, chroma, 0, 1, val, fi);
+ replace_AC1_token(cpi, 0, *coeff1_idx, DCT_NOOP, 0, 0);
+ }else{
+ /* leave dct value token alone, emit a short zerorun */
+ add_token(cpi, chroma, 0, DCT_SHORT_ZRL_TOKEN, 0, fi);
+ }
+
+ } else {
+ /* emit zero short run */
+ add_token(cpi, chroma, 0, DCT_SHORT_ZRL_TOKEN, 0, fi);
+
+#if 0
+ /* dctrun token; extend the run by one and move it to stack 0 */
+ int val;
+ int run = decode_dctrun_token(token1,eb1,&val)+1;
+
+ TokenizeDctRunValue( cpi, chroma, 0, run, val, fi);
+ replace_AC1_token(cpi, 0, *coeff1_idx, DCT_NOOP, 0, 0);
+#endif
+ }
+ }
+ }
+
+ /* update stack 1 counters */
+ if (*coeff1_run > 0) (*coeff1_run)--;
+ if (*coeff1_run == 0) (*coeff1_idx)++;
+ }
+}
+
void DPCMTokenize (CP_INSTANCE *cpi){
int eob_run[64];
int eob_pre[64];
@@ -306,6 +576,7 @@
int eob_ypre[64];
int i,sbi;
+ int idx1=0,run1=0;
memset(eob_run, 0, sizeof(eob_run));
memset(eob_pre, 0, sizeof(eob_pre));
@@ -335,7 +606,7 @@
int bi;
for (bi=0; bi<16; bi++ ) {
int fi = sb->f[bi];
- tokenize_block(cpi, fi, 0, eob_ypre, eob_pre, eob_yrun, eob_run);
+ tokenize_AC(cpi, fi, 0, eob_ypre, eob_pre, eob_yrun, eob_run);
}
}
@@ -344,10 +615,29 @@
int bi;
for (bi=0; bi<16; bi++ ) {
int fi = sb->f[bi];
- tokenize_block(cpi, fi, 1, eob_ypre, eob_pre, eob_yrun, eob_run);
+ tokenize_AC(cpi, fi, 1, eob_ypre, eob_pre, eob_yrun, eob_run);
}
}
+ /* for testing; post-facto tokenization of DC with coeff 1 fixups */
+ for (sbi=0; sbi < cpi->super_n[0]; sbi++ ){
+ superblock_t *sb = &cpi->super[0][sbi];
+ int bi;
+ for (bi=0; bi<16; bi++ ) {
+ int fi = sb->f[bi];
+ tokenize_DC(cpi, fi, 0, eob_yrun, eob_run, &idx1, &run1);
+ }
+ }
+
+ for (; sbi < cpi->super_total; sbi++ ){
+ superblock_t *sb = &cpi->super[0][sbi];
+ int bi;
+ for (bi=0; bi<16; bi++ ) {
+ int fi = sb->f[bi];
+ tokenize_DC(cpi, fi, 1, eob_yrun, eob_run, &idx1, &run1);
+ }
+ }
+
/* tie together eob runs at the beginnings/ends of coeff groups */
{
int coeff = 0;
@@ -368,7 +658,7 @@
run = 0;
coeff = i;
chroma = (eob_ypre[i]<=0);
- if(eob_ypre[i]<0)eob_ypre[i]=0;
+ /* if(eob_ypre[i]<0)eob_ypre[i]=0; redundant */
}
if(run){
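[Note on the new EOB-run helpers: tokenize_eob_run() and decode_eob_token() above factor the EOB run-length coding into a reversible mapping. Runs of 1-3 use the dedicated EOB tokens, runs of 4-7, 8-15 and 16-31 store an offset of 4, 8 or 16 in the extra bits, and runs up to 4095 are stored verbatim. The standalone sketch below only illustrates that this mapping round-trips; it uses hypothetical token names rather than the encoder's real constants from encoder_huffman.h and is not part of the commit.]

  #include <assert.h>
  #include <stdio.h>

  /* stand-in token ids; the real values live in encoder_huffman.h */
  enum { EOB1, EOB2, EOB3, RUN4, RUN8, RUN16, RUN32 };

  static void run_to_token(int run, int *token, int *eb){
    if(run==1){ *token=EOB1; *eb=0; }
    else if(run==2){ *token=EOB2; *eb=0; }
    else if(run==3){ *token=EOB3; *eb=0; }
    else if(run<8){  *token=RUN4;  *eb=run-4;  } /* runs 4..7   */
    else if(run<16){ *token=RUN8;  *eb=run-8;  } /* runs 8..15  */
    else if(run<32){ *token=RUN16; *eb=run-16; } /* runs 16..31 */
    else {           *token=RUN32; *eb=run;    } /* 32..4095, stored verbatim */
  }

  static int token_to_run(int token, int eb){
    switch(token){
    case EOB1:  return 1;
    case EOB2:  return 2;
    case EOB3:  return 3;
    case RUN4:  return eb+4;
    case RUN8:  return eb+8;
    case RUN16: return eb+16;
    case RUN32: return eb;
    default:    return 0;
    }
  }

  int main(void){
    int run;
    for(run=1; run<4096; run++){
      int token, eb;
      run_to_token(run, &token, &eb);
      assert(token_to_run(token, eb) == run);
    }
    printf("EOB run mapping round-trips for runs 1..4095\n");
    return 0;
  }
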
Modified: branches/theora-thusnelda/lib/enc/encode.c
===================================================================
--- branches/theora-thusnelda/lib/enc/encode.c 2008-06-25 03:10:22 UTC (rev 15070)
+++ branches/theora-thusnelda/lib/enc/encode.c 2008-06-26 11:28:07 UTC (rev 15071)
@@ -192,17 +192,21 @@
ogg_uint16_t *eb = cpi->dct_token_eb[group];
for(i=0; i<cpi->dct_token_ycount[group]; i++){
- oggpackB_write( opb, cpi->HuffCodeArray_VP3x[huffY][token[i]],
- cpi->HuffCodeLengthArray_VP3x[huffY][token[i]] );
- if (cpi->ExtraBitLengths_VP3x[token[i]] > 0)
- oggpackB_write( opb, eb[i], cpi->ExtraBitLengths_VP3x[token[i]] );
+ if(token[i] < DCT_NOOP){
+ oggpackB_write( opb, cpi->HuffCodeArray_VP3x[huffY][token[i]],
+ cpi->HuffCodeLengthArray_VP3x[huffY][token[i]] );
+ if (cpi->ExtraBitLengths_VP3x[token[i]] > 0)
+ oggpackB_write( opb, eb[i], cpi->ExtraBitLengths_VP3x[token[i]] );
+ }
}
for(; i<cpi->dct_token_count[group]; i++){
- oggpackB_write( opb, cpi->HuffCodeArray_VP3x[huffC][token[i]],
- cpi->HuffCodeLengthArray_VP3x[huffC][token[i]] );
- if (cpi->ExtraBitLengths_VP3x[token[i]] > 0)
- oggpackB_write( opb, eb[i], cpi->ExtraBitLengths_VP3x[token[i]] );
+ if(token[i] < DCT_NOOP){
+ oggpackB_write( opb, cpi->HuffCodeArray_VP3x[huffC][token[i]],
+ cpi->HuffCodeLengthArray_VP3x[huffC][token[i]] );
+ if (cpi->ExtraBitLengths_VP3x[token[i]] > 0)
+ oggpackB_write( opb, eb[i], cpi->ExtraBitLengths_VP3x[token[i]] );
+ }
}
}
Modified: branches/theora-thusnelda/lib/enc/encoder_huffman.h
===================================================================
--- branches/theora-thusnelda/lib/enc/encoder_huffman.h 2008-06-25 03:10:22 UTC (rev 15070)
+++ branches/theora-thusnelda/lib/enc/encoder_huffman.h 2008-06-26 11:28:07 UTC (rev 15071)
@@ -73,3 +73,4 @@
/* 32 */
#define MAX_ENTROPY_TOKENS (DCT_RUN_CATEGORY2B + 1) /* 32 */
+#define DCT_NOOP MAX_ENTROPY_TOKENS
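
[Note on DCT_NOOP: valid entropy tokens occupy values 0..MAX_ENTROPY_TOKENS-1, so defining DCT_NOOP as MAX_ENTROPY_TOKENS gives a sentinel that lies just past every Huffman table. The packing loops in encode.c above test token[i] < DCT_NOOP to skip stack entries that the DC fixup pass has nopped out. A minimal sketch of that filtering idea follows; count_writable is a hypothetical helper, and the real loops also emit the extra bits.]

  #include <stdio.h>

  /* tokens 0..MAX_ENTROPY_TOKENS-1 index the Huffman code tables; DCT_NOOP
     (== MAX_ENTROPY_TOKENS) marks a stack entry superseded by a DC-side
     fixup, so it must never reach the bitstream packer. */
  #define MAX_ENTROPY_TOKENS 32
  #define DCT_NOOP MAX_ENTROPY_TOKENS

  /* count the entries a packing loop would actually write */
  static int count_writable(const unsigned char *tokens, int n){
    int i, writable = 0;
    for(i = 0; i < n; i++)
      if(tokens[i] < DCT_NOOP)  /* same guard as the new loops in encode.c */
        writable++;
    return writable;
  }

  int main(void){
    unsigned char stack[5] = {3, DCT_NOOP, 17, DCT_NOOP, 0};
    printf("%d of 5 tokens would be packed\n", count_writable(stack, 5));
    return 0;
  }
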
Modified: branches/theora-thusnelda/lib/enc/frinit.c
===================================================================
--- branches/theora-thusnelda/lib/enc/frinit.c 2008-06-25 03:10:22 UTC (rev 15070)
+++ branches/theora-thusnelda/lib/enc/frinit.c 2008-06-26 11:28:07 UTC (rev 15071)
@@ -30,7 +30,6 @@
if(cpi->dct_token_eb_storage) _ogg_free(cpi->dct_token_eb_storage);
if(cpi->frag_coded) _ogg_free(cpi->frag_coded);
if(cpi->frag_buffer_index) _ogg_free(cpi->frag_buffer_index);
- if(cpi->frag_nonzero) _ogg_free(cpi->frag_nonzero);
if(cpi->frag_dct) _ogg_free(cpi->frag_dct);
if(cpi->frag_dc) _ogg_free(cpi->frag_dc);
#ifdef COLLECT_METRICS
@@ -121,7 +120,6 @@
/* +1; the last entry is the 'invalid' frag, which is always set to not coded as it doesn't really exist */
cpi->frag_coded = calloc(cpi->frag_total+1, sizeof(*cpi->frag_coded));
cpi->frag_buffer_index = calloc(cpi->frag_total, sizeof(*cpi->frag_buffer_index));
- cpi->frag_nonzero = calloc(cpi->frag_total, sizeof(*cpi->frag_nonzero));
cpi->frag_dct = calloc(cpi->frag_total, sizeof(*cpi->frag_dct));
cpi->frag_dc = calloc(cpi->frag_total, sizeof(*cpi->frag_dc));