Libav 0.7.1 - libavcodec/adpcm.c
/*
 * ADPCM codecs
 * Copyright (c) 2001-2003 The ffmpeg Project
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "bytestream.h"

#define BLKSIZE 1024

/* step_table[] and index_table[] are from the ADPCM reference source */
/* This is the index table: */
static const int index_table[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};

static const int step_table[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

/* These are for MS-ADPCM */
/* AdaptationTable[], AdaptCoeff1[], and AdaptCoeff2[] are from libsndfile */
static const int AdaptationTable[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    768, 614, 512, 409, 307, 230, 230, 230
};

static const uint8_t AdaptCoeff1[] = {
    64, 128, 0, 48, 60, 115, 98
};

static const int8_t AdaptCoeff2[] = {
    0, -64, 0, 16, 0, -52, -58
};

/* These are for CD-ROM XA ADPCM */
static const int xa_adpcm_table[5][2] = {
    {   0,   0 },
    {  60,   0 },
    { 115, -52 },
    {  98, -55 },
    { 122, -60 }
};

static const int ea_adpcm_table[] = {
    0, 240, 460, 392, 0, 0, -208, -220, 0, 1,
    3, 4, 7, 8, 10, 11, 0, -1, -3, -4
};

// padded to zero where table size is less than 16
static const int swf_index_tables[4][16] = {
    /*2*/ { -1, 2 },
    /*3*/ { -1, -1, 2, 4 },
    /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
    /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
};

static const int yamaha_indexscale[] = {
    230, 230, 230, 230, 307, 409, 512, 614,
    230, 230, 230, 230, 307, 409, 512, 614
};

static const int yamaha_difflookup[] = {
    1, 3, 5, 7, 9, 11, 13, 15,
    -1, -3, -5, -7, -9, -11, -13, -15
};

/* end of tables */

typedef struct ADPCMChannelStatus {
    int predictor;
    short int step_index;
    int step;
    /* for encoding */
    int prev_sample;

    /* MS version */
    short sample1;
    short sample2;
    int coeff1;
    int coeff2;
    int idelta;
} ADPCMChannelStatus;

typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

typedef struct ADPCMContext {
    ADPCMChannelStatus status[6];
    TrellisPath *paths;
    TrellisNode *node_buf;
    TrellisNode **nodep_buf;
    uint8_t *trellis_hash;
} ADPCMContext;

#define FREEZE_INTERVAL 128

/* XXX: implement encoding */

#if CONFIG_ENCODERS
static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    ADPCMContext *s = avctx->priv_data;
    uint8_t *extradata;
    int i;
    if (avctx->channels > 2)
        return -1; /* only stereo or mono =) */

    if(avctx->trellis && (unsigned)avctx->trellis > 16U){
        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
        return -1;
    }

    if (avctx->trellis) {
        int frontier = 1 << avctx->trellis;
        int max_paths = frontier * FREEZE_INTERVAL;
        FF_ALLOC_OR_GOTO(avctx, s->paths, max_paths * sizeof(*s->paths), error);
        FF_ALLOC_OR_GOTO(avctx, s->node_buf, 2 * frontier * sizeof(*s->node_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->nodep_buf, 2 * frontier * sizeof(*s->nodep_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->trellis_hash, 65536 * sizeof(*s->trellis_hash), error);
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 / (4 * avctx->channels) + 1; /* each 16 bits sample gives one nibble */
                                                                                             /* and we have 4 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        /* seems frame_size isn't taken into account... have to buffer the samples :-( */
        break;
    case CODEC_ID_ADPCM_IMA_QT:
        avctx->frame_size = 64;
        avctx->block_align = 34 * avctx->channels;
        break;
    case CODEC_ID_ADPCM_MS:
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2; /* each 16 bits sample gives one nibble */
                                                                                       /* and we have 7 bytes per channel overhead */
        avctx->block_align = BLKSIZE;
        avctx->extradata_size = 32;
        extradata = avctx->extradata = av_malloc(avctx->extradata_size);
        if (!extradata)
            return AVERROR(ENOMEM);
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7); /* wNumCoef */
        for (i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, AdaptCoeff2[i] * 4);
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size = BLKSIZE * avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_SWF:
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, 22050 or 44100\n");
            goto error;
        }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    default:
        goto error;
    }

    avctx->coded_frame= avcodec_alloc_frame();
    avctx->coded_frame->key_frame= 1;

    return 0;
error:
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);
    return -1;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    ADPCMContext *s = avctx->priv_data;
    av_freep(&avctx->coded_frame);
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);

    return 0;
}


static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int delta = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta)*4/step_table[c->step_index]) + (delta<0)*8;
    c->prev_sample += ((step_table[c->step_index] * yamaha_difflookup[nibble]) / 8);
    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index = av_clip(c->step_index + index_table[nibble], 0, 88);
    return nibble;
}

static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;

    nibble= sample - predictor;
    if(nibble>=0) bias= c->idelta/2;
    else bias=-c->idelta/2;

    nibble= (nibble + bias) / c->idelta;
    nibble= av_clip(nibble, -8, 7)&0x0F;

    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return nibble;
}

static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c, short sample)
{
    int nibble, delta;

    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta)*4/c->step) + (delta<0)*8;

    c->predictor += ((c->step * yamaha_difflookup[nibble]) / 8);
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);

    return nibble;
}

static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
{
    //FIXME 6% faster if frontier is a compile-time constant
    ADPCMContext *s = avctx->priv_data;
    const int frontier = 1 << avctx->trellis;
    const int stride = avctx->channels;
    const int version = avctx->codec->id;
    TrellisPath *paths = s->paths, *p;
    TrellisNode *node_buf = s->node_buf;
    TrellisNode **nodep_buf = s->nodep_buf;
    TrellisNode **nodes = nodep_buf; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf + frontier;
    int pathn = 0, froze = -1, i, j, k, generation = 0;
    uint8_t *hash = s->trellis_hash;
    memset(hash, 0xff, 65536 * sizeof(*hash));

    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0] = node_buf + frontier;
    nodes[0]->ssd = 0;
    nodes[0]->path = 0;
    nodes[0]->step = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if((version == CODEC_ID_ADPCM_IMA_WAV) || (version == CODEC_ID_ADPCM_IMA_QT) || (version == CODEC_ID_ADPCM_SWF))
        nodes[0]->sample1 = c->prev_sample;
    if(version == CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if(version == CODEC_ID_ADPCM_YAMAHA) {
        if(c->step == 0) {
            nodes[0]->step = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for(i=0; i<n; i++) {
        TrellisNode *t = node_buf + frontier*(i&1);
        TrellisNode **u;
        int sample = samples[i*stride];
        int heap_pos = 0;
        memset(nodes_next, 0, frontier*sizeof(TrellisNode*));
        for(j=0; j<frontier && nodes[j]; j++) {
            // higher j have higher ssd already, so they're likely to yield a suboptimal next sample too
            const int range = (j < frontier/2) ? 1 : 0;
            const int step = nodes[j]->step;
            int nidx;
            if(version == CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) + (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div = (sample - predictor) / step;
                const int nmin = av_clip(div-range, -8, 6);
                const int nmax = av_clip(div+range, -7, 7);
                for(nidx=nmin; nidx<=nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    int pos;\
                    TrellisNode *u;\
                    uint8_t *h;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    /* Check for wraparound, skip such samples completely. \
                     * Note, changing ssd to a 64 bit variable would be \
                     * simpler, avoiding this check, but it's slower on \
                     * x86 32 bit at the moment. */\
                    if (ssd < nodes[j]->ssd)\
                        goto next_##NAME;\
                    /* Collapse any two states with the same previous sample value. \
                     * One could also distinguish states by step and by 2nd to last
                     * sample, but the effects of that are negligible.
                     * Since nodes in the previous generation are iterated
                     * through a heap, they're roughly ordered from better to
                     * worse, but not strictly ordered. Therefore, an earlier
                     * node with the same sample value is better in most cases
                     * (and thus the current is skipped), but not strictly
                     * in all cases. Only skipping samples where ssd >=
                     * ssd of the earlier node with the same sample gives
                     * slightly worse quality, though, for some reason. */ \
                    h = &hash[(uint16_t) dec_sample];\
                    if (*h == generation)\
                        goto next_##NAME;\
                    if (heap_pos < frontier) {\
                        pos = heap_pos++;\
                    } else {\
                        /* Try to replace one of the leaf nodes with the new \
                         * one, but try a different slot each time. */\
                        pos = (frontier >> 1) + (heap_pos & ((frontier >> 1) - 1));\
                        if (ssd > nodes_next[pos]->ssd)\
                            goto next_##NAME;\
                        heap_pos++;\
                    }\
                    *h = generation;\
                    u = nodes_next[pos];\
                    if(!u) {\
                        assert(pathn < FREEZE_INTERVAL<<avctx->trellis);\
                        u = t++;\
                        nodes_next[pos] = u;\
                        u->path = pathn++;\
                    }\
                    u->ssd = ssd;\
                    u->step = STEP_INDEX;\
                    u->sample2 = nodes[j]->sample1;\
                    u->sample1 = dec_sample;\
                    paths[u->path].nibble = nibble;\
                    paths[u->path].prev = nodes[j]->path;\
                    /* Sift the newly inserted node up in the heap to \
                     * restore the heap property. */\
                    while (pos > 0) {\
                        int parent = (pos - 1) >> 1;\
                        if (nodes_next[parent]->ssd <= ssd)\
                            break;\
                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
                        pos = parent;\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16, (AdaptationTable[nibble] * step) >> 8));
                }
            } else if((version == CODEC_ID_ADPCM_IMA_WAV)|| (version == CODEC_ID_ADPCM_IMA_QT)|| (version == CODEC_ID_ADPCM_SWF)) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div-range, -7, 6);\
                int nmax = av_clip(div+range, -6, 7);\
                if(nmin<=0) nmin--; /* distinguish -0 from +0 */\
                if(nmax<0) nmax--;\
                for(nidx=nmin; nidx<=nmax; nidx++) {\
                    const int nibble = nidx<0 ? 7-nidx : nidx;\
                    int dec_sample = predictor + (STEP_TABLE * yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, step_table[step], av_clip(step + index_table[nibble], 0, 88));
            } else { //CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step, av_clip((step * yamaha_indexscale[nibble]) >> 8, 127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u = nodes;
        nodes = nodes_next;
        nodes_next = u;

        generation++;
        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
            generation = 0;
        }

        // prevent overflow
        if(nodes[0]->ssd > (1<<28)) {
            for(j=1; j<frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if(i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for(k=i; k>froze; k--) {
                dst[k] = p->nibble;
                p = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes+1, 0, (frontier-1)*sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for(i=n-1; i>froze; i--) {
        dst[i] = p->nibble;
        p = &paths[p->prev];
    }

    c->predictor = nodes[0]->sample1;
    c->sample1 = nodes[0]->sample1;
    c->sample2 = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step = nodes[0]->step;
    c->idelta = nodes[0]->step;
}

static int adpcm_encode_frame(AVCodecContext *avctx,
                              unsigned char *frame, int buf_size, void *data)
{
    int n, i, st;
    short *samples;
    unsigned char *dst;
    ADPCMContext *c = avctx->priv_data;
    uint8_t *buf;

    dst = frame;
    samples = (short *)data;
    st= avctx->channels == 2;
    /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        n = avctx->frame_size / 8;
        c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
        /* c->status[0].step_index = 0; *//* XXX: not sure how to init the state machine */
        bytestream_put_le16(&dst, c->status[0].prev_sample);
        *dst++ = (unsigned char)c->status[0].step_index;
        *dst++ = 0; /* unknown */
        samples++;
        if (avctx->channels == 2) {
            c->status[1].prev_sample = (signed short)samples[0];
            /* c->status[1].step_index = 0; */
            bytestream_put_le16(&dst, c->status[1].prev_sample);
            *dst++ = (unsigned char)c->status[1].step_index;
            *dst++ = 0;
            samples++;
        }

        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right, 4 bytes left, ... */
        if(avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2*n*8, error);
            adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n*8);
            if(avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+1, buf + n*8, &c->status[1], n*8);
            for(i=0; i<n; i++) {
                *dst++ = buf[8*i+0] | (buf[8*i+1] << 4);
                *dst++ = buf[8*i+2] | (buf[8*i+3] << 4);
                *dst++ = buf[8*i+4] | (buf[8*i+5] << 4);
                *dst++ = buf[8*i+6] | (buf[8*i+7] << 4);
                if (avctx->channels == 2) {
                    uint8_t *buf1 = buf + n*8;
                    *dst++ = buf1[8*i+0] | (buf1[8*i+1] << 4);
                    *dst++ = buf1[8*i+2] | (buf1[8*i+3] << 4);
                    *dst++ = buf1[8*i+4] | (buf1[8*i+5] << 4);
                    *dst++ = buf1[8*i+6] | (buf1[8*i+7] << 4);
                }
            }
            av_free(buf);
        } else
        for (; n>0; n--) {
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[0]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
            dst++;
            *dst = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
            *dst |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
            dst++;
            /* right channel */
            if (avctx->channels == 2) {
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[1]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[3]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[5]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[7]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[9]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
                dst++;
                *dst = adpcm_ima_compress_sample(&c->status[1], samples[13]);
                *dst |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
                dst++;
            }
            samples += 8 * avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_IMA_QT:
    {
        int ch, i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        for(ch=0; ch<avctx->channels; ch++){
            put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
            put_bits(&pb, 7, c->status[ch].step_index);
            if(avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, samples+ch, buf, &c->status[ch], 64);
                for(i=0; i<64; i++)
                    put_bits(&pb, 4, buf[i^1]);
                c->status[ch].prev_sample = c->status[ch].predictor & ~0x7F;
            } else {
                for (i=0; i<64; i+=2){
                    int t1, t2;
                    t1 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+0)+ch]);
                    t2 = adpcm_ima_compress_sample(&c->status[ch], samples[avctx->channels*(i+1)+ch]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
                c->status[ch].prev_sample &= ~0x7F;
            }
        }

        flush_put_bits(&pb);
        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_SWF:
    {
        int i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size*8);

        n = avctx->frame_size-1;

        //Store AdpcmCodeSize
        put_bits(&pb, 2, 2);                //Set 4bits flash adpcm format

        //Init the encoder state
        for(i=0; i<avctx->channels; i++){
            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63); // clip step so it fits 6 bits
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = (signed short)samples[i];
        }

        if(avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
            adpcm_compress_trellis(avctx, samples+2, buf, &c->status[0], n);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples+3, buf+n, &c->status[1], n);
            for(i=0; i<n; i++) {
                put_bits(&pb, 4, buf[i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[n+i]);
            }
            av_free(buf);
        } else {
            for (i=1; i<avctx->frame_size; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels*i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1], samples[2*i+1]));
            }
        }
        flush_put_bits(&pb);
        dst += put_bits_count(&pb)>>3;
        break;
    }
    case CODEC_ID_ADPCM_MS:
        for(i=0; i<avctx->channels; i++){
            int predictor=0;

            *dst++ = predictor;
            c->status[i].coeff1 = AdaptCoeff1[predictor];
            c->status[i].coeff2 = AdaptCoeff2[predictor];
        }
        for(i=0; i<avctx->channels; i++){
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;

            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample2= *samples++;
        }
        for(i=0; i<avctx->channels; i++){
            c->status[i].sample1= *samples++;

            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for(i=0; i<avctx->channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if(avctx->trellis > 0) {
            int n = avctx->block_align - 7*avctx->channels;
            FF_ALLOC_OR_GOTO(avctx, buf, 2*n, error);
            if(avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = (buf[i] << 4) | buf[i+1];
            } else {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = (buf[i] << 4) | buf[n+i];
            }
            av_free(buf);
        } else
        for(i=7*avctx->channels; i<avctx->block_align; i++) {
            int nibble;
            nibble = adpcm_ms_compress_sample(&c->status[ 0], *samples++)<<4;
            nibble|= adpcm_ms_compress_sample(&c->status[st], *samples++);
            *dst++ = nibble;
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        n = avctx->frame_size / 2;
        if(avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2*n*2, error);
            n *= 2;
            if(avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                for(i=0; i<n; i+=2)
                    *dst++ = buf[i] | (buf[i+1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                adpcm_compress_trellis(avctx, samples+1, buf+n, &c->status[1], n);
                for(i=0; i<n; i++)
                    *dst++ = buf[i] | (buf[n+i] << 4);
            }
            av_free(buf);
        } else
            for (n *= avctx->channels; n>0; n--) {
                int nibble;
                nibble  = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
                *dst++ = nibble;
            }
        break;
    default:
    error:
        return -1;
    }
    return dst - frame;
}
#endif //CONFIG_ENCODERS

static av_cold int adpcm_decode_init(AVCodecContext * avctx)
{
    ADPCMContext *c = avctx->priv_data;
    unsigned int max_channels = 2;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3:
        max_channels = 6;
        break;
    }
    if(avctx->channels > max_channels){
        return -1;
    }

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_CT:
        c->status[0].step = c->status[1].step = 511;
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->bits_per_coded_sample != 4) {
            av_log(avctx, AV_LOG_ERROR, "Only 4-bit ADPCM IMA WAV files are supported\n");
            return -1;
        }
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        if (avctx->extradata && avctx->extradata_size == 2 * 4) {
            c->status[0].predictor = AV_RL32(avctx->extradata);
            c->status[1].predictor = AV_RL32(avctx->extradata + 4);
        }
        break;
    default:
        break;
    }
    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    return 0;
}

static inline short adpcm_ima_expand_nibble(ADPCMChannelStatus *c, char nibble, int shift)
{
    int step_index;
    int predictor;
    int sign, delta, diff, step;

    step = step_table[c->step_index];
    step_index = c->step_index + index_table[(unsigned)nibble];
    if (step_index < 0) step_index = 0;
    else if (step_index > 88) step_index = 88;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * step) >> shift;
    predictor = c->predictor;
    if (sign) predictor -= diff;
    else predictor += diff;

    c->predictor = av_clip_int16(predictor);
    c->step_index = step_index;

    return (short)c->predictor;
}

static inline short adpcm_ms_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int predictor;

    predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
    predictor += (signed)((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);
    c->idelta = (AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16) c->idelta = 16;

    return c->sample1;
}

static inline short adpcm_ct_expand_nibble(ADPCMChannelStatus *c, char nibble)
{
    int sign, delta, diff;
    int new_step;

    sign = nibble & 8;
    delta = nibble & 7;
    /* perform direct multiplication instead of series of jumps proposed by
     * the reference ADPCM implementation since modern CPUs can do the mults
     * quickly enough */
    diff = ((2 * delta + 1) * c->step) >> 3;
    /* predictor update is not so trivial: predictor is multiplied on 254/256 before updating */
    c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
    c->predictor = av_clip_int16(c->predictor);
    /* calculate new step and clamp it to range 511..32767 */
    new_step = (AdaptationTable[nibble & 7] * c->step) >> 8;
    c->step = av_clip(new_step, 511, 32767);

    return (short)c->predictor;
}

static inline short adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, char nibble, int size, int shift)
{
    int sign, delta, diff;

    sign = nibble & (1<<(size-1));
    delta = nibble & ((1<<(size-1))-1);
    diff = delta << (7 + c->step + shift);

    /* clamp result */
    c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);

    /* calculate new step */
    if (delta >= (2*size - 3) && c->step < 3)
        c->step++;
    else if (delta == 0 && c->step > 0)
        c->step--;

    return (short) c->predictor;
}

static inline short adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, unsigned char nibble)
{
    if(!c->step) {
        c->predictor = 0;
        c->step = 127;
    }

    c->predictor += (c->step * yamaha_difflookup[nibble]) / 8;
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);
    return c->predictor;
}

static void xa_decode(short *out, const unsigned char *in,
                      ADPCMChannelStatus *left, ADPCMChannelStatus *right, int inc)
{
    int i, j;
    int shift,filter,f0,f1;
    int s_1,s_2;
    int d,s,t;

    for(i=0;i<4;i++) {

        shift  = 12 - (in[4+i*2] & 15);
        filter = in[4+i*2] >> 4;
        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        s_1 = left->sample1;
        s_2 = left->sample2;

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = (signed char)(d<<4)>>4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo */
            left->sample1 = s_1;
            left->sample2 = s_2;
            s_1 = right->sample1;
            s_2 = right->sample2;
            out = out + 1 - 28*2;
        }

        shift  = 12 - (in[5+i*2] & 15);
        filter = in[5+i*2] >> 4;

        f0 = xa_adpcm_table[filter][0];
        f1 = xa_adpcm_table[filter][1];

        for(j=0;j<28;j++) {
            d = in[16+i+j*4];

            t = (signed char)d >> 4;
            s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
            s_2 = s_1;
            s_1 = av_clip_int16(s);
            *out = s_1;
            out += inc;
        }

        if (inc==2) { /* stereo */
            right->sample1 = s_1;
            right->sample2 = s_2;
            out -= 1;
        } else {
            left->sample1 = s_1;
            left->sample2 = s_2;
        }
    }
}


/* DK3 ADPCM support macro */
#define DK3_GET_NEXT_NIBBLE() \
    if (decode_top_nibble_next) \
    { \
        nibble = last_byte >> 4; \
        decode_top_nibble_next = 0; \
    } \
    else \
    { \
        last_byte = *src++; \
        if (src >= buf + buf_size) break; \
        nibble = last_byte & 0x0F; \
        decode_top_nibble_next = 1; \
    }

static int adpcm_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    ADPCMContext *c = avctx->priv_data;
    ADPCMChannelStatus *cs;
    int n, m, channel, i;
    int block_predictor[2];
    short *samples;
    short *samples_end;
    const uint8_t *src;
    int st; /* stereo */

    /* DK3 ADPCM accounting variables */
    unsigned char last_byte = 0;
    unsigned char nibble;
    int decode_top_nibble_next = 0;
    int diff_channel;

    /* EA ADPCM state variables */
    uint32_t samples_in_chunk;
    int32_t previous_left_sample, previous_right_sample;
    int32_t current_left_sample, current_right_sample;
    int32_t next_left_sample, next_right_sample;
    int32_t coeff1l, coeff2l, coeff1r, coeff2r;
    uint8_t shift_left, shift_right;
    int count1, count2;
    int coeff[2][2], shift[2];//used in EA MAXIS ADPCM

    if (!buf_size)
        return 0;

    //should protect all 4bit ADPCM variants
    //8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels
    //
    if(*data_size/4 < buf_size + 8)
        return -1;

    samples = data;
    samples_end= samples + *data_size/2;
    *data_size= 0;
    src = buf;

    st = avctx->channels == 2 ? 1 : 0;

    switch(avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_QT:
        n = buf_size - 2*avctx->channels;
        for (channel = 0; channel < avctx->channels; channel++) {
            cs = &(c->status[channel]);
            /* (pppppp) (piiiiiii) */

            /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
            cs->predictor = (*src++) << 8;
            cs->predictor |= (*src & 0x80);
            cs->predictor &= 0xFF80;

            /* sign extension */
            if(cs->predictor & 0x8000)
                cs->predictor -= 0x10000;

            cs->predictor = av_clip_int16(cs->predictor);

            cs->step_index = (*src++) & 0x7F;

            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }

            cs->step = step_table[cs->step_index];

            samples = (short*)data + channel;

            for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples) */
                *samples = adpcm_ima_expand_nibble(cs, src[0] & 0x0F, 3);
                samples += avctx->channels;
                *samples = adpcm_ima_expand_nibble(cs, src[0] >> 4  , 3);
                samples += avctx->channels;
                src ++;
            }
        }
        if (st)
            samples--;
        break;
    case CODEC_ID_ADPCM_IMA_WAV:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

//        samples_per_block= (block_align-4*channels)*8 / (bits_per_sample * channels) + 1;

        for(i=0; i<avctx->channels; i++){
            cs = &(c->status[i]);
            cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);

            cs->step_index = *src++;
            if (cs->step_index > 88){
                av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
                cs->step_index = 88;
            }
            if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */
        }

        while(src < buf + buf_size){
            for(m=0; m<4; m++){
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
                for(i=0; i<=st; i++)
                    *samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4  , 3);
                src++;
            }
            src += 4*st;
        }
        break;
    case CODEC_ID_ADPCM_4XM:
        cs = &(c->status[0]);
        c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
        if(st){
            c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
        }
        if (cs->step_index < 0) cs->step_index = 0;
        if (cs->step_index > 88) cs->step_index = 88;

        m= (buf_size - (src - buf))>>st;
        for(i=0; i<m; i++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
        }

        src += m<<st;

        break;
    case CODEC_ID_ADPCM_MS:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;
        n = buf_size - 7 * avctx->channels;
        if (n < 0)
            return -1;
        block_predictor[0] = av_clip(*src++, 0, 6);
        block_predictor[1] = 0;
        if (st)
            block_predictor[1] = av_clip(*src++, 0, 6);
        c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
        if (st){
            c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
        }
        c->status[0].coeff1 = AdaptCoeff1[block_predictor[0]];
        c->status[0].coeff2 = AdaptCoeff2[block_predictor[0]];
        c->status[1].coeff1 = AdaptCoeff1[block_predictor[1]];
        c->status[1].coeff2 = AdaptCoeff2[block_predictor[1]];

        c->status[0].sample1 = bytestream_get_le16(&src);
        if (st) c->status[1].sample1 = bytestream_get_le16(&src);
        c->status[0].sample2 = bytestream_get_le16(&src);
        if (st) c->status[1].sample2 = bytestream_get_le16(&src);

        *samples++ = c->status[0].sample2;
        if (st) *samples++ = c->status[1].sample2;
        *samples++ = c->status[0].sample1;
        if (st) *samples++ = c->status[1].sample1;
        for(;n>0;n--) {
            *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4  );
            *samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
            src ++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK4:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        c->status[0].predictor  = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = *src++;
        src++;
        *samples++ = c->status[0].predictor;
        if (st) {
            c->status[1].predictor  = (int16_t)bytestream_get_le16(&src);
            c->status[1].step_index = *src++;
            src++;
            *samples++ = c->status[1].predictor;
        }
        while (src < buf + buf_size) {

            /* take care of the top nibble (always left or mono channel) */
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                src[0] >> 4, 3);

            /* take care of the bottom nibble, which is right sample for
             * stereo, or another mono sample */
            if (st)
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            else
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);

            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_DK3:
        if (avctx->block_align != 0 && buf_size > avctx->block_align)
            buf_size = avctx->block_align;

        if(buf_size + 16 > (samples_end - samples)*3/8)
            return -1;

        c->status[0].predictor  = (int16_t)AV_RL16(src + 10);
        c->status[1].predictor  = (int16_t)AV_RL16(src + 12);
        c->status[0].step_index = src[14];
        c->status[1].step_index = src[15];
        /* sign extend the predictors */
        src += 16;
        diff_channel = c->status[1].predictor;

        /* the DK3_GET_NEXT_NIBBLE macro issues the break statement when
         * the buffer is consumed */
        while (1) {

            /* for this algorithm, c->status[0] is the sum channel and
             * c->status[1] is the diff channel */

            /* process the first predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the diff channel predictor */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[1], nibble, 3);

            /* process the first pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;

            /* process the second predictor of the sum channel */
            DK3_GET_NEXT_NIBBLE();
            adpcm_ima_expand_nibble(&c->status[0], nibble, 3);

            /* process the second pair of stereo PCM samples */
            diff_channel = (diff_channel + c->status[1].predictor) / 2;
            *samples++ = c->status[0].predictor + c->status[1].predictor;
            *samples++ = c->status[0].predictor - c->status[1].predictor;
        }
        break;
    case CODEC_ID_ADPCM_IMA_ISS:
        c->status[0].predictor  = (int16_t)AV_RL16(src + 0);
        c->status[0].step_index = src[2];
        src += 4;
        if(st) {
            c->status[1].predictor  = (int16_t)AV_RL16(src + 0);
            c->status[1].step_index = src[2];
            src += 4;
        }

        while (src < buf + buf_size) {

            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
            }

            src++;
        }
        break;
    case CODEC_ID_ADPCM_IMA_WS:
        /* no per-block initialization; just start decoding the data */
        while (src < buf + buf_size) {

            if (st) {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[1],
                    src[0] & 0x0F, 3);
            } else {
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] >> 4  , 3);
                *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                    src[0] & 0x0F, 3);
            }

            src++;
        }
        break;
    case CODEC_ID_ADPCM_XA:
        while (buf_size >= 128) {
            xa_decode(samples, src, &c->status[0], &c->status[1],
                avctx->channels);
            src += 128;
            samples += 28 * 8;
            buf_size -= 128;
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_EACS:
        samples_in_chunk = bytestream_get_le32(&src) >> (1-st);

        if (samples_in_chunk > buf_size-4-(8<<st)) {
            src += buf_size - 4;
            break;
        }

        for (i=0; i<=st; i++)
            c->status[i].step_index = bytestream_get_le32(&src);
        for (i=0; i<=st; i++)
            c->status[i].predictor  = bytestream_get_le32(&src);

        for (; samples_in_chunk; samples_in_chunk--, src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],  *src>>4,   3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
        }
        break;
    case CODEC_ID_ADPCM_IMA_EA_SEAD:
        for (; src < buf+buf_size; src++) {
            *samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
            *samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
        }
        break;
    case CODEC_ID_ADPCM_EA:
        if (buf_size < 4 || AV_RL32(src) >= ((buf_size - 12) * 2)) {
            src += buf_size;
            break;
        }
        samples_in_chunk = AV_RL32(src);
        src += 4;
        current_left_sample   = (int16_t)bytestream_get_le16(&src);
        previous_left_sample  = (int16_t)bytestream_get_le16(&src);
        current_right_sample  = (int16_t)bytestream_get_le16(&src);
        previous_right_sample = (int16_t)bytestream_get_le16(&src);

        for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
            coeff1l = ea_adpcm_table[ *src >> 4       ];
            coeff2l = ea_adpcm_table[(*src >> 4  ) + 4];
            coeff1r = ea_adpcm_table[*src & 0x0F];
            coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
            src++;

            shift_left  = (*src >> 4  ) + 8;
            shift_right = (*src & 0x0F) + 8;
            src++;

            for (count2 = 0; count2 < 28; count2++) {
                next_left_sample  = (int32_t)((*src & 0xF0) << 24) >> shift_left;
                next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right;
                src++;

                next_left_sample = (next_left_sample +
                    (current_left_sample * coeff1l) +
                    (previous_left_sample * coeff2l) + 0x80) >> 8;
                next_right_sample = (next_right_sample +
                    (current_right_sample * coeff1r) +
                    (previous_right_sample * coeff2r) + 0x80) >> 8;

                previous_left_sample = current_left_sample;
                current_left_sample = av_clip_int16(next_left_sample);
                previous_right_sample = current_right_sample;
                current_right_sample = av_clip_int16(next_right_sample);
                *samples++ = (unsigned short)current_left_sample;
                *samples++ = (unsigned short)current_right_sample;
            }
        }

        if (src - buf == buf_size - 2)
            src += 2; // Skip terminating 0x0000

        break;
    case CODEC_ID_ADPCM_EA_MAXIS_XA:
        for(channel = 0; channel < avctx->channels; channel++) {
            for (i=0; i<2; i++)
                coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
            shift[channel] = (*src & 0x0F) + 8;
            src++;
        }
        for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) {
            for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
                for(channel = 0; channel < avctx->channels; channel++) {
                    int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel];
                    sample = (sample +
                              c->status[channel].sample1 * coeff[channel][0] +
                              c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
                    c->status[channel].sample2 = c->status[channel].sample1;
                    c->status[channel].sample1 = av_clip_int16(sample);
                    *samples++ = c->status[channel].sample1;
                }
            }
            src+=avctx->channels;
        }
        break;
    case CODEC_ID_ADPCM_EA_R1:
    case CODEC_ID_ADPCM_EA_R2:
    case CODEC_ID_ADPCM_EA_R3: {
        /* channel numbering
           2chan: 0=fl, 1=fr
           4chan: 0=fl, 1=rl, 2=fr, 3=rr
           6chan: 0=fl, 1=c,  2=fr, 3=rl,  4=rr, 5=sub */
        const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
        int32_t previous_sample, current_sample, next_sample;
        int32_t coeff1, coeff2;
        uint8_t shift;
        unsigned int channel;
        uint16_t *samplesC;
        const uint8_t *srcC;
        const uint8_t *src_end = buf + buf_size;

        samples_in_chunk = (big_endian ? bytestream_get_be32(&src)
                                       : bytestream_get_le32(&src)) / 28;
        if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) ||
            28*samples_in_chunk*avctx->channels > samples_end-samples) {
            src += buf_size - 4;
            break;
        }

        for (channel=0; channel<avctx->channels; channel++) {
            int32_t offset = (big_endian ? bytestream_get_be32(&src)
                                         : bytestream_get_le32(&src))
                           + (avctx->channels-channel-1) * 4;

            if ((offset < 0) || (offset >= src_end - src - 4)) break;
            srcC  = src + offset;
            samplesC = samples + channel;

            if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
                current_sample  = (int16_t)bytestream_get_le16(&srcC);
                previous_sample = (int16_t)bytestream_get_le16(&srcC);
            } else {
                current_sample  = c->status[channel].predictor;
                previous_sample = c->status[channel].prev_sample;
            }

            for (count1=0; count1<samples_in_chunk; count1++) {
                if (*srcC == 0xEE) {  /* only seen in R2 and R3 */
                    srcC++;
                    if (srcC > src_end - 30*2) break;
                    current_sample  = (int16_t)bytestream_get_be16(&srcC);
                    previous_sample = (int16_t)bytestream_get_be16(&srcC);

                    for (count2=0; count2<28; count2++) {
                        *samplesC = (int16_t)bytestream_get_be16(&srcC);
                        samplesC += avctx->channels;
                    }
                } else {
                    coeff1 = ea_adpcm_table[ *srcC>>4     ];
                    coeff2 = ea_adpcm_table[(*srcC>>4) + 4];
                    shift = (*srcC++ & 0x0F) + 8;

                    if (srcC > src_end - 14) break;
                    for (count2=0; count2<28; count2++) {
                        if (count2 & 1)
                            next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift;
                        else
                            next_sample = (int32_t)((*srcC   & 0xF0) << 24) >> shift;

                        next_sample += (current_sample  * coeff1) +
                                       (previous_sample * coeff2);
                        next_sample = av_clip_int16(next_sample >> 8);

                        previous_sample = current_sample;
                        current_sample  = next_sample;
                        *samplesC = current_sample;
                        samplesC += avctx->channels;
                    }
                }
            }

            if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
                c->status[channel].predictor   = current_sample;
                c->status[channel].prev_sample = previous_sample;
            }
        }

        src = src + buf_size - (4 + 4*avctx->channels);
        samples += 28 * samples_in_chunk * avctx->channels;
        break;
    }
    case CODEC_ID_ADPCM_EA_XAS:
        if (samples_end-samples < 32*4*avctx->channels
            || buf_size < (4+15)*4*avctx->channels) {
            src += buf_size;
            break;
        }
        for (channel=0; channel<avctx->channels; channel++) {
            int coeff[2][4], shift[4];
            short *s2, *s = &samples[channel];
            for (n=0; n<4; n++, s+=32*avctx->channels) {
                for (i=0; i<2; i++)
                    coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
                shift[n] = (src[2]&0x0F) + 8;
                for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
                    s2[0] = (src[0]&0xF0) + (src[1]<<8);
            }

            for (m=2; m<32; m+=2) {
                s = &samples[m*avctx->channels + channel];
                for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
                    for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
                        int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n];
                        int pred  = s2[-1*avctx->channels] * coeff[0][n]
                                  + s2[-2*avctx->channels] * coeff[1][n];
                        s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
                    }
                }
            }
        }
        samples += 32*4*avctx->channels;
        break;
    case CODEC_ID_ADPCM_IMA_AMV:
    case CODEC_ID_ADPCM_IMA_SMJPEG:
        c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
        c->status[0].step_index = bytestream_get_le16(&src);

        if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
            src+=4;

        while (src < buf + buf_size) {
            char hi, lo;
            lo = *src & 0x0F;
            hi = *src >> 4;

            if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
                FFSWAP(char, hi, lo);

            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                lo, 3);
            *samples++ = adpcm_ima_expand_nibble(&c->status[0],
                hi, 3);
            src++;
        }
        break;
    case CODEC_ID_ADPCM_CT:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[1],
                    src[0] & 0x0F);
            } else {
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] >> 4);
                *samples++ = adpcm_ct_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_SBPRO_4:
    case CODEC_ID_ADPCM_SBPRO_3:
    case CODEC_ID_ADPCM_SBPRO_2:
        if (!c->status[0].step_index) {
            /* the first byte is a raw sample */
            *samples++ = 128 * (*src++ - 0x80);
            if (st)
                *samples++ = 128 * (*src++ - 0x80);
            c->status[0].step_index = 1;
        }
        if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
            while (src < buf + buf_size) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] >> 4, 4, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x0F, 4, 0);
                src++;
            }
        } else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
            while (src < buf + buf_size && samples + 2 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                     src[0] >> 5        , 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x07, 3, 0);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    src[0] & 0x03, 2, 0);
                src++;
            }
        } else {
            while (src < buf + buf_size && samples + 3 < samples_end) {
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                     src[0] >> 6        , 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    (src[0] >> 4) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
                    (src[0] >> 2) & 0x03, 2, 2);
                *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
                    src[0] & 0x03, 2, 2);
                src++;
            }
        }
        break;
    case CODEC_ID_ADPCM_SWF:
    {
        GetBitContext gb;
        const int *table;
        int k0, signmask, nb_bits, count;
        int size = buf_size*8;

        init_get_bits(&gb, buf, size);

        //read bits & initial values
        nb_bits = get_bits(&gb, 2)+2;
        //av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", nb_bits);
        table = swf_index_tables[nb_bits-2];
        k0 = 1 << (nb_bits-2);
        signmask = 1 << (nb_bits-1);

        while (get_bits_count(&gb) <= size - 22*avctx->channels) {
            for (i = 0; i < avctx->channels; i++) {
                *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
                c->status[i].step_index = get_bits(&gb, 6);
            }

            for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
                int i;

                for (i = 0; i < avctx->channels; i++) {
                    // similar to IMA adpcm
                    int delta = get_bits(&gb, nb_bits);
                    int step = step_table[c->status[i].step_index];
                    long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
                    int k = k0;

                    do {
                        if (delta & k)
                            vpdiff += step;
                        step >>= 1;
                        k >>= 1;
                    } while(k);
                    vpdiff += step;

                    if (delta & signmask)
                        c->status[i].predictor -= vpdiff;
                    else
                        c->status[i].predictor += vpdiff;

                    c->status[i].step_index += table[delta & (~signmask)];

                    c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
                    c->status[i].predictor = av_clip_int16(c->status[i].predictor);

                    *samples++ = c->status[i].predictor;
                    if (samples >= samples_end) {
                        av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
                        return -1;
                    }
                }
            }
        }
        src += buf_size;
        break;
    }
    case CODEC_ID_ADPCM_YAMAHA:
        while (src < buf + buf_size) {
            if (st) {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
                    src[0] >> 4  );
            } else {
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] & 0x0F);
                *samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
                    src[0] >> 4  );
            }
            src++;
        }
        break;
    case CODEC_ID_ADPCM_THP:
    {
        int table[2][16];
        unsigned int samplecnt;
        int prev[2][2];
        int ch;

        if (buf_size < 80) {
            av_log(avctx, AV_LOG_ERROR, "frame too small\n");
            return -1;
        }

        src+=4;
        samplecnt = bytestream_get_be32(&src);

        for (i = 0; i < 32; i++)
            table[0][i] = (int16_t)bytestream_get_be16(&src);

        /* Initialize the previous sample. */
        for (i = 0; i < 4; i++)
            prev[0][i] = (int16_t)bytestream_get_be16(&src);

        if (samplecnt >= (samples_end - samples) / (st + 1)) {
            av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
            return -1;
        }

        for (ch = 0; ch <= st; ch++) {
            samples = (unsigned short *) data + ch;

            /* Read in every sample for this channel. */
            for (i = 0; i < samplecnt / 14; i++) {
                int index = (*src >> 4) & 7;
                unsigned int exp = 28 - (*src++ & 15);
                int factor1 = table[ch][index * 2];
                int factor2 = table[ch][index * 2 + 1];

                /* Decode 14 samples. */
                for (n = 0; n < 14; n++) {
                    int32_t sampledat;
                    if(n&1) sampledat =  *src++    << 28;
                    else    sampledat = (*src&0xF0) << 24;

                    sampledat = ((prev[ch][0]*factor1
                                + prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
                    *samples = av_clip_int16(sampledat);
                    prev[ch][1] = prev[ch][0];
                    prev[ch][0] = *samples++;

                    /* In case of stereo, skip one sample, this sample
                       is for the other channel. */
                    samples += st;
                }
            }
        }

        /* In the previous loop, in case stereo is used, samples is
           increased exactly one time too often. */
        samples -= st;
        break;
    }

    default:
        return -1;
    }
    *data_size = (uint8_t *)samples - (uint8_t *)data;
    return src - buf;
}



#if CONFIG_ENCODERS
#define ADPCM_ENCODER(id,name,long_name_)           \
AVCodec ff_ ## name ## _encoder = {                 \
    #name,                                          \
    AVMEDIA_TYPE_AUDIO,                             \
    id,                                             \
    sizeof(ADPCMContext),                           \
    adpcm_encode_init,                              \
    adpcm_encode_frame,                             \
    adpcm_encode_close,                             \
    NULL,                                           \
    .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE}, \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_),  \
}
#else
#define ADPCM_ENCODER(id,name,long_name_)
#endif

#if CONFIG_DECODERS
#define ADPCM_DECODER(id,name,long_name_)           \
AVCodec ff_ ## name ## _decoder = {                 \
    #name,                                          \
    AVMEDIA_TYPE_AUDIO,                             \
    id,                                             \
    sizeof(ADPCMContext),                           \
    adpcm_decode_init,                              \
    NULL,                                           \
    NULL,                                           \
    adpcm_decode_frame,                             \
    .long_name = NULL_IF_CONFIG_SMALL(long_name_),  \
}
#else
#define ADPCM_DECODER(id,name,long_name_)
#endif

#define ADPCM_CODEC(id,name,long_name_)             \
    ADPCM_ENCODER(id,name,long_name_); ADPCM_DECODER(id,name,long_name_)

/* Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_CODEC  (CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_CODEC  (CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_CODEC  (CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP");
ADPCM_DECODER(CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA");
ADPCM_CODEC  (CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha");