libavcodec/adpcmenc.c
/*
 * Copyright (c) 2001-2003 The ffmpeg Project
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "put_bits.h"
#include "bytestream.h"
#include "adpcm.h"
#include "adpcm_data.h"

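/*
 * Descriptive note: this file implements the ADPCM encoders for the IMA WAV,
 * IMA QuickTime, Microsoft, Yamaha and Shockwave Flash (SWF) variants.  Each
 * 16-bit sample is reduced to a 4-bit nibble; an optional trellis
 * (Viterbi-style) search, enabled via avctx->trellis, trades encoding time
 * for lower quantization error.
 */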
typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

typedef struct ADPCMEncodeContext {
    ADPCMChannelStatus status[6];
    TrellisPath *paths;
    TrellisNode *node_buf;
    TrellisNode **nodep_buf;
    uint8_t *trellis_hash;
} ADPCMEncodeContext;

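/* The trellis path is flushed ("frozen") to the output every FREEZE_INTERVAL
 * samples, which bounds the amount of path history that has to be kept in
 * memory (see adpcm_compress_trellis below). */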
#define FREEZE_INTERVAL 128

static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    uint8_t *extradata;
    int i;
    if (avctx->channels > 2)
        return -1; /* only stereo or mono =) */

    if (avctx->trellis && (unsigned)avctx->trellis > 16U) {
        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
        return -1;
    }

    if (avctx->trellis) {
        int frontier  = 1 << avctx->trellis;
        int max_paths =  frontier * FREEZE_INTERVAL;
        FF_ALLOC_OR_GOTO(avctx, s->paths,
                         max_paths * sizeof(*s->paths), error);
        FF_ALLOC_OR_GOTO(avctx, s->node_buf,
                         2 * frontier * sizeof(*s->node_buf),  error);
        FF_ALLOC_OR_GOTO(avctx, s->nodep_buf,
                         2 * frontier * sizeof(*s->nodep_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->trellis_hash,
                         65536 * sizeof(*s->trellis_hash), error);
    }

    avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);

    switch (avctx->codec->id) {
    case CODEC_ID_ADPCM_IMA_WAV:
        /* each 16-bit sample gives one nibble
           and we have 4 bytes per channel overhead */
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 /
                            (4 * avctx->channels) + 1;
        /* it seems frame_size isn't taken into account by the caller...
           we have to buffer the samples :-( */
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_IMA_QT:
        avctx->frame_size  = 64;
        avctx->block_align = 34 * avctx->channels;
        break;
    case CODEC_ID_ADPCM_MS:
        /* each 16-bit sample gives one nibble
           and we have 7 bytes per channel overhead */
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 /
                             avctx->channels + 2;
        avctx->block_align    = BLKSIZE;
        avctx->extradata_size = 32;
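        /* MS ADPCM extradata: 16-bit samples-per-block, 16-bit coefficient
         * count (wNumCoef = 7), then the 7 predictor coefficient pairs,
         * 2 + 2 + 7 * 4 = 32 bytes in total. */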
        extradata = avctx->extradata = av_malloc(avctx->extradata_size);
        if (!extradata)
            goto error;
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7); /* wNumCoef */
        for (i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
        }
        break;
    case CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size  = BLKSIZE * avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    case CODEC_ID_ADPCM_SWF:
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
                   "22050 or 44100\n");
            goto error;
        }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    default:
        goto error;
    }

    avctx->coded_frame = avcodec_alloc_frame();
    if (!avctx->coded_frame)
        goto error;
    avctx->coded_frame->key_frame = 1;

    return 0;
error:
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);
    return -1;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    av_freep(&avctx->coded_frame);
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);

    return 0;
}

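/*
 * Quantize one sample for the IMA WAV/SWF encoders: the magnitude of the
 * prediction error is mapped to a 3-bit code relative to the current step
 * size (bit 3 carries the sign), and the predictor and step index are then
 * updated the same way the decoder will update them.
 *
 * Rough worked example (values from the shared IMA tables): with
 * step_index = 0 the step is 7; a delta of +20 gives
 * FFMIN(7, 20 * 4 / 7) = 7, so the nibble is 7, prev_sample grows by
 * 7 * 15 / 8 = 13, and step_index moves to 8 for the next sample.
 */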
static inline unsigned char adpcm_ima_compress_sample(ADPCMChannelStatus *c,
                                                      short sample)
{
    int delta  = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta) * 4 /
                       ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
    c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
                        ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index  = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
    return nibble;
}

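/*
 * Quantize one sample for the IMA QuickTime encoder.  Instead of the
 * division used above, the prediction error is matched bit by bit against
 * step, step / 2 and step / 4, mirroring how the QT decoder reconstructs
 * the difference (diff starts at step >> 3 so the rounding matches).
 */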
static inline unsigned char adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
                                                         short sample)
{
    int delta  = sample - c->prev_sample;
    int mask, step = ff_adpcm_step_table[c->step_index];
    int diff   = step >> 3;
    int nibble = 0;

    if (delta < 0) {
        nibble = 8;
        delta  = -delta;
    }

    for (mask = 4; mask;) {
        if (delta >= step) {
            nibble |= mask;
            delta  -= step;
            diff   += step;
        }
        step >>= 1;
        mask >>= 1;
    }

    if (nibble & 8)
        c->prev_sample -= diff;
    else
        c->prev_sample += diff;

    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index  = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);

    return nibble;
}

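/*
 * Quantize one sample for the MS ADPCM encoder.  The prediction is a
 * two-tap filter over the last two reconstructed samples (coeff1/coeff2 are
 * fixed-point with a /64 scale); the error is quantized by the adaptive step
 * idelta, which is then rescaled through ff_adpcm_AdaptationTable and
 * clamped to a minimum of 16.
 */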
static inline unsigned char adpcm_ms_compress_sample(ADPCMChannelStatus *c,
                                                     short sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) +
                (( c->sample2) * (c->coeff2))) / 64;

    nibble = sample - predictor;
    if (nibble >= 0)
        bias =  c->idelta / 2;
    else
        bias = -c->idelta / 2;

    nibble = (nibble + bias) / c->idelta;
    nibble = av_clip(nibble, -8, 7) & 0x0F;

    predictor += (signed)((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
    if (c->idelta < 16)
        c->idelta = 16;

    return nibble;
}

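/*
 * Quantize one sample for the Yamaha ADPCM encoder: same sign-and-magnitude
 * nibble as the IMA variant, but the step size itself is adapted
 * multiplicatively via ff_adpcm_yamaha_indexscale and kept in the range
 * [127, 24567].
 */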
static inline unsigned char adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
                                                         short sample)
{
    int nibble, delta;

    if (!c->step) {
        c->predictor = 0;
        c->step      = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;

    c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->predictor = av_clip_int16(c->predictor);
    c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
    c->step = av_clip(c->step, 127, 24567);

    return nibble;
}

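/*
 * Trellis (Viterbi-style) search over the nibble sequence: for every input
 * sample, up to frontier (1 << avctx->trellis) candidate encoder states are
 * kept, ranked by accumulated squared error (ssd) in a small heap.
 * Candidate nibbles are only tried in a narrow window around the nibble a
 * greedy encoder would pick, states that decode to the same sample value are
 * collapsed through trellis_hash, and every FREEZE_INTERVAL samples the
 * single best path is flushed to dst so the path buffer can be reused.
 * On return, the channel status is set to the state of the best surviving
 * node so that the next block continues from it.
 */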
static void adpcm_compress_trellis(AVCodecContext *avctx, const short *samples,
                                   uint8_t *dst, ADPCMChannelStatus *c, int n)
{
    //FIXME 6% faster if frontier is a compile-time constant
    ADPCMEncodeContext *s = avctx->priv_data;
    const int frontier = 1 << avctx->trellis;
    const int stride   = avctx->channels;
    const int version  = avctx->codec->id;
    TrellisPath *paths       = s->paths, *p;
    TrellisNode *node_buf    = s->node_buf;
    TrellisNode **nodep_buf  = s->nodep_buf;
    TrellisNode **nodes      = nodep_buf; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf + frontier;
    int pathn = 0, froze = -1, i, j, k, generation = 0;
    uint8_t *hash = s->trellis_hash;
    memset(hash, 0xff, 65536 * sizeof(*hash));

    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0]          = node_buf + frontier;
    nodes[0]->ssd     = 0;
    nodes[0]->path    = 0;
    nodes[0]->step    = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if (version == CODEC_ID_ADPCM_IMA_WAV ||
        version == CODEC_ID_ADPCM_IMA_QT  ||
        version == CODEC_ID_ADPCM_SWF)
        nodes[0]->sample1 = c->prev_sample;
    if (version == CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if (version == CODEC_ID_ADPCM_YAMAHA) {
        if (c->step == 0) {
            nodes[0]->step    = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step    = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for (i = 0; i < n; i++) {
        TrellisNode *t = node_buf + frontier * (i & 1);
        TrellisNode **u;
        int sample   = samples[i * stride];
        int heap_pos = 0;
        memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
        for (j = 0; j < frontier && nodes[j]; j++) {
            // higher j have higher ssd already, so they're likely
            // to yield a suboptimal next sample too
            const int range = (j < frontier / 2) ? 1 : 0;
            const int step  = nodes[j]->step;
            int nidx;
            if (version == CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) +
                                       (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div  = (sample - predictor) / step;
                const int nmin = av_clip(div - range, -8, 6);
                const int nmax = av_clip(div + range, -7, 7);
                for (nidx = nmin; nidx <= nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample   = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    int pos;\
                    TrellisNode *u;\
                    uint8_t *h;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    /* Check for wraparound, skip such samples completely. \
                     * Note, changing ssd to a 64 bit variable would be \
                     * simpler, avoiding this check, but it's slower on \
                     * x86 32 bit at the moment. */\
                    if (ssd < nodes[j]->ssd)\
                        goto next_##NAME;\
                    /* Collapse any two states with the same previous sample value. \
                     * One could also distinguish states by step and by 2nd to last
                     * sample, but the effects of that are negligible.
                     * Since nodes in the previous generation are iterated
                     * through a heap, they're roughly ordered from better to
                     * worse, but not strictly ordered. Therefore, an earlier
                     * node with the same sample value is better in most cases
                     * (and thus the current is skipped), but not strictly
                     * in all cases. Only skipping samples where ssd >=
                     * ssd of the earlier node with the same sample gives
                     * slightly worse quality, though, for some reason. */ \
                    h = &hash[(uint16_t) dec_sample];\
                    if (*h == generation)\
                        goto next_##NAME;\
                    if (heap_pos < frontier) {\
                        pos = heap_pos++;\
                    } else {\
                        /* Try to replace one of the leaf nodes with the new \
                         * one, but try a different slot each time. */\
                        pos = (frontier >> 1) +\
                              (heap_pos & ((frontier >> 1) - 1));\
                        if (ssd > nodes_next[pos]->ssd)\
                            goto next_##NAME;\
                        heap_pos++;\
                    }\
                    *h = generation;\
                    u  = nodes_next[pos];\
                    if (!u) {\
                        assert(pathn < FREEZE_INTERVAL << avctx->trellis);\
                        u = t++;\
                        nodes_next[pos] = u;\
                        u->path = pathn++;\
                    }\
                    u->ssd  = ssd;\
                    u->step = STEP_INDEX;\
                    u->sample2 = nodes[j]->sample1;\
                    u->sample1 = dec_sample;\
                    paths[u->path].nibble = nibble;\
                    paths[u->path].prev   = nodes[j]->path;\
                    /* Sift the newly inserted node up in the heap to \
                     * restore the heap property. */\
                    while (pos > 0) {\
                        int parent = (pos - 1) >> 1;\
                        if (nodes_next[parent]->ssd <= ssd)\
                            break;\
                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
                        pos = parent;\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16,
                               (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
                }
            } else if (version == CODEC_ID_ADPCM_IMA_WAV ||
                       version == CODEC_ID_ADPCM_IMA_QT  ||
                       version == CODEC_ID_ADPCM_SWF) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div - range, -7, 6);\
                int nmax = av_clip(div + range, -6, 7);\
                if (nmin <= 0)\
                    nmin--; /* distinguish -0 from +0 */\
                if (nmax < 0)\
                    nmax--;\
                for (nidx = nmin; nidx <= nmax; nidx++) {\
                    const int nibble = nidx < 0 ? 7 - nidx : nidx;\
                    int dec_sample = predictor +\
                                    (STEP_TABLE *\
                                     ff_adpcm_yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, ff_adpcm_step_table[step],
                           av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
            } else { //CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step,
                           av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
                                   127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u = nodes;
        nodes = nodes_next;
        nodes_next = u;

        generation++;
        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
            generation = 0;
        }

        // prevent overflow
        if (nodes[0]->ssd > (1 << 28)) {
            for (j = 1; j < frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if (i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for (k = i; k > froze; k--) {
                dst[k] = p->nibble;
                p = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for (i = n - 1; i > froze; i--) {
        dst[i] = p->nibble;
        p = &paths[p->prev];
    }

    c->predictor  = nodes[0]->sample1;
    c->sample1    = nodes[0]->sample1;
    c->sample2    = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step       = nodes[0]->step;
    c->idelta     = nodes[0]->step;
}

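/*
 * Encode one block of 16-bit native-endian samples from data into frame.
 * Each per-codec branch below first writes the block header (initial
 * predictor / step state), then packs one 4-bit nibble per sample, either
 * greedily or via adpcm_compress_trellis().  Returns the number of bytes
 * written to frame.
 */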
static int adpcm_encode_frame(AVCodecContext *avctx,
                              unsigned char *frame, int buf_size, void *data)
{
    int n, i, st;
    short *samples;
    unsigned char *dst;
    ADPCMEncodeContext *c = avctx->priv_data;
    uint8_t *buf;

    dst = frame;
    samples = (short *)data;
    st = avctx->channels == 2;
    /* n = (BLKSIZE - 4 * avctx->channels) / (2 * 8 * avctx->channels); */

    switch (avctx->codec->id) {
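    /* IMA WAV block: per channel a 4-byte header (little-endian initial
     * predictor, step index, one reserved byte), then the nibbles follow in
     * groups of 4 bytes (8 samples) per channel, channels interleaved. */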
    case CODEC_ID_ADPCM_IMA_WAV:
        n = avctx->frame_size / 8;
        c->status[0].prev_sample = (signed short)samples[0]; /* XXX */
        /* c->status[0].step_index = 0;
           XXX: not sure how to init the state machine */
        bytestream_put_le16(&dst, c->status[0].prev_sample);
        *dst++ = (unsigned char)c->status[0].step_index;
        *dst++ = 0; /* unknown */
        samples++;
        if (avctx->channels == 2) {
            c->status[1].prev_sample = (signed short)samples[0];
            /* c->status[1].step_index = 0; */
            bytestream_put_le16(&dst, c->status[1].prev_sample);
            *dst++ = (unsigned char)c->status[1].step_index;
            *dst++ = 0;
            samples++;
        }

        /* stereo: 4 bytes (8 samples) for left,
           4 bytes for right, 4 bytes left, ... */
        if (avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 8, error);
            adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n * 8);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples + 1, buf + n * 8,
                                       &c->status[1], n * 8);
            for (i = 0; i < n; i++) {
                *dst++ = buf[8 * i + 0] | (buf[8 * i + 1] << 4);
                *dst++ = buf[8 * i + 2] | (buf[8 * i + 3] << 4);
                *dst++ = buf[8 * i + 4] | (buf[8 * i + 5] << 4);
                *dst++ = buf[8 * i + 6] | (buf[8 * i + 7] << 4);
                if (avctx->channels == 2) {
                    uint8_t *buf1 = buf + n * 8;
                    *dst++ = buf1[8 * i + 0] | (buf1[8 * i + 1] << 4);
                    *dst++ = buf1[8 * i + 2] | (buf1[8 * i + 3] << 4);
                    *dst++ = buf1[8 * i + 4] | (buf1[8 * i + 5] << 4);
                    *dst++ = buf1[8 * i + 6] | (buf1[8 * i + 7] << 4);
                }
            }
            av_free(buf);
        } else {
            for (; n > 0; n--) {
                *dst    = adpcm_ima_compress_sample(&c->status[0], samples[0]);
                *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels    ]) << 4;
                *dst    = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 2]);
                *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 3]) << 4;
                *dst    = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 4]);
                *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 5]) << 4;
                *dst    = adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 6]);
                *dst++ |= adpcm_ima_compress_sample(&c->status[0], samples[avctx->channels * 7]) << 4;
                /* right channel */
                if (avctx->channels == 2) {
                    *dst    = adpcm_ima_compress_sample(&c->status[1], samples[1 ]);
                    *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[3 ]) << 4;
                    *dst    = adpcm_ima_compress_sample(&c->status[1], samples[5 ]);
                    *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[7 ]) << 4;
                    *dst    = adpcm_ima_compress_sample(&c->status[1], samples[9 ]);
                    *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[11]) << 4;
                    *dst    = adpcm_ima_compress_sample(&c->status[1], samples[13]);
                    *dst++ |= adpcm_ima_compress_sample(&c->status[1], samples[15]) << 4;
                }
                samples += 8 * avctx->channels;
            }
        }
        break;
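    /* IMA QT packet: per channel a 9-bit initial predictor (the top bits of
     * the previous sample) and a 7-bit step index, followed by 64 nibbles,
     * i.e. 2 + 32 = 34 bytes per channel, matching block_align above. */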
    case CODEC_ID_ADPCM_IMA_QT:
    {
        int ch, i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size * 8);

        for (ch = 0; ch < avctx->channels; ch++) {
            put_bits(&pb, 9, (c->status[ch].prev_sample + 0x10000) >> 7);
            put_bits(&pb, 7,  c->status[ch].step_index);
            if (avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, samples + ch, buf, &c->status[ch], 64);
                for (i = 0; i < 64; i++)
                    put_bits(&pb, 4, buf[i ^ 1]);
            } else {
                for (i = 0; i < 64; i += 2) {
                    int t1, t2;
                    t1 = adpcm_ima_qt_compress_sample(&c->status[ch],
                                                      samples[avctx->channels * (i + 0) + ch]);
                    t2 = adpcm_ima_qt_compress_sample(&c->status[ch],
                                                      samples[avctx->channels * (i + 1) + ch]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
            }
        }

        flush_put_bits(&pb);
        dst += put_bits_count(&pb) >> 3;
        break;
    }
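    /* SWF (Flash) audio data: a 2-bit code-size field (2 = 4-bit ADPCM),
     * then per channel a 16-bit first sample and a 6-bit step index,
     * followed by one 4-bit nibble per remaining sample. */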
    case CODEC_ID_ADPCM_SWF:
    {
        int i;
        PutBitContext pb;
        init_put_bits(&pb, dst, buf_size * 8);

        n = avctx->frame_size - 1;

        // store AdpcmCodeSize
        put_bits(&pb, 2, 2);    // set 4-bit flash adpcm format

        // init the encoder state
        for (i = 0; i < avctx->channels; i++) {
            // clip step so it fits 6 bits
            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = (signed short)samples[i];
        }

        if (avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
            adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
                                   &c->status[0], n);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
                                       buf + n, &c->status[1], n);
            for (i = 0; i < n; i++) {
                put_bits(&pb, 4, buf[i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[n + i]);
            }
            av_free(buf);
        } else {
            for (i = 1; i < avctx->frame_size; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
                         samples[avctx->channels * i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
                             samples[2 * i + 1]));
            }
        }
        flush_put_bits(&pb);
        dst += put_bits_count(&pb) >> 3;
        break;
    }
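    /* MS ADPCM block header, per channel: a predictor byte (coefficient
     * table index, always 0 here), 16-bit idelta, 16-bit sample1 and 16-bit
     * sample2; the remaining bytes carry two nibbles each. */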
    case CODEC_ID_ADPCM_MS:
        for (i = 0; i < avctx->channels; i++) {
            int predictor = 0;
            *dst++ = predictor;
            c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
            c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
        }
        for (i = 0; i < avctx->channels; i++) {
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;
            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for (i = 0; i < avctx->channels; i++)
            c->status[i].sample2 = *samples++;
        for (i = 0; i < avctx->channels; i++) {
            c->status[i].sample1 = *samples++;
            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for (i = 0; i < avctx->channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if (avctx->trellis > 0) {
            int n = avctx->block_align - 7 * avctx->channels;
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
            if (avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                for (i = 0; i < n; i += 2)
                    *dst++ = (buf[i] << 4) | buf[i + 1];
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,     &c->status[0], n);
                adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n);
                for (i = 0; i < n; i++)
                    *dst++ = (buf[i] << 4) | buf[n + i];
            }
            av_free(buf);
        } else {
            for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
                int nibble;
                nibble  = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
                nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
                *dst++  = nibble;
            }
        }
        break;
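    /* Yamaha ADPCM has no block header: the samples are emitted directly as
     * nibble pairs, low nibble first. */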
    case CODEC_ID_ADPCM_YAMAHA:
        n = avctx->frame_size / 2;
        if (avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error);
            n *= 2;
            if (avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n);
                for (i = 0; i < n; i += 2)
                    *dst++ = buf[i] | (buf[i + 1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,     &c->status[0], n);
                adpcm_compress_trellis(avctx, samples + 1, buf + n, &c->status[1], n);
                for (i = 0; i < n; i++)
                    *dst++ = buf[i] | (buf[n + i] << 4);
            }
            av_free(buf);
        } else
            for (n *= avctx->channels; n > 0; n--) {
                int nibble;
                nibble  = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
                *dst++  = nibble;
            }
        break;
    default:
    error:
        return -1;
    }
    return dst - frame;
}

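/* Register one AVCodec per ADPCM flavour; they all share the same
 * init/encode/close callbacks and signed 16-bit input format. */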
#define ADPCM_ENCODER(id_, name_, long_name_)               \
AVCodec ff_ ## name_ ## _encoder = {                        \
    .name           = #name_,                               \
    .type           = AVMEDIA_TYPE_AUDIO,                   \
    .id             = id_,                                  \
    .priv_data_size = sizeof(ADPCMEncodeContext),           \
    .init           = adpcm_encode_init,                    \
    .encode         = adpcm_encode_frame,                   \
    .close          = adpcm_encode_close,                   \
    .sample_fmts    = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,   \
                                                      AV_SAMPLE_FMT_NONE}, \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),     \
}

ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt,   "ADPCM IMA QuickTime");
ADPCM_ENCODER(CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_ENCODER(CODEC_ID_ADPCM_MS, adpcm_ms,           "ADPCM Microsoft");
ADPCM_ENCODER(CODEC_ID_ADPCM_SWF, adpcm_swf,         "ADPCM Shockwave Flash");
ADPCM_ENCODER(CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha,   "ADPCM Yamaha");