1 /* libFLAC - Free Lossless Audio Codec library
2 * Copyright (C) 2000,2001,2002,2003,2004,2005,2006,2007 Josh Coalson
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * - Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * - Neither the name of the Xiph.org Foundation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */

32 #if HAVE_CONFIG_H
33 #  include <config.h>
34 #endif

36 #include <math.h>
37 #include "FLAC/assert.h"
38 #include "FLAC/format.h"
39 #include "private/bitmath.h"
40 #include "private/lpc.h"
41 #if defined DEBUG || defined FLAC__OVERFLOW_DETECT || defined FLAC__OVERFLOW_DETECT_VERBOSE
42 #include <stdio.h>
43 #endif
45 #ifndef FLAC__INTEGER_ONLY_LIBRARY

47 #ifndef M_LN2
48 /* math.h in VC++ doesn't seem to have this (how Microsoft is that?) */
49 #define M_LN2 0.69314718055994530942
50 #endif
52 /* OPT: #undef'ing this may improve the speed on some architectures */
53 #define FLAC__LPC_UNROLLED_FILTER_LOOPS
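/*
 * The four filter routines below each have two interchangeable bodies chosen
 * by the preprocessor: a generic, order-agnostic loop (also compiled when
 * FLAC__OVERFLOW_DETECT is defined, since it is easier to instrument), and a
 * set of hand-unrolled loops, one per order up to 12 (the subset limit),
 * with a fall-through switch covering orders 13..32.
 */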
56 void FLAC__lpc_window_data(const FLAC__int32 in[], const FLAC__real window[], FLAC__real out[], unsigned data_len)
57 {
58 unsigned i;
59 for(i = 0; i < data_len; i++)
60 out[i] = in[i] * window[i];
61 }
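/*
 * Windowing is the first step of LPC analysis on the encoder side: the
 * integer samples are multiplied by a caller-supplied taper before the
 * autocorrelation below, which reduces the bias that block edges would
 * otherwise introduce into the fitted predictor.  A typical choice is a
 * Hann-style taper, e.g. window[n] = 0.5 - 0.5*cos(2*PI*n/(N-1)).
 */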
63 void FLAC__lpc_compute_autocorrelation(const FLAC__real data[], unsigned data_len, unsigned lag, FLAC__real autoc[])
64 {
65 /* a readable, but slower, version */
66 #if 0
67 FLAC__real d;
68 unsigned i;

70 FLAC__ASSERT(lag > 0);
71 FLAC__ASSERT(lag <= data_len);

73 /*
74 * Technically we should subtract the mean first like so:
75 *   for(i = 0; i < data_len; i++)
76 *     data[i] -= mean;
77 * but it appears not to make enough of a difference to matter, and
78 * most signals are already closely centered around zero
79 */
80 while(lag--) {
81 for(i = lag, d = 0.0; i < data_len; i++)
82 d += data[i] * data[i - lag];
83 autoc[lag] = d;
84 }
85 #endif

87 /*
88 * this version tends to run faster because of better data locality
89 * ('data_len' is usually much larger than 'lag')
90 */
91 FLAC__real d;
92 unsigned sample, coeff;
93 const unsigned limit = data_len - lag;
95 FLAC__ASSERT(lag > 0);
96 FLAC__ASSERT(lag <= data_len);
98 for(coeff = 0; coeff < lag; coeff++)
99 autoc[coeff] = 0.0;
100 for(sample = 0; sample <= limit; sample++) {
101 d = data[sample];
102 for(coeff = 0; coeff < lag; coeff++)
103 autoc[coeff] += d * data[sample+coeff];
104 }
105 for(; sample < data_len; sample++) {
106 d = data[sample];
107 for(coeff = 0; coeff < data_len - sample; coeff++)
108 autoc[coeff] += d * data[sample+coeff];
109 }
110 }
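/*
 * On return autoc[k] holds sum_i data[i]*data[i+k] over the windowed block,
 * so autoc[0] is the block's energy and autoc[k] measures self-similarity at
 * a lag of k samples.  The split into two outer loops only exists so that the
 * last 'lag' samples, where fewer than 'lag' products are available, do not
 * read past the end of the block; the work is O(data_len * lag) either way.
 */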
112 void FLAC__lpc_compute_lp_coefficients(const FLAC__real autoc[], unsigned *max_order, FLAC__real lp_coeff[][FLAC__MAX_LPC_ORDER], FLAC__double error[])
113 {
114 unsigned i, j;
115 FLAC__double r, err, ref[FLAC__MAX_LPC_ORDER], lpc[FLAC__MAX_LPC_ORDER];

117 FLAC__ASSERT(0 != max_order);
118 FLAC__ASSERT(0 < *max_order);
119 FLAC__ASSERT(*max_order <= FLAC__MAX_LPC_ORDER);
120 FLAC__ASSERT(autoc[0] != 0.0);

122 err = autoc[0];

124 for(i = 0; i < *max_order; i++) {
125 /* Sum up this iteration's reflection coefficient. */
126 r = -autoc[i+1];
127 for(j = 0; j < i; j++)
128 r -= lpc[j] * autoc[i-j];
129 ref[i] = (r/=err);

131 /* Update LPC coefficients and total error. */
132 lpc[i]=r;
133 for(j = 0; j < (i>>1); j++) {
134 FLAC__double tmp = lpc[j];
135 lpc[j] += r * lpc[i-1-j];
136 lpc[i-1-j] += r * tmp;
137 }
138 if(i & 1)
139 lpc[j] += lpc[j] * r;

141 err *= (1.0 - r * r);

143 /* save this order */
144 for(j = 0; j <= i; j++)
145 lp_coeff[i][j] = (FLAC__real)(-lpc[j]); /* negate FIR filter coeff to get predictor coeff */
146 error[i] = err;

148 /* see SF bug #1601812 http://sourceforge.net/tracker/index.php?func=detail&aid=1601812&group_id=13478&atid=113478 */
149 if(err == 0.0) {
150 *max_order = i+1;
151 return;
152 }
153 }
154 }
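/*
 * The loop above is the Levinson-Durbin recursion: it solves the normal
 * equations of linear prediction one order at a time, reusing the order-i
 * solution to build order i+1, and records the (FIR-negated) coefficients
 * and the remaining prediction error for every order up to *max_order.  As a
 * small sanity check, the order-1 result reduces to
 *   lp_coeff[0][0] = autoc[1] / autoc[0]
 * i.e. the one-tap predictor is the lag-1 correlation normalized by the
 * signal energy, with error[0] = autoc[0] * (1 - (autoc[1]/autoc[0])^2).
 */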
156 int FLAC__lpc_quantize_coefficients(const FLAC__real lp_coeff[], unsigned order, unsigned precision, FLAC__int32 qlp_coeff[], int *shift)
157 {
158 unsigned i;
159 FLAC__double cmax;
160 FLAC__int32 qmax, qmin;

162 FLAC__ASSERT(precision > 0);
163 FLAC__ASSERT(precision >= FLAC__MIN_QLP_COEFF_PRECISION);

165 /* drop one bit for the sign; from here on out we consider only |lp_coeff[i]| */
166 precision--;
167 qmax = 1 << precision;
168 qmin = -qmax;
169 qmax--;

171 /* calc cmax = max( |lp_coeff[i]| ) */
172 cmax = 0.0;
173 for(i = 0; i < order; i++) {
174 const FLAC__double d = fabs(lp_coeff[i]);
175 if(d > cmax)
176 cmax = d;
177 }

179 if(cmax <= 0.0) {
180 /* => coefficients are all 0, which means our constant-detect didn't work */
181 return 2;
182 }
183 else {
184 const int max_shiftlimit = (1 << (FLAC__SUBFRAME_LPC_QLP_SHIFT_LEN-1)) - 1;
185 const int min_shiftlimit = -max_shiftlimit - 1;
186 int log2cmax;

188 (void)frexp(cmax, &log2cmax);
189 log2cmax--;
190 *shift = (int)precision - log2cmax - 1;

192 if(*shift > max_shiftlimit)
193 *shift = max_shiftlimit;
194 else if(*shift < min_shiftlimit)
195 return 1;
196 }
198 if(*shift >= 0) {
199 FLAC__double error = 0.0;
200 FLAC__int32 q;
201 for(i = 0; i < order; i++) {
202 error += lp_coeff[i] * (1 << *shift);
203 #if 1 /* unfortunately lround() is C99 */
204 if(error >= 0.0)
205 q = (FLAC__int32)(error + 0.5);
206 else
207 q = (FLAC__int32)(error - 0.5);
208 #else
209 q = lround(error);
210 #endif
211 #ifdef FLAC__OVERFLOW_DETECT
212 if(q > qmax+1) /* we expect q==qmax+1 occasionally due to rounding */
213 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q>qmax %d>%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmax,*shift,cmax,precision+1,i,lp_coeff[i]);
214 else if(q < qmin)
215 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q<qmin %d<%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmin,*shift,cmax,precision+1,i,lp_coeff[i]);
216 #endif
217 if(q > qmax)
218 q = qmax;
219 else if(q < qmin)
220 q = qmin;
221 error -= q;
222 qlp_coeff[i] = q;
223 }
224 }
225 /* negative shift is very rare but due to a design flaw, negative shift is
226 * a NOP in the decoder, so it must be handled specially by scaling down
227 * the coefficients
228 */
229 else {
230 const int nshift = -(*shift);
231 FLAC__double error = 0.0;
232 FLAC__int32 q;
233 #ifdef DEBUG
234 fprintf(stderr,"FLAC__lpc_quantize_coefficients: negative shift=%d order=%u cmax=%f\n", *shift, order, cmax);
235 #endif
236 for(i = 0; i < order; i++) {
237 error += lp_coeff[i] / (1 << nshift);
238 #if 1 /* unfortunately lround() is C99 */
239 if(error >= 0.0)
240 q = (FLAC__int32)(error + 0.5);
241 else
242 q = (FLAC__int32)(error - 0.5);
243 #else
244 q = lround(error);
245 #endif
246 #ifdef FLAC__OVERFLOW_DETECT
247 if(q > qmax+1) /* we expect q==qmax+1 occasionally due to rounding */
248 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q>qmax %d>%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmax,*shift,cmax,precision+1,i,lp_coeff[i]);
249 else if(q < qmin)
250 fprintf(stderr,"FLAC__lpc_quantize_coefficients: quantizer overflow: q<qmin %d<%d shift=%d cmax=%f precision=%u lpc[%u]=%f\n",q,qmin,*shift,cmax,precision+1,i,lp_coeff[i]);
251 #endif
252 if(q > qmax)
253 q = qmax;
254 else if(q < qmin)
255 q = qmin;
256 error -= q;
257 qlp_coeff[i] = q;
258 }
259 *shift = 0;
260 }

262 return 0;
263 }
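/*
 * Quantization maps each real coefficient to a signed integer with
 * 'precision' bits plus a single block-wide shift: the decoder later computes
 * (sum of qlp_coeff[j]*history[j]) >> shift.  The running 'error' term feeds
 * each coefficient's rounding error into the next one (simple error
 * feedback), which keeps the quantized filter's overall response close to
 * the ideal one.  For example, with 15 bits of precision and a resulting
 * shift of 14, a coefficient of 0.6 becomes round(0.6 * 16384) = 9830.
 */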
265 void FLAC__lpc_compute_residual_from_qlp_coefficients(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
266 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
267 {
268 FLAC__int64 sumo;
269 unsigned i, j;
270 FLAC__int32 sum;
271 const FLAC__int32 *history;
273 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
274 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
275 for(i=0;i<order;i++)
276 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
277 fprintf(stderr,"\n");
278 #endif
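/*
 * When FLAC__OVERFLOW_DETECT is defined this generic loop is used instead of
 * the unrolled filters below: 'sumo' shadows the 32-bit accumulator in 64
 * bits so that any intermediate value that no longer fits in an FLAC__int32
 * can be reported on stderr during development rather than silently wrapping.
 */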
279 FLAC__ASSERT(order > 0);
281 for(i = 0; i < data_len; i++) {
282 sumo = 0;
283 sum = 0;
284 history = data;
285 for(j = 0; j < order; j++) {
286 sum += qlp_coeff[j] * (*(--history));
287 sumo += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*history);
288 #if defined _MSC_VER
289 if(sumo > 2147483647I64 || sumo < -2147483648I64)
290 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%I64d\n",i,j,qlp_coeff[j],*history,sumo);
291 #else
292 if(sumo > 2147483647ll || sumo < -2147483648ll)
293 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%lld\n",i,j,qlp_coeff[j],*history,(long long)sumo);
294 #endif
295 }
296 *(residual++) = *(data++) - (sum >> lp_quantization);
297 }
299 /* Here's a slower but clearer version:
300 for(i = 0; i < data_len; i++) {
301 FLAC__int64 sum = 0;
302 for(j = 0; j < order; j++)
303 sum += qlp_coeff[j] * data[i-j-1];
304 residual[i] = data[i] - (sum >> lp_quantization);
305 }
306 */
307 }
308 #else /* fully unrolled version for normal use */
309 {
310 int i;
311 FLAC__int32 sum;
313 FLAC__ASSERT(order > 0);
314 FLAC__ASSERT(order <= 32);

316 /*
317 * We do unique versions up to 12th order since that's the subset limit.
318 * Also they are roughly ordered to match frequency of occurrence to
319 * minimize branching.
320 */
321 if(order <= 12) {
322 if(order > 8) {
323 if(order > 10) {
324 if(order == 12) {
325 for(i = 0; i < (int)data_len; i++) {
327 sum += qlp_coeff[11] * data[i-12];
328 sum += qlp_coeff[10] * data[i-11];
329 sum += qlp_coeff[9] * data[i-10];
330 sum += qlp_coeff[8] * data[i-9];
331 sum += qlp_coeff[7] * data[i-8];
332 sum += qlp_coeff[6] * data[i-7];
333 sum += qlp_coeff[5] * data[i-6];
334 sum += qlp_coeff[4] * data[i-5];
335 sum += qlp_coeff[3] * data[i-4];
336 sum += qlp_coeff[2] * data[i-3];
337 sum += qlp_coeff[1] * data[i-2];
338 sum += qlp_coeff[0] * data[i-1];
339 residual[i] = data[i] - (sum >> lp_quantization);
342 else { /* order == 11 */
343 for(i = 0; i < (int)data_len; i++) {
345 sum += qlp_coeff[10] * data[i-11];
346 sum += qlp_coeff[9] * data[i-10];
347 sum += qlp_coeff[8] * data[i-9];
348 sum += qlp_coeff[7] * data[i-8];
349 sum += qlp_coeff[6] * data[i-7];
350 sum += qlp_coeff[5] * data[i-6];
351 sum += qlp_coeff[4] * data[i-5];
352 sum += qlp_coeff[3] * data[i-4];
353 sum += qlp_coeff[2] * data[i-3];
354 sum += qlp_coeff[1] * data[i-2];
355 sum += qlp_coeff[0] * data[i-1];
356 residual[i] = data[i] - (sum >> lp_quantization);
362 for(i = 0; i < (int)data_len; i++) {
364 sum += qlp_coeff[9] * data[i-10];
365 sum += qlp_coeff[8] * data[i-9];
366 sum += qlp_coeff[7] * data[i-8];
367 sum += qlp_coeff[6] * data[i-7];
368 sum += qlp_coeff[5] * data[i-6];
369 sum += qlp_coeff[4] * data[i-5];
370 sum += qlp_coeff[3] * data[i-4];
371 sum += qlp_coeff[2] * data[i-3];
372 sum += qlp_coeff[1] * data[i-2];
373 sum += qlp_coeff[0] * data[i-1];
374 residual[i] = data[i] - (sum >> lp_quantization);
377 else { /* order == 9 */
378 for(i = 0; i < (int)data_len; i++) {
380 sum += qlp_coeff[8] * data[i-9];
381 sum += qlp_coeff[7] * data[i-8];
382 sum += qlp_coeff[6] * data[i-7];
383 sum += qlp_coeff[5] * data[i-6];
384 sum += qlp_coeff[4] * data[i-5];
385 sum += qlp_coeff[3] * data[i-4];
386 sum += qlp_coeff[2] * data[i-3];
387 sum += qlp_coeff[1] * data[i-2];
388 sum += qlp_coeff[0] * data[i-1];
389 residual[i] = data[i] - (sum >> lp_quantization);
397 for(i = 0; i < (int)data_len; i++) {
399 sum += qlp_coeff[7] * data[i-8];
400 sum += qlp_coeff[6] * data[i-7];
401 sum += qlp_coeff[5] * data[i-6];
402 sum += qlp_coeff[4] * data[i-5];
403 sum += qlp_coeff[3] * data[i-4];
404 sum += qlp_coeff[2] * data[i-3];
405 sum += qlp_coeff[1] * data[i-2];
406 sum += qlp_coeff[0] * data[i-1];
407 residual[i] = data[i] - (sum >> lp_quantization);
410 else { /* order == 7 */
411 for(i = 0; i < (int)data_len; i++) {
413 sum += qlp_coeff[6] * data[i-7];
414 sum += qlp_coeff[5] * data[i-6];
415 sum += qlp_coeff[4] * data[i-5];
416 sum += qlp_coeff[3] * data[i-4];
417 sum += qlp_coeff[2] * data[i-3];
418 sum += qlp_coeff[1] * data[i-2];
419 sum += qlp_coeff[0] * data[i-1];
420 residual[i] = data[i] - (sum >> lp_quantization);
426 for(i = 0; i < (int)data_len; i++) {
428 sum += qlp_coeff[5] * data[i-6];
429 sum += qlp_coeff[4] * data[i-5];
430 sum += qlp_coeff[3] * data[i-4];
431 sum += qlp_coeff[2] * data[i-3];
432 sum += qlp_coeff[1] * data[i-2];
433 sum += qlp_coeff[0] * data[i-1];
434 residual[i] = data[i] - (sum >> lp_quantization);
437 else { /* order == 5 */
438 for(i = 0; i < (int)data_len; i++) {
440 sum += qlp_coeff[4] * data[i-5];
441 sum += qlp_coeff[3] * data[i-4];
442 sum += qlp_coeff[2] * data[i-3];
443 sum += qlp_coeff[1] * data[i-2];
444 sum += qlp_coeff[0] * data[i-1];
445 residual[i] = data[i] - (sum >> lp_quantization);
453 for(i = 0; i < (int)data_len; i++) {
455 sum += qlp_coeff[3] * data[i-4];
456 sum += qlp_coeff[2] * data[i-3];
457 sum += qlp_coeff[1] * data[i-2];
458 sum += qlp_coeff[0] * data[i-1];
459 residual[i] = data[i] - (sum >> lp_quantization);
462 else { /* order == 3 */
463 for(i = 0; i < (int)data_len; i++) {
465 sum += qlp_coeff[2] * data[i-3];
466 sum += qlp_coeff[1] * data[i-2];
467 sum += qlp_coeff[0] * data[i-1];
468 residual[i] = data[i] - (sum >> lp_quantization);
474 for(i = 0; i < (int)data_len; i++) {
476 sum += qlp_coeff[1] * data[i-2];
477 sum += qlp_coeff[0] * data[i-1];
478 residual[i] = data[i] - (sum >> lp_quantization);
481 else { /* order == 1 */
482 for(i = 0; i < (int)data_len; i++)
483 residual[i] = data[i] - ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
488 else { /* order > 12 */
489 for(i = 0; i < (int)data_len; i++) {
490 sum = 0;
491 switch(order) {
492 case 32: sum += qlp_coeff[31] * data[i-32];
493 case 31: sum += qlp_coeff[30] * data[i-31];
494 case 30: sum += qlp_coeff[29] * data[i-30];
495 case 29: sum += qlp_coeff[28] * data[i-29];
496 case 28: sum += qlp_coeff[27] * data[i-28];
497 case 27: sum += qlp_coeff[26] * data[i-27];
498 case 26: sum += qlp_coeff[25] * data[i-26];
499 case 25: sum += qlp_coeff[24] * data[i-25];
500 case 24: sum += qlp_coeff[23] * data[i-24];
501 case 23: sum += qlp_coeff[22] * data[i-23];
502 case 22: sum += qlp_coeff[21] * data[i-22];
503 case 21: sum += qlp_coeff[20] * data[i-21];
504 case 20: sum += qlp_coeff[19] * data[i-20];
505 case 19: sum += qlp_coeff[18] * data[i-19];
506 case 18: sum += qlp_coeff[17] * data[i-18];
507 case 17: sum += qlp_coeff[16] * data[i-17];
508 case 16: sum += qlp_coeff[15] * data[i-16];
509 case 15: sum += qlp_coeff[14] * data[i-15];
510 case 14: sum += qlp_coeff[13] * data[i-14];
511 case 13: sum += qlp_coeff[12] * data[i-13];
512 sum += qlp_coeff[11] * data[i-12];
513 sum += qlp_coeff[10] * data[i-11];
514 sum += qlp_coeff[ 9] * data[i-10];
515 sum += qlp_coeff[ 8] * data[i- 9];
516 sum += qlp_coeff[ 7] * data[i- 8];
517 sum += qlp_coeff[ 6] * data[i- 7];
518 sum += qlp_coeff[ 5] * data[i- 6];
519 sum += qlp_coeff[ 4] * data[i- 5];
520 sum += qlp_coeff[ 3] * data[i- 4];
521 sum += qlp_coeff[ 2] * data[i- 3];
522 sum += qlp_coeff[ 1] * data[i- 2];
523 sum += qlp_coeff[ 0] * data[i- 1];
524 }
525 residual[i] = data[i] - (sum >> lp_quantization);
526 }
527 }
528 }
529 #endif
531 void FLAC__lpc_compute_residual_from_qlp_coefficients_wide(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
532 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
533 {
534 unsigned i, j;
535 FLAC__int64 sum;
536 const FLAC__int32 *history;
538 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
539 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
540 for(i=0;i<order;i++)
541 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
542 fprintf(stderr,"\n");
543 #endif
544 FLAC__ASSERT(order > 0);
546 for(i = 0; i < data_len; i++) {
547 sum = 0;
548 history = data;
549 for(j = 0; j < order; j++)
550 sum += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*(--history));
551 if(FLAC__bitmath_silog2_wide(sum >> lp_quantization) > 32) {
552 #if defined _MSC_VER
553 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, sum=%I64d\n", i, sum >> lp_quantization);
554 #else
555 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, sum=%lld\n", i, (long long)(sum >> lp_quantization));
556 #endif
557 break;
558 }
559 if(FLAC__bitmath_silog2_wide((FLAC__int64)(*data) - (sum >> lp_quantization)) > 32) {
560 #if defined _MSC_VER
561 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, data=%d, sum=%I64d, residual=%I64d\n", i, *data, sum >> lp_quantization, (FLAC__int64)(*data) - (sum >> lp_quantization));
562 #else
563 fprintf(stderr,"FLAC__lpc_compute_residual_from_qlp_coefficients_wide: OVERFLOW, i=%u, data=%d, sum=%lld, residual=%lld\n", i, *data, (long long)(sum >> lp_quantization), (long long)((FLAC__int64)(*data) - (sum >> lp_quantization)));
564 #endif
565 break;
566 }
567 *(residual++) = *(data++) - (FLAC__int32)(sum >> lp_quantization);
568 }
569 }
570 #else /* fully unrolled version for normal use */
571 {
572 int i;
573 FLAC__int64 sum;
575 FLAC__ASSERT(order > 0);
576 FLAC__ASSERT(order <= 32);

578 /*
579 * We do unique versions up to 12th order since that's the subset limit.
580 * Also they are roughly ordered to match frequency of occurrence to
581 * minimize branching.
582 */
583 if(order <= 12) {
584 if(order > 8) {
585 if(order > 10) {
586 if(order == 12) {
587 for(i = 0; i < (int)data_len; i++) {
589 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
590 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
591 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
592 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
593 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
594 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
595 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
596 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
597 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
598 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
599 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
600 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
601 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
604 else { /* order == 11 */
605 for(i = 0; i < (int)data_len; i++) {
607 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
608 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
609 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
610 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
611 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
612 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
613 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
614 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
615 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
616 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
617 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
618 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
624 for(i = 0; i < (int)data_len; i++) {
626 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
627 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
628 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
629 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
630 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
631 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
632 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
633 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
634 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
635 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
636 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
639 else { /* order == 9 */
640 for(i = 0; i < (int)data_len; i++) {
642 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
643 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
644 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
645 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
646 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
647 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
648 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
649 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
650 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
651 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
659 for(i = 0; i < (int)data_len; i++) {
661 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
662 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
663 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
664 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
665 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
666 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
667 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
668 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
669 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
672 else { /* order == 7 */
673 for(i = 0; i < (int)data_len; i++) {
675 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
676 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
677 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
678 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
679 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
680 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
681 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
682 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
688 for(i = 0; i < (int)data_len; i++) {
690 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
691 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
692 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
693 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
694 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
695 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
696 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
699 else { /* order == 5 */
700 for(i = 0; i < (int)data_len; i++) {
702 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
703 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
704 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
705 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
706 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
707 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
715 for(i = 0; i < (int)data_len; i++) {
717 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
718 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
719 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
720 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
721 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
724 else { /* order == 3 */
725 for(i = 0; i < (int)data_len; i++) {
727 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
728 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
729 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
730 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
736 for(i = 0; i < (int)data_len; i++) {
738 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
739 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
740 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
743 else { /* order == 1 */
744 for(i = 0; i < (int)data_len; i++)
745 residual[i] = data[i] - (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization);
750 else { /* order > 12 */
751 for(i = 0; i < (int)data_len; i++) {
752 sum = 0;
753 switch(order) {
754 case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32];
755 case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31];
756 case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30];
757 case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29];
758 case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28];
759 case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27];
760 case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26];
761 case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25];
762 case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24];
763 case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23];
764 case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22];
765 case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21];
766 case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20];
767 case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19];
768 case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18];
769 case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17];
770 case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16];
771 case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15];
772 case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14];
773 case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13];
774 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
775 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
776 sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10];
777 sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9];
778 sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8];
779 sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7];
780 sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6];
781 sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5];
782 sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4];
783 sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3];
784 sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2];
785 sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1];
786 }
787 residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
788 }
789 }
790 }
791 #endif
793 #endif /* !defined FLAC__INTEGER_ONLY_LIBRARY */
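/*
 * The two FLAC__lpc_restore_signal*() routines below are the decoder-side
 * inverses of the residual computations above: they rebuild each sample as
 * data[i] = residual[i] + ((sum of qlp_coeff[j]*data[i-j-1]) >> lp_quantization).
 * They sit outside the FLAC__INTEGER_ONLY_LIBRARY guard because decoding
 * needs no floating point.  The _wide variant accumulates in FLAC__int64 and
 * is meant for streams where sample depth plus coefficient precision could
 * overflow a 32-bit accumulator (e.g. 24-bit input at high orders).
 */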
795 void FLAC__lpc_restore_signal(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[])
796 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
797 {
798 FLAC__int64 sumo;
799 unsigned i, j;
800 FLAC__int32 sum;
801 const FLAC__int32 *r = residual, *history;
803 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
804 fprintf(stderr,"FLAC__lpc_restore_signal: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
805 for(i=0;i<order;i++)
806 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
807 fprintf(stderr,"\n");
808 #endif
809 FLAC__ASSERT(order > 0);
811 for(i = 0; i < data_len; i++) {
812 sumo = 0;
813 sum = 0;
814 history = data;
815 for(j = 0; j < order; j++) {
816 sum += qlp_coeff[j] * (*(--history));
817 sumo += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*history);
818 #if defined _MSC_VER
819 if(sumo > 2147483647I64 || sumo < -2147483648I64)
820 fprintf(stderr,"FLAC__lpc_restore_signal: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%I64d\n",i,j,qlp_coeff[j],*history,sumo);
821 #else
822 if(sumo > 2147483647ll || sumo < -2147483648ll)
823 fprintf(stderr,"FLAC__lpc_restore_signal: OVERFLOW, i=%u, j=%u, c=%d, d=%d, sumo=%lld\n",i,j,qlp_coeff[j],*history,(long long)sumo);
824 #endif
825 }
826 *(data++) = *(r++) + (sum >> lp_quantization);
827 }
829 /* Here's a slower but clearer version:
830 for(i = 0; i < data_len; i++) {
831 FLAC__int64 sum = 0;
832 for(j = 0; j < order; j++)
833 sum += qlp_coeff[j] * data[i-j-1];
834 data[i] = residual[i] + (sum >> lp_quantization);
835 }
836 */
837 }
838 #else /* fully unrolled version for normal use */
839 {
840 int i;
841 FLAC__int32 sum;
843 FLAC__ASSERT(order > 0);
844 FLAC__ASSERT(order <= 32);

846 /*
847 * We do unique versions up to 12th order since that's the subset limit.
848 * Also they are roughly ordered to match frequency of occurrence to
849 * minimize branching.
850 */
851 if(order <= 12) {
852 if(order > 8) {
853 if(order > 10) {
854 if(order == 12) {
855 for(i = 0; i < (int)data_len; i++) {
857 sum += qlp_coeff[11] * data[i-12];
858 sum += qlp_coeff[10] * data[i-11];
859 sum += qlp_coeff[9] * data[i-10];
860 sum += qlp_coeff[8] * data[i-9];
861 sum += qlp_coeff[7] * data[i-8];
862 sum += qlp_coeff[6] * data[i-7];
863 sum += qlp_coeff[5] * data[i-6];
864 sum += qlp_coeff[4] * data[i-5];
865 sum += qlp_coeff[3] * data[i-4];
866 sum += qlp_coeff[2] * data[i-3];
867 sum += qlp_coeff[1] * data[i-2];
868 sum += qlp_coeff[0] * data[i-1];
869 data[i] = residual[i] + (sum >> lp_quantization);
872 else { /* order == 11 */
873 for(i = 0; i < (int)data_len; i++) {
875 sum += qlp_coeff[10] * data[i-11];
876 sum += qlp_coeff[9] * data[i-10];
877 sum += qlp_coeff[8] * data[i-9];
878 sum += qlp_coeff[7] * data[i-8];
879 sum += qlp_coeff[6] * data[i-7];
880 sum += qlp_coeff[5] * data[i-6];
881 sum += qlp_coeff[4] * data[i-5];
882 sum += qlp_coeff[3] * data[i-4];
883 sum += qlp_coeff[2] * data[i-3];
884 sum += qlp_coeff[1] * data[i-2];
885 sum += qlp_coeff[0] * data[i-1];
886 data[i] = residual[i] + (sum >> lp_quantization);
892 for(i = 0; i < (int)data_len; i++) {
894 sum += qlp_coeff[9] * data[i-10];
895 sum += qlp_coeff[8] * data[i-9];
896 sum += qlp_coeff[7] * data[i-8];
897 sum += qlp_coeff[6] * data[i-7];
898 sum += qlp_coeff[5] * data[i-6];
899 sum += qlp_coeff[4] * data[i-5];
900 sum += qlp_coeff[3] * data[i-4];
901 sum += qlp_coeff[2] * data[i-3];
902 sum += qlp_coeff[1] * data[i-2];
903 sum += qlp_coeff[0] * data[i-1];
904 data[i] = residual[i] + (sum >> lp_quantization);
907 else { /* order == 9 */
908 for(i = 0; i < (int)data_len; i++) {
910 sum += qlp_coeff[8] * data[i-9];
911 sum += qlp_coeff[7] * data[i-8];
912 sum += qlp_coeff[6] * data[i-7];
913 sum += qlp_coeff[5] * data[i-6];
914 sum += qlp_coeff[4] * data[i-5];
915 sum += qlp_coeff[3] * data[i-4];
916 sum += qlp_coeff[2] * data[i-3];
917 sum += qlp_coeff[1] * data[i-2];
918 sum += qlp_coeff[0] * data[i-1];
919 data[i] = residual[i] + (sum >> lp_quantization);
927 for(i = 0; i < (int)data_len; i++) {
929 sum += qlp_coeff[7] * data[i-8];
930 sum += qlp_coeff[6] * data[i-7];
931 sum += qlp_coeff[5] * data[i-6];
932 sum += qlp_coeff[4] * data[i-5];
933 sum += qlp_coeff[3] * data[i-4];
934 sum += qlp_coeff[2] * data[i-3];
935 sum += qlp_coeff[1] * data[i-2];
936 sum += qlp_coeff[0] * data[i-1];
937 data[i] = residual[i] + (sum >> lp_quantization);
940 else { /* order == 7 */
941 for(i = 0; i < (int)data_len; i++) {
943 sum += qlp_coeff[6] * data[i-7];
944 sum += qlp_coeff[5] * data[i-6];
945 sum += qlp_coeff[4] * data[i-5];
946 sum += qlp_coeff[3] * data[i-4];
947 sum += qlp_coeff[2] * data[i-3];
948 sum += qlp_coeff[1] * data[i-2];
949 sum += qlp_coeff[0] * data[i-1];
950 data[i] = residual[i] + (sum >> lp_quantization);
956 for(i = 0; i < (int)data_len; i++) {
958 sum += qlp_coeff[5] * data[i-6];
959 sum += qlp_coeff[4] * data[i-5];
960 sum += qlp_coeff[3] * data[i-4];
961 sum += qlp_coeff[2] * data[i-3];
962 sum += qlp_coeff[1] * data[i-2];
963 sum += qlp_coeff[0] * data[i-1];
964 data[i] = residual[i] + (sum >> lp_quantization);
967 else { /* order == 5 */
968 for(i = 0; i < (int)data_len; i++) {
970 sum += qlp_coeff[4] * data[i-5];
971 sum += qlp_coeff[3] * data[i-4];
972 sum += qlp_coeff[2] * data[i-3];
973 sum += qlp_coeff[1] * data[i-2];
974 sum += qlp_coeff[0] * data[i-1];
975 data[i] = residual[i] + (sum >> lp_quantization);
983 for(i = 0; i < (int)data_len; i++) {
985 sum += qlp_coeff[3] * data[i-4];
986 sum += qlp_coeff[2] * data[i-3];
987 sum += qlp_coeff[1] * data[i-2];
988 sum += qlp_coeff[0] * data[i-1];
989 data[i] = residual[i] + (sum >> lp_quantization);
992 else { /* order == 3 */
993 for(i = 0; i < (int)data_len; i++) {
995 sum += qlp_coeff[2] * data[i-3];
996 sum += qlp_coeff[1] * data[i-2];
997 sum += qlp_coeff[0] * data[i-1];
998 data[i] = residual[i] + (sum >> lp_quantization);
1004 for(i = 0; i < (int)data_len; i++) {
1006 sum += qlp_coeff[1] * data[i-2];
1007 sum += qlp_coeff[0] * data[i-1];
1008 data[i] = residual[i] + (sum >> lp_quantization);
1011 else { /* order == 1 */
1012 for(i = 0; i < (int)data_len; i++)
1013 data[i] = residual[i] + ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
1018 else { /* order > 12 */
1019 for(i = 0; i < (int)data_len; i++) {
1020 sum = 0;
1021 switch(order) {
1022 case 32: sum += qlp_coeff[31] * data[i-32];
1023 case 31: sum += qlp_coeff[30] * data[i-31];
1024 case 30: sum += qlp_coeff[29] * data[i-30];
1025 case 29: sum += qlp_coeff[28] * data[i-29];
1026 case 28: sum += qlp_coeff[27] * data[i-28];
1027 case 27: sum += qlp_coeff[26] * data[i-27];
1028 case 26: sum += qlp_coeff[25] * data[i-26];
1029 case 25: sum += qlp_coeff[24] * data[i-25];
1030 case 24: sum += qlp_coeff[23] * data[i-24];
1031 case 23: sum += qlp_coeff[22] * data[i-23];
1032 case 22: sum += qlp_coeff[21] * data[i-22];
1033 case 21: sum += qlp_coeff[20] * data[i-21];
1034 case 20: sum += qlp_coeff[19] * data[i-20];
1035 case 19: sum += qlp_coeff[18] * data[i-19];
1036 case 18: sum += qlp_coeff[17] * data[i-18];
1037 case 17: sum += qlp_coeff[16] * data[i-17];
1038 case 16: sum += qlp_coeff[15] * data[i-16];
1039 case 15: sum += qlp_coeff[14] * data[i-15];
1040 case 14: sum += qlp_coeff[13] * data[i-14];
1041 case 13: sum += qlp_coeff[12] * data[i-13];
1042 sum += qlp_coeff[11] * data[i-12];
1043 sum += qlp_coeff[10] * data[i-11];
1044 sum += qlp_coeff[ 9] * data[i-10];
1045 sum += qlp_coeff[ 8] * data[i- 9];
1046 sum += qlp_coeff[ 7] * data[i- 8];
1047 sum += qlp_coeff[ 6] * data[i- 7];
1048 sum += qlp_coeff[ 5] * data[i- 6];
1049 sum += qlp_coeff[ 4] * data[i- 5];
1050 sum += qlp_coeff[ 3] * data[i- 4];
1051 sum += qlp_coeff[ 2] * data[i- 3];
1052 sum += qlp_coeff[ 1] * data[i- 2];
1053 sum += qlp_coeff[ 0] * data[i- 1];
1054 }
1055 data[i] = residual[i] + (sum >> lp_quantization);
1056 }
1057 }
1058 }
1059 #endif
1061 void FLAC__lpc_restore_signal_wide(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[])
1062 #if defined(FLAC__OVERFLOW_DETECT) || !defined(FLAC__LPC_UNROLLED_FILTER_LOOPS)
1063 {
1064 unsigned i, j;
1065 FLAC__int64 sum;
1066 const FLAC__int32 *r = residual, *history;
1068 #ifdef FLAC__OVERFLOW_DETECT_VERBOSE
1069 fprintf(stderr,"FLAC__lpc_restore_signal_wide: data_len=%d, order=%u, lpq=%d",data_len,order,lp_quantization);
1070 for(i=0;i<order;i++)
1071 fprintf(stderr,", q[%u]=%d",i,qlp_coeff[i]);
1072 fprintf(stderr,"\n");
1073 #endif
1074 FLAC__ASSERT(order > 0);
1076 for(i = 0; i < data_len; i++) {
1077 sum = 0;
1078 history = data;
1079 for(j = 0; j < order; j++)
1080 sum += (FLAC__int64)qlp_coeff[j] * (FLAC__int64)(*(--history));
1081 if(FLAC__bitmath_silog2_wide(sum >> lp_quantization) > 32) {
1082 #if defined _MSC_VER
1083 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, sum=%I64d\n", i, sum >> lp_quantization);
1084 #else
1085 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, sum=%lld\n", i, (long long)(sum >> lp_quantization));
1086 #endif
1087 break;
1088 }
1089 if(FLAC__bitmath_silog2_wide((FLAC__int64)(*r) + (sum >> lp_quantization)) > 32) {
1090 #if defined _MSC_VER
1091 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, residual=%d, sum=%I64d, data=%I64d\n", i, *r, sum >> lp_quantization, (FLAC__int64)(*r) + (sum >> lp_quantization));
1092 #else
1093 fprintf(stderr,"FLAC__lpc_restore_signal_wide: OVERFLOW, i=%u, residual=%d, sum=%lld, data=%lld\n", i, *r, (long long)(sum >> lp_quantization), (long long)((FLAC__int64)(*r) + (sum >> lp_quantization)));
1094 #endif
1095 break;
1096 }
1097 *(data++) = *(r++) + (FLAC__int32)(sum >> lp_quantization);
1098 }
1099 }
1100 #else /* fully unrolled version for normal use */
1101 {
1102 int i;
1103 FLAC__int64 sum;
1105 FLAC__ASSERT(order > 0);
1106 FLAC__ASSERT(order <= 32);

1108 /*
1109 * We do unique versions up to 12th order since that's the subset limit.
1110 * Also they are roughly ordered to match frequency of occurrence to
1111 * minimize branching.
1112 */
1113 if(order <= 12) {
1114 if(order > 8) {
1115 if(order > 10) {
1116 if(order == 12) {
1117 for(i = 0; i < (int)data_len; i++) {
1119 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
1120 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
1121 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
1122 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1123 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1124 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1125 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1126 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1127 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1128 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1129 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1130 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1131 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1134 else { /* order == 11 */
1135 for(i = 0; i < (int)data_len; i++) {
1137 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
1138 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
1139 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1140 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1141 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1142 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1143 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1144 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1145 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1146 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1147 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1148 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1154 for(i = 0; i < (int)data_len; i++) {
1156 sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
1157 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1158 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1159 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1160 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1161 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1162 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1163 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1164 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1165 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1166 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1169 else { /* order == 9 */
1170 for(i = 0; i < (int)data_len; i++) {
1172 sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
1173 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1174 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1175 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1176 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1177 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1178 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1179 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1180 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1181 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1186 else if(order > 4) {
1189 for(i = 0; i < (int)data_len; i++) {
1191 sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
1192 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1193 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1194 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1195 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1196 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1197 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1198 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1199 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1202 else { /* order == 7 */
1203 for(i = 0; i < (int)data_len; i++) {
1205 sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
1206 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1207 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1208 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1209 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1210 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1211 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1212 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1218 for(i = 0; i < (int)data_len; i++) {
1220 sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
1221 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1222 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1223 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1224 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1225 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1226 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1229 else { /* order == 5 */
1230 for(i = 0; i < (int)data_len; i++) {
1232 sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
1233 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1234 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1235 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1236 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1237 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1245 for(i = 0; i < (int)data_len; i++) {
1247 sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
1248 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1249 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1250 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1251 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1254 else { /* order == 3 */
1255 for(i = 0; i < (int)data_len; i++) {
1257 sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
1258 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1259 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1260 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1266 for(i = 0; i < (int)data_len; i++) {
1268 sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
1269 sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
1270 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1273 else { /* order == 1 */
1274 for(i = 0; i < (int)data_len; i++)
1275 data[i] = residual[i] + (FLAC__int32)((qlp_coeff[0] * (FLAC__int64)data[i-1]) >> lp_quantization);
1280 else { /* order > 12 */
1281 for(i = 0; i < (int)data_len; i++) {
1282 sum = 0;
1283 switch(order) {
1284 case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32];
1285 case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31];
1286 case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30];
1287 case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29];
1288 case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28];
1289 case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27];
1290 case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26];
1291 case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25];
1292 case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24];
1293 case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23];
1294 case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22];
1295 case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21];
1296 case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20];
1297 case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19];
1298 case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18];
1299 case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17];
1300 case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16];
1301 case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15];
1302 case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14];
1303 case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13];
1304 sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
1305 sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
1306 sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10];
1307 sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9];
1308 sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8];
1309 sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7];
1310 sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6];
1311 sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5];
1312 sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4];
1313 sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3];
1314 sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2];
1315 sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1];
1316 }
1317 data[i] = residual[i] + (FLAC__int32)(sum >> lp_quantization);
1318 }
1319 }
1320 }
1321 #endif
1323 #ifndef FLAC__INTEGER_ONLY_LIBRARY
1325 FLAC__double FLAC__lpc_compute_expected_bits_per_residual_sample(FLAC__double lpc_error, unsigned total_samples)
1326 {
1327 FLAC__double error_scale;
1329 FLAC__ASSERT(total_samples > 0);
1331 error_scale = 0.5 * M_LN2 * M_LN2 / (FLAC__double)total_samples;
1333 return FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(lpc_error, error_scale);
1334 }
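/*
 * error_scale folds the 1/total_samples normalization into the estimate, so
 * lpc_error (a summed squared prediction error from the Levinson-Durbin
 * recursion) is treated as a per-sample variance.  The estimate below is
 * bps = 0.5 * log2(error_scale * lpc_error); for instance, if
 * error_scale * lpc_error == 4.0 the routine predicts 0.5 * log2(4) = 1 bit
 * per residual sample.
 */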
1336 FLAC__double FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(FLAC__double lpc_error, FLAC__double error_scale)
1337 {
1338 if(lpc_error > 0.0) {
1339 FLAC__double bps = (FLAC__double)0.5 * log(error_scale * lpc_error) / M_LN2;
1340 if(bps >= 0.0)
1341 return bps;
1342 else
1343 return 0.0;
1344 }
1345 else if(lpc_error < 0.0) { /* error should not be negative but can happen due to inadequate floating-point resolution */
1346 return 1e32;
1347 }
1348 else {
1349 return 0.0;
1350 }
1351 }
1353 unsigned FLAC__lpc_compute_best_order(const FLAC__double lpc_error[], unsigned max_order, unsigned total_samples, unsigned overhead_bits_per_order)
1354 {
1355 unsigned order, index, best_index; /* 'index' is the index into lpc_error; index==order-1 since lpc_error[0] is for order==1, lpc_error[1] is for order==2, etc. */
1356 FLAC__double bits, best_bits, error_scale;
1358 FLAC__ASSERT(max_order > 0);
1359 FLAC__ASSERT(total_samples > 0);
1361 error_scale = 0.5 * M_LN2 * M_LN2 / (FLAC__double)total_samples;

1363 best_index = 0;
1364 best_bits = (unsigned)(-1);
1366 for(index = 0, order = 1; index < max_order; index++, order++) {
1367 bits = FLAC__lpc_compute_expected_bits_per_residual_sample_with_error_scale(lpc_error[index], error_scale) * (FLAC__double)(total_samples - order) + (FLAC__double)(order * overhead_bits_per_order);
1368 if(bits < best_bits) {
1369 best_index = index;
1370 best_bits = bits;
1371 }
1372 }
1374 return best_index+1; /* +1 since index of lpc_error[] is order-1 */
1375 }
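/*
 * The search trades predictor accuracy against header cost: for each
 * candidate order it estimates
 *   total_bits = bps * (total_samples - order) + order * overhead_bits_per_order,
 * where the overhead term accounts for the per-order side information
 * (an extra warm-up sample and quantized coefficient).  For example, on a
 * 4096-sample block an order step that lowers bps by 0.01 saves roughly 41
 * residual bits and is only worth taking if its per-order overhead is
 * smaller than that.
 */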
1377 #endif /* !defined FLAC__INTEGER_ONLY_LIBRARY */