From: Zooko O'Whielacronx zooko@zooko.com Date: Mon, 22 Jan 2007 23:17:31 +0000 (+0530) Subject: pyfec v0.9 X-Git-Url: https://git.rkrishnan.org/vdrive/%22file:/frontends/%22doc.html/%22news.html/...?a=commitdiff_plain;h=1ea461b11d82209ff01e749a79663e65b3aa6464;p=tahoe-lafs%2Fzfec.git pyfec v0.9 Here is the change history from the first darcs era, in reverse chronological order: Mon Jan 22 16:12:56 MST 2007 "Zooko O'Whielacronx " * move everything into a subdirectory so that I can merge this darcs repo with the tahoe darcs repo ./fec -> ./pyfec/fec ./setup.py -> ./pyfec/setup.py A ./pyfec/ Mon Jan 22 16:10:17 MST 2007 "Zooko O'Whielacronx " * clean up and minimize fec.c * strip out unused code * hard-code GF_BITS to 8 * reindent and reformat curly bracket placement M ./fec/fec.c -655 +324 M ./fec/fec.h -25 Mon Jan 22 14:24:32 MST 2007 "Zooko O'Whielacronx " * change API to allow a subset of the shares to be produced, and to just pass back pointers to primary shares instead of copying them M ./fec/fec.c -24 +40 M ./fec/fec.h -5 +17 M ./fec/fecmodule.c -63 +144 M ./fec/test/test_pyfec.py -16 +25 M ./setup.py -2 +27 Tue Jan 16 23:01:44 MST 2007 "Zooko O'Whielacronx " * split encoder from decoder M ./fec/fecmodule.c -48 +161 M ./fec/test/test_pyfec.py -3 +4 Tue Jan 16 14:35:25 MST 2007 "Zooko O'Whielacronx " * it compiles now! ./fec.c -> ./pyfec/fec.c ./fec.h -> ./pyfec/fec.h ./fecmodule.c -> ./pyfec/fecmodule.c ./pyfec -> ./fec M ./fec/fec.c -109 +85 r13 M ./fec/fec.h -3 +2 r13 M ./fec/fecmodule.c -23 +241 r13 A ./fec/test/ A ./fec/test/test_pyfec.py A ./pyfec/ A ./setup.py Tue Jan 9 10:47:58 MST 2007 zooko@zooko.com * start of new fecmodule.c A ./fecmodule.c Mon Jan 1 15:00:04 MST 2007 zooko@zooko.com * tidy up error handling M ./fec.c -26 +16 Mon Jan 1 14:06:30 MST 2007 zooko@zooko.com * remove the on-the-fly encoding option We don't currently need it. M ./fec.c -68 M ./fec.h -22 Mon Jan 1 13:53:28 MST 2007 zooko@zooko.com * original import from Mnet project A ./fec.c A ./fec.h darcs-hash:c67d3cc8ce1c8a3d3f81692c4b3b117350b5c9eb --- 1ea461b11d82209ff01e749a79663e65b3aa6464 diff --git a/pyfec/fec/fec.c b/pyfec/fec/fec.c new file mode 100644 index 0000000..b5cf444 --- /dev/null +++ b/pyfec/fec/fec.c @@ -0,0 +1,636 @@ +/* TODO: + * + prune all unused code + * + profile + */ +/* + * fec.c -- forward error correction based on Vandermonde matrices + * 980624 + * (C) 1997-98 Luigi Rizzo (luigi@iet.unipi.it) + * + * Portions derived from code by Phil Karn (karn@ka9q.ampr.org), + * Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari + * Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995 + * + * Modifications by Dan Rubenstein (see Modifications.txt for + * their description. + * Modifications (C) 1998 Dan Rubenstein (drubenst@cs.umass.edu) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, + * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +#include "fec.h" + + +/* + * If you get a error returned (negative value) from a fec_* function, + * look in here for the error message. + */ + +#define FEC_ERROR_SIZE 1025 +char fec_error[FEC_ERROR_SIZE+1]; + +#define ERR(...) (snprintf(fec_error, FEC_ERROR_SIZE, __VA_ARGS__)) + +/* + * Primitive polynomials - see Lin & Costello, Appendix A, + * and Lee & Messerschmitt, p. 453. + */ +static const char*const Pp="101110001"; + + +/* + * To speed up computations, we have tables for logarithm, exponent and + * inverse of a number. We use a table for multiplication as well (it takes + * 64K, no big deal even on a PDA, especially because it can be + * pre-initialized an put into a ROM!), otherwhise we use a table of + * logarithms. In any case the macro gf_mul(x,y) takes care of + * multiplications. + */ + +static gf gf_exp[510]; /* index->poly form conversion table */ +static int gf_log[256]; /* Poly->index form conversion table */ +static gf inverse[256]; /* inverse of field elem. */ + /* inv[\alpha**i]=\alpha**(GF_SIZE-i-1) */ + +/* + * modnn(x) computes x % GF_SIZE, where GF_SIZE is 2**GF_BITS - 1, + * without a slow divide. + */ +static inline gf +modnn(int x) { + while (x >= 255) { + x -= 255; + x = (x >> 8) + (x & 255); + } + return x; +} + +#define SWAP(a,b,t) {t tmp; tmp=a; a=b; b=tmp;} + +/* + * gf_mul(x,y) multiplies two numbers. It is much faster to use a + * multiplication table. + * + * USE_GF_MULC, GF_MULC0(c) and GF_ADDMULC(x) can be used when multiplying + * many numbers by the same constant. In this case the first call sets the + * constant, and others perform the multiplications. A value related to the + * multiplication is held in a local variable declared with USE_GF_MULC . See + * usage in addmul1(). + */ +static gf gf_mul_table[256][256]; + +#define gf_mul(x,y) gf_mul_table[x][y] + +#define USE_GF_MULC register gf * __gf_mulc_ +#define GF_MULC0(c) __gf_mulc_ = gf_mul_table[c] +#define GF_ADDMULC(dst, x) dst ^= __gf_mulc_[x] + +/* + * Generate GF(2**m) from the irreducible polynomial p(X) in p[0]..p[m] + * Lookup tables: + * index->polynomial form gf_exp[] contains j= \alpha^i; + * polynomial form -> index form gf_log[ j = \alpha^i ] = i + * \alpha=x is the primitive element of GF(2^m) + * + * For efficiency, gf_exp[] has size 2*GF_SIZE, so that a simple + * multiplication of two numbers can be resolved without calling modnn + */ +static void +init_mul_table () { + int i, j; + for (i = 0; i < 256; i++) + for (j = 0; j < 256; j++) + gf_mul_table[i][j] = gf_exp[modnn (gf_log[i] + gf_log[j])]; + + for (j = 0; j < 256; j++) + gf_mul_table[0][j] = gf_mul_table[j][0] = 0; +} + +/* + * i use malloc so many times, it is easier to put checks all in + * one place. 
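+ * On failure, my_malloc() records a message in fec_error and then
+ * calls exit(1), so callers never check its return value.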
+ */ +static void * +my_malloc (int sz, char *err_string) { + void *p = malloc (sz); + if (p == NULL) { + ERR("Malloc failure allocating %s\n", err_string); + exit (1); + } + return p; +} + +#define NEW_GF_MATRIX(rows, cols) \ + (gf *)my_malloc(rows * cols * sizeof(gf), " ## __LINE__ ## " ) + +/* + * initialize the data structures used for computations in GF. + */ +static void +generate_gf (void) { + int i; + gf mask; + + mask = 1; /* x ** 0 = 1 */ + gf_exp[8] = 0; /* will be updated at the end of the 1st loop */ + /* + * first, generate the (polynomial representation of) powers of \alpha, + * which are stored in gf_exp[i] = \alpha ** i . + * At the same time build gf_log[gf_exp[i]] = i . + * The first 8 powers are simply bits shifted to the left. + */ + for (i = 0; i < 8; i++, mask <<= 1) { + gf_exp[i] = mask; + gf_log[gf_exp[i]] = i; + /* + * If Pp[i] == 1 then \alpha ** i occurs in poly-repr + * gf_exp[8] = \alpha ** 8 + */ + if (Pp[i] == '1') + gf_exp[8] ^= mask; + } + /* + * now gf_exp[8] = \alpha ** 8 is complete, so can also + * compute its inverse. + */ + gf_log[gf_exp[8]] = 8; + /* + * Poly-repr of \alpha ** (i+1) is given by poly-repr of + * \alpha ** i shifted left one-bit and accounting for any + * \alpha ** 8 term that may occur when poly-repr of + * \alpha ** i is shifted. + */ + mask = 1 << 7; + for (i = 9; i < 255; i++) { + if (gf_exp[i - 1] >= mask) + gf_exp[i] = gf_exp[8] ^ ((gf_exp[i - 1] ^ mask) << 1); + else + gf_exp[i] = gf_exp[i - 1] << 1; + gf_log[gf_exp[i]] = i; + } + /* + * log(0) is not defined, so use a special value + */ + gf_log[0] = 255; + /* set the extended gf_exp values for fast multiply */ + for (i = 0; i < 255; i++) + gf_exp[i + 255] = gf_exp[i]; + + /* + * again special cases. 0 has no inverse. This used to + * be initialized to 255, but it should make no difference + * since noone is supposed to read from here. + */ + inverse[0] = 0; + inverse[1] = 1; + for (i = 2; i <= 255; i++) + inverse[i] = gf_exp[255 - gf_log[i]]; +} + +/* + * Various linear algebra operations that i use often. + */ + +/* + * addmul() computes dst[] = dst[] + c * src[] + * This is used often, so better optimize it! Currently the loop is + * unrolled 16 times, a good value for 486 and pentium-class machines. + * The case c=0 is also optimized, whereas c=1 is not. These + * calls are unfrequent in my typical apps so I did not bother. 
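+ *
+ * Since addition in GF(2^8) is XOR, the macro and function below are,
+ * apart from the unrolling and the c==0 shortcut, equivalent to this
+ * reference loop (an illustrative sketch only, not part of the patch):
+ *
+ *     for (i = 0; i < sz; i++)
+ *         dst[i] ^= gf_mul (c, src[i]);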
+ */ +#define addmul(dst, src, c, sz) \ + if (c != 0) addmul1(dst, src, c, sz) + +#define UNROLL 16 /* 1, 4, 8, 16 */ +static void +addmul1 (gf * dst1, const gf * src1, gf c, int sz) { + USE_GF_MULC; + register gf *dst = dst1; + register const gf *src = src1; + gf *lim = &dst[sz - UNROLL + 1]; + + GF_MULC0 (c); + +#if (UNROLL > 1) /* unrolling by 8/16 is quite effective on the pentium */ + for (; dst < lim; dst += UNROLL, src += UNROLL) { + GF_ADDMULC (dst[0], src[0]); + GF_ADDMULC (dst[1], src[1]); + GF_ADDMULC (dst[2], src[2]); + GF_ADDMULC (dst[3], src[3]); +#if (UNROLL > 4) + GF_ADDMULC (dst[4], src[4]); + GF_ADDMULC (dst[5], src[5]); + GF_ADDMULC (dst[6], src[6]); + GF_ADDMULC (dst[7], src[7]); +#endif +#if (UNROLL > 8) + GF_ADDMULC (dst[8], src[8]); + GF_ADDMULC (dst[9], src[9]); + GF_ADDMULC (dst[10], src[10]); + GF_ADDMULC (dst[11], src[11]); + GF_ADDMULC (dst[12], src[12]); + GF_ADDMULC (dst[13], src[13]); + GF_ADDMULC (dst[14], src[14]); + GF_ADDMULC (dst[15], src[15]); +#endif + } +#endif + lim += UNROLL - 1; + for (; dst < lim; dst++, src++) /* final components */ + GF_ADDMULC (*dst, *src); +} + +/* + * computes C = AB where A is n*k, B is k*m, C is n*m + */ +static void +matmul (gf * a, gf * b, gf * c, int n, int k, int m) { + int row, col, i; + + for (row = 0; row < n; row++) { + for (col = 0; col < m; col++) { + gf *pa = &a[row * k]; + gf *pb = &b[col]; + gf acc = 0; + for (i = 0; i < k; i++, pa++, pb += m) + acc ^= gf_mul (*pa, *pb); + c[row * m + col] = acc; + } + } +} + +/* + * invert_mat() takes a matrix and produces its inverse + * k is the size of the matrix. + * (Gauss-Jordan, adapted from Numerical Recipes in C) + * Return non-zero if singular. + */ +static int +invert_mat (gf * src, int k) { + gf c, *p; + int irow, icol, row, col, i, ix; + + int error = -1; + int *indxc = (int *) my_malloc (k * sizeof (int), "indxc"); + int *indxr = (int *) my_malloc (k * sizeof (int), "indxr"); + int *ipiv = (int *) my_malloc (k * sizeof (int), "ipiv"); + gf *id_row = NEW_GF_MATRIX (1, k); + gf *temp_row = NEW_GF_MATRIX (1, k); + + memset (id_row, '\0', k * sizeof (gf)); + /* + * ipiv marks elements already used as pivots. + */ + for (i = 0; i < k; i++) + ipiv[i] = 0; + + for (col = 0; col < k; col++) { + gf *pivot_row; + /* + * Zeroing column 'col', look for a non-zero element. + * First try on the diagonal, if it fails, look elsewhere. + */ + irow = icol = -1; + if (ipiv[col] != 1 && src[col * k + col] != 0) { + irow = col; + icol = col; + goto found_piv; + } + for (row = 0; row < k; row++) { + if (ipiv[row] != 1) { + for (ix = 0; ix < k; ix++) { + if (ipiv[ix] == 0) { + if (src[row * k + ix] != 0) { + irow = row; + icol = ix; + goto found_piv; + } + } else if (ipiv[ix] > 1) { + ERR("singular matrix"); + goto fail; + } + } + } + } + if (icol == -1) { + ERR("Pivot not found!"); + goto fail; + } + found_piv: + ++(ipiv[icol]); + /* + * swap rows irow and icol, so afterwards the diagonal + * element will be correct. Rarely done, not worth + * optimizing. 
+ */ + if (irow != icol) + for (ix = 0; ix < k; ix++) + SWAP (src[irow * k + ix], src[icol * k + ix], gf); + indxr[col] = irow; + indxc[col] = icol; + pivot_row = &src[icol * k]; + c = pivot_row[icol]; + if (c == 0) { + ERR("singular matrix 2"); + goto fail; + } + if (c != 1) { /* otherwhise this is a NOP */ + /* + * this is done often , but optimizing is not so + * fruitful, at least in the obvious ways (unrolling) + */ + c = inverse[c]; + pivot_row[icol] = 1; + for (ix = 0; ix < k; ix++) + pivot_row[ix] = gf_mul (c, pivot_row[ix]); + } + /* + * from all rows, remove multiples of the selected row + * to zero the relevant entry (in fact, the entry is not zero + * because we know it must be zero). + * (Here, if we know that the pivot_row is the identity, + * we can optimize the addmul). + */ + id_row[icol] = 1; + if (memcmp (pivot_row, id_row, k * sizeof (gf)) != 0) { + for (p = src, ix = 0; ix < k; ix++, p += k) { + if (ix != icol) { + c = p[icol]; + p[icol] = 0; + addmul (p, pivot_row, c, k); + } + } + } + id_row[icol] = 0; + } /* done all columns */ + for (col = k - 1; col >= 0; col--) { + if (indxr[col] < 0 || indxr[col] >= k) { + ERR("AARGH, indxr[col] %d\n", indxr[col]); + goto fail; + } else if (indxc[col] < 0 || indxc[col] >= k) { + ERR("AARGH, indxc[col] %d\n", indxc[col]); + goto fail; + } else if (indxr[col] != indxc[col]) { + for (row = 0; row < k; row++) + SWAP (src[row * k + indxr[col]], src[row * k + indxc[col]], gf); + } + } + error = 0; + fail: + free (indxc); + free (indxr); + free (ipiv); + free (id_row); + free (temp_row); + return error; +} + +/* + * fast code for inverting a vandermonde matrix. + * + * NOTE: It assumes that the matrix is not singular and _IS_ a vandermonde + * matrix. Only uses the second column of the matrix, containing the p_i's. + * + * Algorithm borrowed from "Numerical recipes in C" -- sec.2.8, but largely + * revised for my purposes. + * p = coefficients of the matrix (p_i) + * q = values of the polynomial (known) + */ +int +invert_vdm (gf * src, int k) { + int i, j, row, col; + gf *b, *c, *p; + gf t, xx; + + if (k == 1) /* degenerate case, matrix must be p^0 = 1 */ + return 0; + /* + * c holds the coefficient of P(x) = Prod (x - p_i), i=0..k-1 + * b holds the coefficient for the matrix inversion + */ + c = NEW_GF_MATRIX (1, k); + b = NEW_GF_MATRIX (1, k); + + p = NEW_GF_MATRIX (1, k); + + for (j = 1, i = 0; i < k; i++, j += k) { + c[i] = 0; + p[i] = src[j]; /* p[i] */ + } + /* + * construct coeffs. recursively. We know c[k] = 1 (implicit) + * and start P_0 = x - p_0, then at each stage multiply by + * x - p_i generating P_i = x P_{i-1} - p_i P_{i-1} + * After k steps we are done. + */ + c[k - 1] = p[0]; /* really -p(0), but x = -x in GF(2^m) */ + for (i = 1; i < k; i++) { + gf p_i = p[i]; /* see above comment */ + for (j = k - 1 - (i - 1); j < k - 1; j++) + c[j] ^= gf_mul (p_i, c[j + 1]); + c[k - 1] ^= p_i; + } + + for (row = 0; row < k; row++) { + /* + * synthetic division etc. + */ + xx = p[row]; + t = 1; + b[k - 1] = 1; /* this is in fact c[k] */ + for (i = k - 2; i >= 0; i--) { + b[i] = c[i + 1] ^ gf_mul (xx, b[i + 1]); + t = gf_mul (xx, t) ^ b[i]; + } + for (col = 0; col < k; col++) + src[col * k + row] = gf_mul (inverse[t], b[col]); + } + free (c); + free (b); + free (p); + return 0; +} + +static int fec_initialized = 0; +static void +init_fec (void) { + generate_gf (); + init_mul_table (); + fec_initialized = 1; +} + +/* + * This section contains the proper FEC encoding/decoding routines. 
+ * The encoding matrix is computed starting with a Vandermonde matrix, + * and then transforming it into a systematic matrix. + */ + +#define FEC_MAGIC 0xFECC0DEC + +void +fec_free (fec_t *p) { + if (p == NULL || + p->magic != (((FEC_MAGIC ^ p->k) ^ p->n) ^ (unsigned long) (p->enc_matrix))) { + ERR("bad parameters to fec_free"); + return; + } + free (p->enc_matrix); + free (p); +} + +/* + * create a new encoder, returning a descriptor. This contains k,n and + * the encoding matrix. + */ +fec_t * +fec_new (int k, int n) { + int row, col; + gf *p, *tmp_m; + + fec_t *retval; + + fec_error[FEC_ERROR_SIZE] = '\0'; + + if (fec_initialized == 0) + init_fec (); + + if (k < 1 || k > 256 || n > 256 || k > n) { + ERR("Invalid parameters k %d n %d GF_SIZE %d", k, n, 255); + return NULL; + } + retval = (fec_t *) my_malloc (sizeof (fec_t), "new_code"); + retval->k = k; + retval->n = n; + retval->enc_matrix = NEW_GF_MATRIX (n, k); + retval->magic = ((FEC_MAGIC ^ k) ^ n) ^ (unsigned long) (retval->enc_matrix); + tmp_m = NEW_GF_MATRIX (n, k); + /* + * fill the matrix with powers of field elements, starting from 0. + * The first row is special, cannot be computed with exp. table. + */ + tmp_m[0] = 1; + for (col = 1; col < k; col++) + tmp_m[col] = 0; + for (p = tmp_m + k, row = 0; row < n - 1; row++, p += k) { + for (col = 0; col < k; col++) + p[col] = gf_exp[modnn (row * col)]; + } + + /* + * quick code to build systematic matrix: invert the top + * k*k vandermonde matrix, multiply right the bottom n-k rows + * by the inverse, and construct the identity matrix at the top. + */ + invert_vdm (tmp_m, k); /* much faster than invert_mat */ + matmul (tmp_m + k * k, tmp_m, retval->enc_matrix + k * k, n - k, k, k); + /* + * the upper matrix is I so do not bother with a slow multiply + */ + memset (retval->enc_matrix, '\0', k * k * sizeof (gf)); + for (p = retval->enc_matrix, col = 0; col < k; col++, p += k + 1) + *p = 1; + free (tmp_m); + + return retval; +} + +void +fec_encode_all(const fec_t* code, const gf*restrict const*restrict const src, gf*restrict const*restrict const fecs, const unsigned char*restrict const share_ids, unsigned char num_share_ids, size_t sz) { + unsigned i, j; + unsigned char fecnum; + gf* p; + unsigned fecs_ix = 0; /* index into the fecs array */ + + for (i=0; i= code->k) { + memset(fecs[fecs_ix], 0, sz); + p = &(code->enc_matrix[fecnum * code->k]); + for (j = 0; j < code->k; j++) + addmul (fecs[fecs_ix], src[j], p[j], sz); + fecs_ix++; + } + } +} + +#if 0 +void +fec_encode_all(const fec_t* code, const gf*restrict const*restrict const src, gf*restrict const*restrict const fecs, const unsigned char*restrict const share_ids, unsigned char num_share_ids, size_t sz) { + for (unsigned j=0; j < code->k; j++) { + unsigned fecs_ix = 0; /* index into the fecs array */ + for (unsigned i=0; i= code->k) { + if (j == 0) + memset(fecs[fecs_ix], 0, sz); + gf* p = &(code->enc_matrix[fecnum * code->k]); + addmul (fecs[fecs_ix], src[j], p[j], sz); + fecs_ix++; + } + } + } +} +#endif + +/** + * Build decode matrix into some memory space. 
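+ *
+ * Row i of the matrix is the i'th unit vector when index[i] names a
+ * primary share (index[i] < k), and is row index[i] of the encoding
+ * matrix when it names a check share; the assembled k-by-k matrix is
+ * then inverted in place with invert_mat().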
+ * + * @param matrix a space allocated for a k by k matrix + */ +void +build_decode_matrix_into_space(const fec_t*restrict const code, const int*const restrict index, const int k, gf*restrict const matrix) { + unsigned i; + gf* p; + for (i=0, p=matrix; i < k; i++, p += k) { + if (index[i] < k) { + memset(p, 0, k); + p[i] = 1; + } else { + memcpy(p, &(code->enc_matrix[index[i] * code->k]), k); + } + } + invert_mat (matrix, k); +} + +void +fec_decode_all(const fec_t* code, const gf*restrict const*restrict const inpkts, gf*restrict const*restrict const outpkts, const unsigned*restrict const index, unsigned sz) { + gf m_dec[code->k * code->k]; + build_decode_matrix_into_space(code, index, code->k, m_dec); + + unsigned outix=0; + for (unsigned row=0; rowk; row++) { + if (index[row] >= code->k) { + memset(outpkts[outix], 0, sz); + for (unsigned col=0; col < code->k; col++) + addmul(outpkts[outix], inpkts[col], m_dec[row * code->k + col], sz); + outix++; + } + } +} diff --git a/pyfec/fec/fec.h b/pyfec/fec/fec.h new file mode 100644 index 0000000..43adcef --- /dev/null +++ b/pyfec/fec/fec.h @@ -0,0 +1,73 @@ +/* + * fec.h -- forward error correction based on Vandermonde matrices + * 980614 + * (C) 1997-98 Luigi Rizzo (luigi@iet.unipi.it) + * + * Portions derived from code by Phil Karn (karn@ka9q.ampr.org), + * Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari + * Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995 + * + * Modifications by Dan Rubenstein (see Modifications.txt for + * their description. + * Modifications (C) 1998 Dan Rubenstein (drubenst@cs.umass.edu) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, + * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + */ + +/* + * If you get a error returned (negative value) from a fec_* function, + * look in here for the error message. + */ + +extern char fec_error[]; + +typedef unsigned char gf; + +typedef struct { + unsigned long magic; + unsigned char k, n; /* parameters of the code */ + gf *enc_matrix; +} fec_t; + +void fec_free (fec_t *p); +fec_t *fec_new (int k, int n); + +/** + * @param inpkts the "primary shares" i.e. 
the chunks of the input data + * @param fecs buffers into which the secondary shares will be written + * @param share_ids the numbers of the desired shares -- including both primary shares (the id < k) which fec_encode_all() ignores and check shares (the id >= k) which fec_encode_all() will produce and store into the buffers of the fecs parameter + * @param num_share_ids the length of the share_ids array + */ +void fec_encode_all(const fec_t* code, const gf*restrict const*restrict const src, gf*restrict const*restrict const fecs, const unsigned char*restrict const share_ids, unsigned char num_share_ids, size_t sz); + +/** + * @param inpkts an array of packets (size k) + * @param outpkts an array of buffers into which the output packets will be written + * @param index an array of the shareids of the packets in inpkts + * @param sz size of a packet in bytes + */ +void fec_decode_all(const fec_t* code, const gf*restrict const*restrict const inpkts, gf*restrict const*restrict const outpkts, const unsigned*restrict const index, unsigned sz); + +/* end of file */ diff --git a/pyfec/fec/fecmodule.c b/pyfec/fec/fecmodule.c new file mode 100644 index 0000000..f8ba4f0 --- /dev/null +++ b/pyfec/fec/fecmodule.c @@ -0,0 +1,528 @@ +/** + * copyright 2007 Allmydata Inc. + * author: Zooko O'Whielacronx + * based on fecmodule.c by the Mnet Project + */ + +#include +#include + +#include "fec.h" + +static PyObject *py_fec_error; +static PyObject *py_raise_fec_error (const char *format, ...); + +static char fec__doc__[] = "\ +FEC - Forward Error Correction \n\ +"; + +static PyObject * +py_raise_fec_error(const char *format, ...) { + char exceptionMsg[1024]; + va_list ap; + + va_start (ap, format); + vsnprintf (exceptionMsg, 1024, format, ap); + va_end (ap); + exceptionMsg[1023]='\0'; + PyErr_SetString (py_fec_error, exceptionMsg); + return NULL; +} + +static char Encoder__doc__[] = "\ +Hold static encoder state (an in-memory table for matrix multiplication), and k and m parameters, and provide {encode()} method.\n\ +"; + +typedef struct { + PyObject_HEAD + + /* expose these */ + int kk; + int mm; + + /* internal */ + fec_t* fec_matrix; +} Encoder; + +static PyObject * +Encoder_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { + Encoder *self; + + self = (Encoder*)type->tp_alloc(type, 0); + if (self != NULL) { + self->kk = 0; + self->mm = 0; + self->fec_matrix = NULL; + } + + return (PyObject *)self; +} + +static char Encoder_init__doc__[] = "\ +@param k: the number of packets required for reconstruction \n\ +@param m: the number of packets generated \n\ +"; + +static int +Encoder_init(Encoder *self, PyObject *args, PyObject *kwdict) { + static char *kwlist[] = { + "k", + "m", + NULL + }; + if (!PyArg_ParseTupleAndKeywords(args, kwdict, "ii", kwlist, &self->kk, &self->mm)) + return -1; + + self->fec_matrix = fec_new(self->kk, self->mm); + if(self->fec_matrix == NULL) { + py_raise_fec_error(fec_error); /* xyz */ + return -1; + } + + return 0; +} + +static char Encoder_encode__doc__[] = "\ +Encode data into m packets.\ +@param inshares: a sequence of k buffers of data to encode -- these are the k primary shares, i.e. the input data split into k pieces (for best performance, make it a tuple instead of a list)\n\ +@param desired_shares_nums optional sorted sequence of sharenums indicating which shares to produce and return; If None, all m shares will be returned (in order). 
(For best performance, make it a tuple instead of a list.)\n\ +@returns: a list of buffers containing the requested shares\n\ +"; + +static PyObject * +Encoder_encode(Encoder *self, PyObject *args) { + PyObject* inshares; + PyObject* desired_shares_ids = NULL; /* The shareids of the shares that should be returned. */ + PyObject* result = NULL; + + if (!PyArg_ParseTuple(args, "O!|O!", &PyList_Type, &inshares, &PyList_Type, &desired_shares_ids)) + return NULL; + + gf* check_shares_produced[self->mm - self->kk]; /* This is an upper bound -- we will actually use only num_check_shares_produced of these elements (see below). */ + PyObject* pystrs_produced[self->mm - self->kk]; /* This is an upper bound -- we will actually use only num_check_shares_produced of these elements (see below). */ + unsigned num_check_shares_produced = 0; /* The first num_check_shares_produced elements of the check_shares_produced array and of the pystrs_produced array will be used. */ + const gf* incshares[self->kk]; + unsigned char num_desired_shares; + PyObject* fast_desired_shares_ids = NULL; + PyObject** fast_desired_shares_ids_items; + unsigned char c_desired_shares_ids[self->mm]; + unsigned i; + unsigned prev_desired_id = 256; /* impossible value */ + if (desired_shares_ids) { + fast_desired_shares_ids = PySequence_Fast(desired_shares_ids, "Second argument (optional) was not a sequence."); + num_desired_shares = PySequence_Fast_GET_SIZE(fast_desired_shares_ids); + fast_desired_shares_ids_items = PySequence_Fast_ITEMS(fast_desired_shares_ids); + for (i=0; i= c_desired_shares_ids[i]) { + py_raise_fec_error("Precondition violation: first argument is required to be in order -- each requested shareid in the sequence must be a higher number than the previous one. current requested shareid: %u, previous requested shareid: %u\n", c_desired_shares_ids[i], prev_desired_id); + goto err; + } + prev_desired_id = c_desired_shares_ids[i]; + if (c_desired_shares_ids[i] >= self->kk) + num_check_shares_produced++; + } + } else { + num_desired_shares = self->mm; + for (i=0; imm - self->kk; + } + + for (i=0; ikk) { + py_raise_fec_error("Precondition violation: Wrong length -- first argument is required to contain exactly k shares. len(first): %d, k: %d", PySequence_Fast_GET_SIZE(fastinshares), self->kk); + goto err; + } + + /* Construct a C array of gf*'s of the input data. */ + PyObject** fastinsharesitems = PySequence_Fast_ITEMS(fastinshares); + if (!fastinsharesitems) + goto err; + int sz, oldsz = 0; + for (i=0; ikk; i++) { + if (!PyObject_CheckReadBuffer(fastinsharesitems[i])) { + py_raise_fec_error("Precondition violation: %u'th item is required to offer the single-segment read character buffer protocol, but it does not.\n", i); + goto err; + } + if (PyObject_AsCharBuffer(fastinsharesitems[i], &(incshares[i]), &sz)) + goto err; + if (oldsz != 0 && oldsz != sz) { + py_raise_fec_error("Precondition violation: Input shares are required to be all the same length. oldsz: %Zu, sz: %Zu\n", oldsz, sz); + goto err; + } + oldsz = sz; + } + + /* Allocate space for all of the check shares. 
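+       One new Python string of sz bytes is created for each requested
+       share id >= k; requested primary ids need no buffer here because
+       the caller's input strings are returned by reference below.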
*/ + unsigned check_share_index = 0; /* index into the check_shares_produced and (parallel) pystrs_produced arrays */ + for (i=0; i= self->kk) { + pystrs_produced[check_share_index] = PyString_FromStringAndSize(NULL, sz); + if (pystrs_produced[check_share_index] == NULL) + goto err; + check_shares_produced[check_share_index] = PyString_AsString(pystrs_produced[check_share_index]); + if (check_shares_produced[check_share_index] == NULL) + goto err; + check_share_index++; + } + } + assert (check_share_index == num_check_shares_produced); + + /* Encode any check shares that are needed. */ + fec_encode_all(self->fec_matrix, incshares, check_shares_produced, c_desired_shares_ids, num_desired_shares, sz); + + /* Wrap all requested shares up into a Python list of Python strings. */ + result = PyList_New(num_desired_shares); + if (result == NULL) + goto err; + check_share_index = 0; + for (i=0; ikk) { + Py_INCREF(fastinsharesitems[c_desired_shares_ids[i]]); + if (PyList_SetItem(result, i, fastinsharesitems[c_desired_shares_ids[i]]) == -1) { + Py_DECREF(fastinsharesitems[c_desired_shares_ids[i]]); + goto err; + } + } else { + if (PyList_SetItem(result, i, pystrs_produced[check_share_index]) == -1) + goto err; + pystrs_produced[check_share_index] = NULL; + check_share_index++; + } + } + + goto cleanup; + err: + for (i=0; ifec_matrix); + self->ob_type->tp_free((PyObject*)self); +} + +static PyMethodDef Encoder_methods[] = { + {"encode", (PyCFunction)Encoder_encode, METH_VARARGS, Encoder_encode__doc__}, + {NULL}, +}; + +static PyMemberDef Encoder_members[] = { + {"k", T_INT, offsetof(Encoder, kk), READONLY, "k"}, + {"m", T_INT, offsetof(Encoder, mm), READONLY, "m"}, + {NULL} /* Sentinel */ +}; + +static PyTypeObject Encoder_type = { + PyObject_HEAD_INIT(NULL) + 0, /*ob_size*/ + "fec.Encoder", /*tp_name*/ + sizeof(Encoder), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor)Encoder_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + Encoder__doc__, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + Encoder_methods, /* tp_methods */ + Encoder_members, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)Encoder_init, /* tp_init */ + 0, /* tp_alloc */ + Encoder_new, /* tp_new */ +}; + +static char Decoder__doc__[] = "\ +Hold static decoder state (an in-memory table for matrix multiplication), and k and m parameters, and provide {decode()} method.\n\ +"; + +typedef struct { + PyObject_HEAD + + /* expose these */ + int kk; + int mm; + + /* internal */ + fec_t* fec_matrix; +} Decoder; + +static PyObject * +Decoder_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { + Decoder *self; + + self = (Decoder*)type->tp_alloc(type, 0); + if (self != NULL) { + self->kk = 0; + self->mm = 0; + self->fec_matrix = NULL; + } + + return (PyObject *)self; +} + +static char Decoder_init__doc__[] = "\ +@param k: the number of packets required for reconstruction \n\ +@param m: the number of packets generated \n\ +"; + +static int +Decoder_init(Encoder *self, PyObject *args, PyObject *kwdict) { + static char 
*kwlist[] = { + "k", + "m", + NULL + }; + if (!PyArg_ParseTupleAndKeywords(args, kwdict, "ii", kwlist, &self->kk, &self->mm)) + return -1; + + self->fec_matrix = fec_new(self->kk, self->mm); + if(self->fec_matrix == NULL) { + py_raise_fec_error(fec_error); /* xyz */ + return -1; + } + + return 0; +} + +static char Decoder_decode__doc__[] = "\ +Decode a list shares into a list of segments.\n\ +@param shares a sequence of buffers containing share data (for best performance, make it a tuple instead of a list)\n\ +@param sharenums a sequence of integers of the sharenumber for each share in shares (for best performance, make it a tuple instead of a list)\n\ +\n\ +@return a list of strings containing the segment data (i.e. ''.join(retval) yields a string containing the decoded data)\n\ +"; + +static PyObject * +Decoder_decode(Decoder *self, PyObject *args) { + PyObject*restrict shares; + PyObject*restrict sharenums; + PyObject* result = NULL; + + if (!PyArg_ParseTuple(args, "O!O!", &PyList_Type, &shares, &PyList_Type, &sharenums)) + return NULL; + + const gf*restrict cshares[self->kk]; + unsigned csharenums[self->kk]; + gf*restrict recoveredcstrs[self->kk]; /* self->kk is actually an upper bound -- we probably won't need all of this space. */ + PyObject*restrict recoveredpystrs[self->kk]; /* self->kk is actually an upper bound -- we probably won't need all of this space. */ + unsigned i; + for (i=0; ikk; i++) + recoveredpystrs[i] = NULL; + PyObject*restrict fastshares = PySequence_Fast(shares, "First argument was not a sequence."); + if (!fastshares) + goto err; + PyObject*restrict fastsharenums = PySequence_Fast(sharenums, "Second argument was not a sequence."); + if (!fastsharenums) + goto err; + + if (PySequence_Fast_GET_SIZE(fastshares) != self->kk) { + py_raise_fec_error("Precondition violation: Wrong length -- first argument is required to contain exactly k shares. len(first): %d, k: %d", PySequence_Fast_GET_SIZE(fastshares), self->kk); + goto err; + } + if (PySequence_Fast_GET_SIZE(fastsharenums) != self->kk) { + py_raise_fec_error("Precondition violation: Wrong length -- sharenums is required to contain exactly k shares. len(sharenums): %d, k: %d", PySequence_Fast_GET_SIZE(fastsharenums), self->kk); + goto err; + } + + /* Construct a C array of gf*'s of the data and another of C ints of the sharenums. */ + unsigned needtorecover=0; + PyObject** fastsharenumsitems = PySequence_Fast_ITEMS(fastsharenums); + if (!fastsharenumsitems) + goto err; + PyObject** fastsharesitems = PySequence_Fast_ITEMS(fastshares); + if (!fastsharesitems) + goto err; + int sz, oldsz = 0; + for (i=0; ikk; i++) { + if (!PyInt_Check(fastsharenumsitems[i])) + goto err; + csharenums[i] = PyInt_AsLong(fastsharenumsitems[i]); + if (csharenums[i] >= self->kk) + needtorecover+=1; + + if (!PyObject_CheckReadBuffer(fastsharesitems[i])) + goto err; + if (PyObject_AsCharBuffer(fastsharesitems[i], &(cshares[i]), &sz)) + goto err; + if (oldsz != 0 && oldsz != sz) { + py_raise_fec_error("Precondition violation: Input shares are required to be all the same length. oldsz: %Zu, sz: %Zu\n", oldsz, sz); + goto err; + } + oldsz = sz; + } + + /* Allocate space for all of the recovered shares. */ + for (i=0; ifec_matrix, cshares, recoveredcstrs, csharenums, sz); + + /* Wrap up both original primary shares and decoded shares into a Python list of Python strings. 
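+       A slot whose sharenum equals its position holds an original primary
+       share and is returned by reference; every other slot is filled, in
+       order, from the buffers fec_decode_all() just recovered.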
*/ + unsigned nextrecoveredix=0; + result = PyList_New(self->kk); + if (result == NULL) + goto err; + for (i=0; ikk; i++) { + if (csharenums[i] == i) { + /* Original primary share. */ + Py_INCREF(fastsharesitems[i]); + if (PyList_SetItem(result, i, fastsharesitems[i]) == -1) { + Py_DECREF(fastsharesitems[i]); + goto err; + } + } else { + /* Recovered share. */ + if (PyList_SetItem(result, i, recoveredpystrs[nextrecoveredix]) == -1) + goto err; + recoveredpystrs[nextrecoveredix] = NULL; + nextrecoveredix++; + } + } + + goto cleanup; + err: + for (i=0; ikk; i++) + Py_XDECREF(recoveredpystrs[i]); + Py_XDECREF(result); result = NULL; + cleanup: + Py_XDECREF(fastshares); fastshares=NULL; + Py_XDECREF(fastsharenums); fastsharenums=NULL; + return result; +} + +static void +Decoder_dealloc(Decoder * self) { + fec_free(self->fec_matrix); + self->ob_type->tp_free((PyObject*)self); +} + +static PyMethodDef Decoder_methods[] = { + {"decode", (PyCFunction)Decoder_decode, METH_VARARGS, Decoder_decode__doc__}, + {NULL}, +}; + +static PyMemberDef Decoder_members[] = { + {"k", T_INT, offsetof(Encoder, kk), READONLY, "k"}, + {"m", T_INT, offsetof(Encoder, mm), READONLY, "m"}, + {NULL} /* Sentinel */ +}; + +static PyTypeObject Decoder_type = { + PyObject_HEAD_INIT(NULL) + 0, /*ob_size*/ + "fec.Decoder", /*tp_name*/ + sizeof(Decoder), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + (destructor)Decoder_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash */ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ + Decoder__doc__, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + Decoder_methods, /* tp_methods */ + Decoder_members, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)Decoder_init, /* tp_init */ + 0, /* tp_alloc */ + Decoder_new, /* tp_new */ +}; + +static PyMethodDef fec_methods[] = { + {NULL} +}; + +#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ +#define PyMODINIT_FUNC void +#endif +PyMODINIT_FUNC +initfec (void) { + PyObject *module; + PyObject *module_dict; + + if (PyType_Ready(&Encoder_type) < 0) + return; + if (PyType_Ready(&Decoder_type) < 0) + return; + + module = Py_InitModule3 ("fec", fec_methods, fec__doc__); + if (module == NULL) + return; + + Py_INCREF(&Encoder_type); + Py_INCREF(&Decoder_type); + + PyModule_AddObject(module, "Encoder", (PyObject *)&Encoder_type); + PyModule_AddObject(module, "Decoder", (PyObject *)&Decoder_type); + + module_dict = PyModule_GetDict (module); + py_fec_error = PyErr_NewException ("fec.Error", NULL, NULL); + PyDict_SetItemString (module_dict, "Error", py_fec_error); +} + diff --git a/pyfec/fec/test/test_pyfec.py b/pyfec/fec/test/test_pyfec.py new file mode 100755 index 0000000..9eae42c --- /dev/null +++ b/pyfec/fec/test/test_pyfec.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python + +from pyutil.assertutil import _assert, precondition + +import random +import sys + +import fec + +def shuffle(nums_and_shares): + """ Make sure that if nums_and_shares[i][0] < len(nums_and_shares), that i == nums_and_shares[i][0]. 
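+
+    This matters because decode() relies on every surviving primary share
+    being passed at the list position equal to its own share number; the
+    check shares may then occupy the remaining positions in any order.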
""" + i = 0 + while i < len(nums_and_shares): + num, share = nums_and_shares[i] + if num >= len(nums_and_shares) or num == i: + i += 1 + else: + nums_and_shares[i] = nums_and_shares[num] + nums_and_shares[num] = (num, share,) + _assert([ (i, (num, share,),) for (i, (num, share,),) in enumerate(nums_and_shares) if num < len(nums_and_shares) and num != i ] == [], [ (i, (num, share,),) for (i, (num, share,),) in enumerate(nums_and_shares) if num < len(nums_and_shares) and num != i ]) + +def _h(k, m, ss): + # sys.stdout.write("k: %s, m: %s, len(ss): %r, len(ss[0]): %r" % (k, m, len(ss), len(ss[0]),)) ; sys.stdout.flush() + encer = fec.Encoder(k, m) + # sys.stdout.write("constructed.\n") ; sys.stdout.flush() + nums_and_shares = list(enumerate(encer.encode(ss))) + # sys.stdout.write("encoded.\n") ; sys.stdout.flush() + _assert(isinstance(nums_and_shares, list), nums_and_shares) + _assert(len(nums_and_shares) == m, len(nums_and_shares), m) + nums_and_shares = random.sample(nums_and_shares, k) + shuffle(nums_and_shares) + shares = [ x[1] for x in nums_and_shares ] + nums = [ x[0] for x in nums_and_shares ] + # sys.stdout.write("about to construct Decoder.\n") ; sys.stdout.flush() + decer = fec.Decoder(k, m) + # sys.stdout.write("about to decode.\n") ; sys.stdout.flush() + decoded = decer.decode(shares, nums) + # sys.stdout.write("decoded.\n") ; sys.stdout.flush() + _assert(len(decoded) == len(ss), len(decoded), len(ss)) + _assert(tuple([str(s) for s in decoded]) == tuple([str(s) for s in ss]), tuple([str(s) for s in decoded]), tuple([str(s) for s in ss])) + +def randstr(n): + return ''.join(map(chr, map(random.randrange, [0]*n, [256]*n))) + +def div_ceil(n, d): + """ + The smallest integer k such that k*d >= n. + """ + return (n/d) + (n%d != 0) + +def next_multiple(n, k): + """ + The smallest multiple of k which is >= n. + """ + return div_ceil(n, k) * k + +def pad_size(n, k): + """ + The smallest number that has to be added to n so that n is a multiple of k. + """ + if n%k: + return k - n%k + else: + return 0 + +def _test_random(): + # m = random.randrange(1, 255) + m = 99 + # k = random.randrange(1, m+1) + k = 33 + # l = random.randrange(0, 2**16) + l = 2**12 + ss = [ randstr(l/k) + '\x00' * pad_size(l/k, k) for x in range(k) ] + _h(k, m, ss) + +def test_random(): + for i in range(2**9): + sys.stdout.write(",") + _test_random() + sys.stdout.write(".") + +test_random() diff --git a/pyfec/setup.py b/pyfec/setup.py new file mode 100755 index 0000000..0c15d6c --- /dev/null +++ b/pyfec/setup.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +from distutils.core import Extension, setup + + +DEBUGMODE=False +# DEBUGMODE=True + +extra_compile_args=[] +extra_link_args=[] + +extra_compile_args.append("-std=c99") + +undef_macros=[] + +if DEBUGMODE: + extra_compile_args.append("-O0") + extra_compile_args.append("-g") + extra_link_args.append("-g") + undef_macros.append('NDEBUG') + +trove_classifiers=""" +XYZ insert trove classifiers here. +""" + +setup(name='pyfec', + versions='0.9', + summary='Provides a fast C implementation of Reed-Solomon erasure coding with a Python interface.', + description='Erasure coding is the generation of extra redundant packets of information such that if some packets are lost ("erased") then the original data can be recovered from the remaining packets. 
This package contains an optimized implementation along with a Python interface.', + author='Zooko O\'Whielacronx', + author_email='zooko@zooko.com', + url='http://zooko.com/repos/pyfec', + license='GNU GPL', + platform='Any', + packages=['fec'], + classifiers=trove_classifiers.split("\n"), + ext_modules=[Extension('fec', ['fec/fec.c', 'fec/fecmodule.c',], extra_link_args=extra_link_args, extra_compile_args=extra_compile_args, undef_macros=undef_macros),], + )
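For reference, here is a minimal usage sketch of the API added by this
patch, pieced together from the docstrings and the test above. The k and m
values and the sample data are made up for illustration; only the module
name "fec" and the Encoder/Decoder calls come from the code itself.

    import fec

    k, m = 3, 5
    data = "these are the data to protect!"       # 30 bytes, a multiple of k
    seg = len(data) / k                            # integer division (Python 2)
    primary = [data[i*seg:(i+1)*seg] for i in range(k)]

    enc = fec.Encoder(k, m)
    shares = enc.encode(primary)                   # all m shares, ids 0..m-1
    # enc.encode(primary, [3, 4]) would return only check shares 3 and 4

    # Pretend primary shares 1 and 2 were lost; surviving primaries must be
    # passed at the position equal to their own share number.
    dec = fec.Decoder(k, m)
    recovered = dec.decode([shares[0], shares[3], shares[4]], [0, 3, 4])
    assert "".join(recovered) == data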