checksum.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
 *		Fixed some nasty bugs, causing some horrible crashes.
 *		A: At some points, the sum (%0) was used as
 *		length-counter instead of the length counter
 *		(%1). Thanks to Roman Hodek for pointing this out.
 *		B: GCC seems to mess up if one uses too many
 *		data-registers to hold input values and one tries to
 *		specify d0 and d1 as scratch registers. Letting gcc
 *		choose these registers itself solves the problem.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * 1998/8/31	Andreas Schwab:
 *		Zero out rest of buffer on exception in
 *		csum_partial_copy_from_user.
 */

#include <net/checksum.h>

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */
unsigned int
csum_partial (const unsigned char *buff, int len, unsigned int sum)
{
	unsigned long tmp1, tmp2;
	/*
	 * Experiments with ethernet and slip connections show that buff
	 * is aligned on either a 2-byte or 4-byte boundary.
	 */
	__asm__("movel %2,%3\n\t"
		"btst #1,%3\n\t"	/* Check alignment */
		"jeq 2f\n\t"
		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
		"jgt 1f\n\t"
		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
		"jra 4f\n"
	     "1:\t"
		"addw %2@+,%0\n\t"	/* add first word to sum */
		"clrl %3\n\t"
		"addxl %3,%0\n"		/* add X bit */
	     "2:\t"
		/* unrolled loop for the main part: do 8 longs at once */
		"movel %1,%3\n\t"	/* save len in tmp1 */
		"lsrl #5,%1\n\t"	/* len/32 */
		"jeq 2f\n\t"		/* not enough... */
		"subql #1,%1\n"
	     "1:\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"dbra %1,1b\n\t"
		"clrl %4\n\t"
		"addxl %4,%0\n\t"	/* add X bit */
		"clrw %1\n\t"
		"subql #1,%1\n\t"
		"jcc 1b\n"
	     "2:\t"
		"movel %3,%1\n\t"	/* restore len from tmp1 */
		"andw #0x1c,%3\n\t"	/* number of rest longs */
		"jeq 4f\n\t"
		"lsrw #2,%3\n\t"
		"subqw #1,%3\n"
	     "3:\t"
		/* loop for rest longs */
		"movel %2@+,%4\n\t"
		"addxl %4,%0\n\t"
		"dbra %3,3b\n\t"
		"clrl %4\n\t"
		"addxl %4,%0\n"		/* add X bit */
	     "4:\t"
		/* now check for rest bytes that do not fit into longs */
		"andw #3,%1\n\t"
		"jeq 7f\n\t"
		"clrl %4\n\t"		/* clear tmp2 for rest bytes */
		"subqw #2,%1\n\t"
		"jlt 5f\n\t"
		"movew %2@+,%4\n\t"	/* have rest >= 2: get word */
		"swap %4\n\t"		/* into bits 16..31 */
		"tstw %1\n\t"		/* another byte? */
		"jeq 6f\n"
	     "5:\t"
		"moveb %2@,%4\n\t"	/* have odd rest: get byte */
		"lslw #8,%4\n\t"	/* into bits 8..15; 16..31 untouched */
	     "6:\t"
		"addl %4,%0\n\t"	/* now add rest long to sum */
		"clrl %4\n\t"
		"addxl %4,%0\n"		/* add X bit */
	     "7:\t"
		: "=d" (sum), "=d" (len), "=a" (buff),
		  "=&d" (tmp1), "=&d" (tmp2)
		: "0" (sum), "1" (len), "2" (buff)
		);
	return(sum);
}
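/*
 * Illustrative sketch, not part of the original file: a portable C
 * rendering of what the asm above computes.  16-bit words are summed
 * into a wider accumulator and the carries are folded back in
 * (end-around carry), which is the one's-complement addition the
 * Internet checksum is built on.  m68k is big-endian, so the native
 * word loads in the asm correspond to the high-byte-first reads here.
 * The asm keeps the full 32-bit partial sum while this sketch folds as
 * it goes; both fold to the same final 16-bit checksum.  The name
 * csum_partial_ref is hypothetical.
 */
static unsigned int csum_partial_ref(const unsigned char *buff, int len,
				     unsigned int sum)
{
	unsigned long acc = sum;

	while (len > 1) {		/* whole 16-bit words */
		acc += (buff[0] << 8) | buff[1];
		buff += 2;
		len -= 2;
	}
	if (len > 0)			/* trailing odd byte: high half of a word */
		acc += buff[0] << 8;
	while (acc >> 16)		/* end-around carry */
		acc = (acc & 0xffff) + (acc >> 16);
	return acc;
}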
/*
 * copy from user space while checksumming, with exception handling.
 */

unsigned int
csum_partial_copy_from_user(const char *src, char *dst, int len,
			    int sum, int *csum_err)
{
	/*
	 * GCC doesn't like more than 10 operands for the asm
	 * statements so we have to use tmp2 for the error
	 * code.
	 */
	unsigned long tmp1, tmp2;

	__asm__("movel %2,%4\n\t"
		"btst #1,%4\n\t"	/* Check alignment */
		"jeq 2f\n\t"
		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
		"jgt 1f\n\t"
		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
		"jra 4f\n"
	     "1:\n"
	     "10:\t"
		"movesw %2@+,%4\n\t"	/* add first word to sum */
		"addw %4,%0\n\t"
		"movew %4,%3@+\n\t"
		"clrl %4\n\t"
		"addxl %4,%0\n"		/* add X bit */
	     "2:\t"
		/* unrolled loop for the main part: do 8 longs at once */
		"movel %1,%4\n\t"	/* save len in tmp1 */
		"lsrl #5,%1\n\t"	/* len/32 */
		"jeq 2f\n\t"		/* not enough... */
		"subql #1,%1\n"
	     "1:\n"
	     "11:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
	     "12:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
	     "13:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
	     "14:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
	     "15:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
	     "16:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
	     "17:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
	     "18:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"dbra %1,1b\n\t"
		"clrl %5\n\t"
		"addxl %5,%0\n\t"	/* add X bit */
		"clrw %1\n\t"
		"subql #1,%1\n\t"
		"jcc 1b\n"
	     "2:\t"
		"movel %4,%1\n\t"	/* restore len from tmp1 */
		"andw #0x1c,%4\n\t"	/* number of rest longs */
		"jeq 4f\n\t"
		"lsrw #2,%4\n\t"
		"subqw #1,%4\n"
	     "3:\n"
		/* loop for rest longs */
	     "19:\t"
		"movesl %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"dbra %4,3b\n\t"
		"clrl %5\n\t"
		"addxl %5,%0\n"		/* add X bit */
	     "4:\t"
		/* now check for rest bytes that do not fit into longs */
		"andw #3,%1\n\t"
		"jeq 7f\n\t"
		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
		"subqw #2,%1\n\t"
		"jlt 5f\n\t"
	     "20:\t"
		"movesw %2@+,%5\n\t"	/* have rest >= 2: get word */
		"movew %5,%3@+\n\t"
		"swap %5\n\t"		/* into bits 16..31 */
		"tstw %1\n\t"		/* another byte? */
		"jeq 6f\n"
	     "5:\n"
	     "21:\t"
		"movesb %2@,%5\n\t"	/* have odd rest: get byte */
		"moveb %5,%3@+\n\t"
		"lslw #8,%5\n\t"	/* into bits 8..15; 16..31 untouched */
	     "6:\t"
		"addl %5,%0\n\t"	/* now add rest long to sum */
		"clrl %5\n\t"
		"addxl %5,%0\n\t"	/* add X bit */
	     "7:\t"
		"clrl %5\n"		/* no error - clear return value */
	     "8:\n"
		".section .fixup,\"ax\"\n"
		".even\n"
		/* If any exception occurs, zero out the rest.
		   Similarities with the code above are intentional :-) */
	     "90:\t"
		"clrw %3@+\n\t"
		"movel %1,%4\n\t"
		"lsrl #5,%1\n\t"
		"jeq 1f\n\t"
		"subql #1,%1\n"
	     "91:\t"
		"clrl %3@+\n"
	     "92:\t"
		"clrl %3@+\n"
	     "93:\t"
		"clrl %3@+\n"
	     "94:\t"
		"clrl %3@+\n"
	     "95:\t"
		"clrl %3@+\n"
	     "96:\t"
		"clrl %3@+\n"
	     "97:\t"
		"clrl %3@+\n"
	     "98:\t"
		"clrl %3@+\n\t"
		"dbra %1,91b\n\t"
		"clrw %1\n\t"
		"subql #1,%1\n\t"
		"jcc 91b\n"
	     "1:\t"
		"movel %4,%1\n\t"
		"andw #0x1c,%4\n\t"
		"jeq 1f\n\t"
		"lsrw #2,%4\n\t"
		"subqw #1,%4\n"
	     "99:\t"
		"clrl %3@+\n\t"
		"dbra %4,99b\n\t"
	     "1:\t"
		"andw #3,%1\n\t"
		"jeq 9f\n"
	     "100:\t"
		"clrw %3@+\n\t"
		"tstw %1\n\t"
		"jeq 9f\n"
	     "101:\t"
		"clrb %3@+\n"
	     "9:\t"
#define STR(X) STR1(X)
#define STR1(X) #X
		"moveq #-" STR(EFAULT) ",%5\n\t"
		"jra 8b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		".long 10b,90b\n"
		".long 11b,91b\n"
		".long 12b,92b\n"
		".long 13b,93b\n"
		".long 14b,94b\n"
		".long 15b,95b\n"
		".long 16b,96b\n"
		".long 17b,97b\n"
		".long 18b,98b\n"
		".long 19b,99b\n"
		".long 20b,100b\n"
		".long 21b,101b\n"
		".previous"
		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
		  "=&d" (tmp1), "=d" (tmp2)
		: "0" (sum), "1" (len), "2" (src), "3" (dst)
		);
	*csum_err = tmp2;
	return(sum);
}
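/*
 * Illustrative caller sketch, not part of the original file: the fault
 * indication is reported through *csum_err rather than through the
 * returned sum, so a caller must check it separately.  On a fault the
 * fixup code above has already zeroed the rest of the destination
 * buffer.  All names in this sketch are hypothetical.
 */
static int example_copy_and_csum(const char *user_src, char *kbuf,
				 int len, unsigned int *csump)
{
	int err = 0;

	*csump = csum_partial_copy_from_user(user_src, kbuf, len, 0, &err);
	if (err)		/* -EFAULT from the fixup path */
		return err;
	return 0;		/* *csump holds the partial sum */
}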
/*
 * copy from kernel space while checksumming, otherwise like csum_partial
 */

unsigned int
csum_partial_copy(const char *src, char *dst, int len, int sum)
{
	unsigned long tmp1, tmp2;

	__asm__("movel %2,%4\n\t"
		"btst #1,%4\n\t"	/* Check alignment */
		"jeq 2f\n\t"
		"subql #2,%1\n\t"	/* buff%4==2: treat first word */
		"jgt 1f\n\t"
		"addql #2,%1\n\t"	/* len was == 2, treat only rest */
		"jra 4f\n"
	     "1:\t"
		"movew %2@+,%4\n\t"	/* add first word to sum */
		"addw %4,%0\n\t"
		"movew %4,%3@+\n\t"
		"clrl %4\n\t"
		"addxl %4,%0\n"		/* add X bit */
	     "2:\t"
		/* unrolled loop for the main part: do 8 longs at once */
		"movel %1,%4\n\t"	/* save len in tmp1 */
		"lsrl #5,%1\n\t"	/* len/32 */
		"jeq 2f\n\t"		/* not enough... */
		"subql #1,%1\n"
	     "1:\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"dbra %1,1b\n\t"
		"clrl %5\n\t"
		"addxl %5,%0\n\t"	/* add X bit */
		"clrw %1\n\t"
		"subql #1,%1\n\t"
		"jcc 1b\n"
	     "2:\t"
		"movel %4,%1\n\t"	/* restore len from tmp1 */
		"andw #0x1c,%4\n\t"	/* number of rest longs */
		"jeq 4f\n\t"
		"lsrw #2,%4\n\t"
		"subqw #1,%4\n"
	     "3:\t"
		/* loop for rest longs */
		"movel %2@+,%5\n\t"
		"addxl %5,%0\n\t"
		"movel %5,%3@+\n\t"
		"dbra %4,3b\n\t"
		"clrl %5\n\t"
		"addxl %5,%0\n"		/* add X bit */
	     "4:\t"
		/* now check for rest bytes that do not fit into longs */
		"andw #3,%1\n\t"
		"jeq 7f\n\t"
		"clrl %5\n\t"		/* clear tmp2 for rest bytes */
		"subqw #2,%1\n\t"
		"jlt 5f\n\t"
		"movew %2@+,%5\n\t"	/* have rest >= 2: get word */
		"movew %5,%3@+\n\t"
		"swap %5\n\t"		/* into bits 16..31 */
		"tstw %1\n\t"		/* another byte? */
		"jeq 6f\n"
	     "5:\t"
		"moveb %2@,%5\n\t"	/* have odd rest: get byte */
		"moveb %5,%3@+\n\t"
		"lslw #8,%5\n"		/* into bits 8..15; 16..31 untouched */
	     "6:\t"
		"addl %5,%0\n\t"	/* now add rest long to sum */
		"clrl %5\n\t"
		"addxl %5,%0\n"		/* add X bit */
	     "7:\t"
		: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
		  "=&d" (tmp1), "=&d" (tmp2)
		: "0" (sum), "1" (len), "2" (src), "3" (dst)
		);
	return(sum);
}
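/*
 * Illustrative sketch, not part of the original file: all three
 * routines above return a 32-bit partial sum, not the final checksum.
 * The usual last step, provided by the checksum headers as
 * csum_fold(), folds that sum to 16 bits and complements it.  A
 * portable equivalent (hypothetical name) looks like this:
 */
static unsigned short csum_fold_ref(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold upper half in */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return (unsigned short)~sum;		/* one's complement */
}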