Exemplo n.º 1
0
 def crc32(self):
     """Locate a working CRC-32 function and return it as a callable.

     Probes zlib.crc32 first, then binascii.crc32, validating each
     candidate against the known checksum of the string 'amuse'
     (0xc0cc9367).  On Python 3 (where crc32 requires bytes) the probe
     raises, and a wrapper is returned that encodes str input as ASCII
     and converts the result back to the signed Python-2-style value.
     Raises Exception if neither module yields a verified implementation.
     """
     try:
     
         from zlib import crc32
         try:
             # Python 2 path: crc32 accepts a str directly.
             if crc32('amuse')&0xffffffff == 0xc0cc9367:
                 return crc32
         except Exception:
             #python 3, crc32 needs bytes...
             def python3_crc32(x):
                 x = crc32(bytes(x, 'ascii'))
                 # Convert the unsigned result to the signed range that
                 # Python 2's crc32 used to return.
                 return x - ((x & 0x80000000) <<1)
             if python3_crc32('amuse')&0xffffffff == 0xc0cc9367:
                 return python3_crc32
     except Exception:
         pass
     try:
         # Fallback: binascii provides the same CRC-32 algorithm.
         from binascii import crc32
         try:
             if crc32('amuse')&0xffffffff == 0xc0cc9367:
                 return crc32
         except Exception:
             #python 3, crc32 needs bytes...
             def python3_crc32(x):
                 x = crc32(bytes(x, 'ascii'))
                 return x - ((x & 0x80000000) <<1)
             if python3_crc32('amuse')&0xffffffff == 0xc0cc9367:
                 return python3_crc32
     except Exception:
         pass
     
     raise Exception("No working crc32 implementation found!")
Exemplo n.º 2
0
def _to_png(data, width, height):
    # From MSS
    line = width * 3
    png_filter = struct.pack('>B', 0)
    scanlines = b''.join(
        [png_filter + data[y * line:y * line + line] for y in range(height)]
    )
    magic = struct.pack('>8B', 137, 80, 78, 71, 13, 10, 26, 10)

    # Header: size, marker, data, CRC32
    ihdr = [b'', b'IHDR', b'', b'']
    ihdr[2] = struct.pack('>2I5B', width, height, 8, 2, 0, 0, 0)
    ihdr[3] = struct.pack('>I', crc32(b''.join(ihdr[1:3])) & 0xffffffff)
    ihdr[0] = struct.pack('>I', len(ihdr[2]))

    # Data: size, marker, data, CRC32
    idat = [b'', b'IDAT', compress(scanlines), b'']
    idat[3] = struct.pack('>I', crc32(b''.join(idat[1:3])) & 0xffffffff)
    idat[0] = struct.pack('>I', len(idat[2]))

    # Footer: size, marker, None, CRC32
    iend = [b'', b'IEND', b'', b'']
    iend[3] = struct.pack('>I', crc32(iend[1]) & 0xffffffff)
    iend[0] = struct.pack('>I', len(iend[2]))

    return b''.join([
        magic,
        b''.join(ihdr),
        b''.join(idat),
        b''.join(iend)
    ])
Exemplo n.º 3
0
 def write_delete(self, id):
     """Append a DELETE record for key `id` to the current segment.

     On-disk layout: crc32 | header(size, TAG_DELETE) | id, where the
     CRC covers header + id.  Returns the current segment number.
     """
     out = self.get_write_fd()
     header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
     # Chain the CRC: seed with the header's CRC, then fold in the key.
     checksum = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
     out.write(checksum + header + id)
     self.offset += self.put_header_fmt.size
     return self.segment
Exemplo n.º 4
0
 def iter_objects(self, segment, include_data=False):
     """Iterate the entries of `segment`, yielding (tag, key, offset) or,
     with include_data=True, (tag, key, offset, data).

     Verifies the 8-byte segment magic and each entry's CRC32; raises
     IntegrityError on any corruption or unknown tag.
     """
     fd = self.get_fd(segment)
     fd.seek(0)
     if fd.read(8) != MAGIC:
         raise IntegrityError('Invalid segment header')
     offset = 8
     header = fd.read(self.header_fmt.size)
     while header:
         crc, size, tag = self.header_fmt.unpack(header)
         if size > MAX_OBJECT_SIZE:
             raise IntegrityError('Invalid segment object size')
         rest = fd.read(size - self.header_fmt.size)
         # The stored CRC covers everything after its own 4 bytes:
         # the remainder of the header plus the payload.
         if crc32(rest, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
             raise IntegrityError('Segment checksum mismatch')
         if tag not in (TAG_PUT, TAG_DELETE, TAG_COMMIT):
             raise IntegrityError('Invalid segment entry header')
         key = None
         if tag in (TAG_PUT, TAG_DELETE):
             # The first 32 bytes of the payload are the object key.
             key = rest[:32]
         if include_data:
             yield tag, key, offset, rest[32:]
         else:
             yield tag, key, offset
         offset += size
         header = fd.read(self.header_fmt.size)
Exemplo n.º 5
0
def crc32(*args):
    """
    .. function:: crc32(args) -> int

    Returns the CRC32 of args. Numbers are converted to text before hashing is
    performed.

    Examples:

    >>> sql("select crc32(65)")
    crc32(65)
    ----------
    2658551721

    >>> sql("select crc32(6,5)")
    crc32(6,5)
    ----------
    1565899724

    >>> sql("select crc32(5)")
    crc32(5)
    ----------
    2226203566

    >>> sql("select crc32('5')")
    crc32('5')
    ----------
    1201448970
    """

    if len(args) == 1:
        return zlib.crc32(repr(args[0])) & 0xFFFFFFFF
    else:
        return zlib.crc32(chr(30).join([repr(x) for x in args])) & 0xFFFFFFFF
Exemplo n.º 6
0
 def read_chunk(self):
     """
     Read a PNG chunk from the input file, return tag name and data.

     Raises ValueError on truncated input or a CRC mismatch.
     """
     # http://www.w3.org/TR/PNG/#5Chunk-layout
     try:
         data_bytes, tag = struct.unpack('!I4s', self.file.read(8))
     except struct.error:
         raise ValueError('Chunk too short for header')
     data = self.file.read(data_bytes)
     if len(data) != data_bytes:
         raise ValueError('Chunk %s too short for required %i data octets'
                          % (tag, data_bytes))
     checksum = self.file.read(4)
     if len(checksum) != 4:
         # Bug fix: the message was never formatted (comma instead of %).
         raise ValueError('Chunk %s too short for checksum' % tag)
     verify = zlib.crc32(tag)
     verify = zlib.crc32(data, verify)
     # Bug fix: mask to unsigned 32 bits and pack with '!I'.  On Python 3
     # crc32 returns an unsigned value, so '!i' raised struct.error for
     # any CRC >= 2**31 (e.g. the standard IEND chunk).
     verify &= 0xffffffff
     verify = struct.pack('!I', verify)
     if checksum != verify:
         # print repr(checksum)
         (a,) = struct.unpack('!I', checksum)
         (b,) = struct.unpack('!I', verify)
         raise ValueError("Checksum error in %s chunk: 0x%X != 0x%X"
                          % (tag, a, b))
     return tag, data
Exemplo n.º 7
0
 def _read(self, fd, fmt, header, segment, offset, acceptable_tags):
     """Parse and verify one segment entry whose header is already read.

     Shared by read() and iter_objects().  `fmt` selects the header
     layout (put_header_fmt includes the 32-byte key; header_fmt does
     not).  Returns (size, tag, key, data); key is None for entries
     without one.  Raises IntegrityError on any corruption, short read
     or unexpected tag.
     """
     try:
         hdr_tuple = fmt.unpack(header)
     except struct.error as err:
         raise IntegrityError('Invalid segment entry header [segment {}, offset {}]: {}'.format(
             segment, offset, err))
     if fmt is self.put_header_fmt:
         crc, size, tag, key = hdr_tuple
     elif fmt is self.header_fmt:
         crc, size, tag = hdr_tuple
         key = None
     else:
         raise TypeError("_read called with unsupported format")
     if size > MAX_OBJECT_SIZE or size < fmt.size:
         raise IntegrityError('Invalid segment entry size [segment {}, offset {}]'.format(
             segment, offset))
     length = size - fmt.size
     data = fd.read(length)
     if len(data) != length:
         raise IntegrityError('Segment entry data short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
             segment, offset, length, len(data)))
     # The stored CRC covers everything after its own 4 bytes:
     # the remainder of the header plus the payload.
     if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
         raise IntegrityError('Segment entry checksum mismatch [segment {}, offset {}]'.format(
             segment, offset))
     if tag not in acceptable_tags:
         raise IntegrityError('Invalid segment entry header, did not get acceptable tag [segment {}, offset {}]'.format(
             segment, offset))
     # With the short header format, the 32-byte key (when present)
     # is the start of the payload.
     if key is None and tag in (TAG_PUT, TAG_DELETE):
         key, data = data[:32], data[32:]
     return size, tag, key, data
    def test_service_creation_with_offset(self):
        """Info blocks for three services allocate block numbers starting at
        first_free=1000; each expected record is the packed header followed
        by its CRC32.

        NOTE(review): struct '64p' is packed with str names here, which is
        Python 2 behaviour — Python 3 would require bytes.
        """
        b = BlockBackend("/dev/null", "test-1")
        blocks = b.create_info_blocks({"test1": 300,
                                       "test2": 512,
                                       "test3": 1024*1024*50},
                                      first_free=1000)

        self.assertEqual(3, len(blocks))

        # Layout: signature, next-info-block, name, first data block,
        # extent length, then two zero fields.
        test1 = struct.pack("!4sQ64pQQQQ",
                            self.SIGNATURE,
                            1001, "test1",
                            1003, 1,
                            0, 0)
        test1crc = struct.pack("!L", zlib.crc32(test1) & 0xffffffff)
        test2 = struct.pack("!4sQ64pQQQQ",
                            self.SIGNATURE,
                            1002, "test2",
                            1004, 1,
                            0, 0)
        test2crc = struct.pack("!L", zlib.crc32(test2) & 0xffffffff)
        test3 = struct.pack("!4sQ64pQQQQ",
                            self.SIGNATURE,
                            0, "test3",
                            1005, 102400,
                            0, 0)
        test3crc = struct.pack("!L", zlib.crc32(test3) & 0xffffffff)

        expected = [
            test1 + test1crc,
            test2 + test2crc,
            test3 + test3crc
        ]

        self.assertEqual(expected, blocks)
    def test_get_services(self):
        """get_services() parses CRC-protected info blocks from a block
        device and returns ({name: [(start, length), ...]}, ..., ...).

        NOTE(review): struct '64p' is packed with str names here, which is
        Python 2 behaviour — Python 3 would require bytes.
        """
        raw1 = struct.pack("!4sQ64pQQQQQQ",
                           self.SIGNATURE,
                           1, "test",
                           1, 100,
                           102, 100,
                           0, 0)
        raw1crc = struct.pack("!L", zlib.crc32(raw1) & 0xffffffff)

        raw2 = struct.pack("!4sQ64pQQQQQQ",
                           self.SIGNATURE,
                           0, "test2",
                           2, 200,
                           202, 200,
                           0, 0)
        raw2crc = struct.pack("!L", zlib.crc32(raw2) & 0xffffffff)

        b = BlockBackend("/dev/null", "test-1")
        # Lay out the two info blocks at consecutive blocksize boundaries.
        blockdev = io.BytesIO()
        blockdev.write(raw1)
        blockdev.write(raw1crc)
        blockdev.seek(b.blocksize)
        blockdev.write(raw2)
        blockdev.write(raw2crc)
        blockdev.seek(0)
        expected = ({'test': [(1, 100), (102, 100)],
                    'test2': [(2, 200), (202, 200)]},
                    1,
                    402)
        services = b.get_services(blockdev)
        self.assertEqual(expected, services)
Exemplo n.º 10
0
 def _write_block(self, block):
     """Deflate `block` (at most 64 KiB) and write it as one BGZF block.

     Block layout: fixed gzip header with BC extra field, 2-byte BSIZE,
     the raw-deflate payload, then CRC32 and uncompressed length.
     """
     assert len(block) <= 65536
     # Negative window bits -> raw deflate, no gzip/zlib headers
     # (-15, as used by samtools).
     c = zlib.compressobj(self.compresslevel,
                          zlib.DEFLATED,
                          -15,
                          zlib.DEF_MEM_LEVEL,
                          0)
     compressed = c.compress(block) + c.flush()
     del c
     assert len(compressed) < 65536, "TODO - Didn't compress enough, try less data in this block"
     # Bug fix: the old code computed a signed/unsigned crc, packed it,
     # then unconditionally overwrote it with a py2-only 0xffffffffL
     # expression.  Masking to unsigned 32 bits works on py2 and py3.
     crc = struct.pack("<I", zlib.crc32(block) & 0xffffffff)
     bsize = struct.pack("<H", len(compressed)+25)  # includes -1
     uncompressed_length = struct.pack("<I", len(block))
     #Fixed 16 bytes,
     # gzip magic bytes (4) mod time (4),
     # gzip flag (1), os (1), extra length which is six (2),
     # sub field which is BC (2), sub field length of two (2),
     #Variable data,
     #2 bytes: block length as BC sub field (2)
     #X bytes: the data
     #8 bytes: crc (4), uncompressed data length (4)
     data = _bgzf_header + bsize + compressed + crc + uncompressed_length
     self._handle.write(data)
Exemplo n.º 11
0
def compress(body, compress_level):
    """Compress 'body' at the given compress_level."""
    import zlib

    # Emit an RFC 1952 gzip member piece by piece.
    # See http://www.gzip.org/zlib/rfc-gzip.html
    yield ntob("\x1f\x8b")  # ID1 and ID2: gzip marker
    yield ntob("\x08")  # CM: compression method
    yield ntob("\x00")  # FLG: none set
    # MTIME: 4 bytes
    yield struct.pack("<L", int(time.time()) & 0xFFFFFFFF)
    yield ntob("\x02")  # XFL: max compression, slowest algo
    yield ntob("\xff")  # OS: unknown

    checksum = zlib.crc32(ntob(""))
    total = 0
    deflater = zlib.compressobj(compress_level, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
    for piece in body:
        total += len(piece)
        checksum = zlib.crc32(piece, checksum)
        yield deflater.compress(piece)
    yield deflater.flush()

    # CRC32: 4 bytes
    yield struct.pack("<L", checksum & 0xFFFFFFFF)
    # ISIZE: 4 bytes
    yield struct.pack("<L", total & 0xFFFFFFFF)
Exemplo n.º 12
0
 def iter_objects(self, segment, include_data=False):
     """Iterate the entries of `segment`, yielding (tag, key, offset) or,
     with include_data=True, (tag, key, offset, data).

     Verifies the segment magic and each entry's CRC32; raises
     IntegrityError with the failing offset on any corruption.
     """
     fd = self.get_fd(segment)
     fd.seek(0)
     if fd.read(MAGIC_LEN) != MAGIC:
         raise IntegrityError('Invalid segment magic')
     offset = MAGIC_LEN
     header = fd.read(self.header_fmt.size)
     while header:
         try:
             crc, size, tag = self.header_fmt.unpack(header)
         except struct.error as err:
             raise IntegrityError('Invalid segment entry header [offset {}]: {}'.format(offset, err))
         if size > MAX_OBJECT_SIZE:
             raise IntegrityError('Invalid segment entry size [offset {}]'.format(offset))
         length = size - self.header_fmt.size
         rest = fd.read(length)
         if len(rest) != length:
             raise IntegrityError('Segment entry data short read [offset {}]: expected: {}, got {} bytes'.format(
                                  offset, length, len(rest)))
         # The stored CRC covers everything after its own 4 bytes:
         # the remainder of the header plus the payload.
         if crc32(rest, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
             raise IntegrityError('Segment entry checksum mismatch [offset {}]'.format(offset))
         if tag not in (TAG_PUT, TAG_DELETE, TAG_COMMIT):
             raise IntegrityError('Invalid segment entry tag [offset {}]'.format(offset))
         key = None
         if tag in (TAG_PUT, TAG_DELETE):
             # The first 32 bytes of the payload are the object key.
             key = rest[:32]
         if include_data:
             yield tag, key, offset, rest[32:]
         else:
             yield tag, key, offset
         offset += size
         header = fd.read(self.header_fmt.size)
Exemplo n.º 13
0
 def decrypt(cls, string, secret_key):
     """Decrypt a cookie payload and verify its trailing CRC32.

     Returns the plaintext without the 4-byte checksum, or b'' when
     the checksum does not match (tampered or wrong key).
     """
     plaintext = super(SecureEncryptedCookie, cls).decrypt(string, secret_key)
     payload = plaintext[:-4]
     stored = plaintext[-4:]
     # CRC is keyed by seeding with the secret so it can't be forged trivially.
     expected = zlib.crc32(payload, zlib.crc32(secret_key)) & 0xffffffff
     if stored != struct.pack('>I', expected):
         return b''
     return payload
Exemplo n.º 14
0
def fileobj_to_generator(fileobj, bufsize=8192, gzipped=False):
    """Yield the contents of `fileobj` in chunks of up to `bufsize` bytes.

    With gzipped=True the chunks are wrapped in a valid RFC 1952 gzip
    stream (header, deflate data, CRC32 + size trailer) instead of being
    passed through; in that mode the file object is NOT closed, matching
    the original behaviour.
    """
    assert hasattr(fileobj, 'read')
    if not gzipped:
        while 1:
            data = fileobj.read(bufsize)
            if not data:
                fileobj.close()
                break
            else:
                yield data
    else:
        compressobj = zlib.compressobj(zlib.Z_BEST_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
        # Bug fix: all literals below were py2 str (and 0xFFFFFFFFL is a
        # py3 syntax error); gzip framing must be bytes on Python 3.
        crc = zlib.crc32(b'')
        size = 0
        # gzip header: magic, deflate, no flags, zero mtime, XFL=2, OS=255.
        yield b'\x1f\x8b\x08\x00' b'\x00\x00\x00\x00' b'\x02\xff'
        while 1:
            data = fileobj.read(bufsize)
            if not data:
                break
            crc = zlib.crc32(data, crc)
            size += len(data)
            zdata = compressobj.compress(data)
            if zdata:
                yield zdata
        zdata = compressobj.flush()
        if zdata:
            yield zdata
        yield struct.pack('<LL', crc & 0xFFFFFFFF, size & 0xFFFFFFFF)
Exemplo n.º 15
0
    def _read(self, readsize):
        """Bug was here with bad EOF signal"""
        # Read one chunk from the underlying file and feed it through the
        # gzip member decompressor, appending output to self.buffer and
        # updating the running CRC/size.  Returns True only when no
        # decompressed bytes were produced (EOF reached).
        # NOTE(review): compares against "" and seeds crc32 with "" —
        # Python 2 str semantics; py3 would need b"".
        data = self.fileobj.read(readsize)
        is_eof = True
 
        while True:

                
            if data == "":
                # No more input: flush whatever the decompressor buffered.
                decompdata = self.decompobj.flush()
            else:
                decompdata = self.decompobj.decompress(data)
            decomplen = len(decompdata)
            self.buffer.append(decompdata)
            self.bufferlen += decomplen
            self.size += decomplen
            if decomplen:
                is_eof = False
            self.crcval = zlib.crc32(decompdata, self.crcval)
            if self.decompobj.unused_data:
                # A gzip member ended mid-chunk: validate its trailer and
                # reset state for the next concatenated member.
                data = self._read_eof()
                self.decompobj = zlib.decompressobj(-zlib.MAX_WBITS)
                self.crcval = zlib.crc32("")
                self.size = 0
                if data:
                    continue
            break
        return is_eof
Exemplo n.º 16
0
def decompress(input, type=None):
    """Decompress gzip, zlib or raw-deflate data and verify its checksums.

    type may be 'gzip', 'zlib', 'deflate' or None (auto-detected via
    guesstype).  Raises IOError on malformed headers or checksum failure.
    """
    if type == None: type = guesstype(input)
    if type == 'gzip':
        magic1, magic2, method, flags, mtime, xf, os = unpack('<BBBBIBB', input[:10])
        if magic1 != 0x1F or magic2 != 0x8B: raise IOError('Not a gzipped file')
        if method != 8: raise IOError('Unknown compression method')
        if flags & 0xE0: raise IOError('Unknown flags')
        off = unpack('<H', input[10:12])[0] + 12 if flags & FEXTRA else 10
        # Bug fix: these two lines read the undefined name 'flag'.
        if flags & FNAME:    off = input.index('\x00', off) + 1
        if flags & FCOMMENT: off = input.index('\x00', off) + 1
        if flags & FHCRC:
            if unpack('<H', input[off:off+2])[0] != (crc32(input[:off]) & 0xffff): raise IOError('Header corrupted')
            off += 2
        # Bug fix: the unpacked value shadowed the crc32() function, so the
        # verification below called an int.  Also mask our computed CRC so a
        # signed (py2) result still compares equal to the stored unsigned one.
        stored_crc, isize = unpack('<II', input[-8:])
        s = zdecompress(input[off:-8], -MAX_WBITS, isize)
        checksum = crc32(s) & 0xffffffff
        if stored_crc != checksum: raise IOError("CRC32 check failed %08x != %08x" % (stored_crc, checksum))
        # 0xffffffffL (py2 long literal) replaced with the py3-safe form.
        if isize != (len(s) & 0xffffffff): raise IOError("Incorrect length of data produced")
        return s
    elif type == 'zlib':
        header = unpack('>H', input[:2])[0]
        method = (header >>  8) & 0xF
        windowsize = ((header >> 12) & 0xF) + 8
        fdict  = (header & 0x20) != 0
        if method != 8 or windowsize > MAX_WBITS or fdict: raise IOError('Unknown compression method')
        if header % 31 != 0: raise IOError('Header corrupted')
        s = zdecompress(input[2:-4], -windowsize)
        a32 = unpack('>I', input[-4:])[0]
        checksum = adler32(s)
        if a32 != checksum: raise IOError("Adler32 check failed %08x != %08x" % (a32, checksum))
        return s
    elif type == 'deflate':
        return zdecompress(input)
    else:
        raise ValueError('Compression type must be one of deflate, zlib, gzip, or None')
Exemplo n.º 17
0
 def _write_box(self, name, data, length):
     if length > 0:
         self._writes.write(struct.pack('!L4s%dsl' % length,
             length, name, data,
             zlib.crc32(name+data)))
     else:
         self._writes.write(struct.pack('!L4sl', 0, name, zlib.crc32(name)))
Exemplo n.º 18
0
 def chunk(tag, data):
     """Assemble a PNG chunk as [length, tag, data, crc] byte pieces.

     The CRC covers tag + data, per the PNG chunk layout.
     """
     checksum = zlib.crc32(data, zlib.crc32(tag)) & 0xFFFFFFFF
     return [
         struct.pack("!I", len(data)),
         tag,
         data,
         struct.pack("!I", checksum),
     ]
Exemplo n.º 19
0
def compress(chunks, compress_level, close=True):
    """
    Compress 'chunks' at the given compress_level, where 'chunks' is an iterable
    over chunks of bytes.  If close=True, then look for .close() method on chunks
    and call that when done iterating.

    Bug fix: header/trailer pieces were py2 str literals; on Python 3 they
    must be bytes or b"".join() over the generator's output fails.
    """
    try:
        # RFC 1952 gzip member header (fixed 10 bytes).
        # See http://www.gzip.org/zlib/rfc-gzip.html
        yield b'\x1f\x8b'       # ID1 and ID2: gzip marker
        yield b'\x08'           # CM: compression method (deflate)
        yield b'\x00'           # FLG: none set
        # MTIME: 4 bytes
        yield struct.pack("<L", int(time.time()) & 0xFFFFFFFF)
        yield b'\x02'           # XFL: max compression, slowest algo
        yield b'\xff'           # OS: unknown

        crc = zlib.crc32(b"")
        size = 0
        zobj = zlib.compressobj(compress_level,
                                zlib.DEFLATED, -zlib.MAX_WBITS,
                                zlib.DEF_MEM_LEVEL, 0)
        for chunk in chunks:
            size += len(chunk)
            crc = zlib.crc32(chunk, crc)
            yield zobj.compress(chunk)
        yield zobj.flush()

        # CRC32: 4 bytes
        yield struct.pack("<L", crc & 0xFFFFFFFF)
        # ISIZE: 4 bytes
        yield struct.pack("<L", size & 0xFFFFFFFF)
    finally:
        if close and hasattr(chunks, 'close'):
            chunks.close()
Exemplo n.º 20
0
def calcsum(objeto, tipoobjeto, algoritmo):
    """Return the hex digest of `objeto` using the chosen algorithm.

    objeto: the data itself when tipoobjeto == 't', otherwise a filename
    to read in binary mode.
    algoritmo: 'crc32', 'sha256', 'sha1'; anything else falls back to md5.
    Returns uppercase hex for crc32, lowercase hexdigest otherwise.

    Bug fix: the original wrote `fh.close` without parentheses (never
    called) and never closed the file in the crc32 branch; both reads now
    use `with` so handles are always closed.
    """
    if algoritmo == 'crc32':
        valorhash = 0
        if tipoobjeto == 't':
            valorhash = zlib.crc32(objeto)
        else:
            # Stream the file line by line, chaining the running CRC.
            with open(objeto, 'rb') as fh:
                for linea in fh:
                    valorhash = zlib.crc32(linea, valorhash)
        # Mask to unsigned 32 bits and format as hex.
        return "%X" % (valorhash & 0xFFFFFFFF)

    if algoritmo == 'sha256':
        m = hashlib.sha256()
    elif algoritmo == 'sha1':
        m = hashlib.sha1()
    else:
        m = hashlib.md5()
    if tipoobjeto == 't':
        m.update(objeto)
    else:
        with open(objeto, 'rb') as fh:
            while True:
                data = fh.read(8192)  # 8 KiB blocks
                if not data:
                    break
                m.update(data)
    return m.hexdigest()
Exemplo n.º 21
0
Arquivo: pypng.py Projeto: arokem/Fos
 def read_chunk(self):
     """
     Read a PNG chunk from the input file, return tag name and data.

     Raises ValueError on truncated input or a CRC mismatch.
     """
     # http://www.w3.org/TR/PNG/#5Chunk-layout
     try:
         data_bytes, tag = struct.unpack('!I4s', self.file.read(8))
     except struct.error:
         raise ValueError('Chunk too short for header')
     data = self.file.read(data_bytes)
     if len(data) != data_bytes:
         raise ValueError('Chunk %s too short for required %i data octets'
                          % (tag, data_bytes))
     checksum = self.file.read(4)
     if len(checksum) != 4:
         # Bug fix: the message was never formatted (comma instead of %).
         raise ValueError('Chunk %s too short for checksum' % tag)
     verify = zlib.crc32(tag)
     verify = zlib.crc32(data, verify)
     # Whether the output from zlib.crc32 is signed or not varies
     # according to hideous implementation details, see
     # http://bugs.python.org/issue1202 .
     # We coerce it to be positive here (in a way which works on
     # Python 2.3 and older).
     verify &= 2**32 - 1
     verify = struct.pack('!I', verify)
     if checksum != verify:
         # print repr(checksum)
         (a,) = struct.unpack('!I', checksum)
         (b,) = struct.unpack('!I', verify)
         raise ValueError("Checksum error in %s chunk: 0x%X != 0x%X"
                          % (tag, a, b))
     return tag, data
Exemplo n.º 22
0
def fix_png(data):
    """
    Fix the signature and checksums on a fuzzed PNG image.
    """
    pieces = [b"\x89PNG\r\n\x1A\n"]
    remaining = bytes(data[8:])
    index = 0
    while len(remaining) >= 8:
        raw_len = remaining[:4]
        pieces.append(raw_len)
        (length,) = struct.unpack("!I", raw_len)
        # Force the first chunk's tag to IHDR so the image stays recognisable.
        name = b"IHDR" if index == 0 else remaining[4:8]
        pieces.append(name)
        remaining = remaining[8:]
        if len(remaining) < length:
            break
        body = remaining[:length]
        # Recompute the chunk CRC over tag + data.
        crc = zlib.crc32(name) & 0xFFFFFFFF
        crc = zlib.crc32(body, crc) & 0xFFFFFFFF
        pieces.append(body)
        pieces.append(struct.pack("!I", crc))
        remaining = remaining[length + 4:]  # skip the old crc
        index += 1
    pieces.append(remaining)
    return b"".join(pieces)
Exemplo n.º 23
0
def patch(orig_fname, diff_fname, dest_fname, crc_offset):
    """Apply a byte-level diff to a firmware image and fix its CRC32.

    Verifies the stored CRC at crc_offset, applies (offset, before, after)
    byte changes from the diff file, recomputes the CRC over the patched
    prefix, and writes the result to dest_fname.  Python 2 code: the file
    contents are handled as str and `print` is a statement.
    """
#    crcoffset = 0x044E80        # Iphone 3Gs - 5.0.1
    crcoffset = 0x0409A9 - 4    # Ipad   1   - 5.0.1
#    crcoffset = 0x042170 - 4    # Galaxy tab - ??

    # read original firmare.
    origdata = ''
    with open(orig_fname, "rb") as f:
        origdata = f.read()
        # The 4 bytes at crc_offset must equal the little-endian CRC32
        # of everything before them.
        if pack("<l", crc32(origdata[:crc_offset]))[:4] != origdata[crc_offset:crc_offset+4]:
            raise Exception("checksum mismatch!")
        print "checksum ok!"

    # read list of changes to do.
    
    changes = readDiff(diff_fname)
   
    # apply changes.
    newdata = origdata[:]
    for offset, before, after in changes:
        # Sanity-check that the byte we are about to replace matches the diff.
        if origdata[offset] != chr(before):
            raise Exception("data mismatch at %x expecting %x found %x" % (offset, before, origdata[offset]))
        newdata = newdata[:offset] + chr(after) + newdata[offset+1:]
    
    # fix checksum
    newchecksum = pack("<l", crc32(newdata[:crc_offset]))[:4]
    newdata = newdata[:crc_offset] + newchecksum + newdata[crc_offset+4:]
    
    # write new file
    with open(dest_fname, "wb") as f:
        f.write(newdata)
Exemplo n.º 24
0
def compress(body, compress_level):
    """Compress 'body' at the given compress_level.

    Yields the pieces of an RFC 1952 gzip stream: fixed header, deflate
    data, then the CRC32 and size trailers.  str chunks are encoded as
    UTF-8 before compression.
    """
    # Header: magic, deflate, no flags, mtime, XFL=2, OS=255.
    yield b"\037\213\010\0" \
        + struct.pack("<L", int(time.time())) \
        + b"\002\377"

    size = 0
    crc = zlib.crc32(b"")

    zobj = zlib.compressobj(
        compress_level,
        zlib.DEFLATED,
        -zlib.MAX_WBITS,
        zlib.DEF_MEM_LEVEL,
        0,
    )

    for chunk in body:
        if not isinstance(chunk, bytes):
            chunk = chunk.encode("utf-8")

        size += len(chunk)
        crc = zlib.crc32(chunk, crc)
        yield zobj.compress(chunk)

    # Bug fix: zlib.crc32 is unsigned on Python 3, so struct.pack("<l", crc)
    # raised struct.error whenever the CRC was >= 2**31.  Mask and pack
    # unsigned instead — the emitted bytes are identical.
    yield zobj.flush() \
        + struct.pack("<L", crc & 0xFFFFFFFF) \
        + struct.pack("<L", size & 0xFFFFFFFF)
Exemplo n.º 25
0
def crc32(data):
    """Return hex(crc32(data) & 0xffffffff) as a string.

    Masking makes Python 2's signed result match Python 3's unsigned one.
    When the module-level DEBUG flag is set, the input and checksum are
    printed for tracing.
    """
    # Bug fix: the original used Python 2 print statements (a syntax error
    # on Python 3) and computed the CRC twice; compute once and use
    # print() with %r so bytes input cannot break string concatenation.
    value = hex(zlib.crc32(data) & 0xffffffff)
    if DEBUG:
        print('++++++ CRC32 ++++++')
        print('input: %r' % (data,))
        print('crc32: ' + value)
        print('+++++++++++++++++++')
    return value
Exemplo n.º 26
0
def main(arg):
	# Brute-force the last byte of a binary so its CRC32 matches a target:
	# for every candidate value, recompute the CRC over the payload
	# (seeded with the CRC of the 4-byte tag) and print it.
	# Python 2 code (print statements); the input path is hardcoded.
	with open("C:/Users/user/Documents/Visual Studio 2013/Projects/pCTF2015/forensics/150/test_crc_binary_second_byte.bin", "rb") as fi:
		data = fi.read()
		b = bytearray(data)
		for x in range(0, 256):
			# data[0] = 0x00
			# Try each possible value for the final byte.
			b[len(data) - 1] = x
			d = binascii.a2b_hex(binascii.hexlify(b[4:0x20004]))
			# calculate tag crc
			tag_crc = binascii.crc32(b[0:4])

			# ctypes.c_ulong coerces the possibly-signed CRC to unsigned.
			item = ctypes.c_ulong(zlib.crc32(d, tag_crc))
			item2 = ctypes.c_ulong(zlib.crc32(d))
			# print "0x%s"%binascii.hexlify(d)
			final_crc = "%x" % item.value
			# if arg in final_crc:
			print "%s" % binascii.hexlify(d[len(d) - 4:len(d)])
			print "final crc is 0x%x" % (item.value)
				# print "final crc without tag is 0x%x"%(item2.value)
			# print "%s"%binascii.hexlify(d[0x1fffc:0x20000])
		# b.append(data)
		# print "0x%s"%binascii.hexlify(b)


		print "data length is 0x%x" % (len(data) - 4)
	pass
Exemplo n.º 27
0
def decode(filName):
    """Decode a custom CRC-protected binary log file into a dict of
    numpy arrays keyed by data-set name.

    File layout (as read below): a 1-byte set count, then count 5-byte
    set descriptors (1 format char + 4-char name), a 4-byte header CRC,
    followed by repeated [1-byte record count, records..., 4-byte CRC]
    groups.  CRCs are validated by checksumming data+CRC together and
    comparing against 0xffffffff; errors are reported but decoding
    continues.
    """
    # read file as binary
    filObj = open(filName, 'rb')
    filCon = filObj.read()
    filObj.close()
    
    # get file size to forecast output array
    statinfo = os.stat(filName)
    
    # process file header
    setWidth = struct.unpack('B', filCon[:1])
    setNames = []
    for ii in range(0, setWidth[0]):
        setNames.append(filCon[ii*5+1:ii*5+6])
    print("[CRC] of file header:", end="")
    # CRC over header bytes including the stored CRC yields the fixed
    # residue 0xffffffff when the data is intact.
    crcVal = crc32(filCon[0:setWidth[0]*5+1+4]) & 0xffffffff
    crcErrors = 0
    if ( crcVal == 0xffffffff):
        print("\tOK\t["+hex(crcVal)+"]")
    else:
        print("\tERROR\t["+hex(crcVal)+"]")
        crcErrors += 1
    offset = setWidth[0]*5+5
    
    # process data sets
    setCon = np.zeros(statinfo.st_size // 4)
    idx = 0
    fmtStr = ""
    setBytes = 0
    # Build the struct format string and record size from the first
    # character of each set descriptor (looked up in fmtChars).
    for setName in setNames:
        fmtStr += chr(setName[0])
        setBytes += fmtChars[chr(setName[0])]
    while(offset < len(filCon)):
        setNumber = struct.unpack('B', filCon[offset:offset+1])
        offset += 1
        for ii in range(setNumber[0]):
            setCon[idx:idx+setWidth[0]] = np.array(struct.unpack(fmtStr, filCon[offset:setBytes+offset]))
            offset += setBytes
            idx += setWidth[0]
        # CRC over the whole group (count byte + records + stored CRC).
        crcVal = crc32(filCon[offset-setBytes*setNumber[0]-1:offset+4]) & 0xffffffff
        print("[CRC] of data set:", end="")
        if ( crcVal == 0xffffffff):
            print("\tOK\t["+hex(crcVal)+"]")
        else:
            print("\tERROR\t["+hex(crcVal)+"]")
            crcErrors += 1
        offset += 4
    if (not crcErrors):
        print("[CRC] no errors occurred:\tOK")
    else:
        print("[CRC] {0} errors occurred:\tERROR".format(crcErrors))
    
    # remove not required elements and reshape as matrix
    setCon = np.reshape(setCon[0:idx], (setWidth[0], idx//setWidth[0]), 'f')
    
    # create output dictionary
    output = {}
    for ii in range(setWidth[0]):
        output[setNames[ii][1:].decode("utf-8").strip()] = setCon[ii]
    return output
Exemplo n.º 28
0
def get_crc32(data):
    '''Returns the crc32 value of the input string. '''
    if PY_VER_2:
        # Python 2's crc32 accepts str directly.
        return crc32(data)
    # Python 3 requires bytes, so encode first.
    encoded = bytes(data, encoding='UTF-8')
    return crc32(encoded)
Exemplo n.º 29
0
def patch(orig_fname, diff_fname, dest_fname, crc_offset):
    """Apply a byte-level diff to a firmware image and fix its CRC32.

    Verifies that the 4 bytes at crc_offset match the CRC32 of the data
    before them, applies (offset, before, after) byte changes from the
    diff file, recomputes the checksum, and writes the patched image to
    dest_fname.  Python 2 code: file contents handled as str, `print`
    statement.
    """
    # Read original firmare.
    origdata = ''
    with open(orig_fname, "rb") as f:
        origdata = f.read()
        # Little-endian signed CRC32 of everything before crc_offset.
        checksum = pack("<l", crc32(origdata[:crc_offset]))[:4]
        if checksum != origdata[crc_offset:crc_offset+4]:
            raise Exception("Checksum mismatch!")
        print "Checksum ok!"
    
    # Read list of changes to do.
    changes = readDiff(diff_fname)
   
    # Apply changes.
    newdata = origdata[:]
    for offset, before, after in changes:
        # Sanity-check the byte being replaced against the diff's 'before'.
        if origdata[offset] != chr(before):
            byte = origdata[offset]
            msg = "Data mismatch at %X expecting %X found %X" % (offset,
                                                                 before,
                                                                 byte)
            raise Exception(msg)
        newdata = newdata[:offset] + chr(after) + newdata[offset+1:]
    
    # Fix checksum
    newchecksum = pack("<l", crc32(newdata[:crc_offset]))[:4]
    newdata = newdata[:crc_offset] + newchecksum + newdata[crc_offset+4:]
    
    # Write new file
    with open(dest_fname, "wb") as f:
        f.write(newdata)
Exemplo n.º 30
0
 def pack(self):
     """Serialise this packet: fixed header, 16-bit CRC, then the content."""
     header = struct.pack('>BIIIIIH', self.flag, self.window, self.seq,
                          self.ack, self.sndtime, self.acktime,
                          len(self.content))
     checksum = zlib.crc32(header)
     if len(self.content) > 0:
         checksum = zlib.crc32(self.content, checksum)
     # Only the low 16 bits of the CRC are transmitted.
     trailer = struct.pack('>H', checksum & 0xffff)
     return header + trailer + self.content
Exemplo n.º 31
0
def get_category_info_version():
    # Derive a stable version number from the CRC32 of the category-info
    # string, masked to a non-negative value.
    # NOTE(review): the mask 0x7ffffff is 27 bits, not 31 — possibly a typo
    # for 0x7fffffff; confirm with callers before changing, since existing
    # stored versions depend on the current range.
    return zlib.crc32(get_category_info_string().encode('utf-8')) & 0x7ffffff
Exemplo n.º 32
0
 def delServer(self,ip,port):
     """Remove the server registered under ip+port from self.servers.

     The dictionary key is the crc32 of the concatenated ip and port
     strings (Python 2 semantics: crc32 of a str, possibly negative),
     matching how entries were inserted.  Raises KeyError if absent.
     """
     # Bug fix: the local variable previously shadowed the builtin `str`.
     key = ip + port
     index = zlib.crc32(key)
     del self.servers[index]
Exemplo n.º 33
0
# Repack a microcode image (loaded earlier as `ucode`/`ucode_size`) into a
# fixed-size blob + jump table, then write it out with a CRC-protected
# header.  Python 2 script: print statements and integer `/` division.
# Split at a 4 KiB boundary: everything below is the blob, the remainder
# (size & 0xfff) is the jump table.
ucode_blob_sz = ucode_size & ~0xfff
ucode_jt_sz = ucode_size & 0xfff
ucode_blob = ucode[:ucode_blob_sz]
ucode_jt = ucode[ucode_blob_sz:]

print "have: 0x%x / 0x%x / 0x%x (total, blob, jt)" % (
    ucode_size, ucode_blob_sz, ucode_jt_sz)

# Pad the blob with zero bytes or truncate it to the wanted size.
if ucode_blob_sz < want_blob_sz:
    ucode_blob += "\x00" * (want_blob_sz - ucode_blob_sz)
elif ucode_blob_sz > want_blob_sz:
    ucode_blob = ucode_blob[:want_blob_sz]

# Pad the jump table by repeating its last 4-byte entry, or truncate.
if ucode_jt_sz < want_jt_sz:
    ucode_jt += ucode_jt[-4:] * ((want_jt_sz - ucode_jt_sz) / 4)
elif ucode_jt_sz > want_jt_sz:
    ucode_jt = ucode_jt[:want_jt_sz]

# Sub-header: feature version plus blob/jt sizes in 32-bit words,
# padded to 224 bytes.
sub_hdr = struct.pack("<III212x", ucode_feature_version, want_blob_sz / 4,
                      want_jt_sz / 4)
payload = sub_hdr + ucode_blob + ucode_jt
assert len(payload) == want_size + 0xe0
# CRC covers the whole payload (sub-header + blob + jt).
crc = zlib.crc32(payload)

hdr = struct.pack("<I16sIII",
                  len(payload) + 0x20, p1, want_size, 0x100, crc & 0xffffffff)

with open(sys.argv[3], "wb") as fd:
    fd.write(hdr)
    fd.write(payload)
Exemplo n.º 34
0
def advisory_lock(lock_id, shared=False, wait=True, using=None):
    """Acquire a PostgreSQL advisory lock, yield, then release it.

    Generator intended to be used as a context manager (presumably wrapped
    with ``contextmanager`` at the decoration site — not visible here).
    Yields True once the lock is held, or the ``pg_try_...`` result when
    ``wait`` is False; the lock is released in the ``finally`` clause.

    :param lock_id: an int, a two-int tuple/list, or a string (hashed to a
        signed 32-bit int via crc32).
    :param shared: use the ``_shared`` lock/unlock variants.
    :param wait: block until acquired; otherwise use ``pg_try_advisory_lock``.
    :param using: Django database alias (defaults to DEFAULT_DB_ALIAS).
    """

    import six
    # NOTE(review): ``transaction`` appears unused in this function.
    from django.db import DEFAULT_DB_ALIAS, connections, transaction

    if using is None:
        using = DEFAULT_DB_ALIAS

    # Assemble the function name based on the options.

    function_name = 'pg_'

    if not wait:
        function_name += 'try_'

    function_name += 'advisory_lock'

    if shared:
        function_name += '_shared'

    release_function_name = 'pg_advisory_unlock'
    if shared:
        release_function_name += '_shared'

    # Format up the parameters.

    tuple_format = False

    if isinstance(lock_id, (
            list,
            tuple,
    )):
        if len(lock_id) != 2:
            raise ValueError(
                "Tuples and lists as lock IDs must have exactly two entries.")

        if not isinstance(lock_id[0], six.integer_types) or not isinstance(
                lock_id[1], six.integer_types):
            raise ValueError(
                "Both members of a tuple/list lock ID must be integers")

        tuple_format = True
    elif isinstance(lock_id, six.string_types):
        # Generates an id within postgres integer range (-2^31 to 2^31 - 1).
        # crc32 generates an unsigned integer in Py3, we convert it into
        # a signed integer using 2's complement (this is a noop in Py2)
        pos = crc32(lock_id.encode("utf-8"))
        lock_id = (2**31 - 1) & pos
        if pos & 2**31:
            lock_id -= 2**31
    elif not isinstance(lock_id, six.integer_types):
        raise ValueError("Cannot use %s as a lock id" % lock_id)

    if tuple_format:
        base = "SELECT %s(%d, %d)"
        params = (
            lock_id[0],
            lock_id[1],
        )
    else:
        base = "SELECT %s(%d)"
        params = (lock_id, )

    acquire_params = (function_name, ) + params

    # %-interpolation is safe here: the ids were validated as integers above.
    command = base % acquire_params
    cursor = connections[using].cursor()

    cursor.execute(command)

    if not wait:
        # pg_try_... returns a boolean row indicating whether we got the lock.
        acquired = cursor.fetchone()[0]
    else:
        acquired = True

    try:
        yield acquired
    finally:
        if acquired:
            release_params = (release_function_name, ) + params

            command = base % release_params
            cursor.execute(command)

        cursor.close()
Exemplo n.º 35
0
            print(aux)
            if (aux):
                col += 1
                print('\nCONTRASEÑAS ROTAS HASTA AHORA = %d' % col)
                cr += aux
            else:
                h = p
                for i in range(pf):
                    if (i % 2 == 0):
                        r = str(h % 1000000)

                    else:
                        r = r2(h)

                    h = crc32(bytes(r, 'ascii'))
                    if ((h in hashes) and (h not in cr)):
                        print('\nContraseña rota al %d intento en el bucle 1' %
                              (i + 1))
                        col += 1
                        aux = [v for v in hashes if v == h]
                        cr += aux
                        print('\nCONTRASEÑAS ROTAS HASTA AHORA = %d' % col)
                        break
                if (not aux):
                    h = p
                    for i in range(pf):
                        if (i % 2 == 0):
                            r = r2(h)

                        else:
Exemplo n.º 36
0
def simple_hash(s):
    """Stable unsigned 32-bit CRC of the UTF-8 encoding of *s*.

    The mask keeps the result unsigned and identical on Python 2 and 3.
    """
    return 0xffffffff & zlib.crc32(s.encode())
Exemplo n.º 37
0
 def process(self, rec_data, result):
     """Store the unsigned CRC32 of the base field's value into this field."""
     base_value = bytes(result[self.mBaseName], 'utf-8')
     # % (1 << 32) keeps the value in unsigned 32-bit range.
     result[self.mName] = crc32(base_value) % (1 << 32)
Exemplo n.º 38
0
import zlib


# TODO(davedash): liberate this
def manual_order(qs, pks, pk_name='id'):
    """
    Given a query set and a list of primary keys, return a set of objects from
    the query set in that exact order.
    """

    if not pks:
        return qs.none()

    # MySQL's FIELD() ranks each row by the position of its pk within ``pks``.
    field_expr = 'FIELD(%s, %s)' % (pk_name, ','.join(map(str, pks)))
    return qs.filter(id__in=pks).extra(select={'_manual': field_expr},
                                       order_by=['_manual'])


crc32 = lambda x: zlib.crc32(x) & 0xffffffff
Exemplo n.º 39
0
 def file_checksum(self, file_path):
     """Return the CRC32 checksum of the file at *file_path*."""
     with open(file_path, "rb") as handle:
         contents = handle.read()
     return zlib.crc32(contents)
Exemplo n.º 40
0
 def _get_server(self, key):
     # Naive, low-performance implementation
     assert self._ips, "_find_server should not have been called with self._ips == []"
     return self._ips[zlib.crc32(key) % len(self._ips)]
Exemplo n.º 41
0
 def png_pack(png_tag, data):
     """Build one PNG chunk: length, tag + data, and masked CRC32 trailer."""
     body = png_tag + data
     length = struct.pack("!I", len(data))
     checksum = struct.pack("!I", zlib.crc32(body) & 0xFFFFFFFF)
     return length + body + checksum
Exemplo n.º 42
0
def _ranged_scan_and_read(infile, start, end, buffer=None):
    """Scan the byte range [start, end) of *infile* for bgzip (BGZF) blocks
    and yield each block's decompressed payload.

    The scan resynchronizes by sliding one byte at a time until a plausible
    block header is found, then reads, inflates and CRC-checks the block.

    :param infile: seekable binary file object.
    :param start: offset at which to begin scanning; must be < *end*.
    :param end: offset past which no new block is started.
    :param buffer: optional deque of header bytes carried over from a
        previous scan; a fresh deque is created when omitted.  (Fix: the
        previous mutable default ``collections.deque()`` was shared between
        calls, so stale bytes from one scan could leak into the next.)
    """
    assert start < end, "must span at least one byte!"

    # A mutable default would be shared across calls; create one per call.
    if buffer is None:
        buffer = collections.deque()

    infile.seek(start, os.SEEK_SET)

    # read the bgzip header
    # see https://samtools.github.io/hts-specs/SAMv1.pdf
    pattern = "<BBBBIBBHBBHH"
    patternsize = struct.calcsize(pattern)
    got_content = False
    # any carried-over buffer logically sits just before ``start``
    block_start = start - len(buffer)
    while block_start < end:
        block_start_next = block_start

        # top up the buffer until it can hold one candidate header
        if len(buffer) < patternsize:
            bytesread = infile.read(patternsize - len(buffer))
            buffer.extend(bytesread)
        if len(buffer) < patternsize:
            logger.warning(f"Unable to read up to {patternsize} at {block_start}")
            break

        headerbuffer = bytes((buffer[i] for i in range(patternsize)))

        header = struct.unpack(pattern, headerbuffer)
        logger.debug(
            f"Header at {block_start} is {header} or in raw bytes {headerbuffer}"
        )

        if check_bytes_header(header):
            logger.debug(f"Matched header at {block_start} {header}")
            # this is a valid location for a block
            # header[11] is presumably the BGZF BSIZE field; subtracting the
            # XLEN (header[7]) and 19 fixed bytes leaves the CDATA length —
            # TODO confirm against check_bytes_header's layout.
            blocksize = header[11] - header[7] - 19
            logger.debug(f"Block at {block_start} has cdata size = {blocksize}")
            cdata = infile.read(blocksize)
            assert (
                len(cdata) == blocksize
            ), f"Unable to read up to {blocksize} of cdata at {block_start}"
            buffer.clear()

            # now do the actual decompression
            decompressor = zlib.decompressobj(
                wbits=-15
            )  # we've alread read the header, so ignore it
            decompressed = decompressor.decompress(cdata)
            assert (
                not decompressor.unconsumed_tail
            ), f"unconsumed tail of {len(decompressor.unconsumed_tail)} at {block_start}"
            assert (
                not decompressor.unused_data
            ), f"unused data present of {len(decompressor.unused_data)} at {block_start}"

            # read isize and crc check (gzip trailer: CRC32 then ISIZE)
            tailpattern = "<II"
            tailpatternsize = struct.calcsize(tailpattern)
            tailbytes = infile.read(tailpatternsize)
            if len(tailbytes) != tailpatternsize:
                raise ValueError(
                    f"Unable to read {tailpatternsize} bytes for tail at {block_start}"
                )
            tail_crc, tail_isize = struct.unpack(tailpattern, tailbytes)
            # check decompressed size is expected
            assert len(decompressed) == tail_isize
            # check crc check is expected
            assert zlib.crc32(decompressed) == tail_crc

            # last block has no compressed content
            if decompressed:
                got_content = True
                yield decompressed
            else:
                logger.warning(f"Found empty block at {block_start}")
                break

            logger.debug(f"Read block from {block_start} to {infile.tell()}")

            block_start_next += patternsize + blocksize + tailpatternsize
        else:
            # move ahead a byte
            buffer.popleft()
            block_start_next += 1

        block_start = block_start_next

    logger.debug(f"End of scan from {start} to {end} at {block_start}-{infile.tell()}")

    if not got_content:
        logger.warning(f"Got to end of {start}-{end} without finding a block")
Exemplo n.º 43
0
def query_performance_express_report(code, start_date=None, end_date=None):
    """Query a company's performance express report (公司业绩快报).

    @param code: security code, required, e.g. "sh.600000".
    @param start_date: start date, default 2015-01-01; the report's publish
        or update date must fall within [start_date, end_date].
    @param end_date: end date, default the current system date.
    @return: a ResultData carrying either the rows or an error code/message.
    """

    data = rs.ResultData()
    # Validate the code argument (error strings are user-facing Chinese text).
    if code is None or code == "":
        print("股票代码不能为空,请检查。")
        data.error_msg = "股票代码不能为空,请检查。"
        data.error_code = cons.BSERR_PARAM_ERR
        return data
    if len(code) != cons.STOCK_CODE_LENGTH:
        print("股票代码应为" + str(cons.STOCK_CODE_LENGTH) + "位,请检查。格式示例:sh.600000。")
        data.error_msg = "股票代码应为" + str(
            cons.STOCK_CODE_LENGTH) + "位,请检查。格式示例:sh.600000。"
        data.error_code = cons.BSERR_PARAM_ERR
        return data
    # Normalize "600000.sh"-style input into the "sh.600000" form.
    code = code.lower()
    if (code.endswith("sh") or code.endswith("sz")):
        code = code[7:9].lower() + "." + code[0:6]

    if start_date is None or start_date == "":
        start_date = cons.DEFAULT_START_DATE
    if end_date is None or end_date == "":
        end_date = time.strftime("%Y-%m-%d", time.localtime())

    # Validate the date range when both endpoints are present.
    if start_date != "" and start_date is not None and end_date != "" and end_date is not None:
        if strUtil.is_valid_date(start_date) and strUtil.is_valid_date(
                end_date):
            start_date_time = datetime.datetime.strptime(
                start_date, '%Y-%m-%d')
            end_date_time = datetime.datetime.strptime(end_date, '%Y-%m-%d')
            if end_date_time < start_date_time:
                print("起始日期大于终止日期,请修改。")
                data.error_code = cons.BSERR_START_BIGTHAN_END
                data.error_msg = "起始日期大于终止日期,请修改。"
                return data
        else:
            print("日期格式不正确,请修改。")
            # NOTE(review): returns None here instead of a ResultData.
            return

    # A prior login must have set conx.user_id.
    user_id = ""
    try:
        user_id = getattr(conx, "user_id")
    except Exception:
        print("you don't login.")
        data.error_code = cons.BSERR_NO_LOGIN
        data.error_msg = "you don't login."
        return data

    # Request body: method name, user, page number, page size, then arguments.
    param = "%s,%s,%s,%s,%s,%s,%s" % (
        "query_performance_express_report", user_id, "1",
        cons.BAOSTOCK_PER_PAGE_COUNT, code, start_date, end_date)

    msg_body = strUtil.organize_msg_body(param)
    msg_header = msgheader.to_message_header(
        cons.MESSAGE_TYPE_QUERYPERFORMANCEEXPRESSREPORT_REQUEST, len(msg_body))

    data.msg_type = cons.MESSAGE_TYPE_QUERYPERFORMANCEEXPRESSREPORT_REQUEST
    data.msg_body = msg_body

    # Append a CRC32 of the outgoing message for integrity checking.
    head_body = msg_header + msg_body
    crc32str = zlib.crc32(bytes(head_body, encoding='utf-8'))
    receive_data = sock.send_msg(head_body + cons.MESSAGE_SPLIT +
                                 str(crc32str))

    if receive_data is None or receive_data.strip() == "":
        data.error_code = cons.BSERR_RECVSOCK_FAIL
        data.error_msg = "网络接收错误。"
        return data

    # Split the response into header fields and body fields.
    msg_header = receive_data[0:cons.MESSAGE_HEADER_LENGTH]
    msg_body = receive_data[cons.MESSAGE_HEADER_LENGTH:-1]
    header_arr = msg_header.split(cons.MESSAGE_SPLIT)
    body_arr = msg_body.split(cons.MESSAGE_SPLIT)
    data.msg_body_length = header_arr[2]
    data.error_code = body_arr[0]
    data.error_msg = body_arr[1]

    if cons.BSERR_SUCCESS == data.error_code:
        data.method = body_arr[2]
        data.user_id = body_arr[3]
        data.cur_page_num = body_arr[4]
        data.per_page_count = body_arr[5]
        data.setData(body_arr[6])
        data.code = body_arr[7]
        data.start_date = body_arr[8]
        data.end_date = body_arr[9]
        data.setFields(body_arr[10])

    return data
Exemplo n.º 44
0
 def CreateHashValue(self, hashString):
     """Hash *hashString* to its CRC32 (already unsigned on Python 3)."""
     encoded = hashString.encode()
     return zlib.crc32(encoded)  # % (1<<32)
Exemplo n.º 45
0
 def prepared(self):
     """Decode the stitched base64 payload and verify it against self.crc.

     Returns (decoded_bytes, crc_matches).
     """
     decoded = base64.b64decode(self.stitched())
     checksum = zlib.crc32(decoded, 0)
     actual_crc = "%x"%(checksum & 0xFFFFFFFF)
     return decoded, bool(self.crc == actual_crc)
Exemplo n.º 46
0
def getCRC32(fileName):
    """Return the CRC32 of the file at *fileName* as uppercase hex (no padding).

    Fix: the original left the file handle open; ``with`` closes it even on
    error.  The CRC is chained line by line, which equals the CRC of the
    whole contents.
    """
    checksum = 0
    with open(fileName, "rb") as handle:
        for chunk in handle:
            checksum = zlib.crc32(chunk, checksum)
    return "%X"%(checksum & 0xFFFFFFFF)
Exemplo n.º 47
0
 def _init_read(self):
     self._crc = zlib.crc32('')
     self._stream_size = 0
def main():
    """Interactive duplicate-file finder.

    Reads a folder from argv[1] (or the clipboard), groups files by size,
    optionally confirms duplicates by CRC32 of a prefix or of the whole
    file, then offers to delete the confirmed duplicates.
    """
    def file_rel(fullpath: str) -> str:
        ''' Returns relative path '''
        return os.path.relpath(fullpath, FOLDER)

    # Get path from command-line or clipboard:
    try:
        FOLDER = argv[1]
    except IndexError:
        # No CLI argument: fall back to the clipboard contents.
        FOLDER = paste()
    except:
        raise  # NOTE(review): bare except that only re-raises is a no-op

    # Check if folder looks like path:
    # (expects a Windows drive path such as "C:\...")
    if FOLDER.find(':\\') != 1:
        input(f'Wrong folder:\r\n\r\n{FOLDER}\n\nPress Enter to exit')
        exit()

    print(f'Searching for duplicates in {FOLDER}\n')

    alg = int(
        input('Algorythm:\n' + '0 - Cancel\n' + '1 - By size\n' +
              f'2 - By hash of first {HASH_SIZE_PERCENT} percent of file\n' +
              '3 - By hash of full file\n\n' + 'Choice: '))

    if alg == 0: return

    dups = []        # files whose size matched an earlier file
    dups_final = []  # confirmed duplicates
    files = {}       # first file seen for each unique size
    hashes = []      # CRC32 values seen so far

    pathlist = Path(FOLDER).rglob('*')

    widgets = ['Processed: ', Counter(), ' files (', Timer(), ')']
    pbar = ProgressBar(widgets=widgets)
    for fi in pbar(pathlist):
        # Skip folders:
        if fi.is_file():
            filesize = fi.stat().st_size
            if filesize in files:
                # print(file_rel(str(fi)))
                # Collect all files with known size:
                dups.append(str(fi))
            else:
                # Collect unique file size and file:
                files.update({filesize: str(fi)})

    # Now we have list and dictionary:
    # {files} - files with unique size
    # [dups] - files with duplicate sizes
    print('\nDuplicates by size:')
    print(*list(map(file_rel, dups)), sep='\n')
    print(f'Total: {len(dups)}')

    if alg == 1:
        # We already have duplicates
        dups_final = dups
    elif alg == 2:
        print('Now compare hashes')
        # First HASH_SIZE_PERCENT of files
        widgets = ['Processed: ', Counter(), ' files (', Timer(), ')']
        pbar = ProgressBar(widgets=widgets)
        for fi in pbar(dups):
            file_size = os.stat(fi).st_size
            read_limit = int(file_size / 100 * HASH_SIZE_PERCENT)
            with open(fi, 'rb') as f:
                hash_dup = crc32(f.read(read_limit)) & 0xFFFFFFFF
            if hash_dup in hashes:
                dups_final.append(fi)
            else:
                # Calculate hash of file with same size from {files}
                # NOTE(review): hash_prev may be appended repeatedly when the
                # same size key recurs — harmless for membership, but O(n).
                with open(files[file_size], 'rb') as f:
                    hash_prev = crc32(f.read(read_limit)) & 0xFFFFFFFF
                hashes.append(hash_prev)
                if hash_dup == hash_prev:
                    # It's the same file:
                    dups_final.append(fi)
                else:
                    # Same size but different file:
                    hashes.append(hash_dup)
    elif alg == 3:
        # By hash of full file:
        widgets = ['Processed: ', Counter(), ' files (', Timer(), ')']
        pbar = ProgressBar(widgets=widgets)
        for fi in pbar(dups):
            file_size = os.stat(fi).st_size
            with open(fi, 'rb') as f:
                hash_dup = crc32(f.read()) & 0xFFFFFFFF
            if hash_dup in hashes:
                dups_final.append(fi)
            else:
                # Calculate hash of file with same size from {files}
                with open(files[file_size], 'rb') as f:
                    hash_prev = crc32(f.read()) & 0xFFFFFFFF
                hashes.append(hash_prev)
                if hash_dup == hash_prev:
                    # It's the same file:
                    dups_final.append(fi)
                else:
                    # Same size but different file:
                    hashes.append(hash_dup)
    else:
        print('WTF?')

    # Final list of duplicates:
    print('\nFinal list of duplicates:')
    print(*list(map(file_rel, dups_final)), sep='\n')
    print(f'Final total: {len(dups_final)}')
    # Windows message box: 64 = MB_ICONINFORMATION, 4096 = MB_SYSTEMMODAL.
    MessageBox(None, 'Search for duplicates is complete', MSG_TITLE, 64 + 4096)

    if len(dups_final) > 0:
        if input('\nDelete duplicates? (Y/n): ') == 'Y':
            for dup in dups_final:
                try:
                    os.remove(dup)
                except:
                    print(f'Can\'t delete file: {dup}')
                    raise
        else:
            exit()
Exemplo n.º 49
0
from zlib import crc32
import numpy as np

if __name__ == '__main__':
    # Deterministically assign two homework tasks from the e-mail user name.
    mail = input('enter phystech e-mail\n')
    username = mail.strip().split('@')[0]
    digest = crc32(username.encode('utf-8')) + 127
    # RandomState only accepts seeds below 2**32; reduce the offset hash first.
    rng = np.random.RandomState(digest % (2**32 - 1))
    first_task = rng.randint(low=1, high=5)
    second_task = rng.randint(low=1, high=5)
    print('Your tasks are 1.{}, 2.{}'.format(first_task, second_task))
Exemplo n.º 50
0
 def _add_read_data(self, data):
     self._crc = zlib.crc32(data, self._crc)
     self._stream_size = self._stream_size + len(data)
Exemplo n.º 51
0
#!/usr/bin/env python
# Demo program for zlib; it compresses or decompresses files, but *doesn't*
# delete the original.  This doesn't support all of gzip's options.
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16

def write32(output, value):
    """Write *value* to *output* as four little-endian bytes (one chr each).

    Fix: ``value / 256`` produces a float on Python 3, breaking the
    subsequent ``& 255``; floor division ``//`` keeps an int and matches the
    old Python 2 integer ``/`` exactly.
    """
    output.write(chr(value & 255))
    value = value // 256
    output.write(chr(value & 255))
    value = value // 256
    output.write(chr(value & 255))
    value = value // 256
    output.write(chr(value & 255))

def read32(input):
    """Read four bytes from *input* and return them as a little-endian int."""
    total = 0
    for shift in (0, 8, 16, 24):
        total += ord(input.read(1)) << shift
    return total

import zlib, sys
if len(sys.argv) != 2:
    print 'Usage: minigzip.py <filename>'
    print '  The file will be compressed or decompressed.'
    sys.exit(0)
filename = sys.argv[1]
compressing = 1
outputname = filename + '.gz'
if filename[-3:] == '.gz':
Exemplo n.º 52
0
# Not meaningful but does what is expected.
# Data compression demo (REPL-style: bare expressions show their values).
import zlib
# gzip lzma bz2 tarfile
s = b'witch which has which witches wrist watch'

len(s)  # original length
# Now compress

compressed = zlib.compress(s)
len(compressed)  # compressed length (shorter than the original)

zlib.decompress(compressed)  # round-trips back to s

# Perform a cyclic redundancy check. crc32
zlib.crc32(s)

# Compute speed

from timeit import Timer

Timer('t=a; a=b; b=t', 'a=1; b=2').timeit()

# use pstats to find time critical sections.

# Quality Control.
# Write tests, test them with doctest


def avg(*values):
    '''Returns the mean of a list of values
Exemplo n.º 53
0
    def _monitor_recv(self):
        """Receive coroutine: align on XGMII start codes each clock edge,
        accumulate frame bytes, strip the preamble/SFD, verify the trailing
        CRC32, and pass the payload (scapy-decoded when available) to
        self._recv."""
        clk = RisingEdge(self.clock)
        self._pkt = ""

        while True:
            yield clk
            # NOTE(review): ``bytes`` shadows the builtin throughout this loop.
            ctrl, bytes = self._get_bytes()

            # Frame start flagged in lane 0.
            if ctrl[0] and bytes[0] == _XGMII_START:

                ctrl, bytes = ctrl[1:], bytes[1:]

                while self._add_payload(ctrl, bytes):
                    yield clk
                    ctrl, bytes = self._get_bytes()

            # On a 64-bit interface a frame may also start in lane 4.
            elif self.bytes == 8:
                if ctrl[4] and bytes[4] == _XGMII_START:

                    ctrl, bytes = ctrl[5:], bytes[5:]

                    while self._add_payload(ctrl, bytes):
                        yield clk
                        ctrl, bytes = self._get_bytes()

            if self._pkt:

                self.log.debug("Received:\n%s" % (hexdump(self._pkt)))

                # Minimum Ethernet frame is 64 bytes plus the 7 preamble bytes.
                if len(self._pkt) < 64 + 7:
                    self.log.error("Received a runt frame!")
                if len(self._pkt) < 12:
                    self.log.error("No data to extract")
                    self._pkt = ""
                    continue

                # Layout: 7-byte preamble/SFD, payload, 4-byte CRC trailer.
                preamble_sfd = self._pkt[0:7]
                crc32 = self._pkt[-4:]
                payload = self._pkt[7:-4]

                if preamble_sfd != _PREAMBLE_SFD:
                    self.log.error("Got a frame with unknown preamble/SFD")
                    self.log.error(hexdump(preamble_sfd))
                    self._pkt = ""
                    continue

                # Expected FCS: little-endian unsigned CRC32 of the payload.
                expected_crc = struct.pack("<I",
                                           (zlib.crc32(payload) & 0xFFFFFFFF))

                if crc32 != expected_crc:
                    self.log.error("Incorrect CRC on received packet")
                    self.log.info("Expected: %s" % (hexdump(expected_crc)))
                    self.log.info("Received: %s" % (hexdump(crc32)))

                # Use scapy to decode the packet
                if _have_scapy:
                    p = Ether(payload)
                    self.log.debug("Received decoded packet:\n%s" % p.show2())
                else:
                    p = payload

                self._recv(p)
                self._pkt = ""
Exemplo n.º 54
0
def test_set_check(identifier, test_ratio):
    return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
Exemplo n.º 55
0
def read_png2(stream):
    """Decode a PNG from *stream*, honoring tRNS transparency and bKGD
    background chunks.

    Returns (width, height, mode, pixel_bytes_blob) where mode is 'gray',
    'rgb' or 'full' (RGBA), or None if the signature does not match.
    """
    if not stream.read(8) == b'\x89PNG\r\n\x1a\n':
        stream.seek(0, 0)  # not a PNG: rewind the stream
        return

    # Fields collected from the chunks we care about
    _width = _height = _depth = _color_type = _interlace = _pixel_bytes = \
        _palette = _background = _extra_alpha = None
    _data = []

    while True:
        try:
            # Each chunk: 4-byte length, 4-byte type tag, data, 4-byte CRC.
            _length, _mask = struct.unpack('!I4s', stream.read(8))
            _chunk = stream.read(_length)
            _crc32 = struct.unpack('!I', stream.read(4))[0]

            # Stop on a corrupt chunk (the CRC covers the tag + data).
            if zlib.crc32(_mask + _chunk) != _crc32:
                break

            if _mask == _HEADER:
                (_width, _height, _depth, _color_type, _compress_method,
                 _filter_method, _interlace) = struct.unpack('!2I5B', _chunk)

                # Samples per pixel per color type; -1 marks invalid types.
                _planes = (1, -1, 3, 1, 2, -1, 4)[_color_type]
                _pixel_bytes = (_depth * _planes + 7) // 8

            elif _mask == _PALETTE:
                _palette = tuple(_chunk[_i:_i + 3]
                                 for _i in range(0, _length, 3))

            elif _mask == _DATA:
                _data.append(_chunk)

            elif _mask == _END:
                break

            elif _mask == b'tRNS':  # transparency information
                if _color_type == _COLOR_TYPE_GRAY:
                    _extra_alpha = _chunk[1]  # one byte of the 16-bit sample
                elif _color_type == _COLOR_TYPE_RGB:  # one byte per sample
                    _extra_alpha = _chunk[1], _chunk[3], _chunk[5]
                if _color_type == _COLOR_TYPE_PALETTE:
                    # here the chunk is an alpha table (one entry per index)
                    _length = len(_chunk)
                    _extra_alpha = tuple(_chunk[_i] if _i < _length else 255
                                         for _i in range(256))

            elif _mask == b'bKGD':
                if _color_type in (_COLOR_TYPE_GRAY, _COLOR_TYPE_GRAY_ALPHA):
                    _background = _chunk[1]
                elif _color_type in (_COLOR_TYPE_RGB, _COLOR_TYPE_RGB_ALPHA):
                    _background = _chunk[1], _chunk[3], _chunk[5]
                elif _color_type == _COLOR_TYPE_PALETTE:
                    _background = ord(_chunk)  # palette index, stored directly

        except struct.error:
            break

    # zlib (LZ77) decompression of the concatenated IDAT payloads
    _decompress_obj = zlib.decompressobj()
    _unzip_data = itertools.chain(
        *(_decompress_obj.decompress(_chunk) for _chunk in _data),
        _decompress_obj.flush())

    # Split into scanlines; _adam yields the per-line pixel count of each
    # reduced image produced by the interlace pass extraction.
    _adam = (_adam7iter if _interlace else _adam1iter)(_width, _height)
    _reduced_images = tuple([] for _ in range(7 if _interlace else 1))  # holders
    _current_image = 0  # the code would still run without this default

    while True:
        try:
            _line_bytes = next(_adam)
            if _line_bytes > 0:  # positive: pixel count of a line to read
                _reduced_images[_current_image].append(
                    bytearray(
                        # _read_iter pulls a fixed number of items from an
                        # iterator: here one scanline from _unzip_data — the
                        # line's byte length plus 1 leading filter-type byte.
                        _read_iter(_unzip_data,
                                   _line_bytes * _pixel_bytes + 1)))
            else:  # non-positive: switch image (7 passes when interlaced)
                _current_image = abs(_line_bytes)

        except StopIteration:
            break

    # Undo the per-line filters
    _un_filter_images = (  # pack the images together
        # pack one image's lines together
        tuple(_un_filter_image_lines(_image_lines, _pixel_bytes))
        for _image_lines in _reduced_images)

    # Write the pixels back into place (de-interlace)
    if _interlace:
        _result = _un_interlace(_un_filter_images, _width, _height,
                                _pixel_bytes)
    else:
        _result = itertools.chain(*next(_un_filter_images))  # single image

    if _depth == 16:  # remember to scale the sample depth down
        _result = (_j for _i, _j in enumerate(_result) if (_i % 2))

    if _color_type == _COLOR_TYPE_GRAY:
        if _extra_alpha:  # an extra alpha channel was provided via tRNS
            if _background:  # blend alpha into the gray channel — simple case
                _mode = 'gray'
                _result = (_background +
                           (i - _background) * _extra_alpha // 255
                           for i in _result)

            else:
                _mode = 'full'  # expand so the transparency is fully kept
                _result = _t_gray_to_rgb_alpha(_result, _extra_alpha)

        else:  # nothing to do
            _mode = 'gray'

    elif _color_type == _COLOR_TYPE_GRAY_ALPHA:
        # an alpha channel already exists, so _extra_alpha is ignored
        _mode = 'full'
        _result = _t_gray_alpha_to_rgb_alpha(_result, _background or 0)

    elif _color_type == _COLOR_TYPE_PALETTE:
        if _extra_alpha:
            if _background:
                _mode = 'rgb'
                _result = _t_palette_to_rgb(_result, _palette, _extra_alpha,
                                            _background)
            else:
                _mode = 'full'
                _result = _t_palette_to_rgb_alpha(_result, _palette,
                                                  _extra_alpha)

        else:
            _mode = 'rgb'
            _result = _t_palette_to_rgb2(_result, _palette)

    elif _color_type == _COLOR_TYPE_RGB:
        if _extra_alpha:
            if _background:
                _mode = 'rgb'
                _result = _t_rgb_to_rgb(_result, _extra_alpha, _background)

            else:
                _mode = 'full'
                _result = _t_rgb_to_rgb_alpha(_result, _extra_alpha)

        else:  # nothing to do
            _mode = 'rgb'

    elif _color_type == _COLOR_TYPE_RGB_ALPHA:
        if _background:
            _mode = 'rgb'
            _result = _t_rgb_alpha_to_rgb(_result, _background)
        else:
            _mode = 'full'

    else:
        # NOTE(review): bare ``raise`` with no active exception -> RuntimeError
        raise

    return _width, _height, _mode, bytes(_result)
Exemplo n.º 56
0
# Header metadata describing the file to transfer.
head = {
    'filepath': r'D:\PythonProject\IPv6_file_trans',
    'filename': r'file.zip',
    'filesize': None,
    'CRC32': None
}
file_path = os.path.join(head['filepath'], head['filename'])

# Compute the file size.
file_size = os.path.getsize(os.path.join(head['filepath'], head['filename']))
head['filesize'] = file_size

# Compute the CRC32 value so the receiver can compare it against the header.
with open(file_path, 'rb') as f:
    head['CRC32'] = zlib.crc32(f.read())

json_head = json.dumps(head)  # serialize the dict to a JSON string
bytes_head = json_head.encode('utf-8')  # string -> bytes

# Compute the header length.
head_len = len(bytes_head)  # header length in bytes
# struct packs the int into exactly 4 bytes, so the receiving side can read
# the length with a fixed 4-byte buffer.
pack_len = struct.pack('i', head_len)
# Send the header length first,
sender.send(pack_len)
# then the header itself as bytes.
sender.send(bytes_head)

listener.listen()
conn, addr = listener.accept()
Exemplo n.º 57
0
    def get_crc32(file_bytes):
        """Return the CRC32 of *file_bytes* formatted via ``hex()``."""
        checksum = zlib.crc32(file_bytes)
        return hex(checksum)
Exemplo n.º 58
0
def _crc32(data, seed=0):
    return zlib.crc32(data, seed) & 0xffffffff
Exemplo n.º 59
0
def parse_folder(target_folder, output_file):
    """
    read each file and produce a hash value.

    Walks *target_folder* in sorted order, skips known system folders and
    non-ROM suffixes, and writes one tab-separated line per kept file to
    *output_file*: sha256, path, sha1, md5, crc32 (8 lowercase hex digits).
    """
    # list folders and files to exclude
    banned_folders = ("/AUTO/", "/CPAK/", "/Documentation/", "/ED64/",
                      "/EDFC/", "/EDGB/", "/EDMD/",
                      "/Extended SSF Dev Demo Sample - Krikzz/src/",
                      "/Firmware Backup/", "/GBASYS/", "/Images/", "/MEGA/",
                      "/Manuals/", "/PALETTE/", "/PATTERN/", "/SAVE/",
                      "/SNAP/", "/SOUNDS/", "/SPED/", "/SYSTEM/",
                      "/System Test Images/", "/System Volume Information/",
                      "/TBED/", "/TEXT/", "/_PREVIEW/", "/menu/",
                      "/ntm_firmware_ver", "/sd2snes Themes/", "/sd2snes/")
    banned_suffixes = (".001", ".002", ".003", ".004", ".005", ".006", ".007",
                       ".008", ".009", ".aps", ".asm", ".bak", ".bat", ".bsa",
                       ".bps", ".BPS", ".bst", ".c", ".cht", ".dat", ".db",
                       ".docx", ".exe", ".ips", ".jpg", ".json", ".mso",
                       ".ods", ".odt", ".pc", ".pdf", ".png", ".sav", ".srm",
                       ".sto", ".txt", ".tmp", ".xdelta", ".xls", "OS.PCE",
                       "Thumbs.db", "menu.bin", "desktop.ini", ".DS_Store")
    with open(output_file, "w") as output_file:
        i = 0
        # make sure subfolders are alphanumerically sorted
        sorted_files = sorted(os.walk(target_folder))
        for dirpath, dirnames, filenames in sorted_files:
            if filenames:
                # make sure files are alphanumerically sorted
                filenames.sort()
                for f in filenames:
                    filename = os.path.join(os.path.normpath(dirpath), f)
                    absolute_filename = os.path.abspath(filename)
                    # NOTE(review): the result of this check is discarded,
                    # so the call has no effect.
                    os.path.isfile(absolute_filename)
                    # convert to Unix format by default
                    filename = filename.replace("\\", "/")
                    # Report filenames with non-ASCII characters
                    try:
                        filename.encode('ascii')
                    except UnicodeEncodeError:
                        print("Error (non-ASCII character):",
                              filename,
                              file=sys.stdout)
                        time.sleep(10)  # alternatively: sys.exit(1)
                    sha256 = hashlib.sha256()
                    sha1 = hashlib.sha1()
                    md5 = hashlib.md5()
                    crc = 0
                    # exclude certain folders and files
                    if not (any(s in filename for s in banned_folders)
                            or filename.endswith(banned_suffixes)):
                        try:
                            with open(absolute_filename, "rb",
                                      buffering=0) as f:
                                # use a small buffer to compute hash
                                # values to avoid storing large files
                                # in memory (changing buffer size does
                                # not change parsing speed much)
                                for b in iter(lambda: f.read(128 * 1024), b''):
                                    sha256.update(b)
                                    sha1.update(b)
                                    md5.update(b)
                                    crc = zlib.crc32(b, crc)
                        except FileNotFoundError:
                            # Windows default API is limited to paths of
                            # 260 characters
                            absolute_filename = u'\\\\?\\' + absolute_filename
                            with open(absolute_filename, "rb",
                                      buffering=0) as f:
                                for b in iter(lambda: f.read(128 * 1024), b''):
                                    sha256.update(b)
                                    sha1.update(b)
                                    md5.update(b)
                                    crc = zlib.crc32(b, crc)
                        print(sha256.hexdigest(),
                              filename,
                              sha1.hexdigest(),
                              md5.hexdigest(),
                              '{0:08x}'.format(crc & 0xffffffff),
                              sep="\t",
                              file=output_file)
                        i += 1
                        print_progress(i, END_LINE)
        # for/else: runs once after the walk completes without a break.
        else:
            if not args.new_line:
                print_progress(i, "\n")

    return None
Exemplo n.º 60
0
def read_png(stream):
    """Parse a PNG image from *stream*.

    Returns ``(width, height, color_type, pixel_data)`` on success, where
    ``pixel_data`` is a ``bytes`` of raw 8-bit samples.  Returns ``None``
    (after rewinding the stream) when the stream does not start with the
    PNG signature.  Palette images are expanded to RGB triples and 16-bit
    samples are reduced to 8 bits.
    """
    # Verify the fixed 8-byte PNG signature; rewind and bail out if absent.
    if not stream.read(8) == b'\x89PNG\r\n\x1a\n':
        stream.seek(0, 0)  # reset the stream position for the caller
        return

    # Fields populated from the chunks we care about.
    _width = _height = _depth = _color_type = _interlace = _pixel_bytes = \
        _palette = None
    _data = []

    # Walk the chunk stream: each chunk is length, 4-byte type, payload, CRC.
    while True:
        try:
            _length, _mask = struct.unpack('!I4s', stream.read(8))
            _chunk = stream.read(_length)
            _crc32 = struct.unpack('!I', stream.read(4))[0]

            # The CRC covers the chunk type plus payload; stop on corruption.
            if zlib.crc32(_mask + _chunk) != _crc32:
                break

            if _mask == _HEADER:
                (_width, _height, _depth, _color_type, _compress_method,
                 _filter_method, _interlace) = struct.unpack('!2I5B', _chunk)

                # Samples per pixel for each PNG color type (-1 = invalid).
                _planes = (1, -1, 3, 1, 2, -1, 4)[_color_type]
                _pixel_bytes = (_depth * _planes + 7) // 8

            elif _mask == _PALETTE:
                # PLTE payload is a flat run of 3-byte RGB entries.
                _palette = tuple(_chunk[_i:_i + 3]
                                 for _i in range(0, _length, 3))

            elif _mask == _DATA:
                _data.append(_chunk)

            elif _mask == _END:
                break

        except struct.error:
            # Truncated stream: decode whatever chunks we already collected.
            break

    # Inflate the concatenated IDAT payloads (zlib/LZ77).
    _decompress_obj = zlib.decompressobj()
    _unzip_data = itertools.chain(
        *(_decompress_obj.decompress(_chunk) for _chunk in _data),
        _decompress_obj.flush())

    # Split into scanlines; _adam yields the pixel count per row of the
    # current reduced image (7 Adam7 passes if interlaced, one otherwise).
    _adam = (_adam7iter if _interlace else _adam1iter)(_width, _height)
    _reduced_images = tuple([] for _ in range(7 if _interlace else 1))
    _current_image = 0  # index of the reduced image currently being filled

    while True:
        try:
            _line_bytes = next(_adam)
            if _line_bytes > 0:  # positive: a row of this many pixels
                # Read one scanline: row pixels times bytes-per-pixel, plus
                # one leading filter-type byte.
                # NOTE(review): for bit depths < 8 this over-reads, since a
                # row packs several pixels per byte -- confirm the iterator
                # helpers already compensate for that.
                _reduced_images[_current_image].append(
                    bytearray(
                        _read_iter(_unzip_data,
                                   _line_bytes * _pixel_bytes + 1)))
            else:  # zero/negative: switch to reduced image abs(value)
                _current_image = abs(_line_bytes)

        except StopIteration:
            break

    # Undo the per-scanline filtering of every reduced image.
    _un_filter_images = (
        tuple(_un_filter_image_lines(_image_lines, _pixel_bytes))
        for _image_lines in _reduced_images)

    # Reassemble the full image from the reduced images.
    if _interlace:
        _result = _un_interlace(_un_filter_images, _width, _height,
                                _pixel_bytes)
    else:
        _result = itertools.chain(*next(_un_filter_images))  # single pass

    if _depth == 16:
        # PNG stores 16-bit samples big-endian (MSB first); keep the high
        # byte of each pair to scale down to 8 bits.  (The original kept
        # the odd-indexed low byte, which throws away almost all of the
        # sample's precision.)
        _result = (_j for _i, _j in enumerate(_result) if not _i % 2)

    elif _palette:
        # Expand palette indices into their RGB triples.
        _result = itertools.chain(*(_palette[_i] for _i in _result))

    return _width, _height, _color_type, bytes(_result)