Example #1
 def _receive(self):
     while True:
         data = self._pipe.buffer
         pos = data.find(b"=")
         if pos >= 0:
             data = data[:pos]
         data = data.replace(b"\r", b"").replace(b"\n", b"")
         data = data.translate(self.TABLE)
         # TODO: check data size overflow
         self._crc = crc32(data, self._crc)
         self._file.write(data)
         if pos >= 0:  # Escape character (equals sign)
             self._pipe.buffer = self._pipe.buffer[pos + 1:]
             while True:
                 byte = yield from self._pipe.read_one()
                 if byte not in b"\r\n":
                     break
             # TODO: check for size overflow
             [byte] = byte
             data = bytes(((byte - 64 - 42) & bitmask(8),))
             self._crc = crc32(data, self._crc)
             self._file.write(data)
         else:
             try:
                 self._pipe.buffer = yield
             except EOFError:
                 break
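The generator above is a yEnc-style decoder: the translate TABLE presumably shifts every byte down by 42, bytes escaped with "=" get an extra 64 subtracted, and a running CRC-32 is kept over the decoded output. A minimal self-contained sketch of that transform under those assumptions (TABLE itself and bitmask() are not shown above):

from binascii import crc32

def yenc_decode(encoded: bytes) -> bytes:
    # Strip line endings, undo '=' escapes, then subtract the yEnc offset of 42.
    out = bytearray()
    escape = False
    for b in encoded.replace(b"\r", b"").replace(b"\n", b""):
        if escape:
            out.append((b - 64 - 42) & 0xFF)
            escape = False
        elif b == 0x3D:  # '=' escape character
            escape = True
        else:
            out.append((b - 42) & 0xFF)
    return bytes(out)

decoded = yenc_decode(b"r\x8f\x96\x96\x99")
assert decoded == b"Hello"
crc = crc32(decoded)  # running CRC, as kept in self._crc above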
Example #2
def readReg(reg):
        global crcerrorcount,crcerrorcountout
        tries = 5
        okay = 0
        while (okay != 1 and tries > 0):
                tries = tries -1
                send = [0x02,reg,0,0,0,0]
                crcout = (binascii.crc32("".join( chr( val ) for val in send[0:6])) >> 16) & 0xFFFF
                resp = SPI.xfer(send+[(crcout>>8)&0xFF,crcout&0xFF,0,0,0,0,0,0,0,0,0])
                val = (resp[10] << 24) + (resp[11] << 16) + (resp[12] << 8) + (resp[13])
                crcin = (resp[14] << 8) + resp[15]
                vals = "".join( chr( val ) for val in resp[0:14] )
                crcis =  (binascii.crc32(vals) >> 16) & 0xFFFF
                okay = 1
                if (crcin != crcis):
                        crcerrorcount = crcerrorcount +1
                        okay = 0
                        #print("CRC ERROR ON RECV")
                        #print("crcrep "+format(crcin,'02x')+" vs "+format(crcis,'02x'))
                if (resp[16] != 0xFF):
                        crcerrorcountout = crcerrorcountout +1
                        okay = 0
                        #print("CRC SEND ERROR: "+format(resp[16],'02x'))
                #print("reg "+str(reg)+" "+str(val))
        if (okay == 0):
                print("ERROR READING")

        return val
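The read above keeps only the upper 16 bits of a CRC-32 as a short frame checksum for the SPI transfer. The same trick in isolation, as a hedged Python 3 sketch (the original works on Python 2 str data):

import binascii

def crc16_from_crc32(payload: bytes) -> int:
    # Same idea as crcout/crcis above: take the top half of the 32-bit CRC.
    return (binascii.crc32(payload) >> 16) & 0xFFFF

print("%04X" % crc16_from_crc32(bytes([0x02, 0x10, 0, 0, 0, 0])))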
Example #3
 def process_request(self, request):
     if not request.META.has_key(self.header):
         raise ImproperlyConfigured('Requires SSL on frontend web server'
                     ' to be enabled and properly configured'
                     ' to pass SSL_* headers.')
     if request.META.get(self.header, None) == 'SUCCESS':
         (pem, serial, dn) = (
                 request.META.get(self.header_pem, ''),
                 request.META.get(self.header_serial, ''),
                 request.META.get(self.header_dn, '')
         )
         users = map(lambda i: i.certificate.user,
                 CertificateIndex.objects.filter(
                         pem_index=crc32(pem)&0xffffffff,
                         serial_index=crc32(serial)&0xffffffff,
                         dn_index=crc32(dn)&0xffffffff
                 )
         )
         if len(users) > 1:
             users = filter(lambda u: any(u.certificates.filter(pem=pem, serial=serial, dn=dn)), users)
         if not any(users):
             certificate, created = Certificate.objects.get_or_create(pem=pem, serial=serial, dn=dn)
             return
         if len(users) > 1:
             raise Exception('Too many users for certificate')
         user = users[0]
         if request.user.is_authenticated():
             if request.user == user:
                 return
         user = auth.authenticate(user=user)
         if user:
             request.user = user
             auth.login(request, user)
Example #4
def compute_crc(buf, offset):
    before_buffer = (ctypes.c_uint8 * offset).from_buffer(buf)
    zero = (ctypes.c_uint8 * 4)()
    after_buffer = (ctypes.c_uint8 * (ctypes.sizeof(buf) - offset - 4)).from_buffer(buf, offset + 4)
    crc = binascii.crc32(before_buffer)
    crc = binascii.crc32(zero, crc)
    return binascii.crc32(after_buffer, crc)
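A hedged usage sketch for compute_crc() above: it CRCs the whole buffer while treating the 4 bytes at the given offset as zero, which is the usual way to checksum a structure that stores its own CRC. The 16-byte buffer and offset below are made up:

import ctypes

buf = (ctypes.c_uint8 * 16)(*range(16))
print("0x%08X" % (compute_crc(buf, 4) & 0xFFFFFFFF))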
Example #5
def crc32_checksum(filepath, start=0, length=0):
    crc = 0

    with open(filepath, 'rb') as fp:
        if start:
            fp.seek(start)

        bytes_read = 0
        while True:
            data = fp.read(8192)

            if not data:
                break

            if length > 0:
                bytes_read += len(data)
                if bytes_read >= length:
                    if bytes_read > length:
                        data = data[:length - bytes_read]
                    crc = binascii.crc32(data, crc)
                    break

            crc = binascii.crc32(data, crc)

    return crc & 0xFFFFFFFF
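A short hedged usage of crc32_checksum() above, comparing the CRC of the first 1 KiB of a file with the CRC of the whole file (the temporary file is only for illustration):

import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(os.urandom(4096))
print("%08X" % crc32_checksum(tmp.name, length=1024))
print("%08X" % crc32_checksum(tmp.name))
os.unlink(tmp.name)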
Example #6
    def unique_id(self, seen=None, account_id=None):
        """
        Get an unique ID for the transaction based on date, amount and raw.

        :param seen: if given, the method uses this dictionary as a cache to
                     prevent several transactions with the same values to have the same
                     unique ID.
        :type seen: :class:`dict`
        :param account_id: if given, add the account ID in data used to create
                           the unique ID. Can be useful if you want your ID to be unique across
                           several accounts.
        :type account_id: :class:`str`
        :returns: an unique ID encoded in 8 length hexadecimal string (for example ``'a64e1bc9'``)
        :rtype: :class:`str`
        """
        crc = crc32(str(self.date))
        crc = crc32(str(self.amount), crc)
        crc = crc32(re.sub('[ ]+', ' ', self.raw.encode("utf-8")), crc)

        if account_id is not None:
            crc = crc32(str(account_id), crc)

        if seen is not None:
            while crc in seen:
                crc = crc32("*", crc)

            seen.add(crc)

        return "%08x" % (crc & 0xffffffff)
Example #7
    def _process_chunk(
            self,
            from_buffer,
            to_buffer,
            chunk_buffer,
            output_file,
            id,
            chunk_type):
        """
            Method called on each chunk
            Returns a dict with meta-data on chunk
        """

        if chunk_type > CHUNK_TYPE_KEEP:
            chunk_len = len(chunk_buffer)
        else:
            chunk_len = 0

        from_crc = binascii.crc32(from_buffer)
        to_crc = binascii.crc32(to_buffer)

        chunkheader = ChunkHeader(CHUNK_HEADER_MAGIC, CHUNK_HEADER_VERSION,
                                  chunk_type, id, chunk_len, 0, len(from_buffer),
                                  from_crc, len(to_buffer), to_crc)

        chunkheader.crc = binascii.crc32(bytearray(chunkheader))

        arr = bytearray(chunkheader)
        assert len(arr) == CHUNK_HEADER_SIZE
        chunkfile = open(output_file, "wb")
        chunkfile.write(arr)
        if chunk_type > CHUNK_TYPE_KEEP:
            chunkfile.write(chunk_buffer)
        chunkfile.close()

        chunk_len += CHUNK_HEADER_SIZE

        return {
            "_id": id,
            "_type": chunk_type,
            "_name": os.path.basename(output_file),
            "crc_from": "0x%x" % (from_crc & 0xffffffff),
            "crc_to": "0x%x" % (to_crc & 0xffffffff),
            "crc_header": "0x%x" % (chunkheader.crc & 0xffffffff),
            "chunk_header": "0x%x" % (binascii.crc32(open(output_file, "rb").read()) & 0xffffffff),
            "size_output": chunk_len,
            "size_input": len(to_buffer),
            "ratio": "%0.5f" % (1.0 * chunk_len / len(to_buffer)),
            "delta": chunk_len - len(to_buffer),
        }
Example #8
	def save_max_drive(self, f):
		if lzari == None:
			raise error, ("The lzari module is needed to "
				      " decompress MAX Drive saves.")
		iconsysname = ""
		icon_sys = self.get_icon_sys()
		if icon_sys != None:
			title = icon_sys_title(icon_sys, "ascii")
			if len(title[0]) > 0 and title[0][-1] != ' ':
				iconsysname = title[0] + " " + title[1].strip()
			else:
				iconsysname = title[0] + title[1].rstrip()
		s = ""
		dirent = self.dirent
		for i in range(dirent[2]):
			(ent, data) = self.get_file(i)
			if not mode_is_file(ent[0]):
				raise error, "Non-file in save file."
			s += struct.pack("<L32s", ent[2], ent[8])
			s += data
			s += "\0" * (round_up(len(s) + 8, 16) - 8 - len(s))
		length = len(s)
		progress =  "compressing " + dirent[8] + ": "
		compressed = lzari.encode(s, progress)
		hdr = struct.pack("<12sL32s32sLLL", PS2SAVE_MAX_MAGIC,
				  0, dirent[8], iconsysname,
				  len(compressed) + 4, dirent[2], length)
		crc = binascii.crc32(hdr)
		crc = binascii.crc32(compressed, crc)
		f.write(struct.pack("<12sL32s32sLLL", PS2SAVE_MAX_MAGIC,
				    crc & 0xFFFFFFFF, dirent[8], iconsysname,
				    len(compressed) + 4, dirent[2], length))
		f.write(compressed)
		f.flush()
Example #9
def compute_checksum(path):
    r"""Returns the checksum of the file pointed by path

    Parameters
    ----------
    path : str
        The path to compute the checksum

    Returns
    -------
    int
        The file checksum
    """
    crc = 0
    filepaths = []
    if isdir(path):
        for name, dirs, files in walk(path):
            join_f = partial(join, name)
            filepaths.extend(list(map(join_f, files)))
    else:
        filepaths.append(path)

    for fp in filepaths:
        with open(fp, "Ub") as f:
            # Go line by line so we don't need to load the entire file
            for line in f:
                if crc is None:
                    crc = crc32(line)
                else:
                    crc = crc32(line, crc)
    # We need the & 0xffffffff in order to get the same numeric value across
    # all python versions and platforms
    return crc & 0xffffffff
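The final mask matters because Python 2's crc32() could return a negative (signed 32-bit) value, while Python 3 always returns the unsigned value; masking maps both onto the same number. A small stand-alone illustration:

from binascii import crc32

# On Python 3 the mask is a no-op, since crc32 already returns an unsigned value...
value = crc32(b"spam")
assert value == value & 0xffffffff
# ...but a negative Python 2 result maps onto the identical unsigned value:
assert -1526341861 & 0xffffffff == 2768625435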
Example #10
def genkey(name: str, version: Version) -> int:
	"""Return the key for `name` according to `version`.

	This is a low-level function that rarely needs to be used directly.
	"""
	n = name.encode(ENCODING, "strict").upper()
	if version is Version.TD or version is Version.RA:
		l = len(n)
		k = 0
		i = 0
		while i < l:
			a = 0
			for j in range(4):
				a >>= 8
				if i < l:
					a |= (n[i] << 24)
					i += 1
			k = (k << 1 | k >> 31) + a & 4294967295
		return k
	if version is Version.TS:
		l = len(n)
		a = l & -4
		if l & 3:
			n += bytes((l - a,))
			n += bytes((n[a],)) * (3 - (l & 3))
		return binascii.crc32(n)
	if version is Version.RG:
		return binascii.crc32(n)
	raise TypeError("`version` must be a Version enumeration member")
Example #11
 def encode(self, id):
     body = self._encode_message_body()
     header = struct.pack(_HEADER_SPEC, self.msgtype, id, len(body))
     raw_crc = crc32(body, crc32(header)) & 0xffffffff
     crc = struct.pack(_FOOTER_SPEC, raw_crc)
     logger.debug("header:%s body:%s crc:%s", repr(header), repr(body)[:30], repr(crc))
     return b''.join((header, body, crc))
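The frame is simply header + body + a 4-byte CRC-32 footer over both. _HEADER_SPEC and _FOOTER_SPEC are not shown above, so the formats below are stand-ins; the point is the chained crc32(body, crc32(header)) call and how a receiver would check it:

import struct
from binascii import crc32

HEADER_SPEC = "<BIH"   # hypothetical: msgtype, id, body length
FOOTER_SPEC = "<I"     # 4-byte CRC-32 footer

def verify(frame: bytes) -> bool:
    header_size = struct.calcsize(HEADER_SPEC)
    header, rest = frame[:header_size], frame[header_size:]
    body, footer = rest[:-4], rest[-4:]
    (expected,) = struct.unpack(FOOTER_SPEC, footer)
    return crc32(body, crc32(header)) & 0xffffffff == expected

header = struct.pack(HEADER_SPEC, 1, 42, 5)
body = b"hello"
footer = struct.pack(FOOTER_SPEC, crc32(body, crc32(header)) & 0xffffffff)
assert verify(header + body + footer)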
Example #12
    def packet_sender(self, p_type, id_value, value, type_v):
        # This is building packets; the explanation can be found in Robert's
        # documentation, which contains the structure of each packet. It is
        # unlikely you'll have to modify start/adress/length/record/index.
        # Only s_id (type of ID, can be found in DriveManager), param_id (the
        # actual ID, also found in Drive Manager) and number (value to write in
        # the ID) will have to be changed if you want to do other simple tasks.
        # The 2685547530 'magic' number is given in the documentation.
        start = pack('<L', 2685547530)
        adress = pack('<L', 0)
        s_id = pack('<L', p_type)
        length = pack('<L', 16)
        record = pack('<l', -1)
        param_id = pack('<L', id_value)
        index = pack('<L', 0)
        if type_v == 'int32':
            number = pack('<L', value)
        elif type_v == 'float':
            number = pack('<f', value)
        else:
            number = pack('<i', value)
        # The 3344495068/3344494807 seeds are necessary to obtain a correct checksum.
        # They were found by sniffing packets and brute-forcing numbers
        # until both checksums matched.
        checksum = crc32(start + adress + s_id + length, 3344495068)
        checksum_start = pack('<L', checksum)
        checksum = crc32(record + param_id + index + number, 3344494807)
        checksum_data = pack('<L', checksum)
        packet = start + adress + s_id + length + checksum_start
        packet += checksum_data + record + param_id + index + number
        self.s.send(packet)
        data = self.s.recv(self.buffer)

        return data
Example #13
    def write_data(self, write_finished_cb):
        # First generate the header part
        header_data = struct.pack("<BIBB", 0xEB, self.pins, self.vid, self.pid)
        header_crc = crc32(header_data) & 0x0FF
        header_data += struct.pack("B", header_crc)

        # Now generate the elements part
        elem = ""
        logger.info(self.elements.keys())
        for element in reversed(self.elements.keys()):
            elem_string = self.elements[element]
            # logger.info(">>>> {}".format(elem_string))
            key_encoding = self._rev_element_mapping[element]
            elem += struct.pack("BB", key_encoding, len(elem_string))
            elem += elem_string

        elem_data = struct.pack("BB", 0x00, len(elem))
        elem_data += elem
        elem_crc = crc32(elem_data) & 0x0FF
        elem_data += struct.pack("B", elem_crc)

        data = header_data + elem_data

        # Write data
        p = ""
        for s in data:
            p += "0x{:02X} ".format(ord(s))
        logger.info(p)

        self.mem_handler.write(self, 0x00, struct.unpack("B" * len(data), data))

        self._write_finished_cb = write_finished_cb
Example #14
 def __compare_current_version_data_with_data(self, p_version_data, p_data):
     """
     Compare a version data with some new data using crc32.
     @param p_version_data: version data
     @param p_data: new data
     """
     return crc32(p_version_data) != crc32(p_data)
Example #15
def calculate_crc32_checksum(data):
    if sys.version_info < (3, 0):
        return "%08x" % (binascii.crc32(data) & 0xffffffff)
    else:
        if isinstance(data, str):
            data = str.encode(data)
        return "%08x" % (binascii.crc32(data) & 0xffffffff)
Example #16
 def test_save(self):
     filelike = open('jieba.cache', 'rb').read()
     self.bucket.save('new_jieba.cache', filelike)
     contents = self.bucket.get_object_contents('new_jieba.cache')
     self.assertEqual(binascii.crc32(filelike) & 0xffffffff,
                      binascii.crc32(contents) & 0xffffffff)
     self.bucket.delete_object('new_jieba.cache')
Example #17
    def write_data(self, write_finished_cb):
        # First generate the header part
        header_data = struct.pack('<BIBB', 0xEB, self.pins, self.vid, self.pid)
        header_crc = crc32(header_data) & 0x0ff
        header_data += struct.pack('B', header_crc)

        # Now generate the elements part
        elem = bytearray()
        logger.info(list(self.elements.keys()))
        for element in reversed(list(self.elements.keys())):
            elem_string = self.elements[element]
            # logger.info(">>>> {}".format(elem_string))
            key_encoding = self._rev_element_mapping[element]
            elem += struct.pack('BB', key_encoding, len(elem_string))
            elem += bytearray(elem_string.encode('ISO-8859-1'))

        elem_data = struct.pack('BB', 0x00, len(elem))
        elem_data += elem
        elem_crc = crc32(elem_data) & 0x0ff
        elem_data += struct.pack('B', elem_crc)

        data = header_data + elem_data

        self.mem_handler.write(self, 0x00,
                               struct.unpack('B' * len(data), data))

        self._write_finished_cb = write_finished_cb
Example #18
def json_search(request):
    search_query = Search(indexes=['rt_main'], config=SphinxitConfig)
    keyword = request.GET['keyword']
    start = request.GET.get('start', 0)
    count = request.GET.get('count', 10)
    sort = request.GET.get('sort', '')
    category = request.GET.get('category', '')
    if request.GET.get('base64') == '1':
        keyword = keyword.decode('base64').decode('utf8')

    mckey = str(binascii.crc32((u'%s%s%s%s%s' % (keyword,start,count,sort,category)).encode('utf8')) & 0xFFFFFFFFL)
    cache = mc.get(mckey)
    if cache:
        print 'bingo', keyword.encode('utf8'), mckey
        return HttpResponse(cache)

    q = search_query.match(keyword)
    if category: q = q.filter(category__eq=binascii.crc32(category)&0xFFFFFFFFL)
    if sort == 'create_time': q = q.order_by('create_time', 'desc')
    if sort == 'length': q = q.order_by('length', 'desc')
    q = q.limit(start, count)
    q2 = search_query.match(keyword).select('category', Count()).group_by('category').named('cats')
    res = q.ask(subqueries=[q2])

    jsp = JsonResponse(res)
    mc.set(mckey, jsp.content)
    return jsp
Example #19
 def __init__(self, ffi, preamble, tmpdir=None, modulename=None,
              ext_package=None, tag='', force_generic_engine=False, **kwds):
     self.ffi = ffi
     self.preamble = preamble
     if not modulename:
         flattened_kwds = ffiplatform.flatten(kwds)
     vengine_class = _locate_engine_class(ffi, force_generic_engine)
     self._vengine = vengine_class(self)
     self._vengine.patch_extension_kwds(kwds)
     self.kwds = kwds
     #
     if modulename:
         if tag:
             raise TypeError("can't specify both 'modulename' and 'tag'")
     else:
         key = '\x00'.join([sys.version[:3], __version__, preamble,
                            flattened_kwds] +
                           ffi._cdefsources)
         if sys.version_info >= (3,):
             key = key.encode('utf-8')
         k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
         k1 = k1.lstrip('0x').rstrip('L')
         k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
         k2 = k2.lstrip('0').rstrip('L')
         modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key,
                                           k1, k2)
     suffix = _get_so_suffixes()[0]
     self.tmpdir = tmpdir or _caller_dir_pycache()
     self.sourcefilename = os.path.join(self.tmpdir, modulename + '.c')
     self.modulefilename = os.path.join(self.tmpdir, modulename + suffix)
     self.ext_package = ext_package
     self._has_source = False
     self._has_module = False
Example #20
   def client_thread(self):
       while True:
           self.lock.acquire()
 
           if self.mode == "a":
               self.event_to_send = len(self.events) - 1
                               
           if self.event_to_send != -1:
               print "Sending event: %s" % (self.format_event(event)) 
               
               theevent = self.events[self.event_to_send]
               rawdata = theevent["data"].decode("string-escape")
               theevent["crc"] = crc32(rawdata)
               self.proxy.send_de(self.events[self.event_to_send])        
               del self.events[self.event_to_send]
               self.event_to_send = -1
                           
           newevents = self.proxy.getdebugq()
           
           for event in newevents:
               d = event["data"].decode("string-escape")
               dcrc = crc32(d)
               print "CRC: %08x for eventid:%s" % (dcrc, event["eventid"])                
               self.events.append(event)
               
           self.lock.release()
           
           time.sleep(.1)
Example #21
        def try_to_recover():
            if response['offset'] == 0:
                # Nothing to recover
                return

            expected_crc = binascii.crc32(firmware[:response['offset']]) & 0xFFFFFFFF
            remainder    = response['offset'] % response['max_size']

            if expected_crc != response['crc']:
                # Invalid CRC. Remove corrupted data.
                response['offset'] -= remainder if remainder != 0 else response['max_size']
                response['crc']     = \
                        binascii.crc32(firmware[:response['offset']]) & 0xFFFFFFFF
                return

            if (remainder != 0) and (response['offset'] != len(firmware)):
                # Send rest of the page.
                try:
                    to_send             = firmware[response['offset'] : response['offset']
                                                + response['max_size'] - remainder]
                    response['crc']     = self.__stream_data(data   = to_send,
                                                             crc    = response['crc'],
                                                             offset = response['offset'])
                    response['offset'] += len(to_send)
                except ValidationException:
                    # Remove corrupted data.
                    response['offset'] -= remainder
                    response['crc']     = \
                        binascii.crc32(firmware[:response['offset']]) & 0xFFFFFFFF
                    return

            self.__execute()
            self._send_event(event_type=DfuEvent.PROGRESS_EVENT, progress=response['offset'])
Example #22
	def _sweep(self, sweep_config):
		response = self.alh.post("sensing/quickSweepBin",
				"dev %d conf %d ch %d:%d:%d" % (
				sweep_config.config.device.id,
				sweep_config.config.id,
				sweep_config.start_ch,
				sweep_config.step_ch,
				sweep_config.stop_ch))

		data = response[:-4]
		crc = response[-4:]

		their_crc = struct.unpack("i", crc[-4:])[0]
		our_crc = binascii.crc32(data)
		if their_crc != our_crc:
			# Firmware versions 2.29 only calculate CRC on the
			# first half of the response due to a bug
			our_crc = binascii.crc32(data[:len(data)/2])
			if their_crc != our_crc:
				raise CRCError
			else:
				log.warning("working around broken CRC calculation! "
						"please upgrade node firmware")

		assert sweep_config.num_channels * 2 == len(data)

		result = []
		for n in xrange(0, len(data), 2):
			datum = data[n:n+2]

			dbm = struct.unpack("h", datum)[0]*1e-2
			result.append(dbm)

		return result
Example #23
 def recv_data(self):
     # self.log("Waiting for content length..."
     self.check_timeout(3)
     #         s1, s2 = self.sock.recv(8), self.sock.recv(8)
     #         #print 'recv', s1, s2
     #         content_length, crc32 = hexstring2int( s1 ), hexstring2int( s2 )
     #         #print content_length, crc32
     content_length, crc32 = tuple([int(s) for s in self.readline().split()])
     self.log("Content-Length: %d\n" % content_length)
     recvbytes = 0
     content = []
     # self.log("Receiving data...")
     while recvbytes < content_length:
         recvstring = self.sock.recv(min(content_length - recvbytes, 2048))
         recvbytes += len(recvstring)
         self.log("Received: %d bytes (%3.1f%%)\r" % (recvbytes, (100.0 * recvbytes / content_length)))
         content.append(recvstring)
     self.log("Received: %d bytes.        " % (recvbytes) + "\n")
     content = "".join(content)
     if crc32 != binascii.crc32(content):
         str = """\
         expected crc %d, calculated crc %d
         expected content length %d, content length %d
         """ % (
             crc32,
             binascii.crc32(content),
             content_length,
             len(content),
         )
         raise IOError("CRC error while receiving data:\n" + str)
     return content
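The loop above implies a very small wire format: one ASCII line carrying the content length and the CRC-32, followed by the raw payload. A hedged sketch of the matching send side, written for Python 3 bytes (the original module works on Python 2 str):

import binascii

def send_data(sock, payload: bytes) -> None:
    # One header line: "<length> <crc32>\n", then the payload itself.
    sock.sendall(b"%d %d\n" % (len(payload), binascii.crc32(payload)))
    sock.sendall(payload)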
Example #24
	def tryParseHeader(self, buf):
		"""
		Try to parse a buffer as a GPT header, return None on failure
		"""

		if len(buf) < self._gpt_size:
			raise NoGPT("Failed to locate GPT")

		data = dict(zip(
			self._gpt_head_fmt.keys(),
			self._gpt_struct.unpack(buf[0:self._gpt_size])
		))

		if data['header'] != self._gpt_header:
			return None

		tmp = data['crc32']
		data['crc32'] = 0

		crc = crc32(self._gpt_struct.pack(*[data[k] for k in self._gpt_head_fmt.keys()]))

		data['crc32'] = tmp

		# just in case future ones are larger
		crc = crc32(buf[self._gpt_size:data['headerSize']], crc)
		crc &= 0xFFFFFFFF

		if crc != data['crc32']:
			verbose("Warning: Found GPT candidate with bad CRC")
			return None

		return data
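GPT stores the header CRC inside the header itself, so validation recomputes the CRC with that field forced to zero, exactly as above. A reduced, self-contained illustration of the pattern (generic field layout, not the real GPT header):

import struct
from binascii import crc32

# Tiny "header": magic, one payload field, then a CRC computed with its own slot zeroed.
magic, payload = b"DEMO", 12345
header_wo_crc = struct.pack("<4sI", magic, payload) + struct.pack("<I", 0)
stored_crc = crc32(header_wo_crc) & 0xFFFFFFFF
header = struct.pack("<4sI", magic, payload) + struct.pack("<I", stored_crc)

# Verification: zero the CRC field again before recomputing.
check = crc32(header[:8] + b"\x00\x00\x00\x00") & 0xFFFFFFFF
assert check == struct.unpack_from("<I", header, 8)[0]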
Example #25
    def read_deflated(self, count = -1):
        if count < 1 or count > self.zinfo.file_size - self.byteswritten:
            count = self.zinfo.file_size - self.byteswritten
        if count < 1:
            return ''

        count = count
        self.fp.seek(self.zinfo.file_offset + self.bytesread)

        # First, fill up the buffer.
        while len(self.buffer) < count:
            bytes = self.fp.read(min(self.zinfo.compress_size - self.bytesread, 4096))
            self.bytesread += len(bytes)
            result = self.dc.decompress(bytes)
            if len(result):
                self.buffer += result
                self.crc = binascii.crc32(result, self.crc)

            if self.bytesread == self.zinfo.compress_size:
                bytes = self.dc.decompress('Z') + self.dc.flush()
                if len(bytes):
                    self.buffer += bytes
                    self.crc = binascii.crc32(bytes, self.crc)
                self._finalize()

        retval = self.buffer[:count]
        self.byteswritten += len(retval)
        self.buffer = self.buffer[count:]
        return retval
Example #26
def VerifyPy2Exe():
    """Checks for presense of the modified zipextimporter.py.  We no longer want
       the modified version, and need the original."""
    # CRCs of the correct version, from both 'r', and 'rb' mode
    crcGood = [0xA56E66A6, 0x57925DA8]
    path = os.path.join(sys.prefix, u'Lib', u'site-packages',
                        u'zipextimporter.py')
    # First we'll test using 'r' mode, this way if the line endings differ,
    # but the contents are the same, the crc will still be equal
    with open(os.path.join(scripts, u'zipextimporter.py'), 'r') as ins:
        crcBad = binascii.crc32(ins.read())
    crcBad &= 0xFFFFFFFFL
    with open(path, 'r') as ins:
        crcTest = binascii.crc32(ins.read())
    crcTest &= 0xFFFFFFFFL
    if crcTest == crcBad:
        # Definitely using the old modified version, need to reinstall
        return False
    if crcTest in crcGood:
        # Definitely using the un-modified version, good to go
        return True
    # Now check if the current file's crc in 'rb' mode matches a known "good"
    # crc.
    with open(path, 'rb') as ins:
        crcTest = binascii.crc32(ins.read())
    crcTest &= 0xFFFFFFFFL
    if crcTest in crcGood:
        # Definitely using the un-modified version
        return True
    # Last test: see if the modified lines are present
    with open(path, 'r') as ins:
        return 'filename = fullname.replace(".","\\")' in ins.read()
Example #27
 def calc_hash(self, widget, file_path):
     selection = self.hash_tree_view.get_selection()
     model, it = selection.get_selected()
     model.clear()
     for hash_type in self.type_conf.keys():
         if self.type_conf[hash_type] == '1' or self.type_conf[hash_type] == 'True':# If it's True we should calc this hash
             f = open(file_path, 'rb')
             if hash_type.lower() in ('md5', 'sha1', 'sha512', 'sha224', 'sha256', 'sha384'): # hashlib function
                 function = "hashlib.{0}()".format(hash_type.lower())
                 m = eval(function)
                 data = f.read(10240)
                 while (len(data) != 0):
                     m.update(data)
                     data = f.read(10240)
                 hash_value = str(m.hexdigest())
             elif hash_type.lower() == 'crc32':
                 m = binascii.crc32("") & 0xffffffff
                 data = f.read(10240)
                 while (len(data) != 0):
                     m = binascii.crc32(data, m) & 0xffffffff
                     data = f.read(10240)
                 hash_value = str(hex(m))[2:]
             selection = self.hash_tree_view.get_selection()
             model, it = selection.get_selected()
             model.append([hash_type, hash_value])
             f.close()
     self.hash_tree_view.set_cursor(0)
     return
Example #28
 def recv_file(self, filename):
     # the reason for this function is to store everything in a file and not have
     # the whole content in memory at the same time;
     # recv_data() actually has the content twice in memory at the same time!
     self.check_timeout(3)
     f = open(filename, "wb")
     content_length, crc32 = tuple([int(s) for s in self.readline().split()])
     self.log("Content-Length: %d\n" % content_length)
     recvbytes = 0
     content = []
     # self.log("Receiving data...")
     recvstring = self.sock.recv(min(content_length - recvbytes, 2048))
     recvbytes += len(recvstring)
     f.write(recvstring)
     crc = binascii.crc32(recvstring)
     while recvbytes < content_length:
         recvstring = self.sock.recv(min(content_length - recvbytes, 2048))
         recvbytes += len(recvstring)
         f.write(recvstring)
         crc = binascii.crc32(recvstring, crc)
     f.close()
     self.log("Received: %d bytes.        " % (recvbytes) + "\n")
     if crc32 != crc:
         str = """\
         expected crc %d, calculated crc %d
         expected content length %d, content length %d
         """ % (
             crc32,
             crc,
             content_length,
             recvbytes,
         )
         raise IOError("CRC error while receiving data:\n" + str)
Example #29
    def update(self, publisher, **kwargs):
        """
        Handle notifications from other classes we are observing. Typically
        this is a new event that needs to be placed into the debug queue.
        
        @param publisher: The object that sent the update
        """
        #print "[*] Debugger: got update event. Not ignoring it."
        event = kwargs["event"]
                        
        if event == "debug":                    
#            self.log.debug("Debugger: update: adding event to debug queue")
            de = DebugEvent()
            de.eventid = kwargs["eventid"]
            de.event = kwargs["event"]
            de.source = kwargs["src"]
            de.destination = kwargs["dest"]
            de.data = kwargs["thedata"]
            de.direction = kwargs["direction"]
            de.crc = binascii.crc32(kwargs["thedata"])
        
            self.log.debug("Debug.update: newevent: %08x [eventid:%s]" % 
                           (binascii.crc32(kwargs["thedata"]), 
                                            kwargs["eventid"]))
            
            self.debugq.put(de)
Example #30
def get_ffi():
    import cffi

    ffi = cffi.FFI()

    # Load C definitions
    dir_path = dirname(abspath(inspect.getfile(inspect.currentframe())))
    decl_path = os.path.join(dir_path, 'decl.h')
    with codecs.open(decl_path, 'r', 'utf-8') as header:
        ffi.cdef(header.read())

    # The modulename
    # Simplified version of what cffi does: remove kwargs and vengine
    preamble = "#include <git2.h>"
    key = [sys.version[:3], cffi.__version__, preamble] + ffi._cdefsources
    key = '\x00'.join(key)
    if sys.version_info >= (3,):
        key = key.encode('utf-8')
    k1 = hex(crc32(key[0::2]) & 0xffffffff).lstrip('0x').rstrip('L')
    k2 = hex(crc32(key[1::2]) & 0xffffffff).lstrip('0').rstrip('L')
    modulename = 'pygit2_cffi_%s%s' % (k1, k2)

    # Load extension module
    libgit2_bin, libgit2_include, libgit2_lib = get_libgit2_paths()
    C = ffi.verify(preamble, modulename=modulename, libraries=["git2"],
                   include_dirs=[libgit2_include], library_dirs=[libgit2_lib])

    # Ok
    return ffi, C
Example #31
 def hexdigest(this):
     result = binascii.crc32(this.content) & 0xFFFFFFFF
     return "%08X" % result
Example #32
 def crc32(self, data):
     return binascii.crc32(data) & 0xFFFFFFFF
Example #33
    def handle(self, *args, **options):
        files = options['files']
        admin = User.objects.get(username='******')
        wells = set()
        tz = pytz.timezone('Europe/Amsterdam')
        for fname in files:
            logger.info('Importing data from {}'.format(fname))
            df = pd.read_csv(fname,
                             sep='\t',
                             index_col=0,
                             parse_dates=True,
                             na_values=['-'])
            df.drop('Datum', axis=1, inplace=True)
            span = [tz.localize(df.index.min()), tz.localize(df.index.max())]
            start, stop = span
            screens = set()
            for col in df.columns:
                serial, _peilbuis, name = map(lambda x: x.strip(),
                                              re.split('[:-]', col))
                series = df[col]
                logger.info(series.name)
                try:
                    datalogger = Datalogger.objects.get(serial=serial)
                    datasource = LoggerDatasource.objects.get(
                        logger=datalogger)
                    io = StringIO()
                    io.write('Datum\t{}\n'.format(name))
                    series.to_csv(io, sep='\t', header=False)
                    contents = io.getvalue()
                    crc = abs(binascii.crc32(contents))
                    filename = 'Export_{}_{}_{:%Y%m%d}_{:%Y%m%d}'.format(
                        serial, name, start, stop)
                    sourcefile = SourceFile(name=filename,
                                            datasource=datasource,
                                            user=admin,
                                            crc=crc)
                    sourcefile.file.save(name=filename, content=io, save=True)
                except Exception as ex:
                    logger.error(
                        'Cannot create sourcefile for logger {}: {}'.format(
                            serial, ex))

                # find out where logger is
                # we could use the name from the header, but this is not equal to the id of the screen in the database
                query = LoggerPos.objects.filter(logger=datalogger)
                pos = None
                if query.count() == 1:
                    pos = query.first()
                else:
                    # TODO: not right, the if-else below
                    query1 = query.filter(start_date__range=span)
                    if query1.count() == 1:
                        pos = query1.first()
                    else:
                        query2 = query.filter(end_date__range=span)
                        if query2.count() == 1:
                            pos = query2.first()
                if pos is None:
                    logger.error(
                        'Cannot find installation for logger {}'.format(
                            serial))
                    continue
                screens.add(pos.screen)

            logger.info('File import completed')
            if len(screens) > 0:
                logger.info('Updating time series')
                for screen in screens:
                    series = screen.find_series()
                    if series:
                        series.update(start=start, stop=stop)
                        wells.add(screen.well)
        if len(wells) > 0:
            logger.info('Updating well charts')
            make_wellcharts(None, None, wells)
        logger.info('Done.')
Example #34
def crc32(domain):
    return binascii.crc32(domain.encode('utf-8'))
Example #35
        if port.read() != b'\x55':
            print('No ACK for ADDR. Exiting.')
            sys.exit(4)
        elif args.verbose:
            print(f'ADDR<- ACK')

        if args.verbose:
            print(f'DATA-> {binascii.hexlify(memory_view[cur_page:cur_page+pagesize])}')
        port.write(bytes(memory_view[cur_page:cur_page+pagesize]))
        if port.read() != b'\x55':
            print('No ACK for DATA. Exiting.')
            sys.exit(4)
        elif args.verbose:
            print(f'DATA<- ACK')

        chksum = binascii.crc32(memory_view[cur_page:cur_page+pagesize]) & 0xFFFFFFFF
        if args.verbose:
            print(f'CHK -> {chksum:08X}')
        port.write(struct.pack('<I', chksum))
        if port.read() != b'\x77':
            print('No ACK for CHK. Exiting.')
            sys.exit(4)
        elif args.verbose:
            print(f'CHK <- ACK')

        if port.read() != b'\x55':
            print('Flash failed. Exiting.')
            sys.exit(4)
        elif args.verbose:
            print('-'*80)
        print(f'Page {index}/{no_pages} written.')
Example #36
def get_crc32(s):
    return binascii.crc32(binascii.unhexlify(s.lower()))
Example #37
def add_fib_at_start(arginput):
    input_file = arginput + ".bin"
    file_name_hex = arginput + "_fib.hex"
    file_name_bin = arginput + ".bin"

    # Read in hex file
    input_hex_file = intelhex.IntelHex()
    input_hex_file.padding = 0x00
    input_hex_file.loadbin(input_file, offset=FLASH_BASE)

    output_hex_file = intelhex.IntelHex()
    output_hex_file.padding = 0x00

    # Get the starting and ending address
    addresses = input_hex_file.addresses()
    addresses.sort()
    start_end_pairs = list(ranges(addresses))
    regions = len(start_end_pairs)

    if regions == 1:
        start, end = start_end_pairs[0]
    else:
        start = min(min(start_end_pairs))
        end = max(max(start_end_pairs))

    assert start >= FLASH_BASE, (
        "Error - start 0x%x less than begining of user\
    flash area" % start)
    # Compute checksum over the range (don't include data at location of crc)
    size = end - start + 1
    data = input_hex_file.tobinarray(start=start, size=size)
    crc32 = binascii.crc32(data) & 0xFFFFFFFF

    fw_rev = FW_REV

    checksum = (start + size + crc32 + fw_rev) & 0xFFFFFFFF

    print("Writing FIB: base 0x%08X, size 0x%08X, crc32 0x%08X, fw rev 0x%08X,\
    checksum 0x%08X" % (start, size, crc32, fw_rev, checksum))

    #expected initial values used by daplink to validate that it is a valid bin
    #file; they are added as dummy values here because the fib area precedes the
    #application area, so the bootloader will ignore these dummy values
    #  00 is stack pointer (RAM address)
    #  04 is Reset vector  (FLASH address)
    #  08 NMI_Handler      (FLASH address)
    #  0C HardFault_Handler(FLASH address)
    #  10 dummy
    dummy_sp = 0x3FFFFC00
    dummy_reset_vector = 0x00003625
    dummy_nmi_handler = 0x00003761
    dummy_hardfault_handler = 0x00003691
    dummy_blank = 0x00000000

    #expected fib structure
    #typedef struct fib{
    #uint32_t base;     /**< Base offset of firmware, indicating what flash the
    #                        firmware is in. (will never be 0x11111111) */
    #uint32_t size;     /**< Size of the firmware */
    #uint32_t crc;      /**< CRC32 for firmware correctness check */
    #uint32_t rev;      /**< Revision number */
    #uint32_t checksum; /**< Check-sum of information block */
    #}fib_t, *fib_pt;

    fib_start = FIB_BASE
    dummy_fib_size = 20
    fib_size = 20
    trim_size = 24
    user_code_start = FLASH_BASE
    trim_area_start = TRIM_BASE

    # Write FIB to the file in little endian
    output_hex_file[fib_start + 0] = (dummy_sp >> 0) & 0xFF
    output_hex_file[fib_start + 1] = (dummy_sp >> 8) & 0xFF
    output_hex_file[fib_start + 2] = (dummy_sp >> 16) & 0xFF
    output_hex_file[fib_start + 3] = (dummy_sp >> 24) & 0xFF

    output_hex_file[fib_start + 4] = (dummy_reset_vector >> 0) & 0xFF
    output_hex_file[fib_start + 5] = (dummy_reset_vector >> 8) & 0xFF
    output_hex_file[fib_start + 6] = (dummy_reset_vector >> 16) & 0xFF
    output_hex_file[fib_start + 7] = (dummy_reset_vector >> 24) & 0xFF

    output_hex_file[fib_start + 8] = (dummy_nmi_handler >> 0) & 0xFF
    output_hex_file[fib_start + 9] = (dummy_nmi_handler >> 8) & 0xFF
    output_hex_file[fib_start + 10] = (dummy_nmi_handler >> 16) & 0xFF
    output_hex_file[fib_start + 11] = (dummy_nmi_handler >> 24) & 0xFF

    output_hex_file[fib_start + 12] = (dummy_hardfault_handler >> 0) & 0xFF
    output_hex_file[fib_start + 13] = (dummy_hardfault_handler >> 8) & 0xFF
    output_hex_file[fib_start + 14] = (dummy_hardfault_handler >> 16) & 0xFF
    output_hex_file[fib_start + 15] = (dummy_hardfault_handler >> 24) & 0xFF

    output_hex_file[fib_start + 16] = (dummy_blank >> 0) & 0xFF
    output_hex_file[fib_start + 17] = (dummy_blank >> 8) & 0xFF
    output_hex_file[fib_start + 18] = (dummy_blank >> 16) & 0xFF
    output_hex_file[fib_start + 19] = (dummy_blank >> 24) & 0xFF

    # Write FIB to the file in little endian
    output_hex_file[fib_start + 20] = (start >> 0) & 0xFF
    output_hex_file[fib_start + 21] = (start >> 8) & 0xFF
    output_hex_file[fib_start + 22] = (start >> 16) & 0xFF
    output_hex_file[fib_start + 23] = (start >> 24) & 0xFF

    output_hex_file[fib_start + 24] = (size >> 0) & 0xFF
    output_hex_file[fib_start + 25] = (size >> 8) & 0xFF
    output_hex_file[fib_start + 26] = (size >> 16) & 0xFF
    output_hex_file[fib_start + 27] = (size >> 24) & 0xFF

    output_hex_file[fib_start + 28] = (crc32 >> 0) & 0xFF
    output_hex_file[fib_start + 29] = (crc32 >> 8) & 0xFF
    output_hex_file[fib_start + 30] = (crc32 >> 16) & 0xFF
    output_hex_file[fib_start + 31] = (crc32 >> 24) & 0xFF

    output_hex_file[fib_start + 32] = (fw_rev >> 0) & 0xFF
    output_hex_file[fib_start + 33] = (fw_rev >> 8) & 0xFF
    output_hex_file[fib_start + 34] = (fw_rev >> 16) & 0xFF
    output_hex_file[fib_start + 35] = (fw_rev >> 24) & 0xFF

    output_hex_file[fib_start + 36] = (checksum >> 0) & 0xFF
    output_hex_file[fib_start + 37] = (checksum >> 8) & 0xFF
    output_hex_file[fib_start + 38] = (checksum >> 16) & 0xFF
    output_hex_file[fib_start + 39] = (checksum >> 24) & 0xFF

    #pad the rest of the file
    for i in range(fib_start + dummy_fib_size + fib_size, trim_area_start):
        output_hex_file[i] = 0xFF

    # Read in configuration data from the config parameter in targets.json
    configData = Config('NCS36510')
    paramData = configData.get_target_config_data()
    for v in paramData.values():
        if (v.name == "target.mac-addr-high"):
            mac_addr_high = int(v.value, 16)
        elif (v.name == "target.mac-addr-low"):
            mac_addr_low = int(v.value, 16)
        elif (v.name == "target.32KHz-clk-trim"):
            clk_32k_trim = int(v.value, 16)
        elif (v.name == "target.32MHz-clk-trim"):
            clk_32m_trim = int(v.value, 16)
        elif (v.name == "target.rssi-trim"):
            rssi = int(v.value, 16)
        elif (v.name == "target.txtune-trim"):
            txtune = int(v.value, 16)
        else:
            print("Not a valid param")

    output_hex_file[trim_area_start + 0] = mac_addr_low & 0xFF
    output_hex_file[trim_area_start + 1] = (mac_addr_low >> 8) & 0xFF
    output_hex_file[trim_area_start + 2] = (mac_addr_low >> 16) & 0xFF
    output_hex_file[trim_area_start + 3] = (mac_addr_low >> 24) & 0xFF

    output_hex_file[trim_area_start + 4] = mac_addr_high & 0xFF
    output_hex_file[trim_area_start + 5] = (mac_addr_high >> 8) & 0xFF
    output_hex_file[trim_area_start + 6] = (mac_addr_high >> 16) & 0xFF
    output_hex_file[trim_area_start + 7] = (mac_addr_high >> 24) & 0xFF

    output_hex_file[trim_area_start + 8] = clk_32k_trim & 0xFF
    output_hex_file[trim_area_start + 9] = (clk_32k_trim >> 8) & 0xFF
    output_hex_file[trim_area_start + 10] = (clk_32k_trim >> 16) & 0xFF
    output_hex_file[trim_area_start + 11] = (clk_32k_trim >> 24) & 0xFF

    output_hex_file[trim_area_start + 12] = clk_32m_trim & 0xFF
    output_hex_file[trim_area_start + 13] = (clk_32m_trim >> 8) & 0xFF
    output_hex_file[trim_area_start + 14] = (clk_32m_trim >> 16) & 0xFF
    output_hex_file[trim_area_start + 15] = (clk_32m_trim >> 24) & 0xFF

    output_hex_file[trim_area_start + 16] = rssi & 0xFF
    output_hex_file[trim_area_start + 17] = (rssi >> 8) & 0xFF
    output_hex_file[trim_area_start + 18] = (rssi >> 16) & 0xFF
    output_hex_file[trim_area_start + 19] = (rssi >> 24) & 0xFF

    output_hex_file[trim_area_start + 20] = txtune & 0xFF
    output_hex_file[trim_area_start + 21] = (txtune >> 8) & 0xFF
    output_hex_file[trim_area_start + 22] = (txtune >> 16) & 0xFF
    output_hex_file[trim_area_start + 23] = (txtune >> 24) & 0xFF

    # pad the rest of the area with 0xFF
    for i in range(trim_area_start + trim_size, user_code_start):
        output_hex_file[i] = 0xFF

    #merge two hex files
    output_hex_file.merge(input_hex_file, overlap='error')

    # Write out file(s)
    output_hex_file.tofile(file_name_hex, 'hex')
    output_hex_file.tofile(file_name_bin, 'bin')
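The FIB's last word is a plain additive check over the other four fields, as the print statement above shows. A hedged numeric illustration with made-up values:

import binascii

start, size, fw_rev = 0x00003000, 0x0001F400, 0x01000100
firmware = bytes(range(256)) * 4                   # stand-in for the image data
crc = binascii.crc32(firmware) & 0xFFFFFFFF
checksum = (start + size + crc + fw_rev) & 0xFFFFFFFF
print("crc32 0x%08X, checksum 0x%08X" % (crc, checksum))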
Example #38
def checkHash(packet):
    h = packet.hash
    packet.hash = 0
    result = (h == binascii.crc32(packet.__serialize__()) & 0xffffffff)
    packet.hash = h
    return result
Example #39
# coding: utf-8

import binascii

print(binascii.crc32("The quick brown fox jumped over the lazy dog.".encode()))
Example #40
jarFilePath = jarDirPath + "pixiedust.jar"

dir = os.path.dirname(jarDirPath)
if not os.path.exists(dir):
    os.makedirs(dir)

def installPixiedustJar():
    with pkg_resources.resource_stream(__name__, "resources/pixiedust.jar") as resJar:
        with open( jarFilePath, 'wb+' ) as installedJar:
            shutil.copyfileobj(resJar, installedJar)
            print("Pixiedust runtime updated. Please restart kernel")

copyFile = True
if os.path.isfile(jarFilePath):
    with open( jarFilePath, 'rb' ) as installedJar:
        installedCRC = binascii.crc32( installedJar.read() )
        with pkg_resources.resource_stream(__name__, "resources/pixiedust.jar") as resJar:
            copyFile = installedCRC != binascii.crc32( resJar.read() )

if copyFile:
    installPixiedustJar()

def checkVersion():
    import json
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen

    def printHTML(html):
        from IPython.display import display, HTML
Example #41
        typeid = '0x' + typeid

        cleanline = nametype.group(1) + nametype.group(
            3) + '= ' + nametype.group(4)
        cleanline = re.sub(r' [a-zA-Z0-9_]+\:flags\.[0-9]+\?true', '',
                           cleanline)
        cleanline = cleanline.replace('<',
                                      ' ').replace('>',
                                                   ' ').replace('  ', ' ')
        cleanline = re.sub(r'^ ', '', cleanline)
        cleanline = re.sub(r' $', '', cleanline)
        cleanline = cleanline.replace(':bytes ', ':string ')
        cleanline = cleanline.replace('?bytes ', '?string ')
        cleanline = cleanline.replace('{', '')
        cleanline = cleanline.replace('}', '')
        countTypeId = binascii.crc32(binascii.a2b_qp(cleanline))
        if (countTypeId < 0):
            countTypeId += 2**32
        countTypeId = '0x' + re.sub(r'^0x|L$', '', hex(countTypeId))
        if (typeid != countTypeId):
            print('Warning: counted ' + countTypeId +
                  ' mismatch with provided ' + typeid + ' (' + cleanline + ')')
            continue

        params = nametype.group(3)
        restype = nametype.group(4)
        if (restype.find('<') >= 0):
            templ = re.match(r'^([vV]ector<)([A-Za-z0-9\._]+)>$', restype)
            if (templ):
                vectemplate = templ.group(2)
                if (re.match(r'^[A-Z]', vectemplate)
Example #42
def hashPacket(packet):
    packet.hash = 0
    packet.hash = binascii.crc32(packet.__serialize__()) & 0xffffffff
Example #43
def getCRC32(input, prevRes=0):
    from binascii import crc32
    return crc32(input, prevRes)
Example #44
def CRC32(buf):
		import binascii
		buf = (binascii.crc32(buf) & 0xFFFFFFFF)
		return str("%08X" % buf)
Example #45
c = s[:BS]  # our first cipher block

# PNG signature, chunk size, our dummy chunk type
p = PNGSIG + struct.pack(">I", size) + chunktype

#let's generate our IV
c = "".join([chr(ord(c[i]) ^ ord(p[i])) for i in range(BS)])
IV = AES.new(key, AES.MODE_ECB).decrypt(c)

ctr = Counter.new(128, initial_value=int(IV.encode('hex'), 16))
result = AES.new(key, AES.MODE_CTR, counter=ctr).decrypt(result)

result += pad(4 + len(t[8:]))
#not fixing the CRC on the decrypted file - lazy :D
result += struct.pack(">I", binascii.crc32(result[0xc:]) % 0x100000000)
#we append the whole target image
result += t[8:]

result += pad(len(result))

ctr = Counter.new(128, initial_value=int(IV.encode('hex'), 16))
result = AES.new(key, AES.MODE_CTR, counter=ctr).encrypt(result)
#write the CRC of the remaining of s at the end of our dummy block

result += struct.pack(">I", binascii.crc32(result[len(s) - 8:]) % 0x100000000)
result += s[-12:]  # our IEND chunk

#we have our result, key and IV

#generate the result file
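The comments above lean on how PNG chunk CRCs work: the stored CRC-32 covers the 4-byte chunk type plus the chunk data and sits big-endian right after the data. A stand-alone sketch that builds a valid chunk (the IEND chunk, whose bytes are fixed by the PNG spec):

import struct
import binascii

def png_chunk(chunk_type: bytes, data: bytes) -> bytes:
    # length, type, data, then CRC-32 over type + data.
    crc = binascii.crc32(chunk_type + data) & 0xFFFFFFFF
    return struct.pack(">I", len(data)) + chunk_type + data + struct.pack(">I", crc)

assert png_chunk(b"IEND", b"") == b"\x00\x00\x00\x00IEND\xaeB`\x82"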
Example #46
    def thg_encode(self, args):
        """modulo referente a encode de estrings"""
        arg_mensage = args.split(" ")
        if arg_mensage[0] == "":
            print("""suporte encode:

Este módulo fornece funções para codificar dados binários em caracteres ASCII 
imprimíveis e decodificar essas codificações de volta para dados binários.
Ele fornece funções de codificação e decodificação para as codificações 
especificadas em RFC 3548 ,que define os algoritmos Base16, Base32 e Base64,
e para as codificações Ascii85 e Base85 padrão de fato.

a2b_uu
b2a_uu
a2b_base64
b2a_base64
a2b_qp
b2a_qp
a2b_hqx
rledecode_hqx
rlecode_hqx
b2a_hqx
crc_hqx
crc32
b2a_hex
a2b_hex
hexlify
unhexlify
Charcode
binary
base62
basen
bcd
ur
unicode_normalize
qp_encoding
        encode type[2,16,32,64]  str
        
        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))

        elif arg_mensage[0] == "64":
            arg_mensage[1] = arg_mensage[1].encode('ascii')
            base64_bytes = base64.b64encode(arg_mensage[1])
            by_to_st(base64_bytes)
        elif arg_mensage[0] == "32":
            arg_mensage[1] = arg_mensage[1].encode('ascii')
            b32encode_bytes = base64.b32encode(arg_mensage[1])
            by_to_st(b32encode_bytes)
        elif arg_mensage[0] == "16":
            arg_mensage[1] = arg_mensage[1].encode('ascii')
            b16encode_bytes = base64.b16encode(arg_mensage[1])
            by_to_st(b16encode_bytes)
        elif arg_mensage[0] == "a85encode":
            arg_mensage[1] = arg_mensage[1].encode('ascii')
            a85encode_bytes = base64.a85encode(arg_mensage[1])
            by_to_st(a85encode_bytes)
        elif arg_mensage[0] == "b85encode":
            arg_mensage[1] = arg_mensage[1].encode('ascii')
            b85encode_bytes = base64.b85encode(arg_mensage[1])
            by_to_st(b85encode_bytes)
        elif arg_mensage[0] == "a2b_uu":
            if arg_mensage[1] == "help":
                print(
                    """{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Converta uma única linha de dados uuencodificados de volta em binários e retorne os dados binários. As linhas normalmente contêm 45 bytes (binários), exceto a última linha. Os dados da linha podem ser seguidos de espaços em branco.""".format(
                        YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st((binascii.a2b_uu(arg_mensage[1])))
        elif arg_mensage[0] == "a2b_base64":
            if arg_mensage[1] == "help":
                print(
                    """{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta dados binários em uma linha de caracteres ASCII na codificação base64. O valor de retorno é a linha convertida, incluindo um caractere de nova linha. O comprimento dos dados deve ser de no máximo 57 para aderir ao padrão base64.""".format(
                        YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st(binascii.a2b_base64(arg_mensage[1]))
        elif arg_mensage[0] == "b2a_base64":
            if arg_mensage[1] == "help":
                print(
                    """{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Converta dados binários em uma linha de caracteres ASCII na codificação base64. O valor de retorno é a linha convertida, incluindo um caractere de nova linha. O comprimento dos dados deve ser de no máximo 57 para aderir ao padrão base64.""".format(
                        YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st(binascii.b2a_base64(arg_mensage[1].encode()))
        elif arg_mensage[0] == "a2b_qp":
            if arg_mensage[1] == "help":
                print(
                    """{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta um bloco de dados imprimíveis entre aspas de volta em binários e retorne os dados binários. Mais de uma linha pode ser passada por vez. Se o cabeçalho do argumento opcional estiver presente e verdadeiro, os sublinhados serão decodificados como espaços.""".format(
                        YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st(binascii.a2b_qp(arg_mensage[1]))
        elif arg_mensage[0] == "b2a_qp":
            if arg_mensage[1] == "help":
                print(
                    """{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta dados binários em uma (s) linha (s) de caracteres ASCII em codificação imprimível entre aspas. O valor de retorno é a (s) linha (s) convertida (s). Se o argumento opcional quotetabs estiver presente e verdadeiro, todas as tabulações e espaços serão codificados. Se o argumento opcional istext estiver presente e verdadeiro, as novas linhas não serão codificadas, mas os espaços em branco finais serão codificados. Se o cabeçalho do argumento opcional estiver presente e verdadeiro, os espaços serão codificados como sublinhados de acordo com RFC1522. Se o cabeçalho do argumento opcional estiver presente e for falso, os caracteres de nova linha também serão codificados; caso contrário, a conversão de alimentação de linha pode corromper o fluxo de dados binários.""".format(
                        YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st(binascii.b2a_qp(arg_mensage[1].encode()))
        elif arg_mensage[0] == "a2b_hqx":
            if arg_mensage[1] == "help":
                print("""{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED}Converta dados ASCII formatados de binhex4 em binários, sem fazer a descompressão RLE. A string deve conter um número completo de bytes binários ou (no caso da última parte dos dados binhex4) ter os bits restantes zero.
""".format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st(binascii.a2b_hqx(arg_mensage[1]))
        elif arg_mensage[0] == "rledecode_hqx":
            if arg_mensage[1] == "help":
                print(
                    """{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Execute a descompressão RLE nos dados, de acordo com o padrão binhex4. O algoritmo usa 0x90 após um byte como um indicador de repetição, seguido por uma contagem. Uma contagem de 0 especifica um valor de byte de 0x90 . A rotina retorna os dados descompactados, a menos que os dados de entrada de dados terminem em um indicador de repetição órfão, caso em que a exceção Incompleta é levantada.""".format(
                        YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st((binascii.rledecode_hqx(arg_mensage[1].encode())))
        elif arg_mensage[0] == "rlecode_hqx":
            if arg_mensage[1] == "help":
                print(
                    """{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Execute a compactação RLE no estilo binhex4 nos dados e retorne o resultado.""".format(
                        YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st((binascii.rlecode_hqx(arg_mensage[1].encode())))
        elif arg_mensage[0] == "b2a_hqx":
            if arg_mensage[1] == "help":
                print("""{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Execute a conversão hexbin4 binário para ASCII e retorne a string resultante. O argumento já deve ser codificado por RLE e ter um comprimento divisível por 3 (exceto possivelmente o último fragmento).
""".format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st((binascii.b2a_hqx(arg_mensage[1].encode())))
        elif arg_mensage[0] == "crc_hqx":
            if arg_mensage[1] == "help":
                print("""{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Calcule o valor binhex4 crc dos dados , começando com um crc inicial e retornando o resultado.
""".format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st(
                    (binascii.crc_hqx(arg_mensage[1].encode(), int(arg_mensage[2]))))
        elif arg_mensage[0] == "crc32":
            if arg_mensage[1] == "help":
                print("""{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Calcule CRC-32, a soma de verificação de dados de 
                32 bits, começando com um crc inicial. Isso é consistente com a soma de verificação do arquivo ZIP. 
                Uma vez que o algoritmo é projetado para uso como um algoritmo de soma de verificação, não é adequado 
                para uso como um algoritmo de hash geral. 

{YELLOW}Nota{YELLOW}{RED} Para gerar o mesmo valor numérico em todas as versões e plataformas Python, {RED}{BLUE}use crc32 (dados) & 0xffffffff{BLUE}{RED}. Se você estiver usando apenas a soma de verificação no formato binário compactado, isso não é necessário, pois o valor de retorno é a representação binária correta de 32 bits, independentemente do sinal.
        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st((binascii.crc32(arg_mensage[1].encode())))
        elif arg_mensage[0] == "hexlify":
            if arg_mensage[1] == "help":
                print("""{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Retorna a representação hexadecimal dos dados 
                binários . Cada byte de dados é convertido na representação hexadecimal de 2 dígitos correspondente. 
                A string resultante é, portanto, o dobro do comprimento dos dados . 

        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st(
                    (binascii.hexlify(arg_mensage[1].encode(), arg_mensage[2].encode())))
        elif arg_mensage[0] == "b2a_hex":
            if arg_mensage[1] == "help":
                print("""{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} hex
        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st(
                    (binascii.b2a_hex(arg_mensage[1].encode())))
        elif arg_mensage[0] == "unhexlify":
            if arg_mensage[1] == "help":
                print("""{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Retorna os dados binários representados pela string hexadecimal hexstr . Esta função é o inverso de b2a_hex () . hexstr deve conter um número par de dígitos hexadecimais (que podem ser maiúsculas ou minúsculas), caso contrário, um TypeError é gerado.

        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st((binascii.unhexlify(arg_mensage[1].encode())))
        elif arg_mensage[0] == "b2a_uu":
            if arg_mensage[1] == "help":
                print("""{YELLOW}a2b_uu{YELLOW}{BLUE} =>{BLUE}{RED} Converta dados binários em uma linha de caracteres ASCII, o valor de retorno é a linha convertida, incluindo um caractere de nova linha. O comprimento dos dados deve ser de no máximo 45.

        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                by_to_st(
                    (binascii.b2a_uu(arg_mensage[1].encode(), int(arg_mensage[2]))))
        elif arg_mensage[0] == "charcode":
            if arg_mensage[1] == "help":
                print("""{YELLOW}charcode{YELLOW}{BLUE} =>{BLUE}{RED}converte string em charcode
        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                print(' '.join(str(ord(c)) for c in arg_mensage[1]))
        elif arg_mensage[0] == "binary":
            if arg_mensage[1] == "help":
                print("""{YELLOW}binary{YELLOW}{BLUE} =>{BLUE}{RED}converte string em binary
        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                print(' '.join(format(ord(x), 'b') for x in arg_mensage[1]))
        elif arg_mensage[0] == "base62":
            if arg_mensage[1] == "help":
                print("""{YELLOW}base62{YELLOW}{BLUE} =>{BLUE}{RED}converte string em base62
        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                print(decode62(arg_mensage[1]))
        elif arg_mensage[0] == "basen":
            if arg_mensage[1] == "help":
                print("""{YELLOW}basen{YELLOW}{BLUE} =>{BLUE}{RED}converte decimal em basen
        """.format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
            else:
                print(numpy.base_repr(int(arg_mensage[1]), base=int(arg_mensage[2])))
        elif arg_mensage[0] == "url":
            try:
                if arg_mensage[1] == "help":
                    print(
                        """{YELLOW}url_encode{YELLOW}{BLUE} =>{BLUE}{RED}encode personalidado para url\nencode url_encode safa[] encoding""".format(
                            YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
                else:
                    print(quote(arg_mensage[1], safe=arg_mensage[2], encoding=arg_mensage[3]))
            except IndexError:
                print("digite a sintaxe correta\nncode url_encode safa[] encoding\n ou use o comando help")
        elif arg_mensage[0] == "unicode_normalize":
            try:
                if arg_mensage[1] == "help":
                    print("""{YELLOW}unicode_normalize{YELLOW}{BLUE} =>{BLUE}{RED}Transforme caracteres Unicode em uma das formas de normalização['NFC', 'NFKC', 'NFD','NFKD']\n                   
{YELLOW}NFD{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Canonical Decomposition
{YELLOW}NFC{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Canonical Composition
{YELLOW}NFKD{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Compatibility Decomposition
{YELLOW}NFKC{YELLOW}{BLUE} =>{BLUE}{RED}Normalisation Form Compatibility Composition    
encode unicode_normalize <form: 'NFC', 'NFKC', 'NFD' or 'NFKD'> <text>\n""".format(YELLOW=Fore.YELLOW, BLUE=Fore.BLUE,
                                                                               RED=Fore.RED))
                else:
                    print(unicodedata.normalize(arg_mensage[1], arg_mensage[2]))
            except IndexError:
                print("digite a sintaxe correta\nncode url_encode safa[] encoding\n ou use o comando help")
        elif arg_mensage[0] == "qp_encoding":
            try:
                if arg_mensage[1] == "help":
                    print("""{YELLOW}qp_encoding{YELLOW}{BLUE} =>{BLUE}{RED}
                    Quoted-Printable, ou QP encoding, 
                    é uma codificação que usa caracteres ASCII imprimíveis (alfanuméricos e o sinal de igual '=') 
                    para transmitir dados de 8 bits em um caminho de dados de 7 bits ou, geralmente, em um meio que não é 8- um pouco limpo. 
                    É definido como uma codificação de transferência de conteúdo MIME para uso em e-mail.
                    QP funciona usando o sinal de igual '=' como um caractere de escape. Ele também limita o comprimento da linha a 76, pois alguns softwares têm limites no comprimento da linha\nencode qp_encoding TXT encode""".format(
                        YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
                else:
                    encoded = quopri.encodestring(arg_mensage[1].encode(arg_mensage[2]))
                    print(encoded.decode())
            except IndexError:
                print("digite a sintaxe correta\nencode qp_encoding é utf-16\n ou use o comando help")
        elif arg_mensage[0] == "idna":
            try:
                if arg_mensage[1] == "help":
                    print(
                        """{YELLOW}idna{YELLOW}{BLUE} =>{BLUE}{RED}encode personalidado para url\nencode url_encode safa[] encoding""".format(
                            YELLOW=Fore.YELLOW, BLUE=Fore.BLUE, RED=Fore.RED))
                else:
                    print(idna.encode(arg_mensage[1]).decode(arg_mensage[2]))
            except IndexError:
                print("digite a sintaxe correta\nncode idna string encoding\n ou use o comando help")

        else:
            pass
        try:
            pass

        except IndexError:
            print("verificar a saida")
Beispiel #47
0
def hash_func(port):
    m = 6
    size = 2**m
    hash_val = binascii.crc32(str(port).encode())
    return hash_val % size
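hash_func above folds a CRC-32 into a 2**m identifier space; a small usage sketch of the same idea (the bucket_for name and the port numbers are illustrative only).
import binascii

def bucket_for(port, m=6):
    # Same idea as hash_func: CRC-32 of the key, reduced modulo 2**m buckets.
    return binascii.crc32(str(port).encode()) % (2 ** m)

# Keys that land in the same bucket would be served by the same node.
print([bucket_for(p) for p in (5000, 5001, 5002)])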
Beispiel #48
0
def RC4_main(filename, key):
    # Open the input file
    try:
        fin = open(filename, 'rb')
    except:
        print('Failed to open the file!')
    print(filename)
    # Open the output file
    if filename[-4:] == '.RC4':
        eID = 1
        key = key.encode()
        ofilename = filename[:-4]
    else:
        eID = 2
        # key = input('Enter the encryption key: ').encode()
        key = key.encode()
        ofilename = filename + '.RC4'
    key = Coding(CreatKey(key))
    key = UpdataKey(key)

    fout = open(ofilename, 'wb')
    print(ofilename)
    # Decrypt
    if eID == 1:
        # Read the stored file length
        filelen = struct.unpack('I', fin.read(4))[0]
        print('FileLen =', filelen, '\n......')
        while 1:
            ps = fin.read(2)
            if not ps:
                break
            packsize = struct.unpack('H', ps)[0]
            # Read a block of data
            dd = fin.read(packsize)
            # Decrypt the block
            dd = Coding(dd)
            x = RC4(key, len(key), dd, len(dd))
            key = UpdataKey(key)
            # crc
            crc = struct.unpack('I', fin.read(4))[0]
            if binascii.crc32(x) != crc:
                print('CRC32 check failed!', crc, binascii.crc32(x))
            fout.write(x)
        # Trim the trailing padding
        fout.truncate(filelen)
    # Encrypt
    elif eID == 2:
        # Get the file length
        fin.seek(0, 2)
        filelen = fin.tell()
        print('FileLen =', filelen, '\n......')
        fin.seek(0, 0)
        fout.write(struct.pack('I', filelen))
        while 1:
            # Read a block of data
            dd = fin.read(65534)
            if not dd:
                # End of file
                break
            # Pad the tail to an even length
            srl = len(dd)
            if srl % 2:
                srl += 1
                dd += b'\0'
            # crc
            crc = struct.pack('I', binascii.crc32(dd))
            # Encrypt the block
            dd = Coding(dd)
            x = RC4(key, len(key), dd, len(dd))
            key = UpdataKey(key)
            # Write to the output file
            fout.write(struct.pack('H', srl))
            fout.write(x)
            fout.write(crc)
    fin.close()
    fout.close()
    print('encode success')
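RC4_main stores the plaintext length up front and then one <size, ciphertext, CRC-32> record per block. Below is a simplified sketch of that container layout with the cipher step left out; write_blocks and read_blocks are illustrative names and the block size is arbitrary, so this only demonstrates the framing and per-block CRC check.
import binascii
import io
import struct

def write_blocks(fout, payload, blocksize=65534):
    # Total plaintext length first, then one <size, block, crc32> record per block.
    fout.write(struct.pack('I', len(payload)))
    for off in range(0, len(payload), blocksize):
        block = payload[off:off + blocksize]
        if len(block) % 2:        # pad to an even length, as RC4_main does
            block += b'\0'
        fout.write(struct.pack('H', len(block)))
        fout.write(block)
        fout.write(struct.pack('I', binascii.crc32(block)))

def read_blocks(fin):
    # Read the records back, verify each CRC-32, and trim the final padding.
    filelen = struct.unpack('I', fin.read(4))[0]
    out = bytearray()
    while True:
        ps = fin.read(2)
        if not ps:
            break
        size = struct.unpack('H', ps)[0]
        block = fin.read(size)
        crc = struct.unpack('I', fin.read(4))[0]
        if binascii.crc32(block) != crc:
            raise ValueError('CRC32 mismatch')
        out += block
    return bytes(out[:filelen])

buf = io.BytesIO()
write_blocks(buf, b'example payload of odd length!!')
buf.seek(0)
assert read_blocks(buf) == b'example payload of odd length!!'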
Beispiel #49
0
 def frame(cls, destination, command, data, index=0):
     packet = struct.pack('<IBB', destination, command, index) + data
     cs = binascii.crc32(packet) & ((1 << 32) - 1)
     cs_bytes = struct.pack('<I', cs)
     return cls.cobs_encode(packet + cs_bytes), cs_bytes
Beispiel #50
0
def calculate_crc32(response):
    return crc32(to_bytes(response.content)) & 0xffffffff
Beispiel #51
0
def docker_tagify(cid: str) -> str:
    """modify a challenge id so that it is an acceptable docker tag"""
    tag = "r8:" + re.sub(r"[^a-zA-Z0-9_.-]", "_", cid)
    if len(tag) > 128:
        tag = tag[:118] + hex(binascii.crc32(tag.encode()))
    return tag
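A note on the truncation arithmetic in docker_tagify: hex() of a 32-bit CRC is at most 10 characters ("0x" plus 8 hex digits), so cutting the tag at 118 characters before appending keeps the result within the 128-character limit. A usage sketch, with a made-up challenge id:
import binascii
import re

cid = "challenge/with spaces/" + "x" * 200
tag = "r8:" + re.sub(r"[^a-zA-Z0-9_.-]", "_", cid)
if len(tag) > 128:
    # 118 + len("0x????????") <= 128, so the shortened tag stays within the limit.
    tag = tag[:118] + hex(binascii.crc32(tag.encode()))
print(len(tag), tag[-12:])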
Beispiel #52
0
 def unframe(cls, packet):
     packet = cls.cobs_decode(packet)
     cs = binascii.crc32(packet) & ((1 << 32) - 1)
     if cs != 0x2144DF1C:
         raise DecodeError("BAD CRC: " + hex(cs), repr(packet))
     return struct.unpack('<IBB', packet[0:6]) + (packet[6:-4], packet[-4:])
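frame() appends the little-endian CRC-32 of the packet, and unframe() re-runs CRC-32 over packet plus checksum and compares against 0x2144DF1C; that constant is the fixed CRC-32 residue obtained whenever a message is followed by its own little-endian CRC. A minimal check of that property (the payload is arbitrary):
import binascii
import struct

message = b"any payload"
crc = binascii.crc32(message) & 0xffffffff
framed = message + struct.pack('<I', crc)

# CRC-32 over data plus its own little-endian CRC always yields 0x2144DF1C,
# which is exactly what unframe() tests for above.
assert binascii.crc32(framed) & 0xffffffff == 0x2144DF1C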
Beispiel #53
0
def crc32(s):
    return (~binascii.crc32(s, -1)) & 0xFFFFFFFF
Beispiel #54
0
def save_array_as_PNG(img, fname, kwArgCheck=None, ftype_req=-1):
    """
    Save an array as a PNG image.

    img -- a 3D NumPy array of type uint8 with shape (ny,nx,3)
    fname -- output file name
    ftype_req -- filter type to be used

    The PNG specification defines 5 different possible filters which are
    numbered 0 to 4 (inclusively). Filter #0 is "no filtering". If the user
    defines "ftype_req" as one of the identifying integers then that filter will
    be used for the entire PNG file. If the user defines "ftype_req" as "-1" (or
    does not define "ftype_req" at all) then adaptive filtering will be used
    whereby an attempt is made to predict which filtering method will yield the
    smallest compressed stream.
    """

    # Import standard modules ...
    import binascii
    import zlib

    # Import special modules ...
    try:
        import numpy
    except:
        raise Exception(
            "\"numpy\" is not installed; run \"pip install --user numpy\""
        ) from None

    # Import sub-functions ...
    from .paeth_filter import paeth_filter

    # Check keyword arguments ...
    if kwArgCheck is not None:
        print(
            f"WARNING: \"{__name__}\" has been called with an extra positional argument"
        )

    # Find image size ...
    ny, nx, nc = img.shape

    # Check image ...
    if not img.dtype == "uint8":
        raise TypeError("\"img\" must be a \"uint8\" array") from None
    if nc != 3:
        raise Exception("\"img\" must be a 3-channel array") from None

    # Try opening the PNG ...
    with open(fname, "wb") as fobj:
        # **********************************************************************
        # *                        WRITE THE SIGNATURE                         *
        # **********************************************************************

        fobj.write(binascii.unhexlify("89504E470D0A1A0A"))

        # **********************************************************************
        # *                  CREATE "IHDR" CHUNK AND WRITE IT                  *
        # **********************************************************************

        ihdr = bytearray()
        ihdr += numpy.uint32(13).byteswap().tobytes()  # Length
        ihdr += bytearray("IHDR", encoding="ascii")  # Chunk type
        ihdr += numpy.uint32(nx).byteswap().tobytes()  # IHDR : Width
        ihdr += numpy.uint32(ny).byteswap().tobytes()  # IHDR : Height
        ihdr += numpy.uint8(8).byteswap().tobytes()  # IHDR : Bit depth
        ihdr += numpy.uint8(2).byteswap().tobytes()  # IHDR : Colour type
        ihdr += numpy.uint8(
            0).byteswap().tobytes()  # IHDR : Compression method
        ihdr += numpy.uint8(0).byteswap().tobytes()  # IHDR : Filter method
        ihdr += numpy.uint8(0).byteswap().tobytes()  # IHDR : Interlace method
        ihdr += numpy.uint32(binascii.crc32(
            ihdr[4:])).byteswap().tobytes()  # CRC-32
        fobj.write(ihdr)
        del ihdr

        # **********************************************************************
        # *                  CREATE "IDAT" CHUNK AND WRITE IT                  *
        # **********************************************************************

        idat = bytearray()
        idat += numpy.uint32(0).byteswap().tobytes()  # Length
        idat += bytearray("IDAT", encoding="ascii")  # Chunk type
        stream = bytearray()

        # Loop over rows ...
        for iy in range(ny):
            row = numpy.zeros((5, nc, nx), dtype=numpy.uint8)

            # Calculate stream for "none" filter (if required) ...
            if ftype_req in [-1, 0]:
                ftype = 0
                for ix in range(nx):
                    row[ftype, :, ix] = img[iy, ix, :]

            # Calculate stream for "sub" filter (if required) ...
            if ftype_req in [-1, 1]:
                ftype = 1
                for ix in range(nx):
                    for ic in range(nc):
                        if ix == 0:
                            p1 = numpy.int16(0)
                        else:
                            p1 = img[iy, ix - 1, ic].astype(numpy.int16)
                        diff = img[iy, ix, ic].astype(numpy.int16) - p1
                        diff = numpy.mod(diff, 256)
                        row[ftype, ic, ix] = diff.astype(numpy.uint8)

            # Calculate stream for "up" filter (if required) ...
            if ftype_req in [-1, 2]:
                ftype = 2
                for ix in range(nx):
                    for ic in range(nc):
                        if iy == 0:
                            p1 = numpy.int16(0)
                        else:
                            p1 = img[iy - 1, ix, ic].astype(numpy.int16)
                        diff = img[iy, ix, ic].astype(numpy.int16) - p1
                        diff = numpy.mod(diff, 256)
                        row[ftype, ic, ix] = diff.astype(numpy.uint8)

            # Calculate stream for "average" filter (if required) ...
            if ftype_req in [-1, 3]:
                ftype = 3
                for ix in range(nx):
                    for ic in range(nc):
                        if ix == 0:
                            p1 = numpy.int16(0)
                        else:
                            p1 = img[iy, ix - 1, ic].astype(numpy.int16)
                        if iy == 0:
                            p2 = numpy.int16(0)
                        else:
                            p2 = img[iy - 1, ix, ic].astype(numpy.int16)
                        diff = img[iy, ix, ic].astype(numpy.int16) - (
                            (p1 + p2) // numpy.int16(2))
                        diff = numpy.mod(diff, 256)
                        row[ftype, ic, ix] = diff.astype(numpy.uint8)

            # Calculate stream for "Paeth" filter (if required) ...
            if ftype_req in [-1, 4]:
                ftype = 4
                for ix in range(nx):
                    for ic in range(nc):
                        if ix == 0:
                            p1 = numpy.int16(0)
                        else:
                            p1 = img[iy, ix - 1, ic].astype(numpy.int16)
                        if iy == 0:
                            p2 = numpy.int16(0)
                        else:
                            p2 = img[iy - 1, ix, ic].astype(numpy.int16)
                        if ix == 0 or iy == 0:
                            p3 = numpy.int16(0)
                        else:
                            p3 = img[iy - 1, ix - 1, ic].astype(numpy.int16)
                        diff = img[iy, ix,
                                   ic].astype(numpy.int16) - paeth_filter(
                                       p1, p2, p3).astype(numpy.int16)
                        diff = numpy.mod(diff, 256)
                        row[ftype, ic, ix] = diff.astype(numpy.uint8)

            # Figure out which stream to use ...
            if ftype_req == -1:
                tmp1 = numpy.uint64(255 * nx)
                for ftype in range(5):
                    tmp2 = row[ftype, :, :].astype(numpy.uint64).sum()
                    if tmp2 < tmp1:
                        tmp1 = tmp2
                        ftype_best = ftype
            else:
                ftype_best = ftype_req

            # Use the best/requested stream for this row ...
            stream += numpy.uint8(ftype_best).byteswap().tobytes()
            for ix in range(nx):
                stream += row[ftype_best, :, ix].byteswap().tobytes()

            # Clean up ...
            del row

        idat += zlib.compress(stream, 9)  # IDAT : Data
        idat[0:4] = numpy.uint32(len(idat[8:])).byteswap().tobytes()  # Length
        idat += numpy.uint32(binascii.crc32(
            idat[4:])).byteswap().tobytes()  # CRC-32
        fobj.write(idat)
        del idat

        # **********************************************************************
        # *                  CREATE "IEND" CHUNK AND WRITE IT                  *
        # **********************************************************************

        iend = bytearray()
        iend += numpy.uint32(0).byteswap().tobytes()  # Length
        iend += bytearray("IEND", encoding="ascii")  # Chunk type
        iend += numpy.uint32(binascii.crc32(
            iend[4:])).byteswap().tobytes()  # CRC-32
        fobj.write(iend)
        del iend
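Each PNG chunk above is built by hand with numpy byteswaps; the key detail is that the CRC-32 covers the chunk type and data but not the length field. An equivalent sketch using struct (png_chunk is an illustrative helper, not part of the module above):
import binascii
import struct

def png_chunk(chunk_type, data):
    # Length (big-endian), type, data, then CRC-32 over type + data only.
    return (struct.pack(">I", len(data))
            + chunk_type
            + data
            + struct.pack(">I", binascii.crc32(chunk_type + data) & 0xffffffff))

# The IEND chunk carries no data, so its CRC covers just the four type bytes.
print(png_chunk(b"IEND", b"").hex())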
Beispiel #55
0
    def processList(self):
        import time
        # begin looping through the email list
        for line in exchangeDigest.itemArray:

            # get individual email
            xml = self.getItemXML(line[0], line[1])
            attr = exchangeDigest.c.service.ResolveNames(__inject={'msg': xml})
            data = attr.GetItemResponseMessage.Items.Message

            a = str(data.DateTimeSent)
            '''
      try:
        strptime = time.strptime()
      except AttributeError:
        import time
      '''

            dt_obj = time.strptime(a, "%Y-%m-%d %H:%M:%S")
            exchangeDigest.timeSent = str(int(time.mktime(dt_obj)))

            try:
                body = self.cleanText(data.Body.value)
                body_md5 = str(hashlib.md5(body).hexdigest())
            except AttributeError:
                body = ''
                body_md5 = ''

            sql = "select post_id from phpbb_posts where post_time = %s and post_checksum = %s"
            cursor.execute(sql, (exchangeDigest.timeSent, body_md5))
            rows = cursor.fetchall()
            copy = 0
            if (len(rows) > 0):
                copy = 1

        #  print data.From.Mailbox.Name
            if (copy == 0):
                try:
                    emailAddress = str(data.From.Mailbox.EmailAddress)
                except UnicodeEncodeError:
                    a = data.From.Mailbox.EmailAddress
                    b = unicode(a)
                    emailAddress = b.encode("utf-8")
                except AttributeError:
                    emailAddress = '*****@*****.**'

                tmp = emailAddress.partition('@')
                if (tmp[2] == 'yourdomain.com'):
                    exchangeDigest.From = tmp[0].lower()
                else:
                    exchangeDigest.From = emailAddress

                sql = "select * from phpbb_users where username = '******'"
                cursor.execute(sql)
                rows = cursor.fetchall()
                if (len(rows) > 0):
                    #User exists
                    for row in rows:
                        user_info = row
                else:
                    #User does not exist
                    try:
                        test = urllib2.urlopen(
                            "http://www.yourdomain.com/images/staff/" +
                            exchangeDigest.From + ".jpg")
                        user_avatar = "http://www.yourdomain.com/images/staff/" + exchangeDigest.From + ".jpg"
                    except urllib2.HTTPError:
                        user_avatar = "images/avatars/gallery/baby_cute_face.jpg"

                    email_hash = str(binascii.crc32(emailAddress))
                    salt = str(
                        hashlib.md5(exchangeDigest.timeSent).hexdigest())
                    sql = "insert into phpbb_users (user_type, group_id, user_permissions, user_perm_from, user_ip, user_regdate, username, username_clean, user_password, user_passchg, user_pass_convert, user_email, user_email_hash, user_birthday, user_lastvisit, user_lastmark, user_lastpost_time, user_lastpage, user_last_confirm_key, user_last_search, user_warnings, user_last_warning, user_login_attempts, user_inactive_reason, user_inactive_time, user_posts, user_lang, user_timezone, user_dst, user_dateformat, user_style, user_rank, user_colour, user_new_privmsg, user_unread_privmsg, user_last_privmsg, user_message_rules, user_full_folder, user_emailtime, user_topic_show_days, user_topic_sortby_type, user_topic_sortby_dir, user_post_show_days, user_post_sortby_type, user_post_sortby_dir, user_notify, user_notify_pm, user_notify_type, user_allow_pm, user_allow_viewonline, user_allow_viewemail, user_allow_massemail, user_options, user_avatar, user_avatar_type, user_avatar_width, user_avatar_height, user_sig, user_sig_bbcode_uid, user_sig_bbcode_bitfield, user_from, user_icq, user_aim, user_yim, user_msnm, user_jabber, user_website, user_occ, user_interests, user_actkey, user_newpasswd, user_form_salt, user_new, user_reminded, user_reminded_time, user_digest_filter_type, user_digest_format, user_digest_max_display_words, user_digest_max_posts, user_digest_min_words, user_digest_new_posts_only, user_digest_pm_mark_read, user_digest_remove_foes, user_digest_reset_lastvisit, user_digest_send_hour_gmt, user_digest_send_on_no_posts, user_digest_show_mine, user_digest_show_pms, user_digest_sortby, user_digest_type, user_digest_has_ever_unsubscribed, user_digest_no_post_text) values(0, 2, '00000000006xrqeiww\n\n\nzik0zi000000\nzik0zi000000\nzik0zi000000', 0, '10.1.12.70', %s, %s, %s, '', %s, 0, %s, %s, '', %s, %s, 0, '', '', 0, 0, 0, 0, 0, 0, 0, 'en', '-10.00', 0, 'D M d, Y g:i a', 1, 0, '', 0, 0, 0, 0, -3, 0, 0, 't', 'd', 0, 't', 'a', 0, 1, 0, 1, 1, 1, 1, 230271, %s, 2, 90, 90, '', '', '', '', '', '', '', '', '', '', '', '', '', '', %s, 1, 0, 0, 'ALL', 'HTML', 0, 0, 0, 0, 0, 0, 1, 0.00, 0, 1, 1, 'board', 'NONE', 0 ,0)"
                    cursor.execute(
                        sql,
                        (exchangeDigest.timeSent, exchangeDigest.From,
                         exchangeDigest.From, exchangeDigest.timeSent,
                         emailAddress, email_hash, exchangeDigest.timeSent,
                         exchangeDigest.timeSent, user_avatar, salt))
                    sql = "select * from phpbb_users where username = '******'"
                    cursor.execute(sql)
                    rows = cursor.fetchall()
                    for row in rows:
                        user_info = row
                exchangeDigest.user_id = str(user_info[0])

                try:
                    exchangeDigest.subj = str(data.Subject)
                except UnicodeEncodeError:
                    a = data.Subject
                    b = unicode(a)
                    exchangeDigest.subj = b.encode("utf-8")

                if (body != ''):
                    has_attachments = '0'
                    if (str(data.HasAttachments) == 'True'):
                        has_attachments = '1'

                    sql = "insert into phpbb_topics (forum_id, icon_id, topic_attachment, topic_approved, topic_reported, topic_title, topic_poster, topic_time, topic_time_limit, topic_views, topic_replies, topic_replies_real, topic_status, topic_type, topic_first_post_id, topic_first_poster_name, topic_first_poster_colour, topic_last_post_id, topic_last_poster_id, topic_last_poster_name, topic_last_poster_colour, topic_last_post_subject, topic_last_post_time, topic_last_view_time, topic_moved_id, topic_bumped, topic_bumper, poll_title, poll_start, poll_length, poll_max_options, poll_last_vote, poll_vote_change) values(%s, 0, %s, 1, 0, %s, %s, %s, 0, 0, 0, 0, 0, 0, %s, %s, '', %s, %s, %s, '', %s, %s, %s, 0, 0, 0, '', 0, 0, 1, 0, 0)"
                    cursor.execute(
                        sql,
                        (self.forum_id, has_attachments, exchangeDigest.subj,
                         exchangeDigest.user_id, exchangeDigest.timeSent,
                         exchangeDigest.post_id, exchangeDigest.From,
                         exchangeDigest.post_id, exchangeDigest.user_id,
                         exchangeDigest.From, exchangeDigest.subj,
                         exchangeDigest.timeSent, exchangeDigest.timeSent))
                    topic_id = str(cursor.lastrowid)

                    sql = "insert into phpbb_posts (topic_id, forum_id, poster_id, icon_id, poster_ip, post_time, post_approved, post_reported, enable_bbcode, enable_smilies, enable_magic_url, enable_sig, post_username, post_subject, post_text, post_checksum, post_attachment, bbcode_bitfield, bbcode_uid, post_postcount, post_edit_time, post_edit_reason, post_edit_user, post_edit_count, post_edit_locked) values(%s, %s, %s, 0, '10.1.12.70', %s, 1, 0, 1, 1, 1, 1, '', %s, %s, %s, %s, '', '', 0, 0, '', 0, 0, 0)"
                    cursor.execute(
                        sql, (topic_id, self.forum_id, exchangeDigest.user_id,
                              exchangeDigest.timeSent, exchangeDigest.subj,
                              body, body_md5, has_attachments))
                    exchangeDigest.post_id = str(cursor.lastrowid)

                    sql = "update phpbb_users set user_posts = user_posts+1 where user_id = %s"
                    cursor.execute(sql, (exchangeDigest.user_id))

                    if (has_attachments == '1'):
                        try:
                            for attachment in data.Attachments.FileAttachment:
                                try:
                                    id = str(line.AttachmentId._Id)
                                    self.writeFile(id, exchangeDigest.post_id,
                                                   topic_id,
                                                   exchangeDigest.user_id,
                                                   exchangeDigest.timeSent)
                                except AttributeError:
                                    id = str(
                                        getattr(attachment[1], "_Id", None))
                                    mimetype = str(
                                        getattr(attachment[1], "ContentType",
                                                None))
                                    if (id != "None"):
                                        self.writeFile(id,
                                                       exchangeDigest.post_id,
                                                       topic_id,
                                                       exchangeDigest.user_id,
                                                       exchangeDigest.timeSent)
                        except AttributeError:
                            error = AttributeError
Beispiel #56
0
 def test_same_as_binascii_crc32(self):
     foo = b'abcdefghijklmnop'
     crc = 2486878355
     self.assertEqual(binascii.crc32(foo), crc)
     self.assertEqual(zlib.crc32(foo), crc)
     self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))
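The test above confirms binascii.crc32 and zlib.crc32 agree; both also accept a running value as the second argument, so a checksum can be built incrementally. A small check of that equivalence (the byte strings are arbitrary):
import binascii
import zlib

a, b = b"abcdefgh", b"ijklmnop"
# Feeding the previous result back in as the starting value makes two calls
# equivalent to a single CRC over the concatenated data.
running = binascii.crc32(b, binascii.crc32(a))
assert running == binascii.crc32(a + b) == zlib.crc32(a + b)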
Beispiel #57
0
def scrapy_comment_user_ifeng(guid, username, adminid, address):
    '''
    Scrape the comments from a user's personal page on ifeng.com (Phoenix News)
    http://comment.ifeng.com/get? job=7 & format=json & pagesize=20 & _1460705534 & guid=65969467 & p=1
    '''
    username_decode = unquote(username)
    siteid = 2
    # Check whether the user already exists
    sql = 'select userid,adminid from system_site_user where siteid=%s and username=%s'
    r = mysql_cursor.query(sql, siteid, username_decode)
    if r:
        if int(adminid) != int(r[0]['adminid']):
            print 'Site account exists, but adminid does not match'
            processlog('auto_scrapyuser', 1, 'scrapy_comment_user_ifeng',
                       'Site account exists, but the adding admin does not match; now: %s, originally: %s' % (adminid, r[0]['adminid']))
            return
        print 'Site account exists'
        userid = r[0]['userid']
        setAddressStatus(userid, 1)
    else:
        processlog('auto_scrapyuser', 1, 'scrapy_comment_user_ifeng',
                   'Site account does not exist, adding: %s' % username)
        crc32_address = crc32(address) & 0xffffffff
        sql = 'insert into system_site_user(`siteid`, `username`,`createtime`, `adminid`, `address`, `crc32address`, `status`) values(%s, %s, now(), %s, %s, %s, 1)'
        userid = mysql_cursor.execute_lastrowid(sql, siteid, username_decode,
                                                adminid, address,
                                                crc32_address)

    headers = {
        'Host': 'comment.ifeng.com',
        'User-Agent':
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive'
    }
    # Scrape 6 pages by default
    for page in xrange(scrapy_page):
        url = 'http://comment.ifeng.com/get?job=7&format=json&pagesize=20&guid=%s&p=%s' % (
            guid, page)

        req = requests.get(url, headers=headers, timeout=timeout)
        if req.status_code == 200:
            data = json.loads(req.text)
            comments = data['comments']
            for comment in comments:
                _url = comment['doc_url']
                # Check whether the url is supported
                res = r'://(.*?)/'
                ret = re.findall(res, _url)
                if ret:
                    shorturl = ret[0]
                    if shorturl in [
                            'news.ifeng.com',
                    ]:

                        title = comment['doc_name']  # post title
                        content = comment['comment_contents']

                        createTime = time.strftime(
                            '%Y-%m-%d %H:%M:%S',
                            time.localtime(int(comment['create_time'])))
                        commentId = comment['comment_id']
                        nickname = comment['uname']

                        url_post = 'http://gentie.ifeng.com/view.html?docUrl=' + quote(
                            _url.encode('utf8')) + '&docName=' + quote(
                                title.encode('utf8'))

                        # Check whether the post has already been saved
                        sql = 'select postid from system_url_posts where `commentIds`=%s and createTime=%s and `adminid`=%s'
                        r = mysql_cursor.query(sql, commentId, createTime,
                                               adminid)
                        if not r:
                            # Check whether the url has already been added
                            crc32_url = crc32(url_post) & 0xffffffff
                            sql = 'select urlid from system_url_list where `crc32url`=%s and adminid=%s'
                            ret = mysql_cursor.query(sql, crc32_url, adminid)

                            if ret:  # already added
                                urlid = ret[0]['urlid']
                            else:
                                sql = 'insert into system_url_list(`siteid`, `title`, `url`, `crc32url`, `addtime`,`status`, `adminid`) values(%s, %s, %s, %s, now(), 1, %s)'
                                urlid = mysql_cursor.execute_lastrowid(
                                    sql, siteid, title, url_post, crc32_url,
                                    adminid)
                                processlog('auto_scrapyuser', 1,
                                           'scrapy_comment_user_ifeng',
                                           'url not added yet, adding url, urlid: %s' % urlid)

                            try:
                                # Save the post
                                sql = 'insert into system_url_posts(`urlid`, `userid`, `commentIds`, `content`, `nickname`'\
                                      ', `createTime`, `adminid`) values(%s,%s,%s,%s,%s,%s,%s)'
                                postid = mysql_cursor.execute_lastrowid(
                                    sql, urlid, userid, commentId, content,
                                    nickname, createTime, adminid)

                                print 'Saved post: %s; postid: %s; adminid: %s' % (
                                    nickname, postid, adminid)
                                processlog(
                                    'auto_scrapyuser', 1,
                                    'scrapy_comment_user_ifeng',
                                    'Saved post: %s; postid: %s; adminid: %s' %
                                    (nickname, postid, adminid))
                            except Exception, e:
                                if 'Data too long for column' in str(e):
                                    processlog(
                                        'auto_scrapyuser', 1,
                                        'scrapy_comment_user_ifeng',
                                        'Post content too long, truncating and re-inserting, urlid: %s' % urlid)
                                    content = content[:255]
                                    # Save the post
                                    sql = 'insert into system_url_posts(`urlid`, `userid`, `commentIds`, `content`, `nickname`'\
                                          ', `createTime`, `adminid`) values(%s,%s,%s,%s,%s,%s,%s)'
                                    postid = mysql_cursor.execute_lastrowid(
                                        sql, urlid, userid, commentId, content,
                                        nickname, createTime, adminid)

                                    print 'Saved post: %s; postid: %s; adminid: %s' % (
                                        nickname, postid, adminid)
                                    processlog(
                                        'auto_scrapyuser', 1,
                                        'scrapy_comment_user_ifeng',
                                        'Saved post: %s; postid: %s; adminid: %s' %
                                        (nickname, postid, adminid))
                                # Update the site_user status
                                setAddressStatus(userid, 0)
                        else:
                            print 'Post already added: commentId: %s' % commentId
                            # processlog('auto_scrapyuser', 1, 'scrapy_comment_user_ifeng', 'Post already added: commentId: %s' % commentId)

            # If this was the last page, exit the loop
            total = data['count']
            if (page + 1) * 20 >= total:
                break

        else:
            print req.text
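The scraper above stores crc32(url) & 0xffffffff in the crc32address and crc32url columns so duplicate checks can hit an integer index instead of comparing long strings. A Python 3 sketch of that idiom; the URL is illustrative, and a full-string comparison is still needed afterwards to rule out collisions.
import binascii

def crc32_index(text):
    # Unsigned CRC-32 of a UTF-8 string, suitable for an indexed integer column.
    return binascii.crc32(text.encode('utf-8')) & 0xffffffff

url = 'http://gentie.ifeng.com/view.html?docUrl=example'
print(crc32_index(url))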
Beispiel #58
0
 def test_get_metadata_checksums_correctly(self):
     parts = self._get_written_line().decode('ascii').strip().split(' ')
     expected_checksum = '{0:08x}'.format(
         crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff)
     checksum = parts[2]
     self.assertEqual(expected_checksum, checksum)
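The test above expects the checksum as exactly eight lowercase hex digits; the '{0:08x}' format combined with the 0xffffffff mask is what guarantees that width. A standalone sketch, with made-up line content:
from binascii import crc32

line = 'instance-id local-hostname 10.0.0.2'
checksum = '{0:08x}'.format(crc32(line.encode('utf-8')) & 0xffffffff)
# Always eight zero-padded lowercase hex digits.
print(checksum, len(checksum))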
Beispiel #59
0
 def getpwuuid(self, uuid):
     tmp = (self.domain_name or "").encode('utf-8')
     (checksum, uid) = parse_uuid2(uuid)
     if crc32(tmp) != checksum:
         return None
     return self._convert_user(self.server.getpwuid(uid))
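getpwuuid() rejects a uuid unless the checksum embedded in it matches the CRC-32 of the local domain name. parse_uuid2 is not shown here, so the sketch below invents a simple '<checksum>-<uid>' layout purely to illustrate the check; the real format may differ.
from binascii import crc32

def make_uuid2(domain, uid):
    # Hypothetical layout: decimal CRC-32 of the domain, a dash, then the uid.
    return '{0}-{1}'.format(crc32(domain.encode('utf-8')) & 0xffffffff, uid)

def parse_uuid2_sketch(uuid):
    checksum, uid = uuid.split('-')
    return int(checksum), int(uid)

checksum, uid = parse_uuid2_sketch(make_uuid2('example.test', 1000))
assert checksum == crc32(b'example.test') & 0xffffffff and uid == 1000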
Beispiel #60
0
def scrapy_comment_user_163(username, adminid, address):
    '''
    Scrape all of a NetEase (163.com) user's comments
    '''
    username_decode = base64.b64decode(username)
    siteid = mysql_cursor.query(
        'select siteid from system_site_list where shorturl="comment.news.163.com"'
    )[0]['siteid']
    # Check whether the user already exists
    sql = 'select userid,adminid from system_site_user where siteid=%s and username=%s '
    r = mysql_cursor.query(sql, int(siteid), username_decode)
    if r:
        if int(adminid) != int(r[0]['adminid']):
            print 'Site account exists, but the adding admin does not match'
            processlog('auto_scrapyuser', 1, 'scrapy_comment_user_163',
                       'Site account exists, but the adding admin does not match; now: %s, originally: %s' % (adminid, r[0]['adminid']))
            return
        userid = r[0]['userid']
        setAddressStatus(userid, 1)
    else:
        processlog('auto_scrapyuser', 1, 'scrapy_comment_user_163',
                   'Site account does not exist, adding: %s, userid: %s' % (username, adminid))

        crc32_address = crc32(address) & 0xffffffff
        sql = 'insert into system_site_user(`siteid`, `username`,`createtime`, `adminid`, `address`,  `crc32address`, `status`) values(%s, %s, now(), %s, %s, %s, 1)'
        userid = mysql_cursor.execute_lastrowid(sql, siteid, username_decode,
                                                adminid, address,
                                                crc32_address)

    headers = {
        'Host': 'comment.news.163.com',
        'User-Agent':
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive'
    }
    # Scrape 6 pages by default
    for page in xrange(scrapy_page):
        url = 'http://comment.api.163.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/users/0/comments?username=%s&offset=%s&limit=30&ibc=newspc' % (
            username, page)

        req = requests.get(url, headers=headers, timeout=timeout)
        if req.status_code == 200:
            data = json.loads(req.text)

            threads = data['threads']
            urllist = []

            for k, v in threads.items():
                param = {}

                _url = v['url']
                # Check whether the url is supported
                res = r'://(.*?)/'
                ret = re.findall(res, _url)
                if ret:
                    shorturl = ret[0]
                    if shorturl in [
                            'news.163.com',
                    ]:
                        boardId = v['boardId']
                        param['docId'] = v['docId']
                        param['title'] = v['title']
                        param[
                            'url'] = 'http://comment.news.163.com/' + boardId + '/' + v[
                                'docId'] + '.html'
                        urllist.append(param)
                else:
                    processlog('auto_scrapyuser', 1, 'scrapy_comment_user_163',
                               'url not supported: %s' % _url)

            comments = data['comments']

            for k, v in comments.items():
                url_post = ''
                title = ''
                for u in urllist:
                    if u['docId'] == k.split('_')[0]:
                        url_post = u['url']
                        title = u['title']
                buildLevel = v['buildLevel']
                # Check that there is a nickname and that this is a top-level comment
                if url_post and title and v['user'].has_key(
                        'nickname') and buildLevel == 1:
                    nickname = v['user']['nickname']
                    commentId = v['commentId']
                    createTime = v['createTime']
                    content = v['content'].encode('utf8')

                    # Check whether the post has already been saved
                    sql = 'select postid from system_url_posts where `commentIds`=%s and createTime=%s and `adminid`=%s'
                    r = mysql_cursor.query(sql, commentId, createTime, adminid)
                    if not r:
                        # Check whether the url has already been added
                        crc32_url = crc32(url_post) & 0xffffffff
                        sql = 'select urlid from system_url_list where `crc32url`=%s and `adminid`=%s'
                        ret = mysql_cursor.query(sql, crc32_url, adminid)
                        if ret:  # already added
                            urlid = ret[0]['urlid']
                        else:
                            sql = 'insert into system_url_list(`siteid`, `title`, `url`, `crc32url`, `addtime`, `status`, `adminid`) values(%s,%s,%s,%s,now(),1, %s)'
                            urlid = mysql_cursor.execute_lastrowid(
                                sql, siteid, title, url_post, crc32_url,
                                adminid)

                            processlog('auto_scrapyuser', 1,
                                       'scrapy_comment_user_163',
                                       'url not added yet, adding url, urlid: %s' % urlid)
                        # Save the post
                        try:
                            sql = 'insert into system_url_posts(`urlid`, `userid`, `commentIds`, `content`, `nickname`'\
                                  ', `createTime`, `adminid`) values(%s,%s,%s,%s,%s,%s,%s)'
                            postid = mysql_cursor.execute_lastrowid(
                                sql, urlid, userid, commentId, content,
                                nickname, createTime, adminid)
                            print 'Saved post: %s; postid: %s; adminid: %s' % (
                                nickname, postid, adminid)
                            processlog(
                                'auto_scrapyuser', 1,
                                'scrapy_comment_user_163',
                                'Saved post: %s; postid: %s; adminid: %s' %
                                (nickname, postid, adminid))

                        except Exception, e:
                            # Some character sets cannot be stored
                            if 'Incorrect string value:' in str(e):
                                print 'Contains emoji; cannot save content, nickname: %s' % nickname
                                processlog(
                                    'auto_scrapyuser', 0,
                                    'scrapy_comment_user_163',
                                    'Contains emoji; cannot save content, nickname: %s' % nickname)

                            elif 'Data too long for column' in str(e):
                                processlog('auto_scrapyuser', 1,
                                           'scrapy_comment_user_163',
                                           'Post content too long, truncating and re-inserting, urlid: %s' % urlid)
                                content = content[:255]
                                sql = 'insert into system_url_posts(`urlid`, `userid`, `commentIds`, `content`, `nickname`'\
                                  ', `createTime`, `adminid`) values(%s,%s,%s,%s,%s,%s,%s)'
                                postid = mysql_cursor.execute_lastrowid(
                                    sql, urlid, userid, commentId, content,
                                    nickname, createTime, adminid)
                                print 'Saved post: %s; postid: %s; adminid: %s' % (
                                    nickname, postid, adminid)
                                processlog(
                                    'auto_scrapyuser', 1,
                                    'scrapy_comment_user_163',
                                    'Saved post: %s; postid: %s; adminid: %s' %
                                    (nickname, postid, adminid))

                            else:
                                print e
                                processlog('auto_scrapyuser', 0,
                                           'scrapy_comment_user_163', str(e))
                            # Update the site_user status
                            setAddressStatus(userid, 0)
                    else:
                        print 'Post already saved: postid: %s' % r[0]['postid']
                        # processlog('auto_scrapyuser', 1, 'scrapy_comment_user_163', 'Post already saved: postid: %s' % r[0]['postid'])

            # If this was the last page, exit the loop
            total = data['total']
            if (page + 1) * 30 >= total:
                break
        else:
            print req.text