def make_md5(data):
    """Make a md5 hash based on ``data``.

    Specifically, this knows about ``Hunk`` objects, and makes sure the
    actual content is hashed.

    This is very conservative, and raises an exception if there are data
    types that it does not explicitly support. This is because we had in
    the past some debugging headaches with the cache not working for this
    very reason.

    MD5 is faster than sha, and we don't care so much about collisions.
    We care enough however not to use hash().
    """
    hasher = md5_constructor()

    def feed(obj):
        # Recursively flatten ``obj`` into the hash, dispatching on type.
        if isinstance(obj, (tuple, list)):
            for element in obj:
                feed(element)
        elif isinstance(obj, dict):
            # Sort the keys so the digest is independent of dict ordering.
            for key in sorted(obj.keys()):
                feed(key)
                feed(obj[key])
        elif isinstance(obj, BaseHunk):
            hasher.update(obj.data())
        elif isinstance(obj, Filter):
            hasher.update(str(hash(obj)))
        elif isinstance(obj, (int, basestring)):
            hasher.update(str(obj))
        else:
            raise ValueError('Cannot MD5 type %s' % type(obj))

    feed(data)
    return hasher.hexdigest()
def key(self):
    """Return a key we can use to cache this hunk."""
    # MD5 is faster than sha, and we don't care so much about collisions.
    # We care enough however not to use hash().
    hasher = md5_constructor()
    hasher.update(self.data())
    return hasher.hexdigest()
def md5digest(data):
    """Return the hex MD5 digest of ``data``, preferring ``hashlib``."""
    try:
        import hashlib
    except ImportError:
        # Very old Pythons (pre-2.5) only ship the legacy ``md5`` module.
        import md5 as md5_module
        return md5_module.new(data).hexdigest()
    return hashlib.md5(data).hexdigest()
def md5sum(fname):
    """Return the MD5 hex digest of the file at *fname*.

    :param fname: path of the file to hash.
    """
    md5 = hashlib.md5()
    # Binary mode avoids newline translation / decode errors, and the
    # ``with`` block guarantees the handle is closed (the original opened
    # in text mode and leaked the file object).
    with open(fname, 'rb') as f:
        while True:
            chunk = f.read(8192)
            if not chunk:
                break
            md5.update(chunk)
    return md5.hexdigest()
def get_md5(self, content):
    """Return the hex MD5 digest of ``content``.

    Bytes are hashed as-is.  Anything else (text, numbers, ...) is
    best-effort coerced to its string form and UTF-8 encoded first;
    if coercion fails the raw value is hashed unchanged, matching the
    original's silent-fallback behaviour.
    """
    md5 = hashlib.md5()
    if not isinstance(content, bytes):
        try:
            # ``'%s' %`` keeps Python 2 unicode objects as text before
            # encoding (the original's bare ``except:`` also hid the
            # Python 3 NameError on ``unicode``, breaking str input).
            content = ('%s' % content).encode('utf-8')
        except Exception:
            # Best effort only: hash the raw value if coercion fails.
            pass
    md5.update(content)
    return md5.hexdigest()
def md5sum(fname):
    """Return the MD5 hex digest of the file at *fname*.

    Reads the file in 8 KB chunks so large files are hashed without
    loading them whole.  Opens in binary mode and closes the handle —
    both were defects in the original (text-mode read, leaked file
    object).
    """
    md5 = hashlib.md5()
    with open(fname, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            md5.update(chunk)
    return md5.hexdigest()
def get_md5(filename, block_size=2 ** 20):
    """Returns an MD5 hash for a filename.

    :param filename: path of the file to hash.
    :param block_size: bytes read per iteration (default 1 MB) so large
        files never have to fit in memory.
    """
    md5 = hashlib.md5()
    # ``with`` fixes the original's leaked file handle.
    with open(filename, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
    return md5.hexdigest()
def get_file_md5sum(filename):
    """Compute MD5Sum of a file.

    :param filename: path of the file to hash.
    :returns: md5sum in 32 character hexdigest format.
    """
    md5 = hashlib.md5()
    # Must open in binary mode: with a text-mode handle the ``b''``
    # sentinel below never equals the ``''`` returned at EOF on
    # Python 3, so the original looped forever.  ``with`` also closes
    # the handle, which the original leaked.
    with open(filename, 'rb') as fd:
        for chunk in iter(lambda: fd.read(8192), b''):
            md5.update(chunk)
    return md5.hexdigest()
def upload_file(self,name,dest=None):
    """Upload the local file *name* to the XML-RPC server at ``self.url``.

    :param name: path of the local file to send.
    :param dest: remote file name; defaults to ``basename(name)``.
    """
    import os
    if dest is None:
        dest = os.path.basename(name)
    # Python 2 print statement: show "destination <- source" being sent.
    print 'upload... %s <- %s'%(dest,name)
    import xmlrpclib
    s = xmlrpclib.ServerProxy(self.url)
    # self.md5sum(name) returns a pair: an md5 hash object plus the raw
    # file contents (the unpacking and .hexdigest() call below rely on
    # exactly that shape).
    md5,rawbuf = self.md5sum(name)
    buffer = xmlrpclib.Binary(rawbuf)
    # Push the payload, then ask the server to verify the transfer
    # against the MD5 digest.
    s.send_output(dest,'.',buffer)
    s.check_transfer(dest,'.',md5.hexdigest())
def vulPocCheck(task_id, task_name, netloc, pluginInfo, queue):
    """Fire one vulnerability PoC request at host:port and push the
    analysis result onto the Redis vul-task list.

    :param task_id: scan task identifier, echoed into the result dict.
    :param task_name: task name, echoed into the result dict.
    :param netloc: dict with ``"host"`` and ``"port"`` keys of the target.
    :param pluginInfo: plugin definition; provides ``'analyzing'`` (one of
        ``keyword``/``regex``/``md5``), ``'tag'`` and ``'analyzingdata'``.
    :param queue: queue name echoed into the result dict.
    """
    netloc = [netloc["host"],netloc["port"]]
    poc_request = set_request(netloc, pluginInfo)
    try:
        res = urllib2.urlopen(poc_request, timeout=5)
        # Cap the body at 200 KB.
        res_html = res.read(204800)
        header = res.headers
    except urllib2.HTTPError as e:
        # An HTTP error response still carries headers and a body worth
        # analyzing (e.g. 403/500 pages can prove a vuln).
        header = e.headers
        res_html = e.read(204800)
    except Exception as e:
        # Connection-level failure: no response at all, report nothing.
        return
    try:
        # Best-effort: re-encode the body to UTF-8 when get_code() yields
        # a plausible charset name (non-empty and short).
        html_code = get_code(header, res_html).strip()
        if html_code and len(html_code) < 12:
            res_html = res_html.decode(html_code).encode('utf-8')
    except Exception as e:
        # Keep the raw body if charset detection/decoding fails.
        pass
    an_type = pluginInfo['analyzing']
    # NOTE(review): vul_tag is read but never used in this function.
    vul_tag = pluginInfo['tag']
    analyzingdata = pluginInfo['analyzingdata']
    vul_scan_result = {
        "tags": "vulTask",
        "task_id": task_id,
        "task_name": task_name,
        "host": netloc[0],
        "port": netloc[1],
        "queue": queue,
        "isvul": False
    }
    # Three match strategies; '存在漏洞' ("vulnerability exists") marks a hit.
    if an_type == 'keyword':
        if analyzingdata.encode("utf-8") in res_html:
            vul_scan_result["isvul"] = '存在漏洞'
    elif an_type == 'regex':
        if re.search(analyzingdata, res_html, re.I):
            vul_scan_result["isvul"] = '存在漏洞'
    elif an_type == 'md5':
        # Whole-body fingerprint comparison.
        md5 = hashlib.md5()
        md5.update(res_html)
        if md5.hexdigest() == analyzingdata:
            vul_scan_result["isvul"] = '存在漏洞'
    else:
        # Unknown analysis type: mark the result indeterminate.
        vul_scan_result["isvul"] = None
    getPoolBR().lpush(RedisConfig.VULTASKKEY, json.dumps(vul_scan_result))
def make_key(*stuff):
    """Create a cache key by hashing the given data.

    This knows about certain data types that are relevant for us, for
    example filters.
    """
    # MD5 is faster than sha, and we don't care so much about collisions
    hasher = md5_constructor()

    def feed(items):
        # Walk nested lists, hashing filter ids and everything else as str.
        for item in items:
            if isinstance(item, list):
                feed(item)
            elif isinstance(item, Filter):
                hasher.update("%d" % item.id())
            else:
                hasher.update(str(item))

    feed(stuff)
    return hasher.hexdigest()
def _uuid(*args):
    """
    uuid courtesy of Carl Free Jr:
    (http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/213761)

    Mixes a millisecond timestamp, a large random number, the host
    address and *args* into one MD5 hex digest.
    """
    t = int(time.time() * 1000)
    r = int(random.random() * 100000000000000000)
    try:
        a = socket.gethostbyname(socket.gethostname())
    except Exception:
        # if we can't get a network address, just imagine one
        a = random.random() * 100000000000000000
    data = str(t) + ' ' + str(r) + ' ' + str(a) + ' ' + str(args)
    # BUG FIX: the original ``md5 = md5.md5()`` made ``md5`` a local
    # variable, so the right-hand side raised UnboundLocalError before
    # the module could ever be read.  Bind the digest to a distinct name.
    hasher = md5.md5()
    hasher.update(data)
    return hasher.hexdigest()
def getHashes(self, block_size=2**8):
    """Calculate MD5, SHA-1, SHA-256 hashes of APK input file.

    :param block_size: number of bytes read per iteration.
    :returns: list ``[md5, sha1, sha256]`` of hex digest strings.
    """
    md5 = hashlib.md5()
    sha1 = hashlib.sha1()
    sha256 = hashlib.sha256()
    # One pass over the file feeds all three digests; ``with`` closes
    # the handle, which the original leaked.
    with open(self.filename, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
            sha1.update(data)
            sha256.update(data)
    return [md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()]
def get_md5_input_file(input_file):
    """Return the MD5 hex digest of *input_file*, read in 1 MB chunks.

    :param input_file: path of the file to hash.
    :raises Exception: if *input_file* does not exist.
    """
    if not os.path.exists(input_file):
        raise Exception('Input file not found at: ' + input_file)
    logger.info('Computing md5 sum for: ' + input_file)
    chunk_size = 2 ** 20
    md5 = hashlib.md5()
    # Read the input file in 1MB pieces; ``with`` closes the handle,
    # which the original leaked.
    with open(input_file, 'rb') as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            md5.update(chunk)
    return md5.hexdigest()
def _uuid( *args ):
    """
    uuid courtesy of Carl Free Jr:
    (http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/213761)
    """
    # Entropy sources: millisecond timestamp, a big random number, and
    # (best effort) the host's network address.
    now_ms = int( time.time() * 1000 )
    rand = int( random.random() * 100000000000000000 )
    try:
        addr = socket.gethostbyname( socket.gethostname() )
    except Exception:
        # if we can't get a network address, just imagine one
        addr = random.random() * 100000000000000000
    data = str(now_ms) + ' ' + str(rand) + ' ' + str(addr) + ' ' + str(args)
    # Fix: ``md5 = md5.md5()`` shadowed the ``md5`` module with a local
    # name, so the right-hand side died with UnboundLocalError; use a
    # different name for the digest object.
    digest = md5.md5()
    digest.update(data)
    return digest.hexdigest()
def get_md5_input_file(input_file):
    """Compute the MD5 checksum (hex digest) of *input_file*.

    The file is streamed in 1 MB pieces so arbitrarily large files do
    not have to fit in memory.

    :param input_file: path of the file to hash.
    :raises Exception: if *input_file* does not exist.
    """
    if not os.path.exists(input_file):
        raise Exception('Input file not found at: ' + input_file)
    logger.info('Computing md5 sum for: ' + input_file)
    md5 = hashlib.md5()
    chunk_size = 2**20
    # Context manager closes the handle; the original left it open.
    with open(input_file, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()
'usage: genSerialVersionUID site-profile top-srcdir source target') profile = sys.argv[1] topsrcdir = sys.argv[2] spath = sys.argv[3] tpath = sys.argv[4] cname = spath[len(topsrcdir) + 11:len(spath)] print('Profile = ' + profile) print('Class = ' + cname) md5 = md5.new() md5.update(profile) md5.update(cname) hex = md5.hexdigest() print('CheckSum = ' + hex) uid = int('0x' + hex, 16) >> 64 print('SerialVersionUID = ' + str(uid) + 'L') print prog = re.compile('private static final long serialVersionUID') source = open(spath, 'rU') target = open(tpath, 'w') try: for line in source: if prog.search(line):
#-*- coding: UTF-8 -*- import hashlib import md5 md5 = hashlib.md5() md5.update('how to use md5 in python hashlib?') print md5.hexdigest() #如果数据量很大,可以分块多次调用update(),最后计算的结果是一样的: md5 = hashlib.md5() md5.update('how to use md5 in ') md5.update('python hashlib?') print md5.hexdigest() print '-------------------' # import hashlib ## 所以使用hashlib一定要每次初始化,不然计算的值不对 m = hashlib.md5() m.update('a') print m.hexdigest() # 0cc175b9c0f1b6a831c399e269772661 m = hashlib.md5() m.update('a') print m.hexdigest() # 4124bc0a9335c27f086f24ba207a4912 m = hashlib.md5() m.update('aa') print m.hexdigest() # 4124bc0a9335c27f086f24ba207a4912
sys.exit('usage: genSerialVersionUID site-profile top-srcdir source target'); profile = sys.argv[1] topsrcdir = sys.argv[2] spath = sys.argv[3] tpath = sys.argv[4] cname = spath[len(topsrcdir) + 11 : len(spath)] print ('Profile = ' + profile); print ('Class = ' + cname) md5 = md5.new() md5.update(profile) md5.update(cname) hex = md5.hexdigest() print ('CheckSum = ' + hex) uid = int('0x' + hex, 16) >> 64 print ('SerialVersionUID = ' + str(uid) + 'L') print prog = re.compile('private static final long serialVersionUID') source = open(spath, 'rU') target = open(tpath, 'w') try: for line in source:
def h(s):
    """Return the hex MD5 digest of ``s`` concatenated with ``seed``."""
    # Alias the legacy module so the digest object gets its own name.
    import md5 as _md5
    digest = _md5.md5()
    digest.update(s + seed)
    return digest.hexdigest()