def charge(phone, face):
    resp = ''
    param = {
        "agentId": agentId,
        "businessId": businessId,
        "reqStreamId": "fd{}{}".format(str(round(time.time() * 1000)),
                                       random.randint(100, 1000)),
        "phone": str(phone),
        "face": str(face),
        "tradePwd": tools.md5(tradePwd),
        "timeStamp": time.strftime('%Y%m%d%H%M%S'),
    }
    # Sign over the keys collected before 'notify' is added, so the empty
    # notify field is sent with the request but not included in the signature.
    param2 = sorted(param)
    param['notify'] = ""
    source_str = "".join([param[k] for k in param2]) + appKey
    param['sign'] = tools.md5(source_str)
    try:
        r = requests.get(charge_url, params=param)
        resp = r.text
    except Exception:
        log.info(traceback.format_exc())
    finally:
        return resp
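# The snippets in this collection call tools.md5() / md5() on plain strings
# when building signatures and per-item uuids. The helper itself is not shown
# anywhere here; the following is only a minimal sketch of the assumed
# behaviour (hex MD5 digest of a UTF-8 encoded string), not the original code.
import hashlib

def md5(text):
    """Return the hex MD5 digest of a string (assumed behaviour of tools.md5)."""
    if isinstance(text, str):
        text = text.encode('utf-8')
    return hashlib.md5(text).hexdigest()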
def parse_same_name_author(id, author_id, url, html, obj_type):
    try:
        author_list = html.xpath('//ul[@class="author-list"]/li')
        if len(author_list) != 0:
            ls = []
            for item in author_list:
                dic = {}
                same_author_name = item.xpath(
                    './div[@class="author-list-title"]/a/text()')[0]
                same_author_url = domain + item.xpath(
                    './div[@class="author-list-title"]/a/@href')[0].replace(
                        'Professional', 'General')
                same_author_id = same_author_url.split('/')[-1]
                same_author_org = item.xpath(
                    './div[@class="author-list-content"]/span[1]/text()'
                )[0].split(':')[-1]
                same_author_pub_num = item.xpath(
                    './div[@class="author-list-content"]/span[2]/text()'
                )[0].split(':')[-1]
                uuid = md5(same_author_url)
                dic['uuid'] = uuid
                dic['author_id'] = same_author_id
                dic['name'] = same_author_name
                dic['url'] = same_author_url
                dic['org'] = same_author_org
                dic['pub_num'] = same_author_pub_num
                if obj_type != 6 and obj_type != 7:
                    # same_obj_uuid = ''
                    # same_obj_type = 6
                    # same_obj_name = ''
                    # same_obj_url = ''
                    save_author_index(source_id=id,
                                      uuid=uuid,
                                      name=same_author_name,
                                      author_id=same_author_id,
                                      author_url=same_author_url,
                                      obj_uuid=md5(url),
                                      obj_type=6,
                                      obj_name=author_id,
                                      obj_url=url)
                ls.append(dic)
            SameName_author = json.dumps(ls, ensure_ascii=False)
        else:
            SameName_author = None
        return SameName_author
    except Exception as x:
        err = traceback.format_exc()
        print(err)
def charge(phone, face):
    resp = ''
    params = {
        "PlatID": PlatID,
        "NodeOrderID": "a{}{}".format(int(time.time() * 1000),
                                      random.randint(0, 100)),
        "Phone": phone,
        "Fee": face * 100,
        "CallBackUrl": "",
        "TransType": "01",
        "IP": "121.201.16.55",
    }
    # Signature: concatenate the values in key order, append the API key,
    # MD5 the result and upper-case it.
    sorted_keys = sorted(params.keys())
    value_str = ''
    for k in sorted_keys:
        value_str = value_str + str(params[k])
    sign = tools.md5(value_str + API_KEY)
    params['Sign'] = sign.upper()
    try:
        if pre_charge(phone, face):
            log.info("phone {} with face value {} inserted into the database".format(phone, face))
            r = requests.post(charge_url, data=params)
            resp = r.text
    except Exception:
        log.info(traceback.format_exc())
    finally:
        return resp
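# Both charge() variants above build their request signature the same way:
# sort the parameter keys, concatenate the corresponding values, append a
# secret key, MD5 the result and (in the second variant) upper-case it.
# A minimal standalone sketch of that pattern; the parameter values and the
# "API_KEY" below are placeholders, not real credentials.
import hashlib

def build_sign(params, secret):
    value_str = ''.join(str(params[k]) for k in sorted(params))
    return hashlib.md5((value_str + secret).encode('utf-8')).hexdigest().upper()

# Example usage (hypothetical values):
# sign = build_sign({"Phone": "13800000000", "Fee": 1000}, "API_KEY")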
def main():
    while True:
        try:
            hasches = raw_input("%s%shasches%s%s> " % (WHITEBOLD, LINE, STOPLINE, WHITEBOLD))
            if "generate/password/type/md5" in hasches:
                md5()
            elif "generate/password/type/sha1" in hasches:
                shaone()
            elif "generate/password/type/sha224" in hasches:
                sha224()
            elif "generate/password/type/sha256" in hasches:
                sha256()
            elif "generate/password/type/sha384" in hasches:
                sha384()
            elif "generate/password/type/sha512" in hasches:
                sha512()
            elif "show modules" in hasches:
                m()
            elif "clear" in hasches:
                os.system("clear")
            elif "exit" in hasches:
                sys.exit()
            elif hasches == 'show options' or hasches == '?' or hasches == 'help':
                iop()
            elif "hash" in hasches:
                hes = hasches.split()[-1]
                print "STRING => ", hasches.split()[-1]
                print "%s[*]%s Cracking.." % (BLUE, WHITEBOLD)
                try:
                    url = "https://lea.kz/api/hash/%s" % (hes)
                    beheading = urllib.urlopen(url).read()
                    data = json.loads(beheading)
                    print "%s[+]%s STRING : %s" % (BLUE, WHITEBOLD, hes)
                    print "%s[*]%s RESULT : %s%s" % (RED, WHITEBOLD, RED, data["password"])
                except:
                    print "%s[!]%s STRING NOT FOUND!" % (RED, WHITEBOLD)
            else:
                print "%s[-] Unknown command : %s%s%s" % (RED, WHITEBOLD, hasches, RED)
                print "[!] U can enter 'help' ?"
        except:
            print "\nBye"
            sys.exit()
def __init__(self, text, p, href):
    self.text = text
    self.path = p
    self.id = md5(href)
    self.baseUrl = href
    self.soup = BeautifulSoup(text, 'html.parser')
    self.css_list = []
    self.title = self.soup.title.string
    self.cover = ''
    self.description = ''
def get(self, cmds):
    filename = cmds[1]
    download_dir = os.path.join(self.client_dir, filename)
    head_dic = {
        'cmd': 'put',
        'file_name': filename,
        'user_dir': self.user_dir
    }
    self.send(head_dic)
    header_dic = self.receive()
    total_size = header_dic['file_size']
    file_name = header_dic['file_name']
    md5_server = header_dic['md5']
    file_size = 0
    file_flag = 0
    if os.path.exists(download_dir):  # check whether a local copy already exists
        file_size = os.path.getsize(download_dir)
        if file_size == total_size and tools.md5(
                download_dir) == md5_server:  # complete, identical file already on disk
            file_flag = 1
        else:
            file_flag = 0
    head_dic = {'file_size': file_size, 'file_flag': file_flag}
    self.send(head_dic)
    if file_flag == 0 and file_name:
        # step 4: receive the actual file data, resuming from file_size
        with open(download_dir, 'ab') as f:
            recv_size = file_size
            while recv_size < total_size:
                line = self.socket.recv(1024)
                f.write(line)
                recv_size += len(line)
                print('total size %s, downloaded %s' % (total_size, recv_size))
        md5_local = tools.md5(download_dir)
        if md5_local == md5_server:
            print('file %s verified successfully, download complete!' % file_name)
        else:
            print('file %s does not match the server copy, please download it again!' % file_name)
    elif file_flag == 1:
        print('an identical file has already been downloaded!')
    else:
        print('no such file: %s!' % filename)
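# get() above (and put() further down) rely on tools.md5(path) returning the
# MD5 of a file on disk so that resumed or completed transfers can be verified
# against the digest sent in the header. The real helper is not shown in these
# snippets; this is only a sketch of the assumed behaviour, reading the file
# in chunks so large files are not loaded into memory at once.
import hashlib

def file_md5(path, chunk_size=8192):
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()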
def parse_author_cooperation(id, url, author_id, html, obj_type):
    try:
        author_ls = html.xpath(
            '//div[@class="tag-content"]/span[@class="lnk2"]')
        if len(author_ls) != 0:
            ls = []
            for item in author_ls:
                dic = {}
                name = item.xpath('./a/text()')[0]
                url = domain + item.xpath('./a/@href')[0].replace(
                    'Professional', 'General')
                author_id = url.split('/')[-1]
                uuid = md5(url)
                dic['uuid'] = uuid
                dic['author_id'] = author_id
                dic['name'] = name
                dic['url'] = url
                # obj_uuid = ''
                # obj_type = 7
                # obj_name = ''
                # obj_url = ''
                if obj_type != 6 and obj_type != 7:
                    save_author_index(source_id=id,
                                      uuid=uuid,
                                      name=name,
                                      author_id=author_id,
                                      author_url=url,
                                      obj_uuid=md5(url),
                                      obj_type=7,
                                      obj_name=author_id,
                                      obj_url=url)
                ls.append(dic)
            cooperation_author = json.dumps(ls, ensure_ascii=False)
        else:
            cooperation_author = None
        return cooperation_author
    except Exception as x:
        err = traceback.format_exc()
        print(err)
def init_folder(href):
    """Initialize the output folder; create it recursively if it does not exist."""
    name = md5(href)
    p = './dist/{}'.format(name)
    try:
        os.makedirs(p)
    except BaseException as e:
        print('exception', e)
    return p
def down_image_url(self, url, selector):
    """Download a remote image to the local path."""
    r = self.get_remote_text(url, selector)
    if r is None:
        return None
    id_ = md5(url)
    p = '{}/{}.jpg'.format(self.path, id_)
    with open(p, 'wb') as f:
        f.write(r.content)
    return "./{}.jpg".format(id_)
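# down_image_url() expects self.get_remote_text(url, selector) to return an
# object with a .content attribute, or None on failure, which matches a
# requests.Response. The original helper is not shown; this is a hedged sketch
# of such a helper, with the selector/retry handling of the original omitted.
import requests

def get_remote_text(url, selector=None, timeout=10):
    try:
        r = requests.get(url, timeout=timeout)
        r.raise_for_status()
        return r
    except requests.RequestException:
        return None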
def login():
    username = flask.request.values.get('username')  # read the parameter from the request
    password = flask.request.values.get('password')
    tick = time.time()
    t = username + str(tick)
    sessionid = tools.md5(t)
    # tools.rc.hset("user_seesions", sessionid, {"username": username, "userid": 1})
    tools.rc.set(sessionid, username)
    tools.rc.expire(sessionid, 60 * 60)
    # flask.request.is_json        # whether the request body is JSON
    # flask.request.json.get('')   # use this when the input is JSON
    d = {
        'error_code': 1,
        'msg': 'login successful',
        'sessionid': sessionid,
        'username': username,
        'password': password
    }
    return json.dumps(d, ensure_ascii=False)
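# login() above stores the session id in Redis with a one hour TTL. The lookup
# on later requests is not part of the snippet; a minimal sketch, assuming the
# same tools.rc Redis client, could look like this:
def check_session(sessionid):
    username = tools.rc.get(sessionid)
    if not username:
        return None  # unknown or expired session
    return username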
def test_checksum():
    """
    Check the files for the agotool flask PMID autoupdates:
    compare the previously recorded checksums (from Phobos) to the
    checksums computed now (e.g. on Pisces).
    """
    cond_md5 = df["md5"].notnull()
    cond_latestVersion = df["version"] == max(df["version"])
    df2compare = df[cond_md5 & cond_latestVersion]
    fn_list, binary_list, size_list, num_lines_list, date_list, md5_list = [], [], [], [], [], []
    for fn in sorted(os.listdir(variables.TABLES_DIR)):
        if fn == "Entity_types_table_UPS_FIN.txt":
            continue
        fn_abs_path = os.path.join(variables.TABLES_DIR, fn)
        if fn.endswith("UPS_FIN.txt"):
            binary_list.append(False)
            num_lines_list.append(tools.line_numbers(fn_abs_path))
        elif fn.endswith("UPS_FIN.p") or fn.endswith(".npy"):
            binary_list.append(True)
            num_lines_list.append(np.nan)
        else:
            continue
        fn_list.append(fn)
        size_list.append(os.path.getsize(fn_abs_path))
        timestamp = tools.creation_date(fn_abs_path)
        date_list.append(datetime.datetime.fromtimestamp(timestamp))
        md5_list.append(tools.md5(fn_abs_path))
    dflocal = pd.DataFrame()
    dflocal["fn"] = fn_list
    dflocal["binary"] = binary_list
    dflocal["size"] = size_list
    dflocal["num_lines"] = num_lines_list
    dflocal["date"] = date_list
    dflocal["md5"] = md5_list
    dfm = pd.concat([dflocal, df2compare])
    for fn, group in dfm.groupby("fn"):
        md5_arr = group["md5"].values
        assert md5_arr.shape == (2, )
        assert md5_arr[0] == md5_arr[1]
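# test_checksum() also uses tools.line_numbers() and tools.creation_date(),
# whose implementations are not part of this snippet. Minimal sketches of the
# assumed behaviour (line count of a text file, and a file timestamp) follow;
# note that getmtime is only a portable stand-in for a true creation date.
import os

def line_numbers(path):
    with open(path, 'rb') as f:
        return sum(1 for _ in f)

def creation_date(path):
    return os.path.getmtime(path)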
def parse_all_pub_num(html):
    try:
        all_pub_num_list = html.xpath('//ul[@id="Professional_all"]/li')
        ls = []
        for item in all_pub_num_list:
            dic = {}
            url = domain + item.xpath('./a/@href')[0]
            author_id = item.xpath('./a/@authorid')[0]
            name = get_name(author_id)
            pub_num = item.xpath('./span/text()')[0]
            uuid = md5(url)
            dic['uuid'] = uuid
            dic['author_id'] = author_id
            dic['name'] = name
            dic['url'] = url
            dic['pub_num'] = pub_num
            ls.append(dic)
        pub_author_all = json.dumps(ls, ensure_ascii=False)
        return pub_author_all
    except Exception as x:
        err = traceback.format_exc()
        print(err)
def put(self, cmds):
    filename = cmds[1]
    file_dir = os.path.join(self.client_dir, filename)
    if not os.path.exists(file_dir):
        print('file %s does not exist' % filename)
        return
    else:
        filesize = os.path.getsize(file_dir)
        md5 = tools.md5(file_dir)
        # check whether the upload would exceed the remaining quota
        self.get_quota()
        remain_quota = self.user_info[self.user_name][1] - int(self.user_quota)
        if filesize > remain_quota:
            print('cannot upload %s bytes! remaining quota is %s!' % (filesize, remain_quota))
            return
        head_dic = {
            'cmd': 'get',
            'md5': md5,
            'file_name': filename,
            'file_size': filesize,
            'user_dir': self.user_dir
        }
        self.send(head_dic)
        header_dic = self.receive()
        send_size = header_dic['file_size']
        if header_dic['file_flag'] == 0:
            # resume from the size the server already has
            with open(file_dir, 'rb') as f:
                f.seek(send_size)
                for line in f:
                    self.socket.send(line)
                    send_size += len(line)
                    print('sent %s bytes!' % send_size)
                else:
                    print('upload successful')
        else:
            print('the server already has an identical file!')
def parse_click_num(html):
    try:
        click_num_list = html.xpath('//ul[@class="nlst3 clear"]/li')
        ls = []
        for item in click_num_list:
            dic = {}
            url = domain + item.xpath('./a/@href')[0]
            uuid = md5(url)
            author_id = item.xpath('./a/@authorid')[0]
            author_id = url.split('/')[-1].strip('?version=Professional')
            name = get_name(author_id)
            pub_num = item.xpath('./span/text()')[0]
            dic['uuid'] = uuid
            dic['author_id'] = author_id
            dic['name'] = name
            dic['url'] = url
            dic['pub_num'] = pub_num
            ls.append(dic)
        click_author = json.dumps(ls, ensure_ascii=False)
        return click_author
    except Exception as x:
        err = traceback.format_exc()
        print(err)
def getCache(page):
    return models.Cache.all().filter('page =', page).filter(
        'url =', tools.md5(unicode(page.url))).get()
def parse_author_cooperation_relation(author_id):
    data = get_RelationChartData(author_id)
    try:
        data_dic = json.loads(data)
    except:
        data_dic = {}
    author_cooperation_ls = []
    cooperation_relation_ls = []
    if len(data_dic) != 0:
        for item1 in data_dic['links']:
            dic = {}
            for item2 in data_dic['nodes']:
                if item1['source'] == item2['index']:
                    source_name = item2['name']
                    source_author_id = item2['id']
                    source_url = 'http://med.wanfangdata.com.cn/Author/Professional/' + source_author_id
                    source_uuid = md5(source_url)
                    cooperationTimes = item1['cooperationTimes']
                    dic['source_name'] = source_name
                    dic['source_author_id'] = source_author_id
                    dic['source_url'] = source_url
                    dic['source_uuid'] = source_uuid
                    dic['cooperationTimes'] = cooperationTimes
                    continue
                if item1['target'] == item2['index']:
                    target_name = item2['name']
                    target_author_id = item2['id']
                    target_url = 'http://med.wanfangdata.com.cn/Author/Professional/' + target_author_id
                    target_uuid = md5(target_url)
                    dic['target_name'] = target_name
                    dic['target_author_id'] = target_author_id
                    dic['target_url'] = target_url
                    dic['target_uuid'] = target_uuid
                    continue
            cooperation_relation_ls.append(dic)
        cooperation_relation_author = json.dumps(cooperation_relation_ls,
                                                 ensure_ascii=False)
    else:
        cooperation_relation_author = None
    return cooperation_relation_author
    # for item1 in data_dic['links']:
    #     dic1 = {}
    #     source = ['source']
    #     target = ['target']
    #     cooperationTimes = item1['cooperationTimes']
    #     dic
    #
    # for item2 in data_dic['nodes']:
    #     dic = {}
    #     name = item2['name']
    #     author_id = item2['id']
    #     index = item2['index']
    #     url = 'http://med.wanfangdata.com.cn/Author/Professional/' + author_id
    #     uuid = md5(url)
    #     dic['uuid'] = uuid
    #     dic['author_id'] = author_id
    #     dic['name'] = name
    #     dic['url'] = url
    #     author_cooperation_ls.append(dic)
import tools, datetime

for i in range(3):
    username = input('username:').strip()
    pwd = input('pwd:').strip()
    if username == '' or pwd == '':
        print('username and password must not be empty')
    else:
        new_password = tools.md5(pwd)
        sql = 'select username,password,error_count from app_myuser where username="%s";' % (
            username)
        result = tools.op_mysql(sql, False)
        if result:
            if result.get('error_count') > 5:
                print('more than 5 failed attempts, the account is locked')
                break
            elif new_password == result.get('password'):
                print('login successful, today is %s' % datetime.datetime.today())
                up_sql = 'update app_myuser set error_count=0 where username="%s";' % username
                tools.op_mysql(up_sql)
                break
            else:
                up_sql = 'update app_myuser set error_count=error_count+1 where username="%s";' % username
                tools.op_mysql(up_sql)
                print('wrong password!')
        else:
            print('user does not exist')
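# The login loop above assumes tools.op_mysql(sql, ...) executes a statement
# and, for SELECTs, returns a single row as a dict (falsy when nothing
# matches). A hedged sketch using pymysql; the connection settings are
# placeholders and the second argument is interpreted here as "fetch all rows".
import pymysql

def op_mysql(sql, fetch_all=True):
    conn = pymysql.connect(host='127.0.0.1', user='root', password='',
                           db='test', charset='utf8mb4', autocommit=True,
                           cursorclass=pymysql.cursors.DictCursor)
    try:
        with conn.cursor() as cur:
            cur.execute(sql)
            return cur.fetchall() if fetch_all else cur.fetchone()
    finally:
        conn.close()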
if not r_url or r_url == 'http://':  # url not passed
    tools.redirect('/')
    exit()
if not r_url.startswith('http://') and not r_url.startswith('https://'):
    r_url = 'http://' + r_url
try:
    if not r_content or not r_type:
        r_content, r_type, r_url = fetch(r_url)
except DownloadFail, e:
    tools.printError('Download error',
                     'Sorry, we couldn\'t access the address you provided. Please try again in a few seconds.')
    tools.logException()
    exit()
id = tools.md5(ID_SALT + r_url + unicode(time.time()))[:8]
page = models.Page(key_name='K' + id, url=r_url)
page.put()
if tools.isHtml(r_type):
    r_content = preprocessHtml(r_content, r_url)
content = bz2.compress(r_content)
cache = models.Cache(page=page, url=tools.md5(unicode(page.url)),
                     content=content, contentType=r_type)
cache.put()
tools.redirect('/' + id)


if __name__ == "__main__":
    main()
def index_page():
    return tools.md5(os.urandom(512))
def parse_literature_info(id, author_uuid, author_name, author_url, author_id,
                          source_url, html, page, path, last_index):
    try:
        html_str = lxml_to_string(html)
        literature_ls = html.xpath('//ul[@class="author-list"]/li')
        literature_index = 1
        for item in literature_ls:
            if literature_index <= last_index:
                literature_index += 1
                continue
            source_text = lxml_to_string(item)
            if len(item.xpath(
                    './div[@class="author-list-title"]/span[@class="title-only"]/text()'
            )) != 0:
                label = item.xpath(
                    './div[@class="author-list-title"]/span[@class="title-only"]/text()'
                )[0]
            else:
                label = ''
            num = item.xpath(
                './div[@class="author-list-title"]/span[@class="num"]/text()'
            )[0].strip('.')
            title = item.xpath('./div[@class="author-list-title"]/a/text()')[0]
            url = item.xpath('./div[@class="author-list-title"]/a/@href')[0]
            uuid = md5(url)
            periodical_type = item.xpath(
                './div[@class="author-list-type"]/b/text()')[0]
            author_ls = item.xpath('./div[@class="author-list-type"]/a')
            ls = []
            for author_item in author_ls:
                author_dic = {}
                a_url = author_item.xpath('./@href')[0]
                name = author_item.xpath('./text()')[0]
                # use distinct names so the literature-level uuid and the
                # author_id parameter are not clobbered inside this loop
                a_uuid = md5(a_url)
                a_author_id = a_url.split('/')[-1]
                author_dic['uuid'] = a_uuid
                author_dic['name'] = name
                author_dic['author_id'] = a_author_id
                author_dic['url'] = a_url
                ls.append(author_dic)
            author_info = json.dumps(ls, ensure_ascii=False)
            # journal
            periodical = item.xpath(
                './div[@class="author-list-type-info"]/a[1]/text()')[0]
            periodical_url = item.xpath(
                './div[@class="author-list-type-info"]/a[1]/@href')[0]
            periodical_uuid = md5(periodical_url)
            # issue link
            period_url = item.xpath(
                './div[@class="author-list-type-info"]/a[2]/@href')[0]
            # issue
            period = item.xpath(
                './div[@class="author-list-type-info"]/a[2]/text()')[0]
            # page numbers
            pagination = item.xpath(
                './div[@class="author-list-type-info"]/a[2]/following::text()'
            )[0].strip()
            # citation count
            cite_num = item.xpath(
                './div[@class="author-list-type-info"]/span[1]/text()'
            )[0].strip()
            # indexing information
            include_info_ls = item.xpath(
                './div[@class="author-list-type-info"]/span[@class="core-img"]'
            )
            ls = []
            if len(include_info_ls) != 0:
                for include_item in include_info_ls:
                    include_dic = {}
                    include_name = include_item.xpath('./text()')[0]
                    detail = include_item.xpath('./@title')[0]
                    include_dic['name'] = include_name
                    include_dic['detail'] = detail
                    ls.append(include_dic)
                include_info = json.dumps(ls, ensure_ascii=False)
            else:
                include_info = None
            # abstract
            intro = '<' + lxml_to_string(
                item.xpath('./div[@class="author-list-main"]')[0]).replace(
                    ' ', '').replace('\n', '').strip(
                        '<div class="author-list-main"></div>')
            # intro = '<' + item.xpath('./div[@class="author-list-main"]/string(.)')[0]
            # keywords
            keywords_ls = item.xpath('./div[@class="author-list-keyword"]/a')
            if len(keywords_ls) != 0:
                ls = []
                for keywords_item in keywords_ls:
                    keyword_dic = {}
                    k_url = keywords_item.xpath('./@href')[0]
                    try:
                        keyword = keywords_item.xpath('./text()')[0]
                    except:
                        keyword = k_url.split('=')[-1].strip('()')
                    keyword_dic['keyword'] = keyword
                    keyword_dic['url'] = k_url
                    ls.append(keyword_dic)
                keywords = json.dumps(ls, ensure_ascii=False)
            else:
                keywords = None
            # str_test = lxml_to_string(item.xpath('.//div[@class="author-list-operation"]')[0])
            # online reading link
            read_url = ''
            if len(item.xpath(
                    './/div[@class="author-list-operation"]/a[2]/@href')) != 0:
                read_url = item.xpath(
                    './/div[@class="author-list-operation"]/a[2]/@href')[0]
            # download link
            download_url = ''
            if len(item.xpath(
                    './/div[@class="author-list-operation"]/a[1]/@href')) != 0:
                download_url = item.xpath(
                    './/div[@class="author-list-operation"]/a[1]/@href')[0]
            save_author_relative(id, uuid, source_url, source_text,
                                 author_uuid, author_id, author_url,
                                 author_name, num, title, url, label,
                                 periodical_type, author_info, periodical,
                                 periodical_url, period_url, period,
                                 pagination, cite_num, include_info, intro,
                                 keywords, read_url, download_url)
            record_last(id, page, literature_index, path)
            literature_index += 1
    except Exception as x:
        err = traceback.format_exc()
        print(err)
public = 1 if user is not None else 0
if user:
    owner = user
else:
    owner = generateAnonymous()
try:
    if not r_content or not r_type:
        r_content, r_type, r_url = fetch(r_url)
    verified = True
except DownloadFail, e:
    tools.printError('Download error',
                     'Sorry, we couldn\'t access the address you provided. Please try again in a few seconds.')
    tools.logException()
    exit()
id = tools.md5(ID_SALT + r_url + unicode(time.time()))[:8]
page = models.Page(key_name='K' + id, url=r_url, owner=owner, public=public)
page.put()
if tools.isHtml(r_type):
    r_content = preprocessHtml(r_content, r_url)
content = bz2.compress(r_content)
cache = models.Cache(page=page, url=tools.md5(unicode(page.url)),
                     content=content, contentType=r_type, verified=verified)
cache.put()
if user:
    tools.redirect('/' + id)
else:
    cookies = {'anonymous_token': tools.token(page, owner)}
    headers = [