def vm_migrate():
    """Handle VM migration requests.

    POST: publish a ``migrate~<ip>~<vm>`` command over Redis, block until
    the worker's status reply arrives on the subscription channel, then
    redirect to the results page with ``<vm>~<status>`` in a cookie.
    GET: render the migration form (login required via username cookie).
    """
    if request.method == 'POST':
        vm_name = request.form.get('vm_name')
        kvm_ip = request.form.get('kvm_ip')
        # Command format understood by the worker: migrate~<ip>~<vm>
        pub("migrate~" + kvm_ip + "~" + vm_name)
        sub = RedisHelper()
        redis_sub = sub.subscribe()
        status = "failure"
        while True:  # `while 1 < 2` -> idiomatic infinite loop
            msg = redis_sub.parse_response()
            # Skip the echo of our own "migrate~..." command; the first
            # non-command message is the worker's status reply.
            if msg[2].split("~")[0] == "migrate":
                continue
            status = msg[2]
            break
        # The original had byte-identical "ok" and failure branches; they
        # are collapsed here — the status travels in the cookie either way.
        response = make_response(redirect('/migrate_results'))
        response.set_cookie('result', value=vm_name + "~" + status)
        return response
    else:
        username = request.cookies.get('username')
        if not username:
            return "please login!!!"
        islogin = session.get('islogin')
        return render_template('vm_migrate.html', username=username, islogin=islogin)
def disk_create():
    """Handle disk creation requests.

    POST: publish ``[disk_name, disk_size]`` to the worker over Redis,
    wait for the status reply, then redirect to the disk list on success
    or report the failure inline.
    GET: render the creation form (login required via username cookie).
    """
    if request.method == 'POST':
        disk_name = request.form.get('disk_name')
        disk_size = request.form.get('disk_size')
        pub([disk_name, disk_size])
        sub = RedisHelper()
        redis_sub = sub.subscribe()
        status = "failure"
        while True:  # `while 1 < 2` -> idiomatic infinite loop
            msg = redis_sub.parse_response()
            # The echo of our own command stringifies as "['name', 'size']"
            # (two items); the worker's reply carries a third item with the
            # status. Skip messages until the three-item reply arrives.
            # NOTE(review): this string-surgery protocol is fragile —
            # consider publishing JSON instead.
            if len(msg[2].split("[")[1].split("]")[0].split(", ")) == 2:
                continue
            status = msg[2].split("]")[0].split(", ")[2].split("'")[1]
            break
        if status == "ok":
            return make_response(redirect('/disk_list'))
        return "%s is created %s!" % (disk_name, status)
    else:
        username = request.cookies.get('username')
        if not username:
            return "please login!!!"
        islogin = session.get('islogin')
        return render_template('disk_create.html', username=username, islogin=islogin)
def getGraph(self): mRedis = RedisHelper() count = 0 for author in mRedis.getAuthorList(): count += 1 if count % 1000 == 0: print count for coau in mRedis.getAuthorCoauthors(author): if min(int(t) for t in mRedis.getAuCoauTime(author + ':' + coau)) < TEST_DATA_YEAR: self.graph.add_edge(author, coau) logging.info('load graph done!') logging.info('nodes:' + str(self.graph.number_of_nodes())) logging.info('edges:' + str(self.graph.number_of_edges())) return self.graph
def recvedServer(q):
    """Forward every message received on the Redis subscription into queue ``q``."""
    helper = RedisHelper()
    channel = helper.subscribe()  # start listening on the configured channel
    while True:
        message = channel.parse_response()
        if message:
            q.put(message)
def radis_push_server_minute():
    """Publish one JSON-encoded sample record to Redis.

    The original wrapped the publish in a ``while True`` loop that broke
    unconditionally after the first iteration; the loop was a no-op and
    has been removed.
    """
    redis = RedisHelper(host='127.0.0.1', port=6379)
    data = {'name': 'liwei', 'age': '30', 'sex': '1', 'addr': '深圳市南山区'}
    # ensure_ascii=False keeps the Chinese address readable in the payload.
    redis.publish(json.dumps(data, ensure_ascii=False))
def getImgFromUrl():
    """Crawl the stored target URLs, fetch each page, and save every <img> found.

    The 'imghandled' Redis hash acts as a dedupe set so each URL is
    processed at most once.
    """
    print('开始获得网页链接: ')
    redishelper = RedisHelper()
    urls = redishelper.get('linkstarget')
    for url in urls:
        # Skip URLs already handled, then mark this one up front.
        if redishelper.hexists('imghandled', url):
            continue
        redishelper.hset('imghandled', url, url)
        opener = makeMyOpener()
        try:
            response = opener.open(url, timeout=1000)
        except (urllib2.HTTPError, urllib2.URLError, ValueError):
            # The three original handlers were identical; merged into one.
            traceback.print_exc()
            continue
        try:
            data = response.read()
        except httplib.IncompleteRead:
            traceback.print_exc()
            continue
        soup = BeautifulSoup(data, 'lxml')
        for links in soup.findAll("img"):
            print("获得本页所有图片", links.get('src'))
            if links.get('src') is None:  # `== None` -> `is None`
                continue
            getAndSaveImg(links.get('src'))
def checkUser(name, password):
    '''Validate a login: check the Redis cache first, fall back to MySQL.

    On a cache miss the password hash found in MySQL (column 5) is stored
    back into Redis under the username so the next login is served from
    cache. Prints the outcome; returns nothing.
    '''
    red = RedisHelper('127.0.0.1', 6379, '123456')
    if red.get(name) is None:  # `== None` -> `is None`
        # Cache miss: authoritative lookup in MySQL.
        mysqlHelper = MySqlHelper('localhost', 3306, 'root', 'root', 'test')
        sql = 'select * from user where UserName=%s'
        result = mysqlHelper.get_one(sql, [name])
        if result is None:
            print('用户名错误')
            return  # guard clause instead of deeper nesting
        # Cache the password hash for subsequent logins.
        red.set(name, result[5])
        if strToSha1(password) != result[5]:
            print('密码错误')
        else:
            print('登录成功')
    else:
        if strToSha1(password) != str(red.get(name)):
            print('密码错误')
        else:
            print('登录成功')
def getImgFromUrl():
    """Crawl the stored target URLs and save every image referenced on each page.

    Bugs fixed from the original:
    - removed ``print(keys)`` — ``keys`` was never defined (NameError);
    - ``links.src`` -> ``links.get('src')`` (BeautifulSoup tags have no
      ``.src`` attribute);
    - ``hset('imghandled', url)`` was missing the value argument; it now
      stores url -> url.

    NOTE(review): dedupe reads via ``sismember`` (a set) but marks via
    ``hset`` (a hash) — these target different Redis structures; confirm
    which one 'imghandled' actually is.
    """
    print('开始获得网页链接: ')
    redishelper = RedisHelper()
    urls = redishelper.get('linkstarget')
    print(urls)
    for url in urls:
        if redishelper.sismember('imghandled', url):
            continue
        opener = makeMyOpener()
        response = opener.open(url, timeout=1000)
        data = response.read()
        soup = BeautifulSoup(data, 'lxml')
        # Re-fetch with a randomized header set to obtain a decodable copy.
        req = urllib2.Request(url, headers=hds[random.randint(0, len(hds) - 1)])
        source_code = urllib2.urlopen(req, timeout=25).read()
        plain_text = source_code.decode('utf8')
        soup = BeautifulSoup(plain_text)
        for links in soup.findAll("img"):
            print("获得本页所有图片", links.get('src'))
            getAndSaveImg(links)
        redishelper.hset('imghandled', url, url)
def recommender():
    """Run random-walk-with-restart recommendation for each target author.

    For every target author: compute PageRank over the coauthor graph,
    zero out authors already referenced before TEST_DATA_YEAR (they need
    no recommendation), keep the top RECOM_TOP_N, and pickle the whole
    mapping to PATH_RWR_RECOM_LIST.
    """
    graph = Graph().getGraph()
    targets = getTargetAuthor()
    mRedis = RedisHelper()
    recom_dict = dict()
    rw = Randomwalk()
    for index, author in enumerate(targets):
        logging.info(str(index))
        pagerank = rw.PageRank(graph, author)
        # Authors of papers this author referenced before the cutoff year;
        # a set makes the membership test below O(1) instead of O(n).
        connedCoaus = set()
        for paper in mRedis.getAuthorPapers(author):
            if int(mRedis.getPaperYear(paper)) < TEST_DATA_YEAR:
                for refer in mRedis.getPaperReferences(paper):
                    connedCoaus.update(mRedis.getPaperAuthors(refer))
        for node in pagerank.keys():
            if node in connedCoaus:
                pagerank[node] = 0.0
        recom_dict[author] = sorted(pagerank.iteritems(), key=lambda d: d[1], reverse=True)[:RECOM_TOP_N]
    # `with` guarantees the pickle file is closed even if dump() raises.
    with open(PATH_RWR_RECOM_LIST, 'wb') as resultsave:
        pickle.dump(recom_dict, resultsave)
def saveTargetAuthor(): redis = RedisHelper() authors = redis.getAuthorList() count = 0 author_ref_before = dict() author_ref_after = dict() target = list() for index, author in enumerate(authors): if index % 10000 == 0: print index papers = redis.getAuthorPapers(author) num = len(papers) if num >= TARGET_PAPER_NUM_MIN and num <= TARGET_PAPER_NUM_MAX: references_after, references_before = 0,0 for paper in papers: if int(redis.getPaperYear(paper)) > TEST_DATA_YEAR: references_after += len(redis.getPaperReferences(paper)) else: references_before += len(redis.getPaperReferences(paper)) author_ref_after[author] = author_ref_after.setdefault(author, 0) + references_after author_ref_before[author] = author_ref_before.setdefault(author, 0) + references_before after_sorted = sorted(author_ref_after.iteritems(), key = lambda d:d[1], reverse = True)[:1000] print after_sorted[:200] for author, ref in after_sorted: if author_ref_before[author] > 1: print author, ref target.append(author) count += 1 if count == 100: break print count # while True: # author = random.choice(authors) # papers = redis.getAuthorPapers(author) # num = len(papers) # if num >= TARGET_PAPER_NUM_MIN and num <= TARGET_PAPER_NUM_MAX: # references_after, references_before = list(), list() # for paper in papers: # if int(redis.getPaperYear(paper)) > TEST_DATA_YEAR: # ref = list(redis.getPaperReferences(paper)) # references_after.extend(ref) # else: # ref = list(redis.getPaperReferences(paper)) # references_before.extend(ref) # if len(references_before) > 8 and len(references_after) > 4: # if author not in target: # count += 1 # print '********************',count # target.append(author) # if count == 10: # break with open(PATH_TARGET_AUTHOR, 'w') as fileWrite: s = ','.join(target) fileWrite.write(s) fileWrite.close()
def getUrlList():
    """Expand the crawl frontier: fetch each stored target URL and push every
    outgoing <a> link back into the 'linkstarget' hash.

    Fixes from the original:
    - ``pring('except')`` typo (raised NameError inside the handler);
    - early ``return`` on an already-handled URL now ``continue``s so the
      remaining URLs are still processed (matching the corrected variant
      of this crawler elsewhere in the project);
    - <a> tags without an href no longer crash (``None.split`` -> guard);
    - bare ``except:`` narrowed to ``except Exception``.
    """
    print('开始获得网页链接: ')
    redishelper = RedisHelper()
    urls = redishelper.get('linkstarget')
    print(urls)
    for url in urls:
        print('\nurl: ', url)
        if redishelper.hexists('urlhandled', url):
            print('linkstarget 已经在 imghandled 中处理过,返回获取新的链接地址')
            continue
        opener = makeMyOpener()
        try:
            response = opener.open(url, timeout=1000)
        except (urllib2.HTTPError, ValueError):
            # The two original handlers were identical; merged into one.
            traceback.print_exc()
            continue
        data = response.read()
        soup = BeautifulSoup(data, 'lxml')
        # Re-fetch with a randomized header set to obtain a decodable copy.
        req = urllib2.Request(url, headers=hds[random.randint(0, len(hds) - 1)])
        source_code = urllib2.urlopen(req, timeout=25).read()
        plain_text = source_code.decode('utf8')
        soup = BeautifulSoup(plain_text)
        for links in soup.findAll("a"):
            print(u"获得本页所有链接", links)
            print(u"获得本页所有链接 links.href", links.get('href'))
            href_value = links.get('href')
            if href_value is None:
                continue
            # Absolute links keep their scheme; relative ones get the base path.
            if href_value.split('/')[0] == 'http:':
                href = href_value
            else:
                href = baseurlPath + href_value
            try:
                redishelper.hset('linkstarget', href, href)
            except Exception:
                traceback.print_exc()
        redishelper.hset('urlhandled', url, url)
def getUrlList():
    """Expand the crawl frontier: fetch each stored target URL and push every
    outgoing <a> link back into the 'linkstarget' hash.

    URLs already present in 'urlhandled' are skipped; each processed URL
    is marked there afterwards.
    """
    print('开始获得网页链接: ')
    redishelper = RedisHelper()
    urls = redishelper.get('linkstarget')
    for url in urls:
        if redishelper.hexists('urlhandled', url):
            print('linkstarget 已经在 imghandled 中处理过,返回获取新的链接地址')
            continue
        opener = makeMyOpener()
        try:
            response = opener.open(url, timeout=1000)
        except (urllib2.HTTPError, ValueError):
            # The two original handlers were identical; merged into one.
            traceback.print_exc()
            continue
        data = response.read()
        soup = BeautifulSoup(data, 'lxml')
        for links in soup.findAll("a"):
            try:
                # <a> without href: .get returns None -> AttributeError on split.
                hrefheader = links.get('href').split('/')[0]
            except AttributeError:
                traceback.print_exc()
                continue
            # Absolute links keep their scheme; relative ones get the base path.
            if not (hrefheader == 'http:'):
                href = baseurlPath + links.get('href')
            else:
                href = links.get('href')
            try:
                redishelper.hset('linkstarget', href, href)
            except Exception:  # narrowed from bare `except:`
                print('except')
        redishelper.hset('urlhandled', url, url)
def getAllImg():
    """Search Baidu for the hard-coded query and append result links
    (markdown format) to an output file.

    Fixes from the original:
    - the first print referenced ``a`` before it was ever bound
      (NameError); it now prints the outer tag's href;
    - ``f`` was written/flushed/closed without ever being opened; an
      output file is opened explicitly.
    """
    redishelper = RedisHelper()
    urlstr = "设计模式之禅"
    url = trans_url_bd("csdn.net", "site:(zhihu.com) " + urlstr)
    opener = makeMyOpener()
    response = opener.open(url, timeout=1000)
    data = response.read()
    soup = BeautifulSoup(data, 'lxml')
    # NOTE(review): the original used an undefined ``f``; appending to
    # 'links.md' here — confirm the intended output path.
    f = open('links.md', 'a')
    for div in soup.findAll('a'):
        print('获得所有的链接 a', div.get('href'))
        for a in div.findAll('a'):
            try:
                print("=====")
                hrefname = a['href']
                # Baidu redirect links start with '/s'.
                if hrefname[0:2] == '/s':
                    hrefname = hrefname + a['href']
                urls = '[link](%s)' % hrefname
                print(urls)
                f.write(urls + "\n")
            except Exception:
                traceback.print_exc()
                print('except')
    f.flush()
    f.close()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Subscriber: print every message received on the Redis channel (Python 2).
from RedisHelper import RedisHelper
import sys

# Force UTF-8 as the default codec so implicit str<->unicode conversions of
# the Chinese payloads don't raise UnicodeDecodeError (Python 2 only).
# The original also called sys.getdefaultencoding() twice with the result
# discarded — those no-ops have been removed.
reload(sys)
sys.setdefaultencoding('UTF-8')

obj = RedisHelper()
redis_sub = obj.subscribe()  # start listening on the configured channel
while True:
    msg = redis_sub.parse_response()
    # msg is (type, channel, data); index 2 is the payload.
    print("收到订阅消息 {}".format((msg)[2])).decode('utf-8')
str_r_port = config['redis']['port2'] str_r_pwd = config['redis']['pwd2'] str_r_chan = config['redis']['chan1'] str_r_db = config['redis']['db'] str_r_chan2 = config['redis']['chan2'] #打印配置文件 lists_header = config.sections() str_config = "" for secs in lists_header: for key in config[secs]: str_config = str_config + " " + key + ":" + config[secs][key] LogHelper.info(str_config) obj = RedisHelper(str_r_ip, str_r_pwd, str_r_port, str_r_db, str_r_chan, str_r_chan2) # 赋值订阅变量 redis_sub = obj.subscribe() for item in redis_sub.listen(): LogHelper.info(item) if item['type'] == "message": if bytes.decode(item['channel']) == str_r_chan: #新建一个新的command_info对象 obj_msg = command_info() #将字典转化为对象 obj_msg.__dict__ = json.loads(item['data']) LogHelper.debug(obj_msg.__dict__) obj_result = Result_info() obj_result.type = obj_msg.type if obj_msg.type == 1: #1、获取所有硬盘基本信息
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Publisher: send a single UTF-8 greeting to the Redis channel (Python 2).
from RedisHelper import RedisHelper

publisher = RedisHelper()
# Decode the byte literal to unicode before publishing.
message = '你好'.decode('utf-8')
publisher.publish(message)
from RedisHelper import RedisHelper
import time

# Publish 50 numbered greetings, pausing five seconds after each one.
publisher = RedisHelper()
for idx in range(50):
    publisher.publish("Hello-{0}".format(idx))
    time.sleep(5)
from RedisHelper import RedisHelper

# NOTE(review): this helper exposes a ``public`` method (several sibling
# scripts call it too) — confirm against RedisHelper before assuming it is
# a typo for ``publish``.
sender = RedisHelper()
sender.public('hello')
pub = self.__conn.pubsub() pub.subscribe(self.channel) pub.parse_response() return pub 复制代码 发布者 复制代码 #!/usr/bin/env python # -*- coding:utf-8 -*- #发布 from RedisHelper import RedisHelper obj = RedisHelper() obj.publish('hello')#发布 复制代码 订阅者 复制代码 #!/usr/bin/env python # -*- coding:utf-8 -*- #订阅 from RedisHelper import RedisHelper obj = RedisHelper() redis_sub = obj.subscribe()#调用订阅方法
# encoding=utf-8
"""mitmproxy addon skeleton: stubs for persisting intercepted data."""
import json
import string
from urllib.parse import quote

import requests
from mitmproxy import ctx

from RedisHelper import RedisHelper
from MongoModel import DataController

obj = RedisHelper()
db = DataController()


def writeIndex():
    # TODO: persist index data.
    pass


def readIndex():
    # TODO: load index data.
    pass


def writeQuestion():
    # TODO: persist question payloads.
    pass


def writeAnswer():
    # TODO: persist answer payloads.
    pass


def request(flow):
    # mitmproxy hook, called once per intercepted request; currently a no-op.
    # Example of mutating a request in place:
    #   flow.request.content = flow.request.content.replace('type=1', 'type=2')
    pass
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Subscriber: block on the channel forever and dump each raw message.
from RedisHelper import RedisHelper

listener = RedisHelper()
channel = listener.subscribe()  # returns the pubsub object
while True:
    incoming = channel.parse_response()
    print (incoming)
#!/usr/local/bin/python
# -*- coding:utf-8 -*-
# Publisher (the original header comment said "subscriber", but the code
# publishes a message).
from RedisHelper import RedisHelper

sender = RedisHelper()
sender.publish('nihao')
首先请求redis数据库服务器,如果redis服务器中有,则直接验证; 如果redis服务器中没有,则直接去mysql数据库中查找。 ''' # 接收输入: name = input('请输入用户名:') pwd1 = input("请输入密码:") # 密码加密: s1 = sha1() s1.update(pwd1.encode('utf-8')) pwd2 = s1.hexdigest() # 先判断redis中判断是否有此用户名。 # 查询redis中是否存在此用户和密码: r = RedisHelper() m = MysqlHelper('192.168.85.20', 3306, 'follow', 'root', '123456') # print(r.get('name')) # 判断redis中是否有此用户名和密码: if r.get(name) is None: print("redis数据库服务器中无此用户信息。将转至MySQL数据库中查找....") sql = 'select passwd from users where name=%s' pwd3 = m.get_one(sql, [name]) if pwd3 is None: print("用户名错误") elif pwd3[0] == pwd2: print("\nMySQL数据库服务器中有此用户信息:") print("登陆成功1\n") # 如果用户在mySql中存在,则将其添加到Redis服务器中。
import sys

# Make the RedisHelper module importable from its source directory.
sys.path.append(r'/home/peng/learnredis')
from RedisHelper import RedisHelper

helper = RedisHelper()
greeting = 'hello world'
# NOTE(review): this helper names its send method ``public`` — confirm
# against RedisHelper before assuming a typo.
helper.public(greeting)
sha1_password = sha1.hexdigest() sql = 'select password from userinfos where name=%s' params = [name] # try: # conn = pymysql.connect(host='localhost', port=3306, db='python2', user='******', password='') # cur = conn.cursor() # cur.execute(sql, params) # result = cur.fetchone() # cur.close() # conn.close() # except Exception as e: # print(e) r = RedisHelper() if r.get('uname').decode('utf-8') == name: print('OK') else: helper = MysqlHelper('localhost', 3306, 'python2', 'root', '') result = helper.get_one(sql, params) if result is None: print('用户名错误') elif result[0] == sha1_password: r.set('uname', name) print('登录成功') else: print('密码错误')
'''修复小说列表页面失败而记录的网址''' _lines = None with open("pageList.bak") as f: _lines = f.readlines() f.close() os.remove("pageList.bak") for _url in _lines: get_novel_list(_url.strip("\n")) if __name__ == '__main__': # repair_pageList() novel_info = Novel23us(1) dbhelper = DBhelper() dics = {} redis_helper = RedisHelper() dict_list = dbhelper.query( "SELECT id,`name` from dictionary where type = 'tag'") for _item in dict_list: dics[_item[1]] = _item[0] # get_novel_list_main() novelId, start_chapter_index = dbhelper.query_one( "SELECT novelId,chapterId from chapter ORDER BY novelId DESC,chapterId desc LIMIT 1" ) start_novel_index = None start, end, step = 0, 5000, 500 index_start = start if novelId is not None: start_novel_index = dbhelper.query_one( "SELECT count(1) from novel where id <=%s", novelId) index_start = start + start_novel_index[0] - 1
# 业务过程如下: # 输入用户名、密码 # 密码加密 # 判断redis中是否记录了用户名,如果有则成功 # 如果redis中没有用户名,则到mysql中查询 # 从mysql中查询成功后,将用户名记录到redis中 #1.获取用户的输入 user_name = input("请输入用户名:") user_pwd = input("请输入密码:") #2.对用户的密码进行加密 s1 = sha1() s1.update(user_pwd.encode("utf-8")) enc_user_pwd = s1.hexdigest() #3.查询redis中是否存在此用户 r = RedisHelper('localhost',6379) m = MysqlHelper('localhost', 3306, 'root', 'mysql', 'python3') #print(r.get('user_name')) for test #当在redis查询结果为空时,在mysql中查询密码 #print(r.get(user_name)) for test if r.get(user_name)==None: sql = 'select passwd from users where name =%s' params = [user_name] passwd = m.get_one(sql, params) # print(passwd) for test #判断用户是否存在 if passwd == None: print("用户名不存在....") else: #将mysql查询的结果存到redis中,因为redis的键相同会被替换,所以这里set name 直接为用户名 r.set(user_name, passwd[0])
DEPLOY_END_TIME = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") print "Deploy operation successfully." write_vm_info_to_file = "echo %s %s %s %s %s running >> /vm_scripts/vm_info.list" % ( vm_name, cpu, mem, disk, os_type) run_command(write_vm_info_to_file) result = "ok" return result else: DEPLOY_END_TIME = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") print "Deploy operation failure." result = "failure" return result if __name__ == "__main__": sub = RedisHelper() redis_sub = sub.subscribe() while 1 < 2: msg = redis_sub.parse_response() #info = msg[2].split("[")[1].split("]")[0].split(",|'") #vm_info = info[0].split(",") print msg[2] if msg[2] == "vm_list": run_command("/vm_scripts/get_running_vm_info.sh") f = open('/vm_scripts/vm_list', 'r') vm_list = [line.strip() for line in f.readlines()] f.close pub = RedisHelper() pub.publish(vm_list) elif msg[2].split("~")[0] == "delvm": vm_name = msg[2].split("~")[1]
from RedisHelper import RedisHelper
import time

# Fan out 100 messages to each of 100 topics:
# mytopic0..mytopic99 each receive hello0..hello99.
broker = RedisHelper()
for topic_idx in range(100):
    topic = 'mytopic%s' % topic_idx
    for msg_idx in range(100):
        broker.publish(topic, 'hello%s' % msg_idx)
        # time.sleep(0.1)  # optional throttle, kept disabled as in the demo
from RedisHelper import RedisHelper from MySqlHelper import MysqlHelper from flask import Flask from flask import request from flask import render_template # from flask import sessions logging.basicConfig( level=logging.DEBUG, format= '%(asctime)s %(levelname)s %(message)s %(filename)s[line:%(lineno)d]', datefmt='%a, %d %b %Y %H:%M:%S') app = Flask(__name__) redis = RedisHelper() mysql = MysqlHelper() taskQueue = queue.Queue() class RecvedServer(threading.Thread): def __init__(self, q, name): self.__quit = False self.__q = q super(RecvedServer, self).__init__(name=name) def quitWorker(self): self.__quit = True def run(self): redis_sub = redis.subscribeChannel('/liwei/000') # 调用订阅方法
from config import *
from RedisHelper import RedisHelper

total = 0
sub_name = "BlinkBlock"
# Demo payload: flash red five times at 100 ms intervals on an "off" base.
message_demo = {
    "flash_color": "red",
    "base_color": "off",
    "count": 5,
    "interval": 0.1,
}

if __name__ == '__main__':
    publisher = RedisHelper()
    publisher.publish(sub_name, message_demo)
'''
Publish/subscribe differs from plain set/get: set/get needs no
synchronization between the two sides, while pub/sub requires both the
publisher and the subscriber to be live at the same time.
'''
# Equivalent raw-redis version, kept for reference:
#   import redis
#   obj = redis.Redis(password='******')
#   obj.publish('fm104.5', 'hello')
from RedisHelper import RedisHelper

sender = RedisHelper()
sender.public('hello steven!')
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 21:11:09 2018

@author: dingfei
"""
# Subscriber demo: listen on the helper's channel and echo every message.
import redis
# raw equivalent: redis.Redis(host='127.0.0.1', port=6379, db=0)
from RedisHelper import RedisHelper

subscriber = RedisHelper()
channel = subscriber.subscribe()
while True:
    print(channel.parse_response())
def __init__(self):
    # Wire up the two storage backends this object talks to.
    self.mongohelper = MongoHelper()
    self.redishelper = RedisHelper()
from RedisHelper import RedisHelper

# Walk through topics mytopic0, mytopic1, ... reading one message from each
# (a fresh subscription is opened per topic).
receiver = RedisHelper()
topic_idx = 0
while True:
    channel = receiver.subscribe('mytopic%s' % topic_idx)
    print(channel.parse_response())
    topic_idx += 1
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Publisher demo: emit a single 'hello' on the default channel.
from RedisHelper import RedisHelper

broadcaster = RedisHelper()
broadcaster.publish('hello')