def get(self, request):
    """Return the current user's cart (the "Chart" redis hash entry).

    BUG FIX: ``redis.Connection`` accepts no ``pool`` kwarg and has no
    ``hget`` — a client drawn from the shared pool is required
    (``redis.Redis(connection_pool=...)``).  Also guards against an empty
    cart: ``hget`` returns None for a missing field, which previously
    crashed ``json.loads``.
    """
    conn = redis.Redis(connection_pool=utils.pool)
    raw = conn.hget("Chart", request.user.id)
    # Missing hash field -> empty cart instead of a TypeError.
    current = json.loads(raw) if raw is not None else {}
    return Response(current)
def testRedisConn(address, port, auth):
    """Probe a redis server; return 1 on success, 0 on failure (legacy int API).

    FIXES: the bare ``except`` no longer swallows SystemExit/KeyboardInterrupt,
    and a successful probe connection is disconnected instead of leaked.
    """
    try:
        rd = redis.Connection(host=address, port=port, db=0, password=auth)
        rd.connect()
    except Exception:
        return 0
    rd.disconnect()  # release the probe connection
    return 1
class redisController:
    """Thin static wrapper around one module-wide redis client.

    BUG FIXES vs. the original:
    * the env-configured connection was created, connected and thrown away
      while the actual client silently pointed at localhost — the client is
      now built from REDIS_HOST/REDIS_PORT directly;
    * get() raised "failed to set ..." on errors and crashed with
      AttributeError on a missing key;
    * delete() returned False when the key WAS deleted and True when
      nothing was deleted (inverted).
    """

    # Shared client, configured from the environment.
    redis_obj = redis.Redis(host=os.environ['REDIS_HOST'],
                            port=os.environ['REDIS_PORT'])

    @staticmethod
    def set(key: str, value: str):
        """Store *value* under *key*; wraps redis errors with context."""
        try:
            redisController.redis_obj.set(name=key, value=value)
        except redis.RedisError as e:
            raise redis.RedisError(f"failed to set {key}. {e}")

    @staticmethod
    def get(key: str) -> str:
        """Return the value of *key* as str; '' when the key is absent."""
        try:
            value = redisController.redis_obj.get(name=key)
        except redis.RedisError as e:
            raise redis.RedisError(f"failed to get {key}. {e}")
        # GET returns None for a missing key; avoid None.decode().
        return value.decode() if value is not None else ''

    @staticmethod
    def delete(key: str) -> bool:
        """Delete *key*; True when something was actually removed."""
        try:
            # DEL returns the number of keys removed; non-zero means success.
            return redisController.redis_obj.delete(key) != 0
        except redis.RedisError as e:
            raise redis.RedisError(f"failed to delete {key}. {e}")
def get_db_info(hosttag, useraccount):
    """Fetch server info from the redis instance registered under *hosttag*.

    BUG FIX: the Python-2-only ``<>`` operator is a SyntaxError on
    Python 3 and is replaced with ``!=``.
    """
    tar_host, tar_port, tar_username, tar_passwd, tar_dbname = get_redis_coninfo(
        hosttag, useraccount)
    # The registry stores the literal string "null" for password-less hosts.
    if tar_passwd != "null":
        connect = redis.Connection(host=tar_host, port=int(tar_port),
                                   db=int(tar_dbname), password=tar_passwd,
                                   decode_responses=True)
    else:
        connect = redis.Connection(host=tar_host, port=int(tar_port),
                                   db=int(tar_dbname), decode_responses=True)
    # connect = redis.Redis(host=tar_host,port=int(tar_port),password=tar_passwd,decode_responses=True)
    # NOTE(review): redis.Connection has no execute_command(); this likely
    # needs the commented-out redis.Redis client above — confirm with callers.
    results = connect.execute_command({'info': 1})
    return results
def connect_to_redis(self, host, port, update_map=False):
    """Open a raw connection to a Redis node and remember its socket/address.

    BUG FIX: *port* may arrive as an int (Connection accepts either), in
    which case the original ``host + ":" + port`` concatenation raised
    TypeError; the port is now stringified explicitly.
    """
    self.redis_connection = redis.Connection(host, port)
    self.redis_connection.connect()
    # Keep the raw socket around for protocol-level work elsewhere.
    self.server_socket = self.redis_connection._sock
    self.address = self.redis_connection.host + ":" + str(port)
    print("Connected to Redis node: " + self.address)
    if update_map:
        self.update_slot_node_mapping()
def redis(ctx):
    """ Start redis server, if necessary. """
    import redis
    try:
        # Cheap liveness probe: can we open a connection to the default server?
        conn = redis.Connection()
        conn.connect()
    except redis.ConnectionError:
        _run(ctx, 'Starting redis server', 'redis-server --daemonize yes')
    else:
        conn.disconnect()  # FIX: don't leak the probe connection
async def connect_and_sleep(**kwargs):
    """Hold one raw redis connection open for an hour, then close it.

    Any failure along the way is printed and swallowed; the connection
    object itself is always dropped on the way out.
    """
    link = redis.Connection(**kwargs)
    try:
        link.connect()
        await asyncio.sleep(3600)
        link.disconnect()
    except Exception as err:
        print(err)
    finally:
        del link
def __init__(self):
    # Initialize all API's object
    # Create necessay data structure
    #
    # Wires up one client per vision provider, a redis handle, and the list
    # of local image filenames to process.
    self.API_list = ['Amazon', 'Clarifai', 'Google', 'IBM']
    # NOTE(review): redis.Connection is the raw protocol object, not a
    # client (no get()/set()); confirm whether redis.Redis was intended.
    self.database = redis.Connection(host='127.0.0.1')
    self.aws = AWS.Rekonition()
    self.clarifai = Clarifai.Predict()
    self.google = Google.VisionAI()
    self.ibm = IBM.WatsonVisualRecognition()
    # Windows-style relative path to the image corpus.
    self.image_name_list = os.listdir(r".\images")
def is_ok(self):
    """Try to reach the redis at ``self.server``.

    Returns (True, '') on success, (False, reason) on failure.
    FIX: a successful probe connection is now disconnected, not leaked.
    """
    passwd = os.environ.get("REDIS_SERVER_PASSWORD")
    kw = {"host": self.server}
    if passwd:
        kw["password"] = passwd
    try:
        conn = redis.Connection(**kw)
        conn.connect()
    except Exception as e:
        return False, str(e)
    conn.disconnect()  # release the probe connection
    return True, ""
def status(name):
    """Health endpoint: ('', 204) when the redis at module-level ``server``
    answers, otherwise (error text, 500).

    FIX: a successful probe connection is now disconnected, not leaked.
    (``name`` is accepted but unused — presumably a route parameter;
    confirm against the URL map.)
    """
    passwd = os.environ.get("REDIS_SERVER_PASSWORD")
    kw = {"host": server}
    if passwd:
        kw["password"] = passwd
    try:
        conn = redis.Connection(**kw)
        conn.connect()
    except Exception as e:
        return str(e), 500
    conn.disconnect()  # release the probe connection
    return "", 204
def check(self):
    """Open a throwaway connection, log the server's INFO, close it again.

    Redis errors are caught and logged rather than propagated.
    """
    log.info('检查 Redis 连接: %s:%d', self._host, self._port)
    try:
        link = redis.Connection(self._host, self._port,
                                *self._args, **self._kwargs)
        link.connect()
        server_info = self._redis_info(link)
        link.disconnect()
        self._log_redis_info(server_info)
    except redis.exceptions.RedisError as e:
        log.error('连接 Redis 失败: %s - %s', e.__class__, e)
def execute_low_level(command, *args, **kwargs):
    """Run one raw command on a fresh connection and return the reply.

    The reply is passed through redis-py's registered response callback for
    *command* when one exists, mirroring what the high-level client would
    hand back.  The connection is always torn down afterwards.
    """
    conn = redis.Connection(**kwargs)
    try:
        conn.connect()
        conn.send_command(command, *args)
        reply = conn.read_response()
        callback = redis.Redis.RESPONSE_CALLBACKS.get(command)
        return callback(reply) if callback is not None else reply
    finally:
        conn.disconnect()
        del conn
def __init__(self, host='localhost', port=6379, password='******',
             pub='redsnow', sub='redsnow', db=0):
    """Set up a pooled StrictRedis client plus pub/sub channel names.

    BUG FIX: ``redis.Connection`` is a single raw connection, not a pool;
    ``StrictRedis(connection_pool=...)`` requires a ``redis.ConnectionPool``
    and breaks as soon as it tries to draw a connection from it.
    """
    pool = redis.ConnectionPool(host=host, port=port, password=password, db=db)
    self._con = redis.StrictRedis(connection_pool=pool)
    self.pub = pub
    self.sub = sub
    self._sub = None  # lazily-created pubsub handle
def is_redis_running(port=None):
    """ Check if redis is running in the given port. """
    import redis
    conn = redis.Connection(port=port or 6379)
    try:
        conn.connect()
    except redis.ConnectionError:
        return False
    conn.disconnect()  # FIX: close the probe connection instead of leaking it
    return True
def get_tbindex_info(hosttag, tbname, useraccount):
    """Open a client on the redis db registered under *hosttag*.

    BUG FIXES: the Python-2-only ``except Exception, e`` syntax (a hard
    SyntaxError on Python 3) is modernised; the unusable module
    subscription ``redis[tar_dbname]`` is replaced by selecting the db
    through a ConnectionPool; and since authentication is handled by the
    pool's ``password`` argument, the old best-effort ``authenticate()``
    call becomes a connectivity probe that is still allowed to fail
    silently, preserving the original swallow-all behaviour.
    """
    tar_host, tar_port, tar_username, tar_passwd, tar_dbname = get_redis_coninfo(
        hosttag, useraccount)
    pool = redis.ConnectionPool(host=tar_host, port=int(tar_port),
                                db=int(tar_dbname), password=tar_passwd,
                                decode_responses=True)
    db = redis.Redis(connection_pool=pool)
    try:
        db.ping()  # best-effort reachability check
    except Exception:
        pass
def connectToDetServer():
    """Connect to the detector server's redis port at SNOLAB.

    Returns ("ok" | "bad", connection).  FIXES: uses the public
    ``connect()`` — which performs both the socket connect and the
    on-connect handshake — instead of the private ``_connect()`` /
    ``on_connect()`` pair, and narrows the bare ``except`` so Ctrl-C is
    no longer swallowed.
    """
    status = "ok"
    DSsocket = redis.Connection(host="minard.sp.snolab.ca", port=8520,
                                socket_connect_timeout=15.0,
                                retry_on_timeout=True)
    try:
        DSsocket.connect()
    except Exception:
        print("Failed to connect to Detector Server", file=sys.stderr)
        status = "bad"
    return status, DSsocket
def _connectToDetServer(self):
    """Connect to the detector server at self.hostdns:self.hostport.

    Returns ("ok" | "bad", connection).  Same fixes as the module-level
    helper: public ``connect()`` instead of the private ``_connect()`` /
    ``on_connect()`` pair, and no bare ``except``.
    """
    status = "ok"
    DSsocket = redis.Connection(host=self.hostdns, port=self.hostport,
                                socket_connect_timeout=15.0,
                                retry_on_timeout=True)
    try:
        DSsocket.connect()
    except Exception:
        logging.info(
            "RackController: Failed to connect to Detector Server!")
        status = "bad"
    return status, DSsocket
def connect_to_redis(self, host, port):
    """Bootstrap cluster discovery: connect to one seed node, ask it for
    CLUSTER NODES over the raw socket, then connect to every node listed.
    """
    self.node_connections = []
    first = redis.Connection(host, port)
    first.connect()
    self.node_connections.append(first)
    # Hand-encoded RESP for ["cluster", "nodes"], written straight on the
    # connection's private socket.
    first._sock.sendall(b'*2\r\n$7\r\ncluster\r\n$5\r\nnodes\r\n')
    response = self.get_response(first._sock)
    self.nodes = Node.parse_node(response)
    for n in self.nodes:
        # address looks like "ip:port@cport"; strip the cluster-bus suffix.
        ip, port = n.address.split(":")[0], n.address.split(":")[1].split("@")[0]
        self.connect_to_node(ip, port)
def post(self, request):
    """Add a course (with a chosen price policy) to the user's redis cart.

    BUG FIXES: the existence check was inverted — a found course returned
    the error while a missing one crashed with AttributeError on None; a
    stray no-op ``self`` statement is removed; and the shared pool is now
    consumed through a real client (``redis.Redis(connection_pool=...)``),
    since ``redis.Connection`` accepts no ``pool`` kwarg and has no
    ``hget``/``hset``.
    """
    ret = {'code': 1000, 'msg': None}
    course_id = request.data.get('course_id')
    price_policy_id = request.data.get('price_policy_id')
    course_obj = models.Course.objects.filter(pk=course_id).first()
    if not course_obj:
        ret["msg"] = "瞎输课程"
        ret["code"] = 1001
    else:
        price_policies = course_obj.price_policy.all()
        # NOTE: could be built in a single pass over price_policies.
        policies_id = [i.id for i in price_policies]
        policies_list = [{
            'id': i.id,
            'valid_period': i.get_valid_period_display(),
            'price': i.price
        } for i in price_policies]
        if price_policy_id not in policies_id:
            ret["msg"] = "瞎输钱数"
            ret["code"] = 1002
        else:
            # Snapshot of the course as stored in the cart hash.
            course_dict = {
                'id': course_obj.id,
                'img': course_obj.course_img,
                'title': course_obj.name,
                'price_policy_list': policies_list,
                'default_policy_id': price_policy_id
            }
            conn = redis.Redis(connection_pool=utils.pool)
            existing = conn.hget("Chart", request.user.id)
            if not existing:
                data = {course_obj.id: course_dict}
            else:
                data = json.loads(existing.decode('utf-8'))
                data[course_obj.id] = course_dict
            conn.hset("Chart", request.user.id, json.dumps(data))
    return ret
# -*- coding: utf-8 -*- import redis phone = routable.pdu.params['destination_addr'] host = 'localhost' port = 6379 db0, db1 = 0, 1 connection = redis.Connection(host=host, port=port, db=db0) mnc = None try: connection.connect() connection.send_command('GET', phone[1:]) mnc = connection.read_response() finally: del connection if not mnc: connection = redis.Connection(host=host, port=port, db=db1) def_code = phone[1:4] number = int(phone[-7:]) members = set() try: connection.connect() connection.send_command('SMEMBERS', def_code)
def __init__(self):
    """Create a shared connection pool and a client bound to it.

    BUG FIX: ``redis.Redis(connection_pool=...)`` needs a
    ``redis.ConnectionPool``; a bare ``redis.Connection`` is one socket
    wrapper and fails as soon as the client asks the "pool" for a
    connection.
    """
    self.pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
    self.redishandle = redis.Redis(connection_pool=self.pool)
def main():
    """Scanner de-queue loop: throttle on CPU headroom, run periodic
    maintenance, verify the redis broker is reachable, then dequeue pending
    sites and hand them to celery.  Runs forever; only a non-redis broker
    URL ends it (sys.exit(1)).
    """
    dequeue_loop_count = 0

    # Parse the BROKER_URL
    broker_url = urlparse(BROKER_URL)
    if broker_url.scheme.lower() != 'redis':  # Currently the de-queuer only support redis
        print('Sorry, the scanner currently only supports redis.',
              file=sys.stderr)
        sys.exit(1)

    # Get the current CPU utilization and wait a second to begin the loop for the next reading
    psutil.cpu_percent()
    sleep(1)

    while True:
        try:
            # TODO: Document this madness and magic numbers, make it configurable
            # If max cpu is 90 and current CPU is 50, that gives us a headroom of 8 scans
            headroom = int(
                (SCANNER_MAX_CPU_UTILIZATION - psutil.cpu_percent()) / 5)
            dequeue_quantity = min(headroom, SCANNER_MAX_LOAD)

            if headroom <= 0:
                # If the cycle sleep time is .5, sleep 2 seconds at a minimum, 10 seconds at a maximum
                sleep_time = min(
                    max(abs(headroom), SCANNER_CYCLE_SLEEP_TIME * 4), 10)
                print(
                    '[{time}] WARNING: Load too high. Sleeping for {num} second(s).'
                    .format(time=str(datetime.datetime.now()).split('.')[0],
                            num=sleep_time),
                    file=sys.stderr)
                sleep(sleep_time)
                continue
        except:
            # I've noticed that on laptops that Docker has a tendency to kill the scanner when the laptop sleeps; this
            # is designed to catch that exception
            sleep(1)
            continue

        # Every so many scans, let's opportunistically clear out any PENDING scans that are older than 1800 seconds
        # Also update the grade_distribution table
        # If it fails, we don't care. Of course, nobody reads the comments, so I should say that *I* don't care.
        try:
            if dequeue_loop_count % SCANNER_MAINTENANCE_CYCLE_FREQUENCY == 0:
                print('[{time}] INFO: Performing periodic maintenance.'.format(
                    time=str(datetime.datetime.now()).split('.')[0]),
                      file=sys.stderr)
                dequeue_loop_count = 0
                num = periodic_maintenance()

                if num > 0:
                    print('[{time}] INFO: Cleared {num} broken scan(s).'.format(
                        time=str(datetime.datetime.now()).split('.')[0],
                        num=num),
                          file=sys.stderr)
                num = 0
        except:
            # Deliberate best-effort: maintenance failures are ignored.
            pass
        finally:
            dequeue_loop_count += 1

        # Verify that the broker is still up; if it's down, let's sleep and try again later
        try:
            # Raw Connection used as a cheap liveness probe (connect +
            # can_read), then torn down.
            conn = redis.Connection(host=broker_url.hostname,
                                    port=broker_url.port or 6379,
                                    db=int(broker_url.path[1:]),
                                    password=broker_url.password)
            conn.connect()
            conn.can_read()
            conn.disconnect()
            del conn
        except:
            print(
                '[{time}] ERROR: Unable to connect to to redis. Sleeping for {num} seconds.'
                .format(time=str(datetime.datetime.now()).split('.')[0],
                        num=SCANNER_BROKER_RECONNECTION_SLEEP_TIME),
                file=sys.stderr)
            sleep(SCANNER_BROKER_RECONNECTION_SLEEP_TIME)
            continue

        # Get a list of sites that are pending
        try:
            sites_to_scan = update_scans_dequeue_scans(dequeue_quantity)
        except IOError:
            print(
                '[{time}] ERROR: Unable to retrieve lists of sites to scan. Sleeping for {num} seconds.'
                .format(time=str(datetime.datetime.now()).split('.')[0],
                        num=SCANNER_DATABASE_RECONNECTION_SLEEP_TIME),
                file=sys.stderr)
            sleep(SCANNER_DATABASE_RECONNECTION_SLEEP_TIME)
            continue

        try:
            if sites_to_scan:
                print('[{time}] INFO: Dequeuing {num} site(s).'.format(
                    time=str(datetime.datetime.now()).split('.')[0],
                    num=len(sites_to_scan)),
                      file=sys.stderr)
                for site in sites_to_scan:
                    scan.delay(*site)

                # Always sleep at least some amount of time so that CPU utilization measurements can track
                sleep(SCANNER_CYCLE_SLEEP_TIME / 2)
            else:
                # If the queue was empty, lets sleep a little bit
                sleep(SCANNER_CYCLE_SLEEP_TIME)
        except:
            # this shouldn't trigger, but we don't want a scan breakage to kill the scanner
            print('[{time}] ERROR: Unknown celery error.'.format(
                time=str(datetime.datetime.now()).split('.')[0]),
                  file=sys.stderr)
            pass
def main():
    """Scanner de-queue loop (redis / redis+socket brokers).

    Each cycle: check CPU headroom, run periodic maintenance (with an
    optional celery "kickstart" when too many scans had to be aborted),
    verify the broker is reachable, then dequeue pending sites into celery.

    BUG FIX: the kickstart condition previously ran AFTER ``num`` had been
    reset to 0, so ``num > SCANNER_ALLOW_KICKSTART_NUM_ABORTED`` could
    never be true and the kickstart path was dead code; the reset now
    happens after the check.
    """
    # Start each scanner at a random point in the range to spread out database maintenance
    dequeue_loop_count = randrange(0, SCANNER_MAINTENANCE_CYCLE_FREQUENCY)

    # Parse the BROKER_URL
    broker_url = urlparse(BROKER_URL)
    if broker_url.scheme.lower() not in ('redis', 'redis+socket'):
        # Currently the de-queuer only supports redis
        print('Sorry, the scanner currently only supports redis.',
              file=sys.stderr)
        sys.exit(1)

    # Prime psutil's CPU sampler so the first in-loop reading is meaningful
    psutil.cpu_percent()
    sleep(1)

    while True:
        try:
            # If max cpu is 90 and current CPU is 50, that gives us a headroom of 8 scans
            headroom = int(
                (SCANNER_MAX_CPU_UTILIZATION - psutil.cpu_percent()) / 5)
            dequeue_quantity = min(headroom, SCANNER_MAX_LOAD)

            if headroom <= 0:
                # If the cycle sleep time is .5, sleep 2 seconds at a minimum, 10 seconds at a maximum
                sleep_time = min(
                    max(abs(headroom), SCANNER_CYCLE_SLEEP_TIME * 4), 10)
                print(
                    '[{time}] WARNING: Load too high. Sleeping for {num} second(s).'
                    .format(time=str(datetime.datetime.now()).split('.')[0],
                            num=sleep_time),
                    file=sys.stderr)
                sleep(sleep_time)
                continue
        except:
            # Docker tends to kill psutil readings when a laptop sleeps; retry
            sleep(1)
            continue

        # Periodic maintenance: clear stale PENDING scans, refresh stats.
        # Failures are deliberately ignored.
        try:
            if dequeue_loop_count % SCANNER_MAINTENANCE_CYCLE_FREQUENCY == 0:
                print('[{time}] INFO: Performing periodic maintenance.'.format(
                    time=str(datetime.datetime.now()).split('.')[0]),
                      file=sys.stderr)

                dequeue_loop_count = 0
                num = periodic_maintenance()

                if num > 0:
                    print('[{time}] INFO: Cleared {num} broken scan(s).'.format(
                        time=str(datetime.datetime.now()).split('.')[0],
                        num=num),
                          file=sys.stderr)

                # Forcibly restart if things are going real bad, sleep for a bit to avoid flagging
                # (checked BEFORE num is reset — the old order made this dead code)
                if num > SCANNER_ALLOW_KICKSTART_NUM_ABORTED and SCANNER_ALLOW_KICKSTART:
                    sleep(10)
                    try:
                        print(
                            '[{time}] ERROR: Celery appears to be hung. Attempting to kickstart the scanners.'
                            .format(
                                time=str(datetime.datetime.now()).split('.')[0]),
                            file=sys.stderr)
                        subprocess.call([
                            'pkill', '-u', 'httpobs', '-f', 'httpobs-scan-worker'
                        ])
                    except FileNotFoundError:
                        print(
                            '[{time}] ERROR: Tried to kickstart, but no pkill found.'
                            .format(
                                time=str(datetime.datetime.now()).split('.')[0]),
                            file=sys.stderr)
                    except:
                        print(
                            '[{time}] ERROR: Tried to kickstart, but failed for unknown reasons.'
                            .format(
                                time=str(datetime.datetime.now()).split('.')[0]),
                            file=sys.stderr)
                num = 0
        except:
            pass
        finally:
            dequeue_loop_count += 1

        # Verify that the broker is still up; if it's down, sleep and retry
        try:
            if broker_url.scheme.lower() == 'redis':
                conn = redis.Connection(
                    host=broker_url.hostname,
                    port=broker_url.port or 6379,
                    db=int(broker_url.path[1:] if len(broker_url.path) > 0 else 0),
                    password=broker_url.password)
            else:
                conn = redis.UnixDomainSocketConnection(
                    path=broker_url.path,
                    db=int(parse_qs(broker_url.query).get('virtual_host', ['0'])[0]))

            conn.connect()
            conn.can_read()
            conn.disconnect()
            del conn
        except:
            print(
                '[{time}] ERROR: Unable to connect to to redis. Sleeping for {num} seconds.'
                .format(time=str(datetime.datetime.now()).split('.')[0],
                        num=SCANNER_BROKER_RECONNECTION_SLEEP_TIME),
                file=sys.stderr)
            sleep(SCANNER_BROKER_RECONNECTION_SLEEP_TIME)
            continue

        # Get a list of sites that are pending
        try:
            sites_to_scan = update_scans_dequeue_scans(dequeue_quantity)
        except IOError:
            print(
                '[{time}] ERROR: Unable to retrieve lists of sites to scan. Sleeping for {num} seconds.'
                .format(time=str(datetime.datetime.now()).split('.')[0],
                        num=SCANNER_DATABASE_RECONNECTION_SLEEP_TIME),
                file=sys.stderr)
            sleep(SCANNER_DATABASE_RECONNECTION_SLEEP_TIME)
            continue

        try:
            if sites_to_scan:
                print(
                    '[{time}] INFO: Dequeuing {num} site(s): {sites}.'.format(
                        time=str(datetime.datetime.now()).split('.')[0],
                        num=len(sites_to_scan),
                        sites=', '.join([site[0] for site in sites_to_scan])),
                    file=sys.stderr)
                for site in sites_to_scan:
                    scan.delay(*site)

                # Always sleep at least some amount of time so that CPU utilization measurements can track
                sleep(SCANNER_CYCLE_SLEEP_TIME / 2)
            else:
                # If the queue was empty, lets sleep a little bit
                sleep(SCANNER_CYCLE_SLEEP_TIME)
        except:
            # this shouldn't trigger, but we don't want a scan breakage to kill the scanner
            print('[{time}] ERROR: Unknown celery error.'.format(
                time=str(datetime.datetime.now()).split('.')[0]),
                  file=sys.stderr)
# Demo of basic redis string commands through a pooled client.
import redis

HOST = '127.0.0.1'
PORT = 6379
DB = 0

# BUG FIX: a pool must be a redis.ConnectionPool; redis.Connection is one raw
# socket and redis.Redis(connection_pool=...) cannot draw connections from it.
pool = redis.ConnectionPool(host=HOST, port=PORT, db=DB, decode_responses=True)
rds = redis.Redis(connection_pool=pool)

# set(name, value, ex=seconds, px=milliseconds,
#     nx=only set when the key does NOT exist,
#     xx=only set when the key DOES exist)
rds.set('py', 1805, ex=10)
rds.set('nx1', 'nx的作用', ex=1 * 60, nx=True)
rds.set('nx1', '修改已存在的值', ex=2 * 60, nx=True)  # no-op: nx1 already exists
# BUG FIX: redis-py's setex signature is (name, time, value); the original
# (name, value, time) order passed 'v1' as the expiry and errored.
rds.setex('k1', 20, 'v1')
# BUG FIX: setnx() requires (name, value); the bare call raised TypeError.
rds.setnx('nx2', 'setnx demo')
dic = {'k1': 1, 'k2': 2}
rds.mset(dic)  # 等价 rds.mset(k1=1, k2=2)
# Fetch several keys at once.
# print(rds.get('k1'))
print(rds.mget('k1', 'k2'))
    # NOTE(review): chunk starts mid-function — the enclosing def for these
    # two lines lies outside this view.
    value = unicode_or_str
    return value


def try_load_json(json_str):
    """Parse *json_str* as JSON; on any failure return the raw string."""
    try:
        return json.loads(json_str)
    except Exception:
        return json_str


def get_content_type(filename):
    """Guess a MIME type for *filename* (delegates to urllib3)."""
    return urllib3.fields.guess_content_type(filename)


def make_json_response(json_str, status_code):
    """Wrap pre-serialised JSON in a response with the given status code."""
    response = make_response(json_str)
    response.headers["content-type"] = 'application/json'
    response.status_code = status_code
    return response


# NOTE(review): redis.Connection is the raw protocol object, not a client —
# it has no get()/set(); redis.Redis/StrictRedis was probably intended here.
_redis_instance = redis.Connection(host=app.config['REDIS_HOST'],
                                   port=app.config['REDIS_PORT'],
                                   db=0,
                                   password=app.config['REDIS_PWD'])


def redis_factory():
    """Return the module-wide shared redis instance."""
    return _redis_instance
list( filter(lambda x: x[0] == 'subjects:cancer', eval(r.hget('ontologist:autocomplete', 'neoplasm')))) # create redis protocol file autocomplete_dict = {} for substring in npg_subjects_substring_dict.keys(): if substring in autocomplete_dict: autocomplete_dict[substring].extend( npg_subjects_substring_dict[substring]) else: autocomplete_dict[substring] = npg_subjects_substring_dict[substring] conn = redis.Connection() with gzip.open('redis_autocomplete_init.txt.gz', 'wb') as f: print('Autocomplete keys', end='...', flush=True) for i, substring in enumerate(autocomplete_dict.keys()): for item in conn.pack_command( 'HSET', 'ontologist:autocomplete', substring, json.dumps(autocomplete_dict[substring])): f.write(item) # NPG subjects print('NPG subjects tree depths', end='...', flush=True) for uri in npg_subjects_tree_depth.keys(): for item in conn.pack_command('HSET', 'ontologist:treeDepth', uri.replace(subjects, 'subjects:'), json.dumps( npg_subjects_tree_depth[uri])):
#!/usr/bin/python3
# Open 2512 fresh raw connections to the target and PING each one.
# Connections are never disconnected — presumably a connection-exhaustion
# stress probe, so that behaviour is kept as-is.
import redis

for _ in range(2512):
    link = redis.Connection(host="10.20.1.71", port=1339, socket_timeout=2)
    link.connect()
    link.send_command("ping")
    print(link.read_response())
def connect_to_node(self, host, port):
    """Open a raw connection to one cluster node and record it."""
    node_link = redis.Connection(host, port)
    node_link.connect()
    self.node_connections.append(node_link)
def order_deal(request, user_name=None):
    """Create orders from a POSTed cart, or list a user's orders on GET.

    BUG FIXES: ``redis.Connection`` → ``redis.ConnectionPool`` (a single raw
    connection is not a pool and breaks ``redis.Redis(connection_pool=...)``),
    and the order timestamp used ``%m`` (month) where minutes (``%M``) were
    intended.  The ``eval`` on client data is flagged below.
    """
    import redis
    pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
    r = redis.Redis(connection_pool=pool)
    if request.method == 'POST':
        json_str = request.body
        json_obj = json.loads(json_str)
        print(json_obj)
        temp = json_obj.get('obj')
        # SECURITY: eval() on client-supplied data allows arbitrary code
        # execution — replace with ast.literal_eval or json.loads.
        temp = eval(temp)
        username = json_obj.get('users').replace('"', '')      # ordering user
        o_number = json_obj.get('order_num').split('.')[0]     # order number
        shopname = json_obj.get('storename').replace('"', '')  # shop name
        user = UserProfile.objects.filter(user_name=username)  # ordering user object
        if not user:
            result = {'code': 10303, 'error': 'error'}
            return JsonResponse(result)
        user = user[0]
        shop = Store.objects.filter(store_name=shopname)[0]    # shop object
        # Walk the cart items: "name(tag)" -> "count&sales&price"
        for k, v in temp.items():
            o_name = k.split('(')[0]   # item name
            print(o_name)
            o_count = v.split('&')[0]  # quantity
            try:
                good = Good.objects.filter(c_name=o_name)[0]
                good.c_sales += int(o_count)
                good.save()
            except Exception as e:
                print(e)
                result = {'code': 10333, 'error': 'error'}
                return JsonResponse(result)
            print(o_count)
            o_price = float(v.split('&')[-1]) * int(o_count)  # line total
            # FIX: %M is minutes; %m (month) produced timestamps whose
            # "minutes" field was actually the month.
            o_time = time.strftime("%Y-%m-%d %H:%M:%S")
            try:
                # with r.lock(shop,blocking_timeout=3) as lock:
                Order.objects.create(o_name=o_name, o_count=int(o_count),
                                     o_price=o_price, o_time=o_time,
                                     o_number=o_number, user=user, shop=shop)
                shop.store_sales += int(o_count)
                shop.save()
            except Exception as e:
                print(e)
                # r.expire(lock,3)
                result = {'code': 10224, 'error': 'error'}
                return JsonResponse(result)
        result = {'code': 200}
        return JsonResponse(result)
    elif request.method == 'GET':
        if not user_name:
            return render(request, 'order_my.html')
        user_name = parse.unquote(user_name).replace('"', '')
        print(user_name)
        user = UserProfile.objects.filter(user_name=user_name)[0]  # ordering user
        order = Order.objects.filter(user=user)
        if not order:
            result = {'code': 10231, 'error': 'Sorry,No order'}
            return JsonResponse(result)
        orders = []  # renamed from `list` — don't shadow the builtin
        for item in order:
            dic = {
                'username': user.user_name,
                'number': item.o_number,
                'price': item.o_price,
                'count': item.o_count,
                'time': item.o_time,
                'goodname': item.o_name,
                'shop': item.shop.store_name,
            }
            orders.append(dic)
        result = {'code': 200, 'data': orders}
        return JsonResponse(result)


# {'obj': {'西蓝花炒鸡蛋(力荐)': '2&月售:1000&15', }, 'users': None,'李小姐的店': '李小姐的店'}
# {'西蓝花炒鸡蛋(力荐)': '2&月售:1000', '糖醋排骨(力荐)': '2&月售:1003', '李小姐的店': '李小姐的店'}
# Encode a redis command into RESP wire format without touching the network.
import redis

command_args = ('PING',)
# pack_command() returns the command as a list of encoded byte chunks
# (large commands are split into several chunks).
packed_command = redis.Connection().pack_command(*command_args)
print(packed_command)
# prints: [b'*1\r\n$4\r\nPING\r\n']