def route(url, **kwargs):
    """
    1. @route('/v1')
       def myfunc(req):
           pass

    2. class Myclass(Application):
           @route('/v1')
           def myfunc(self, req):
               pass

    :param url:
    :param kwargs:
    :return:
    """
    if not check_url(url):
        logger.error('check error')
        raise Exception()

    def _route(func):
        func._url = url
        func._kwargs = kwargs

        @functools.wraps(func, assigned=WRAPPER_ASSIGNMENTS)
        def __route(*args, **kwargs):
            return func(*args, **kwargs)
        return __route
    return _route
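# The decorator above only tags the function with `_url`/`_kwargs`; it does
# not register it anywhere. A minimal sketch of a consumer that scans for
# those attributes -- the names `handlers` and `dispatch` are illustrative,
# not from the source:
class Application:
    """Illustrative dispatcher collecting methods tagged by @route."""

    def __init__(self):
        self.handlers = {}
        for name in dir(self):
            member = getattr(self, name)
            # functools.wraps copies func.__dict__ onto the wrapper, so the
            # _url attribute set by @route is visible here
            url = getattr(member, '_url', None)
            if url is not None:
                self.handlers[url] = member

    def dispatch(self, url, req):
        handler = self.handlers.get(url)
        if handler is None:
            raise KeyError("no handler registered for %s" % url)
        return handler(req)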
def restoredb(pg_env, pg_restore_binary, database_dump_path, dump_name):
    env = os.environ.copy()
    env.update(pg_env)

    answer = raw_input("This command will restore this dump into database %s. "
                       "Continue? (y)es, (N)o? " % env['PGDATABASE'])
    if answer != 'y':
        logger.info("Aborting!")
        return

    db_dump_file_name = os.path.join(database_dump_path, dump_name)
    if not os.path.isfile(db_dump_file_name):
        logger.error("file %s does not exist" % db_dump_file_name)
        return

    logger.debug("Restoring %s" % db_dump_file_name)
    cmd = (pg_restore_binary, "-d", env['PGDATABASE'], "-O", "-x",
           db_dump_file_name)
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd, env=env,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()
    if stderr != '':
        logger.error("An error occurred while calling pg_restore: %s" % stderr)
        return
def createSockByType(self, sockType):
    if sockType == socktypes.UDP_CLIENT_LOCAL:
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    elif sockType == socktypes.TCP_CLIENT_LOCAL:
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    else:
        logger.error("***** ERROR SOCKTYPE *****")
def createImage(self, user, image, lxc, description="Nothing", imagenum=10):
    fspath = self.NFS_PREFIX + "/local/volume/" + lxc
    imgpath = self.imgpath + "private/" + user + "/"
    #tmppath = self.NFS_PREFIX + "/local/tmpimg/"
    #tmpimage = str(random.randint(0,10000000)) + ".tz"

    if not os.path.exists(imgpath + image) and os.path.exists(imgpath):
        cur_imagenum = 0
        for filename in os.listdir(imgpath):
            if os.path.isdir(imgpath + filename):
                cur_imagenum += 1
        if cur_imagenum >= int(imagenum):
            return [False, "image number limit exceeded"]
    #sys_run("mkdir -p %s" % tmppath, True)
    sys_run("mkdir -p %s" % imgpath, True)
    try:
        sys_run("tar -cvf %s -C %s ." % (imgpath + image + ".tz", self.dealpath(fspath)), True)
    except Exception as e:
        logger.error(e)
    #try:
    #    sys_run("cp %s %s" % (tmppath+tmpimage, imgpath+image+".tz"), True)
    #    sys_run("rsync -a --delete --exclude=lost+found/ --exclude=root/nfs/ --exclude=dev/ --exclude=mnt/ --exclude=tmp/ --exclude=media/ --exclude=proc/ --exclude=sys/ %s/ %s/" % (self.dealpath(fspath),imgpath+image),True)
    #except Exception as e:
    #    logger.error(e)
    #sys_run("rm -f %s" % tmppath+tmpimage, True)
    #sys_run("rm -f %s" % (imgpath+"."+image+"_docklet_share"),True)
    self.updateinfo(imgpath, image, description)
    logger.info("image:%s from LXC:%s create success" % (image, lxc))
    return [True, "create image success"]
def detachFS(self, lxc, vgname="docklet-group"):
    rootfs = "/var/lib/lxc/%s/rootfs" % lxc
    Ret = sys_run("umount %s" % rootfs)
    if Ret.returncode != 0:
        logger.error("cannot umount rootfs:%s" % rootfs)
        return False
    return True
def fix_mxclientsystem_symlink(self):
    # check mxclientsystem symlink and refresh if necessary
    if self.config.get_symlink_mxclientsystem():
        mxclient_symlink = os.path.join(
            self.config.get_public_webroot_path(), 'mxclientsystem')
        real_mxclient_location = self.config.get_real_mxclientsystem_path()
        if os.path.islink(mxclient_symlink):
            current_real_mxclient_location = os.path.realpath(
                mxclient_symlink)
            if current_real_mxclient_location != real_mxclient_location:
                logger.debug("mxclientsystem symlink exists, but points "
                             "to %s" % current_real_mxclient_location)
                logger.debug("redirecting symlink to %s" %
                             real_mxclient_location)
                os.unlink(mxclient_symlink)
                os.symlink(real_mxclient_location, mxclient_symlink)
        elif not os.path.exists(mxclient_symlink):
            logger.debug("creating mxclientsystem symlink pointing to %s" %
                         real_mxclient_location)
            try:
                os.symlink(real_mxclient_location, mxclient_symlink)
            except OSError, e:
                logger.error("creating symlink failed: %s" % e)
        else:
            logger.warn("Not touching mxclientsystem symlink: file exists "
                        "and is not a symlink")
def run(self, handler):
    daemon_thread_pool_size = self.options['daemon_thread_pool_size']
    from wsgiref.simple_server import WSGIRequestHandler
    LoggerHandler = WSGIRequestHandler
    if self.quiet:
        class QuietHandler(WSGIRequestHandler):
            def log_request(*args, **kw):
                pass
        LoggerHandler = QuietHandler

    srv = simple_server.make_server(self.host, self.port, handler,
                                    handler_class=LoggerHandler)
    logger.info('Initializing a wsgiref backend with %d threads',
                daemon_thread_pool_size)
    use_ssl = self.options['use_ssl']
    ca_cert = self.options['ca_cert']
    ssl_cert = self.options['ssl_cert']
    ssl_key = self.options['ssl_key']

    if use_ssl:
        if not ssl:
            logger.error("Missing python-openssl library, "
                         "please install it to open a https backend")
            raise Exception("Missing python-openssl library, "
                            "please install it to open a https backend")
        srv.socket = ssl.wrap_socket(srv.socket,
                                     keyfile=ssl_key, certfile=ssl_cert,
                                     server_side=True)
    return srv
def create():
    if request.method == 'POST':
        ip = request.json['ip']
        username = request.json['username']
        password = request.json['password']
        role_id = request.json['role_id']
        role_name = request.json['role_name']
        logger.debug(request.json)
        try:
            db = get_db()
            cursor = db.cursor()
            cursor.execute(
                "INSERT INTO node (ip, username, password, role_id, status, deleted) "
                "VALUES (?, ?, ?, ?, 'created', 0)",
                (ip, username, password, role_id)
            )
            role_id = cursor.lastrowid
            db.commit()
            cursor.close()
            add_host(role_name, ip)
            return jsonify({'status': 1, "role_id": role_id})
        except Exception as ex:
            logger.error(ex)
            logger.error(traceback.format_exc())
            return jsonify({'status': -1, 'error': ex.message})
def on_message(self, message, answer_box=None):
    """ Message type can be:
        - new-feed
        - new-deadline-worker
        - force-refresh-feed
        - delete-feed
        - edit-refresh-interval-feed
        - refresh-cache
    """
    assert isinstance(message, dict)
    msg_type = message.get('type')

    if msg_type == 'new-feed':
        logger.warning('>>> new-feed request : %s', message['url'])
        gevent.spawn(new_feed_worker, message['url'], self.favicon_dir,
                     answer_box, self.inbox)

    elif msg_type == "new-deadline-worker":
        logger.warning('>>> new-deadline-worker request : %d',
                       message['feed_id'])
        feed = Feed.query.get(message['feed_id'])
        self.launch_deadline_worker(feed)

    elif msg_type == "force-refresh-feed":
        logger.warning('>>> force-refresh-feed request : %d', message['id'])
        self.workers[message['id']]['queue'].put({
            'type': 'force-refresh',
            'answer_box': answer_box,  # TODO put Mail instead ?
        })

    elif msg_type == "refresh-cache":
        gevent.spawn(cache_worker, message.get('feed_id'))

    elif msg_type == "delete-feed":
        worker = self.workers.get(message['feed_id'])
        if worker:
            del self.workers[message['feed_id']]
            gevent.spawn(delete_worker, worker, message['feed_id'], answer_box)

    else:
        logger.error("Unknown message type : %s", msg_type)
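# Illustrative only: message dicts shaped like the branches above, handed
# straight to the handler. `FeedManager` and `manager` are hypothetical
# names; in the source the messages presumably arrive via a gevent mailbox.
manager = FeedManager()  # hypothetical instance of the class above
manager.on_message({'type': 'new-feed', 'url': 'http://example.com/feed.xml'})
manager.on_message({'type': 'force-refresh-feed', 'id': 42})
manager.on_message({'type': 'delete-feed', 'feed_id': 42})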
def get_streamrouter_ports(cls):
    r = cls.default_deploy().get_streamrouter_ports()
    if r.status_code < 400:
        return r.json()
    else:
        logger.error("fail to get ports of streamrouter")
        return None
def __init__(self, nodemgr, networkmgr, etcdclient, addr, mode):
    self.mode = mode
    self.nodemgr = nodemgr
    self.imgmgr = imagemgr.ImageMgr()
    self.networkmgr = networkmgr
    self.addr = addr
    self.etcd = etcdclient
    self.defaultsize = env.getenv("CLUSTER_SIZE")
    self.fspath = env.getenv("FS_PREFIX")

    logger.info("vcluster start on %s" % (self.addr))
    if self.mode == 'new':
        logger.info("starting in new mode on %s" % (self.addr))
        # check if all clusters data are deleted in httprest.py
        clean = True
        usersdir = self.fspath + "/global/users/"
        for user in os.listdir(usersdir):
            if len(os.listdir(usersdir + user + "/clusters")) > 0 or len(os.listdir(usersdir + user + "/hosts")) > 0:
                clean = False
        if not clean:
            logger.error("clusters files not clean, start failed")
            sys.exit(1)
    elif self.mode == "recovery":
        logger.info("starting in recovery mode on %s" % (self.addr))
        self.recover_allclusters()
    else:
        logger.error("not supported mode:%s" % self.mode)
        sys.exit(1)
def on_failure(self, exc, task_id, args, kwargs, einfo):
    try:
        logger.error(u'Failed to send email, celery task id: %s, args: %s, error: %s'
                     % (task_id, str(args), str(exc)))
        wait_email = Tbl_Wait_Emails()
        wait_email.update_status(str(args[0]), gk7.STATUS.get('error'))
    except Exception as e:
        logger.error(u'Failed to update the email send status, error: %s, args: %s'
                     % (str(e), str(args)))
def is_db_connected(self):
    try:
        self.db.ping()
        return True
    except pymysql.Error, err:
        logger.error("DB DISCONNECTED! Try to reconnect..., err info: %s" % err)
def _write_pidfile(self):
    if self._pid:
        pidfile = self._config.get_pidfile()
        try:
            file(pidfile, 'w+').write("%s\n" % self._pid)
        except IOError, e:
            logger.error("Cannot write pidfile: %s" % e)
def sendData(self):
    if not self.sock.isConnected():
        return
    text = utils.qstr2gbk(self.ui.sendPlainTextEdit.toPlainText())
    if len(text) < 1:
        return False
    data = text
    if self.isHexMode():
        try:
            text = "".join(("".join(text.split("\n"))).split(" "))
            data = binascii.unhexlify(text)
        except TypeError:
            logger.error("Non-hexadecimal digit found")
            return False
    n = 0
    try:
        n = self.sock.sendall(protocolPacker.getProxyDataBuff(data))
    except Exception as e:
        logger.error("send data error: %s" % e.message)
    if n:
        logger.debug("sent bytes: %d" % n)
    self.addData(data, config.SEND_TAG, True)
    self.ui.sendPlainTextEdit.clear()
    return n is None
def start_services(self, lxc_name, services=[], isfirst=False):
    logger.info("start services for container %s: %s" % (lxc_name, services))
    try:
        Ret = subprocess.run(["lxc-attach -n %s -- ln -s /nfs %s" % (lxc_name, self.nodehome)],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             shell=True, check=False)
        logger.debug("prepare nfs for %s: %s" % (lxc_name, Ret.stdout.decode('utf-8')))
        # master node
        if isfirst is True:
            Ret = subprocess.run(["lxc-attach -n %s -- su -c %s/start_jupyter.sh" % (lxc_name, self.rundir)],
                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                 shell=True, check=True)
            logger.debug(Ret)
            logger.info("start services for container %s success" % lxc_name)
        # not sure whether should execute this
        Ret = subprocess.run(["lxc-attach -n %s -- service ssh start" % lxc_name],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             shell=True, check=False)
        logger.debug(Ret.stdout.decode('utf-8'))
        for service in services:
            logger.info("start service %s for container" % service)
            Ret = subprocess.run(["lxc-attach -n %s -- %s" % (lxc_name, service)],
                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                 shell=True, check=False)
            logger.debug(Ret.stdout.decode('utf-8'))
        return [True, "start container services success"]
    except subprocess.CalledProcessError as sube:
        logger.error('start services for container %s failed: %s'
                     % (lxc_name, sube.output.decode('utf-8')))
        return [False, "start services for container failed"]
def get_clustersetting(self, clustername, username, containername, allcontainer):
    clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
    if not os.path.isfile(clusterpath):
        logger.error("cluster file: %s not found" % clustername)
        return [False, "cluster file not found"]
    infofile = open(clusterpath, 'r')
    info = json.loads(infofile.read())
    infofile.close()
    cpu = 0
    memory = 0
    disk = 0
    if allcontainer:
        for container in info['containers']:
            if 'setting' in container:
                cpu += int(container['setting']['cpu'])
                memory += int(container['setting']['memory'])
                disk += int(container['setting']['disk'])
    else:
        for container in info['containers']:
            if container['containername'] == containername:
                if 'setting' in container:
                    cpu += int(container['setting']['cpu'])
                    memory += int(container['setting']['memory'])
                    disk += int(container['setting']['disk'])
    return [True, {'cpu': cpu, 'memory': memory, 'disk': disk}]
def addGroup(self, msg):
    try:
        OwnGuid = self.msg.getValue(msg, 'OwnGuid')
    except KeyError:
        logger.error("add group msg has no OwnGuid")
        return
    try:
        GroupName = self.msg.getValue(msg, 'GroupName')
    except KeyError:
        logger.error("add group msg has no GroupName")
        return
    try:
        Weight = self.msg.getValue(msg, 'Weight')
    except KeyError:
        logger.error("add group msg has no Weight")
        return
    try:
        GroupName = GroupName.encode('utf8')
        logger.debug("Add group {!s}...".format(GroupName))
    except UnicodeDecodeError:
        pass
    try:
        self.mysql.addGroup(OwnGuid, GroupName, Weight)
        logger.debug("Done")
    except _mysql_exceptions.IntegrityError as err:
        logger.error("Failed")
        logger.error("{!s}".format(err[1]))
    group = self.mysql.getGroups(OwnGuid, GroupName)[0]
    arg = ('CREATE_CUSTOM_GROUP', group)
    groupMsg = self.msg.packMsg(*arg)
    logger.debug("Send group msg : {!s}".format(groupMsg))
    self.transport.write(groupMsg)
def get_apps_from_lainlet(data):
    apps = {}
    for name, proc_info in data.iteritems():
        parts = name.split(".")
        if len(parts) < 3:
            logger.error("Got an invalid procname %s, skipped" % name)
            continue
        proc_name = parts[-1]
        app_name = ".".join(parts[:-2])
        proc = {}
        proc["proc_name"] = proc_name
        # The annotations of one proc are all the same.
        # During the deployment, PodInfos may be empty for a while,
        # so we should skip it.
        if len(proc_info["PodInfos"]) == 0:
            continue
        proc["annotation"] = proc_info["PodInfos"][0]["Annotation"]
        proc["containers"] = []
        for c in proc_info["PodInfos"]:
            if len(c["ContainerInfos"]) > 0:
                proc["containers"].append({
                    "container_ip": c["ContainerInfos"][0]["ContainerIp"],
                    "container_port": c["ContainerInfos"][0]["Expose"],
                })
        if app_name in apps:
            apps[app_name].append(proc)
        else:
            apps[app_name] = [proc]
    return apps
def prepareImage(self, user, image, fspath):
    imagename = image['name']
    imagetype = image['type']
    imageowner = image['owner']
    #tmppath = self.NFS_PREFIX + "/local/tmpimg/"
    #tmpimage = str(random.randint(0,10000000)) + ".tz"
    if imagename == "base" and imagetype == "base":
        return
    if imagetype == "private":
        imgpath = self.imgpath + "private/" + user + "/"
    else:
        imgpath = self.imgpath + "public/" + imageowner + "/"
    #try:
    #    sys_run("cp %s %s" % (imgpath+imagename+".tz", tmppath+tmpimage))
    #except Exception as e:
    #    logger.error(e)
    try:
        sys_run("tar -C %s -xvf %s" % (self.dealpath(fspath), imgpath + imagename + ".tz"), True)
        #sys_run("rsync -a --delete --exclude=lost+found/ --exclude=root/nfs/ --exclude=dev/ --exclude=mnt/ --exclude=tmp/ --exclude=media/ --exclude=proc/ --exclude=sys/ %s/ %s/" % (imgpath+imagename,self.dealpath(fspath)),True)
    except Exception as e:
        logger.error(e)
    #sys_run("rm -f %s" % tmppath+tmpimage)
    #self.sys_call("rsync -a --delete --exclude=nfs/ %s/ %s/" % (imgpath+image,self.dealpath(fspath)))
    #self.updatetime(imgpath,image)
    return
def _ssh_untrusted(self, cfg, client, comp_id):
    ip = socket.gethostbyname(cfg["host"])
    code = "%s '%s/receiver.py' '%s' '%s' '%s'" % (cfg["python"], cfg["location"],
                                                   ip, comp_id, self.tmp_dir)
    logger.debug(code)
    ssh_stdin, ssh_stdout, ssh_stderr = client.exec_command(code)
    stdout_channel = ssh_stdout.channel
    # Wait for the untrusted side to respond with the bound port using
    # paramiko channels. Another option would be to have a short-lived ZMQ
    # socket bound on the trusted side and have the untrusted side connect
    # to that and send the port.
    output = ""
    stdout_channel.settimeout(2.0)
    polls = 0
    while output.count("\n") != 2:
        try:
            output += stdout_channel.recv(1024)
        except socket.timeout:
            polls += 1
        if stdout_channel.closed:
            logger.error("An error occurred getting data from the untrusted side.")
            return None
        if polls > 20:
            return None
    return int(output.split("\n")[0])
def get_meta_from_registry(app, meta_version, registry=None):
    logger.debug("ready to get meta version %s for app %s from registry" %
                 (meta_version, app))
    meta_version = normalize_meta_version(meta_version)
    if not registry:
        registry = PRIVATE_REGISTRY
    try:
        y = None
        c = None
        cli = None
        cli = get_docker_client(DOCKER_BASE_URL)
        # TODO check if the image already exists
        cli.pull(
            repository="%s/%s" % (registry, app),
            tag="meta-%s" % (meta_version, ),
            insecure_registry=True
        )
        image = "%s/%s:meta-%s" % (registry, app, meta_version)
        command = '/bin/sleep 0.1'
        c = cli.create_container(image=image, command=command)
        r = cli.get_archive(container=c.get('Id'), path='/lain.yaml')
        tar = tarfile.open(fileobj=StringIO(r[0].data))
        f = tar.extractfile('lain.yaml')
        y = yaml.safe_load(f.read())
    except Exception, e:
        logger.error("failed to get yaml from %s %s: %s" % (app, meta_version, e))
        raise Exception("failed to get yaml from %s %s: %s" % (app, meta_version, e))
    return y
def __init__(self, addr_cidr, etcdclient, mode, masterip):
    self.etcd = etcdclient
    self.masterip = masterip
    if mode == 'new':
        logger.info("init network manager with %s" % addr_cidr)
        self.center = IntervalPool(addr_cidr=addr_cidr)
        # allocate a pool for system IPs, use CIDR=27, which has 32 IPs
        syscidr = 27
        [status, sysaddr] = self.center.allocate(syscidr)
        if status == False:
            logger.error("allocate system ips in __init__ failed")
            sys.exit(1)
        # maybe for system use the last IP address of the CIDR is available,
        # but EnumPool drops the last IP address in its pool -- it is not
        # important
        self.system = EnumPool(sysaddr + "/" + str(syscidr))
        self.usrgws = {}
        self.users = {}
        #self.vlanids = {}
        #self.init_vlanids(4095, 60)
        #self.init_shared_vlanids()
        self.dump_center()
        self.dump_system()
    elif mode == 'recovery':
        logger.info("init network manager from etcd")
        self.center = None
        self.system = None
        self.usrgws = {}
        self.users = {}
        #self.vlanids = {}
        self.load_center()
        self.load_system()
        #self.load_vlanids()
        #self.load_shared_vlanids()
    else:
        logger.error("mode: %s not supported" % mode)
def handle_accepted(self, sock, addr):
    """Called when a remote client initiates a connection."""
    handler = None
    ip = None
    try:
        handler = self.handler(sock, self, ioloop=self.ioloop)
        if not handler.connected:
            return
        ip = addr[0]
        self.ip_map.append(ip)
        # For performance and security reasons we should always set a limit
        # for the number of file descriptors that socket_map should contain.
        # When we're running out of such limit we'll use the last available
        # channel for sending a 421 response to the client before
        # disconnecting it.
        if not self._accept_new_cons():
            handler.handle_max_cons()
            return
        # accept only a limited number of connections from the same source
        # address.
        if self.max_cons_per_ip:
            if self.ip_map.count(ip) > self.max_cons_per_ip:
                handler.handle_max_cons_per_ip()
                return
        try:
            handler.handle()
        except:
            handler.handle_error()
        else:
            return handler
    except Exception:
        # This is supposed to be an application bug that should be fixed. We
        # do not want to tear down the server though (DoS). We just log the
        # exception, hoping that someone will eventually file a bug.
        # References:
        # - http://code.google.com/p/pyftpdlib/issues/detail?id=143
        # - http://code.google.com/p/pyftpdlib/issues/detail?id=166
        # - https://groups.google.com/forum/#!topic/pyftpdlib/h7pPybzAx14
        logger.error(traceback.format_exc())
        if handler is not None:
            handler.close()
        else:
            if ip is not None and ip in self.ip_map:
                self.ip_map.remove(ip)
def callback(ch, method, properties, body):
    try:
        f1 = build_fact(e1, body)
        #decoded = json.loads(body)
        # f1 = e1.Assert("(ServerFact \"" + str(decoded[SERVERID]) + "\" " + str(decoded['cpu'])
        #                + " " + str(decoded['mem']) + " " + str(decoded['hdd']) + " " + str(decoded['net'])
        #                + ")")
        logger.info("received fact: %s" % body)
        get_rules_from_db(tenantId)
        saveout = sys.stdout
        fsock = open(LOGGING_PATH + '/CLIPSout.log', 'w')
        sys.stdout = fsock
        e1.PrintFacts()
        e1.PrintRules()
        e1.Run()
        sys.stdout = saveout
        fsock.close()
        f1.Retract()
    except ValueError:
        logger.error("receiving an invalid body: " + body)
    except clips.ClipsError:
        logger.error(clips.ErrorStream.Read())
    except Exception as ex:
        logger.warn("FACT: already exists or " + ex.message)
def chat(self, msg):
    #if self.login:
    msgBody = self.msg.getValue(msg, 'Body')
    msgCreated = int(time.time())
    msgRecvGuid = self.msg.getValue(msg, 'RecvGuid')
    msgSendGuid = self.msg.getValue(msg, 'SendGuid')
    blacklist = self.mysql.getBlackList(msgRecvGuid)
    if msgSendGuid not in blacklist:
        arg = ('CHAT', msgBody, msgCreated, msgSendGuid, msgRecvGuid)
        msg = self.msg.packMsg(*arg)
        receiver = self.factory.getClient(msgRecvGuid)
        if not receiver:
            logger.debug("User {!s} is offline, write the msg to mysql".format(msgRecvGuid))
            arg = ('CHAT', msgBody, msgCreated, msgSendGuid, msgRecvGuid, self.mysql)
            setOffLineMsg(*arg)
            #T = setOffLineMsg(*arg)
            #T.setDaemon(True)
            #T.start()
            return
        logger.debug("Sending msgs to user {!s}".format(msgRecvGuid))
        receiver.transport.write(msg)
    else:
        logger.debug("you are in receiver's blacklist, so sorry...")
        logger.debug("Writing msgs to database...")
        arg = ('CHAT', msgBody, msgCreated, msgSendGuid, msgRecvGuid, self.mysql)
        try:
            setOnLineMsg(*arg)
        except _mysql_exceptions.ProgrammingError:
            logger.error("Insert error")
def _lookup_runtime_version(self):
    # forced to a specific version?
    if self._conf['m2ee'].get('runtime_version', None):
        return self._conf['m2ee']['runtime_version']

    # 3.0 has the runtime version in metadata.json
    if 'RuntimeVersion' in self._model_metadata:
        return self._model_metadata['RuntimeVersion']

    # else, 2.5: try to read it from model.mdp using sqlite
    model_mdp = os.path.join(
        self._conf['m2ee']['app_base'],
        'model',
        'model.mdp'
    )
    if not os.path.isfile(model_mdp):
        logger.warn("%s is not a file!" % model_mdp)
        return None
    version = None
    try:
        conn = sqlite3.connect(model_mdp)
        c = conn.cursor()
        c.execute('SELECT _ProductVersion FROM _MetaData LIMIT 1;')
        version = c.fetchone()[0]
        c.close()
        conn.close()
    except sqlite3.Error, e:
        logger.error("An error occurred while trying to read the Mendix "
                     "version number from model.mdp: %s" % e)
        return None
    return version
def __init__(self, appname, proc):
    self.appname = appname
    self.procname = proc['proc_name']
    self.name = "%s__upstream__%s" % (appname, self.procname)
    self.backends = []
    for c in proc["containers"]:
        if c["container_port"] != 0 and c["container_ip"]:
            self.backends.append("%s:%s" % (
                c["container_ip"], c["container_port"]
            ))
    annotation = json.loads(proc['annotation'])
    mountpoints = annotation.get('mountpoint')
    try:
        self.mountpoint_list = [parse_mountpoint(m) for m in mountpoints]
    except Exception as e:
        logger.error("invalid mountpoint info in proc %s: %s" % (proc, e))
        self.mountpoint_list = []
    self.https_only = annotation.get('https_only', False)
    # in the original, a missing/empty healthcheck annotation left the
    # healthcheck attributes unset; default them explicitly instead
    try:
        healthcheck_path = annotation.get('healthcheck', None)
        if healthcheck_path:
            self.healthcheck = True
            self.healthcheck_path = healthcheck_path
        else:
            self.healthcheck = False
            self.healthcheck_path = ''
    except Exception as e:
        self.healthcheck = False
        self.healthcheck_path = ''
def _task_queue_consumer():
    while True:
        try:
            task = _task_queue.get()
            thread.start_new_thread(do_callback, (task, ))
        except Exception as ex:
            logger.error(ex)
def unpack(model_upload_path, mda_name, app_base):
    mda_file_name = os.path.join(model_upload_path, mda_name)
    if not os.path.isfile(mda_file_name):
        logger.error("file %s does not exist" % mda_file_name)
        return False

    logger.debug("Testing archive...")
    cmd = ("unzip", "-tqq", mda_file_name)
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()
        if proc.returncode != 0:
            logger.error("An error occurred while testing archive "
                         "consistency:")
            if stdout != '':
                logger.error(stdout)
            if stderr != '':
                logger.error(stderr)
            return False
    except OSError, ose:
        logger.error("An error occurred while executing unzip: %s" % ose)
        return False
def add(db_session, data, username):
    logger.info(LogMsg.START, username)
    check_schema(['body'], data.keys())
    logger.debug(LogMsg.SCHEMA_CHECKED)

    group_id = data.get('group_id', None)
    user = check_user(username, db_session)
    if group_id is not None and not is_group_member(user.person_id, group_id,
                                                    db_session):
        logger.debug(LogMsg.CHAT_PERSON_NOT_IN_GROUP,
                     {'group_id': group_id, 'username': username})
        raise Http_error(403, Message.PERSON_CANT_SENT_MESSAGE)
    logger.debug(LogMsg.PERMISSION_VERIFIED, username)

    parent_id = data.get('parent_id', None)
    if parent_id:
        logger.debug(LogMsg.CHAT_CHECK_FOR_PARENT)
        parent_message = get_internal(parent_id, db_session)
        if parent_message is None:
            logger.error(LogMsg.CHAT_PARENT_NOT_FOUND, parent_id)
            raise Http_error(404, Message.PARENT_NOT_FOUND)

    model_instance = ChatMessage()
    logger.debug(LogMsg.POPULATING_BASIC_DATA)
    populate_basic_data(model_instance, username, data.get('tags'))
    model_instance.sender_id = user.person_id
    model_instance.receptor_id = data.get('receptor_id')
    model_instance.group_id = group_id
    model_instance.body = data.get('body')
    model_instance.parent_id = data.get('parent_id')
    db_session.add(model_instance)
    logger.debug(LogMsg.DB_ADD)

    logger.info(LogMsg.END)
    return model_instance
def getTags(self):
    path = self.url
    self["size"] = os.path.getsize(path)
    self["ext"] = self.ext
    audio = common.MutagenFile(path, common.FORMATS)
    if audio is not None:
        tag_keys_override = TAGS_KEYS_OVERRIDE.get(
            audio.__class__.__name__, None)
        for tag, file_tag in TAG_KEYS.iteritems():
            if tag_keys_override and tag_keys_override.has_key(file_tag):
                file_tag = tag_keys_override[file_tag]
            if audio.has_key(file_tag) and audio[file_tag]:
                value = audio[file_tag]
                if isinstance(value, list) or isinstance(value, tuple):
                    value = value[0]
                if isinstance(value, mutagen.asf.ASFUnicodeAttribute):
                    value = value.value
                fix_value = common.fix_charset(value)
                if fix_value == "[Invalid Encoding]":
                    if tag == "title":
                        fix_value = self.fileName
                    else:
                        fix_value = ""
                self[tag] = fix_value
            else:
                if tag == 'composer':
                    self[tag] = ''
                else:
                    self[tag] = 0
        for key in ['sample_rate', 'bitrate', 'length']:
            try:
                if hasattr(audio.info, key):
                    if key == 'length':
                        self['duration'] = getattr(audio.info, key) * 1000
                    else:
                        self[key] = getattr(audio.info, key)
            except Exception, e:
                logger.error(e)
def get_msg(sock, header_length, header_format):
    """Read one complete message from sock.

    The message is received in two steps: first the fixed-size header is
    read; the header carries the total message length, from which the
    remaining body size is derived and received in the second step.
    """
    recvlen, header = attempt_recvall(sock, header_length)
    if recvlen == header_length:
        try:
            header_unpack = struct.unpack(header_format, header)
            header_unpack_len = struct.calcsize(header_format)
        except struct.error as e:
            logger.error("unpack header failed: {0}".format(e))
            return (0, None, 0, None)
        else:
            total_size = header_unpack[0]
            body_size = total_size - header_length
            if body_size > 0:
                recvlen, body = attempt_recvall(sock, body_size)
                if recvlen == body_size:
                    return (header_unpack_len, header_unpack, body_size, body)
                else:
                    logger.error("recv body failed: want={0}, recv={1}".format(
                        body_size, recvlen))
                    return (header_unpack_len, header_unpack, body_size, None)
            else:
                return (header_unpack_len, header_unpack, 0, None)
    else:
        logger.error("recv header failed: want={0}, recv={1}".format(
            header_length, recvlen))
        return (0, None, 0, None)
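# For orientation, the framing get_msg expects: a fixed-size header whose
# first field is the total message length (header + body). A minimal sketch
# of the sending side under that assumption -- the "!I" format and the
# helper name pack_msg are illustrative, not from the source:
import struct

def pack_msg(body, header_format="!I"):
    """Frame body so that get_msg(sock, struct.calcsize(header_format),
    header_format) can parse it: the header's first field carries the
    total length of header plus body."""
    header_length = struct.calcsize(header_format)
    total_size = header_length + len(body)
    return struct.pack(header_format, total_size) + body

# e.g. sock.sendall(pack_msg(b"hello"))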
def delete(id, db_session, username):
    logger.info(LogMsg.START, username)

    logger.debug(LogMsg.PERMISSION_CHECK, username)
    validate_permissions_and_access(username, db_session, 'ACCOUNT_DELETE',
                                    access_level=Access_level.Premium)
    logger.debug(LogMsg.PERMISSION_VERIFIED, username)

    user = check_user(username, db_session)
    if user is None:
        logger.error(LogMsg.INVALID_USER, username)
        raise Http_error(404, Message.INVALID_USER)
    if user.person_id is None:
        logger.error(LogMsg.PERSON_NOT_EXISTS, username)
        raise Http_error(404, Message.Invalid_persons)
    validate_person(user.person_id, db_session)
    logger.debug(LogMsg.PERSON_EXISTS, username)

    try:
        logger.debug(LogMsg.DELETE_ACCOUNT_BY_ID, id)
        db_session.query(Account).filter(
            and_(Account.person_id == user.person_id, Account.id == id)
        ).delete()
    except:
        logger.error(LogMsg.DELETE_FAILED, exc_info=True)
        raise Http_error(404, Message.NOT_FOUND)

    logger.info(LogMsg.END)
    return Http_response(204, True)
def reset_pass(data, db_session):
    logger.info(LogMsg.START, data)
    schema_validate(data, RESET_PASS_SCHEMA_PATH)
    logger.debug(LogMsg.SCHEMA_CHECKED)

    cell_no = data.get('cell_no')
    redis_key = 'PASS_{}'.format(cell_no)
    code = redis.get(redis_key)
    if code is None:
        logger.error(LogMsg.REGISTER_KEY_DOESNT_EXIST)
        raise Http_error(404, Message.INVALID_CODE)
    code = code.decode("utf-8")
    if (code is None) or (code != data.get('code')):
        logger.error(LogMsg.REGISTER_KEY_INVALID)
        raise Http_error(409, Message.INVALID_CODE)

    user = check_by_cell_no(cell_no, db_session)
    if user:
        user.password = data.get('password')
        logger.debug(LogMsg.USER_PASSWORD_RESET, user_to_dict(user))
        logger.info(LogMsg.END)
        return data

    logger.error(LogMsg.NOT_FOUND, data)
    raise Http_error(404, Message.INVALID_USER)
def create_server_socket(listen_ip, listen_port):
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except OSError as e:
        logger.error("create tcp socket failed: {0}".format(e))
        return None

    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setblocking(False)
    try:
        sock.bind((listen_ip, listen_port))
    except OSError as e:
        logger.error("bind {0}:{1} failed: {2}".format(
            listen_ip, listen_port, e))
        sock.close()
        return None

    try:
        sock.listen(100)
    except OSError as e:
        logger.error("listen failed: {0}".format(e))
        sock.close()
        return None

    logger.info("listening on {0}:{1}".format(listen_ip, listen_port))
    return sock
def request_parser_helper(api, params, timeout=10000, format_="json", method="POST"):
    """Single entry point for all outgoing requests."""
    js_out = None
    try:
        if method == "GET":
            response = requests.request("GET", api, params=params, timeout=timeout)
        elif format_ == "json":
            response = requests.post(api, json=params, timeout=timeout)
        elif format_ == "form":
            response = requests.post(api, data=params, timeout=timeout)
        else:
            raise Exception("request_parser_helper: not a valid format")
        if response.status_code == 200:
            js_out = response.json()
            if js_out.get("status") != 0:
                logger.error(
                    "request_parser: [api:%s] [input:%s] [output:%s]"
                    % (api, json.dumps(params, ensure_ascii=False),
                       json.dumps(js_out, ensure_ascii=False)))
        else:
            logger.error("request_parser http error %s" % response.text)
        api_js = {"api": api, "js_out": js_out}
        logger.debug(f'API_AND_RETURN|{api_js}')
        return js_out
    except Exception as err_msg:
        logger.error(f'err_msg: {err_msg}')
        return
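# A hedged usage sketch; the endpoint URL and payload are placeholders.
# Note that the helper treats a JSON body with "status" == 0 as success,
# and that the default timeout=10000 is passed straight to requests, which
# interprets it in seconds.
js = request_parser_helper(
    "http://example.com/api/parse",  # hypothetical endpoint
    {"query": "hello"},
    timeout=5,
    format_="json",
    method="POST",
)
if js is None:
    logger.error("request failed or returned a non-200 response")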
def flush_cluster(self, username, clustername, containername):
    begintime = datetime.datetime.now()
    [status, info] = self.get_clusterinfo(clustername, username)
    if not status:
        return [False, "cluster not found"]
    containers = info['containers']
    imagetmp = username + "_tmp_docklet"
    for container in containers:
        if container['containername'] == containername:
            logger.info("container: %s found" % containername)
            onework = self.nodemgr.ip_to_rpc(container['host'])
            onework.create_image(username, imagetmp, containername)
            fimage = container['image']
            logger.info("image: %s created" % imagetmp)
            break
    else:
        logger.error("container: %s not found" % containername)
    for container in containers:
        if container['containername'] != containername:
            logger.info("container: %s now flush" % container['containername'])
            onework = self.nodemgr.ip_to_rpc(container['host'])
            #t = threading.Thread(target=onework.flush_container,args=(username,imagetmp,container['containername']))
            #threads.append(t)
            onework.flush_container(username, imagetmp, container['containername'])
            container['lastsave'] = datetime.datetime.now().strftime(
                "%Y-%m-%d %H:%M:%S")
            container['image'] = fimage
            logger.info("thread for container: %s has been prepared"
                        % container['containername'])
    clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
    infofile = open(clusterpath, 'w')
    infofile.write(json.dumps(info))
    infofile.close()
    self.imgmgr.removeImage(username, imagetmp)
    endtime = datetime.datetime.now()
    dtime = (endtime - begintime).seconds
    logger.info("flush spend %s seconds" % dtime)
    logger.info("flush success")
def restoredb(config, dump_name):
    if not config.allow_destroy_db():
        logger.error("Refusing to do a destructive database operation "
                     "because the allow_destroy_db configuration option "
                     "is set to false.")
        return False

    env = os.environ.copy()
    env.update(config.get_pg_environment())

    db_dump_file_name = os.path.join(config.get_database_dump_path(), dump_name)
    if not os.path.isfile(db_dump_file_name):
        logger.error("file %s does not exist" % db_dump_file_name)
        return False

    logger.debug("Restoring %s" % db_dump_file_name)
    cmd = (config.get_pg_restore_binary(), "-d", env['PGDATABASE'], "-O",
           "-n", "public", "-x", db_dump_file_name)
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd, env=env,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()
    if stderr != '':
        logger.error("An error occurred while calling pg_restore: %s" % stderr)
        return False
    return True
def getIp():
    # use the default network interface
    global error
    # name = pcap.findalldevs()
    try:
        dataPack = pcap.pcap(name=NAME, promisc=True, immediate=True)
        # dataPack.setfilter('udp port 6343')
        logger.info('connected to interface %s, start capturing', NAME)
    except Exception as e:
        logger.error('connecting to interface %s failed, forcing exit, error: %s', NAME, e)
        error = True
        sys.exit(1)
    else:
        for ptime, pdata in dataPack:
            # unpack the data-link layer frame
            Ethernet_pack = dpkt.ethernet.Ethernet(pdata)
            # check that it carries a network-layer IP packet and a
            # transport-layer TCP segment (the ports come from the TCP header)
            if type(Ethernet_pack.data) == dpkt.ip.IP and type(
                    Ethernet_pack.data.data) == dpkt.tcp.TCP:
                srcIp = '%d.%d.%d.%d' % tuple(
                    map(ord, list(Ethernet_pack.data.src)))
                dstIp = '%d.%d.%d.%d' % tuple(
                    map(ord, list(Ethernet_pack.data.dst)))
                sport = Ethernet_pack.data.data.sport
                dport = Ethernet_pack.data.data.dport
                obj = {
                    'srcIp': srcIp,
                    'dstIp': dstIp,
                    'sport': sport,
                    'dport': dport,
                    'count': 1
                }
                head = str(srcIp) + '=>' + str(dstIp)
                try:
                    currentObj = results[head]
                except KeyError:
                    results[head] = obj
                else:
                    currentObj['count'] = currentObj['count'] + 1
        dataPack.close()
def delete(id, db_session, username):
    logger.info(LogMsg.START, username)

    logger.debug(LogMsg.MODEL_GETTING, {'comment_action_id': id})
    action = db_session.query(CommentAction).filter(
        CommentAction.id == id).first()
    if action is None:
        logger.debug(LogMsg.NOT_FOUND)
        raise Http_error(404, Message.NOT_FOUND)

    user = check_user(username, db_session)
    if user.person_id is None:
        logger.error(LogMsg.USER_HAS_NO_PERSON)
        raise Http_error(400, Message.INVALID_USER)
    validate_person(user.person_id, db_session)
    logger.debug(LogMsg.PERSON_EXISTS)

    logger.debug(LogMsg.PERMISSION_CHECK, username)
    permission_data = {}
    if action.person_id == user.person_id:
        permission_data = {Permissions.IS_OWNER.value: True}
    validate_permissions_and_access(username, db_session,
                                    'COMMENT_ACTION_DELETE', permission_data)
    logger.debug(LogMsg.PERMISSION_VERIFIED)

    if action.person_id != user.person_id:
        logger.error(LogMsg.NOT_ACCESSED, username)
        raise Http_error(403, Message.ACCESS_DENIED)

    try:
        db_session.delete(action)
    except:
        logger.exception(LogMsg.DELETE_FAILED, exc_info=True)
        raise Http_error(404, Message.NOT_FOUND)

    logger.info(LogMsg.END)
    return Http_response(204, True)
def testDelRequest(hurl, hdata, headers, htestcaseid, htestcasename, htesthope, response_testhope):
    # the original called requests.delete with the undefined name `header`;
    # the `headers` parameter is what was meant
    hr = requests.delete(hurl, params=hdata, headers=headers)
    hresult = json.loads(hr.text)  # parse the returned JSON data
    hstatus = hresult["status"]
    if hstatus == htesthope and response_testhope in str(hresult):
        data = {
            "t_id": htestcaseid,
            "t_name": htestcasename,
            "t_method": "DELETE",
            "t_url": hurl,
            "t_param": "test data: " + str(hdata),
            "t_hope": "status:" + htesthope + " expected result: " + response_testhope,
            "t_actual": "status:" + hstatus + " actual result: " + str(hresult),
            "t_result": "passed"
        }
        hlist.append(data)  # append the test result to the result list
        logger.info(htestcasename)
        logger.info("passed")
        logger.info(" actual result: " + str(hresult))
    else:
        data = {
            "t_id": htestcaseid,
            "t_name": htestcasename,
            "t_method": "DELETE",
            "t_url": hurl,
            "t_param": "test data: " + str(hdata),
            "t_hope": "status:" + htesthope + " expected result: " + response_testhope,
            "t_actual": "status:" + hstatus + " actual result: " + str(hresult),
            "t_result": "failed"
        }
        hlist.append(data)  # append the test result to the result list
        logger.error(htestcasename)
        logger.error("failed")
        logger.error(" actual result: " + str(hresult))
def parse_post_body(body, content_type):
    if '; ' in content_type:
        content_type, details = content_type.split('; ', 1)
    if content_type == 'application/x-www-form-urlencoded':
        return parse_query(body.decode('utf-8'))
    elif content_type == 'multipart/form-data':
        if '=' not in details:
            logger.error('Bad multipart/form-data spec')
            return {}
        name, value = details.split('=', 1)
        if name != 'boundary':
            logger.error('Bad multipart/form-data spec')
            return {}
        # per RFC 2046, the delimiter in the body is the boundary parameter
        # prefixed with two extra hyphens
        boundary = ('--' + value).encode('utf-8')
        chunks = body.split(boundary)
        if len(chunks) < 3:
            logger.error('Bad multipart/form-data data')
            return {}
        chunks = chunks[1:-1]
        form_data = {}
        for chunk in chunks:
            form_data.update(parse_form_data_chunk(chunk))
        logger.info(f'Got post form data = {form_data}')
        return form_data
    else:
        logger.warning(f'Unsupported form data content type {content_type}')
        return {}
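# For reference, a minimal multipart body that survives the checks above,
# assuming parse_form_data_chunk returns {name: value} for each part; the
# boundary string is arbitrary:
boundary_demo_body = (
    "--XBOUNDARY\r\n"
    'Content-Disposition: form-data; name="a"\r\n\r\n1\r\n'
    "--XBOUNDARY\r\n"
    'Content-Disposition: form-data; name="b"\r\n\r\n2\r\n'
    "--XBOUNDARY--\r\n"
).encode('utf-8')
form = parse_post_body(boundary_demo_body,
                       "multipart/form-data; boundary=XBOUNDARY")
# expected under that assumption: {'a': '1', 'b': '2'}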
def delete_comments(book_id, db_session, username, **kwargs):
    logger.info(LogMsg.START, username)
    logger.debug(LogMsg.COMMENT_DELETING_BOOK_COMMENTS, book_id)
    book = get_book(book_id, db_session)
    if book is None:
        logger.error(LogMsg.NOT_FOUND, {'book_id': book_id})
        raise Http_error(404, Message.NOT_FOUND)
    press = book.get('press', None)

    logger.debug(LogMsg.PERMISSION_CHECK, username)
    validate_permissions_and_access(username, db_session, 'COMMENT_DELETE',
                                    model=book)
    logger.debug(LogMsg.PERMISSION_VERIFIED)

    # permissions, presses = get_user_permissions(username, db_session)
    #
    # has_permit = has_permission_or_not(
    #     [Permissions.COMMENT_DELETE_PREMIUM],
    #     permissions)
    # if not has_permit:
    #     if press in presses:
    #         has_permission(
    #             [Permissions.COMMENT_DELETE_PRESS],
    #             permissions)
    #     else:
    #         logger.error(LogMsg.PERMISSION_DENIED, username)
    #         raise Http_error(403, Message.ACCESS_DENIED)
    #
    # logger.debug(LogMsg.PERMISSION_VERIFIED, username)

    delete_book_comments(book_id, db_session)

    logger.info(LogMsg.END)
    return Http_response(204, True)
def setSystemTime():
    url = 'https://api.m.jd.com/client.action?functionId=queryMaterialProducts&client=wh5'
    try:
        session = requests.session()
        # get server time
        t0 = datetime.now()
        ret = session.get(url).text
        t1 = datetime.now()
        if not ret:
            logger.error('Syncing JD server time failed: the time sync API is no longer available')
            return
        js = json.loads(ret)
        t = float(js["currentTime2"]) / 1000
        # offset the server timestamp by half the round-trip time
        dt = datetime.fromtimestamp(t) + ((t1 - t0) / 2)
        sys = platform.system()
        if sys == "Windows":
            import win_util
            win_util.setWinSystemTime(dt)
        elif sys == "Linux":
            os.system(f'date -s "{dt.strftime("%Y-%m-%d %H:%M:%S.%f000")}"')
        logger.info('Synced JD server time: %s' % dt)
    except Exception as e:
        logger.error('Syncing JD server time failed, please check permissions')
        logger.error(e)
def get(id, db_session, username):
    logger.info(LogMsg.START, username)
    content = db_session.query(BookContent).filter(
        BookContent.id == id).first()
    if content is None:
        logger.error(LogMsg.NOT_FOUND, {'book_content_id': id})
        raise Http_error(404, Message.NOT_FOUND)
    book = get_book(content.book_id, db_session)
    if book is None:
        logger.error(LogMsg.NOT_FOUND, {'book_id': content.book_id})
        raise Http_error(404, Message.NOT_FOUND)

    permission_data = {}
    if content.creator == username:
        permission_data = {Permissions.IS_OWNER.value: True}
    permissions, presses = get_user_permissions(username, db_session)
    has_permit = has_permission_or_not([Permissions.BOOK_CONTENT_GET_PREMIUM],
                                       permissions, None, permission_data)
    if not has_permit:
        if book.press in presses:
            has_permission([Permissions.BOOK_CONTENT_GET_PRESS], permissions)
        else:
            logger.error(LogMsg.PERMISSION_DENIED)
            raise Http_error(403, Message.ACCESS_DENIED)
    logger.debug(LogMsg.PERMISSION_VERIFIED, username)

    return content_to_dict(content, db_session)
def upload_to_lanzouyun(self, filepath, target_folder, history_file_prefix=""):
    logger.warning("start uploading {} to {}".format(
        os.path.basename(filepath), target_folder.name))
    run_start_time = datetime.now()

    def on_uploaded(fid, is_file):
        if not is_file:
            return
        # note: the original log message said "download finished"; this
        # callback fires when the upload completes
        logger.info("upload finished, fid={}".format(fid))

        prefix = history_file_prefix
        if prefix == "":
            prefix = self.history_version_prefix

        # move older versions matching the prefix into the history folder
        files = self.lzy.get_file_list(target_folder.id)
        for file in files:
            if file.name.startswith(prefix):
                self.lzy.move_file(file.id, self.folder_history_files.id)
                logger.info("moved {} to folder ({})".format(
                    file.name, self.folder_history_files.name))

        logger.info("moving the file into folder ({})".format(target_folder.name))
        self.lzy.move_file(fid, target_folder.id)

    # upload into the target folder
    retCode = self.lzy.upload_file(filepath, -1, callback=self.show_progress,
                                   uploaded_handler=on_uploaded)
    if retCode != LanZouCloud.SUCCESS:
        logger.error("upload failed, retCode={}".format(retCode))
        return False

    logger.warning("uploading this file took {} in total".format(
        datetime.now() - run_start_time))
    return True
def play(args):
    '''Pass an argument list to play.

    Parameters
    ----------
    args : iterable
        Argument list for play. The first item can, but does not need to,
        be 'play'.

    Returns
    -------
    status : bool
        True on success.
    '''
    if args[0].lower() != "play":
        args.insert(0, "play")
    else:
        args[0] = "play"

    try:
        logger.info("Executing: %s", " ".join(args))
        process_handle = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        status = process_handle.wait()
        if process_handle.stderr is not None:
            logger.info(process_handle.stderr)
        if status == 0:
            return True
        else:
            logger.info("Play returned with error code %s", status)
            return False
    except OSError as error_msg:
        logger.error("OSError: Play failed! %s", error_msg)
    except TypeError as error_msg:
        logger.error("TypeError: %s", error_msg)
    return False
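# A hedged usage sketch, assuming the SoX `play` binary is on PATH; the
# file name is a placeholder:
ok = play(["play", "example.wav", "trim", "0", "5"])  # play the first 5 seconds
if not ok:
    logger.error("playback failed; check that SoX is installed")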
def delete(id, db_session, username):
    logger.info(LogMsg.START, username)

    permissions, presses = get_user_permissions(username, db_session)
    has_permission([Permissions.ACCOUNT_DELETE_PREMIUM], permissions)

    user = check_user(username, db_session)
    if user is None:
        logger.error(LogMsg.INVALID_USER, username)
        raise Http_error(404, Message.INVALID_USER)
    if user.person_id is None:
        logger.error(LogMsg.PERSON_NOT_EXISTS, username)
        raise Http_error(404, Message.Invalid_persons)
    validate_person(user.person_id, db_session)
    logger.debug(LogMsg.PERSON_EXISTS, username)

    try:
        logger.debug(LogMsg.DELETE_ACCOUNT_BY_ID, id)
        db_session.query(Account).filter(
            and_(Account.person_id == user.person_id, Account.id == id)).delete()
    except:
        logger.error(LogMsg.DELETE_FAILED, exc_info=True)
        raise Http_error(404, Message.NOT_FOUND)

    logger.info(LogMsg.END)
    return Http_response(204, True)
def receive_events():
    """Receive events related to the API recommendations feature."""
    data = request.data
    # the payload arrives wrapped; strip the fixed-size envelope around the JSON
    jsonData = json.loads(data[36:-4].decode("utf-8"))
    try:
        jwt_token = request.headers['X-JWT-Assertion']
        organization = get_company_from_jwt(jwt_token)
    except Exception:
        logger.debug("Error when extracting company details from JWT, "
                     "default Company selected")
        organization = "Company"

    try:
        action = jsonData["action"]
        payload = jsonData["payload"]
        logger.debug("Recommendation event received with Action " + action)
        if action == 'ADD_API':
            response = add_API(payload, organization)
        elif action == 'DELETE_API':
            response = delete_API(payload, organization)
        elif action == 'ADD_NEW_APPLICATION':
            response = add_application(payload, organization)
        elif action == 'UPDATED_APPLICATION':
            response = update_application(payload, organization)
        elif action == 'DELETE_APPLICATION':
            response = delete_application(payload, organization)
        elif action == 'ADD_USER_SEARCHED_QUERY':
            response = add_search_query(payload, organization)
        elif action == 'ADD_USER_CLICKED_API':
            response = add_clicked_API(payload, organization)
        else:
            logger.error("Incorrect action " + action +
                         " used for recommendation event")
            response = Response(status=500)
    except Exception:
        logger.exception("Error occurred when adding the " + action + " event")
        response = Response(status=500)
    return response
def main():
    if token is None:
        logger.error('Missing env variable "sentrycloudtoken"')
        return
    if sentry_url is None:
        logger.error('Missing env variable "sentryurl"')
        return

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--field',
        help='Field name to pull from sentry. [culprit, filename, title, type]',
        default='title',
    )
    parser.add_argument('--file', help='File path to export to. Ie. result.png',
                        default='result.png')
    parser.add_argument(
        '--cache',
        help='Utilize the cached result from the last Sentry API query. '
             'False will delete the previous cache.',
        default=False,
    )
    parser.add_argument('--verbose', help='Verbose to stdout', default=False)
    args = parser.parse_args()
    logger.info('Running with args: {}'.format(args))

    # the original read these flags off `parser` (which has no such
    # attributes) and inverted the verbose check; parsed values live on `args`
    if args.verbose is not False:
        logger.setLevel(logging.DEBUG)
    if args.cache is False:
        os.remove(cache_file_name)

    dataMap = get_data()
    counts = count(dataMap, args.field)
    create_word_cloud(counts, args.file)

    if args.cache is False:
        os.remove(cache_file_name)
def usageInc(self, *args, **kwargs):
    '''
    Usage: usageModify(cur_user = token_from_auth, modification = data_from_form)
    Modify the usage info of a user
    '''
    cur_user = kwargs['cur_user']
    modification = kwargs['modification']
    logger.info("record usage for user:%s" % cur_user.username)
    groupname = cur_user.user_group
    groupinfo = self.groupQuery(name=groupname)['data']
    usage = UserUsage.query.filter_by(username=cur_user.username).first()
    if usage is None:
        new_usage = UserUsage(cur_user.username)
        db.session.add(new_usage)
        db.session.commit()
        usage = UserUsage.query.filter_by(username=cur_user.username).first()
    if int(modification['cpu']) <= 0 or int(modification['memory']) <= 0 or int(modification['disk']) <= 0:
        return [False, "cpu, memory and disk settings cannot be less than zero"]
    cpu = int(usage.cpu) + int(modification['cpu'])
    memory = int(usage.memory) + int(modification['memory'])
    disk = int(usage.disk) + int(modification['disk'])
    if cpu > int(groupinfo['cpu']):
        logger.error("cpu quota exceeded, user:%s" % cur_user.username)
        return [False, "cpu quota exceeded"]
    if memory > int(groupinfo['memory']):
        logger.error("memory quota exceeded, user:%s" % cur_user.username)
        return [False, "memory quota exceeded"]
    if disk > int(groupinfo['disk']):
        logger.error("disk quota exceeded, user:%s" % cur_user.username)
        return [False, "disk quota exceeded"]
    usage.cpu = str(cpu)
    usage.memory = str(memory)
    usage.disk = str(disk)
    db.session.commit()
    return [True, "distribute the resource"]
def getFreeDineList(page, stype, user):
    # note: the "&region..." parameters had been mangled into "®ion..." by
    # HTML-entity decoding; restored here
    url = 'https://m.dianping.com/activity/static/list?page={0}&cityid=7&regionParentId=0&regionId=0&type={1}&sort=0&filter=1&token={2}'.format(
        page, stype, user["token"])
    actList = []
    content = ''
    try:
        # logger.debug(url)
        response = request.openUrl(url, user, {})
        content = str(response.read(), 'utf-8')
        decodeContent = json.loads(content)
        logger.debug(content)
        for act in decodeContent["data"]["mobileActivitys"]:
            actList.append({
                "id": str(act["offlineActivityId"]),
                "name": act["title"],
                "mode": act["mode"]
            })
    except Exception as ex:
        logger.error("failed to fetch the free-meal list, url: {0}".format(url))
        logger.error(content)
        logger.error(ex)
    logger.info('--- fetched page {0} of type {2} free-meal events, size: {1} ---'.format(
        page, len(actList), stype))
    return actList
def checkout(order_id, data, db_session, username):
    logger.info(LogMsg.START, username)
    schema_validate(data, CHECKOUT_EDIT_SCHEMA_PATH)
    logger.debug(LogMsg.SCHEMA_CHECKED)

    preferred_account = data.get('preferred_account', 'Main')
    person_id = data.get('person_id')
    logger.debug(LogMsg.ORDER_CHECKOUT_REQUEST, order_id)
    order = get_order(order_id, db_session)
    if order is None:
        logger.error(LogMsg.NOT_FOUND, {'order_id': order_id})
        raise Http_error(404, Message.NOT_FOUND)
    logger.debug(LogMsg.ORDER_EXISTS, order_id)

    if order.status == OrderStatus.Invoiced:
        logger.debug(LogMsg.ORDER_NOT_EDITABLE, order_id)
        raise Http_error(409, Message.ORDER_INVOICED)

    # CHECK PERMISSION
    logger.debug(LogMsg.PERMISSION_CHECK, username)
    validate_permissions_and_access(username, db_session, 'ORDER_CHECKOUT',
                                    model=order)
    logger.debug(LogMsg.PERMISSION_VERIFIED, username)

    logger.debug(LogMsg.GETTING_ACCOUNT_PERSON, {'person_id': order.person_id})
    account = get_account(order.person_id, preferred_account, db_session)
    if account is None:
        logger.error(LogMsg.USER_HAS_NO_ACCOUNT,
                     {'person_id': order.person_id, 'type': preferred_account})
        raise Http_error(404, Message.USER_HAS_NO_ACCOUNT)

    logger.debug(LogMsg.ORDER_CALC_PRICE, {'order_id': order_id})
    order_price = recalc_order_price(order_id, db_session)

    logger.debug(LogMsg.ORDER_CHECK_ACCOUNT_VALUE)
    if account.value < order_price:
        logger.error(LogMsg.ORDER_LOW_BALANCE,
                     {'order_price': order_price, 'account_value': account.value})
        raise Http_error(402, Message.INSUFFICIANT_BALANCE)

    account.value -= order_price
    transaction_data = {'account_id': account.id, 'debit': order_price}
    add_transaction(transaction_data, db_session)
    order.status = OrderStatus.Invoiced
    logger.debug(LogMsg.ORDER_INVOICED, order_id)
    edit_basic_data(order, username)

    order_items = get_orders_items_internal(order_id, db_session)
    logger.debug(LogMsg.ORDER_GETTING_ITEMS, {'order_id': order_id})
    book_list = []
    for item in order_items:
        book_list.append(item.book_id)
    add_books_to_library(order.person_id, book_list, db_session)
    data.update({'order_price': order_price})
    logger.debug(LogMsg.ORDER_ITEMS_ADDED_TO_LIB)

    logger.info(LogMsg.END)
    return data
def TestPostRequest(hurl, hdata, headers, htestcassid, htestcasename, htesthope, fanhuitesthope):
    hr = requests.post(hurl, data=hdata, headers=headers)
    # hr = requests.post(hurl, data=json.dumps(hdata), headers=headers)
    hresult = json.loads(hr.text)
    hstatus = hresult['status']
    if hstatus == htesthope and fanhuitesthope in str(hresult):
        hhhdata = {"t_id": htestcassid,
                   "t_name": htestcasename,
                   "t_method": "POST",
                   "t_url": hurl,
                   "t_param": "test data: " + str(hdata),
                   "t_hope": "status:" + htesthope + " expected result: " + fanhuitesthope,
                   "t_actual": "status:" + hstatus + " actual result: " + str(hresult),
                   "t_result": "passed"}
        hlist.append(hhhdata)
        logger.info(htestcasename)
        logger.info("passed")
        logger.info("actual result: " + str(hresult))
    else:
        hhhdata = {"t_id": htestcassid,
                   "t_name": htestcasename,
                   "t_method": "POST",
                   "t_url": hurl,
                   "t_param": "test data: " + str(hdata),
                   "t_hope": "status:" + htesthope + " expected result: " + fanhuitesthope,
                   "t_actual": "status:" + hstatus + " actual result: " + str(hresult),
                   "t_result": "failed"}
        hlist.append(hhhdata)
        logger.error(htestcasename)
        logger.error("failed")
        logger.error("actual result: " + str(hresult))
    print(hlist)
def signUpFreeDine(dine, user, stype):
    data = get_request_data(dine, user, stype)
    url = 'http://s.dianping.com/ajax/json/activity/offline/saveApplyInfo'
    user["headers"].update({
        "Host": "s.dianping.com",
        "Referer": "http://s.dianping.com/event/" + dine["id"]
    })
    content = ''
    try:
        response = request.openUrl(url, user, data)
        content = str(response.read(), 'utf-8')
        decodeContent = json.loads(content)
        resultCode = decodeContent["code"]
        logger.info("----- free meal: {0} -----".format(dine["name"]))
        if resultCode == 200:
            logger.info('----- sign-up succeeded -----')
        else:
            errMessage = decodeContent["msg"]["html"]
            logger.info('----- sign-up failed: {0}, {1} -----'.format(
                resultCode, errMessage))
    except Exception as ex:
        logger.error('-------- sign-up interrupted by an error ------')
        logger.error(content)
        logger.error(ex)
def getAlreadyApplyTryProduct(allTryProducts, user):
    prodIds = ','.join(allTryProducts)
    url = 'https://try.jd.com/user/getApplyStateByActivityIds?activityIds={0}'.format(
        prodIds)
    user["headers"].update({
        "Referer": url
    })
    result = []
    content = ''
    try:
        response = request.openUrl(url, user, {})
        content = str(response.read(), 'utf-8')
        decodeContent = json.loads(content)
        for item in decodeContent:
            result.append(item["activityId"])
        # logger.info(result)
    except Exception as ex:
        logger.error("failed to fetch already-applied trial products, url: {0}".format(url))
        logger.error(content)
        logger.error(ex)
    return result
def _user_login(self):
    self.session = requests.Session()
    r = self.session.post(
        'https://account.dianping.com/account/ajax/checkRisk',
        headers=self._headers,
        data=self._get_check_risk_data(),
        verify=False)
    r_dict = json.loads(r.text)
    logger.info("login check risk response:{}".format(r_dict))

    public_key = r_dict and r_dict['msg'] and r_dict['msg']['publicKey']
    if not public_key:
        print("Error: Cannot get public key!")
        return ''
    uuid = r_dict and r_dict['msg'] and r_dict['msg']['uuid']
    if not uuid:
        print("Error: Cannot get uuid!")
        return ''
    print('check risk success, public key:{0}, uuid:{1}'.format(
        public_key, uuid))

    r = self.session.post(self._login_url,
                          headers=self._headers,
                          data=self._get_login_data(public_key, uuid),
                          verify=False)
    logger.info("dp login response content:{}".format(r.json()))
    if not r or not r.cookies:
        logger.error("Error: Login failed!")
        return ''
    if r and r.cookies:
        logger.info(r.cookies)
        for (k, v) in r.cookies.items():
            if k == "dper":
                return v
    return ''
def hottryapply(user):
    logger.info("starting to apply for hot trial products")
    url = 'http://try.jd.com'
    user["headers"].update({
        "Host": "try.jd.com",
        "Referer": url
    })
    content = ''
    try:
        response = request.openUrl(url, user, {})
        logger.info("opening web")
        content = str(response.read(), 'utf-8')
        soup = BeautifulSoup(content, "html.parser")
        hottryprods = []
        for result in soup.find_all('li', {"class": "ui-switchable-panel"}):
            activityId = result.get("activity_id")
            if activityId is not None:
                hottryprods.append(activityId)
        logger.info("user:{1} hot trial products: {0}".format(hottryprods, user["phone"]))
        for prodId in hottryprods:
            vendorId = getVendorByProductId(user, prodId)
            followVendor(user, vendorId)
            applyTryProduct(user, prodId)
    except Exception as ex:
        logger.error("failed to fetch the trial product list, url: {0}".format(url))
        logger.error(ex)
        logger.error(content)
    logger.info("user:{0} finished applying for hot trial products".format(user["phone"]))
def TestDeleteRequest(hurl, hdata, headers, htestcassid, htestcassname, htesthope, fanhuitesthope):
    # the original called requests.delete with the undefined name `header`;
    # the `headers` parameter is what was meant
    if hdata == "":
        hr = requests.delete(hurl, headers=headers)
    else:
        hr = requests.delete(hurl, params=hdata, headers=headers)
    hjson = json.loads(hr.text)  # parse the returned JSON data
    hstatus = str(hjson["status"])
    if str(hstatus) == str(htesthope) and fanhuitesthope in str(hjson):
        hhhdata = {"t_id": htestcassid,
                   "t_name": htestcassname,
                   "t_method": "DELETE",
                   "t_url": hurl,
                   "t_param": "test data: " + str(hdata),
                   "t_hope": "expected: status=" + str(htesthope) + ", msg contains: " + fanhuitesthope,
                   "t_actual": "actual: status=" + hstatus + ", msg=" + str(hjson),
                   "t_result": "passed"}
        hlist.append(hhhdata)  # append the test result to the result list
        logger.info(htestcassname)
        logger.info("passed")
        logger.info("actual: " + str(hjson))
    else:
        hhhdata = {"t_id": htestcassid,
                   "t_name": htestcassname,
                   "t_method": "DELETE",
                   "t_url": hurl,
                   "t_param": "test data: " + str(hdata),
                   "t_hope": "expected: status=" + str(htesthope) + ", msg contains: " + fanhuitesthope,
                   "t_actual": "actual: status=" + hstatus + ", msg=" + str(hjson),
                   "t_result": "failed"}
        hlist.append(hhhdata)
        logger.error(htestcassname)
        logger.error("failed")
        logger.error("actual: " + str(hjson))