def post(self):
    """Save an uploaded image and submit it to the recognition queue.

    Returns (body, status):
      201 with coordinates and recognition info on success,
      400 when the upload cannot be saved,
      408 when recognition times out,
      500 when the reply-queue read fails unexpectedly.
    """
    # Daily sub-folder, e.g. <UPLOAD_PATH>/20240131.
    filepath = os.path.join(app.config['UPLOAD_PATH'], arrow.now().format('YYYYMMDD'))
    if not os.path.exists(filepath):
        os.makedirs(filepath)
    try:
        # Random 32-char hex file name. '%032x' zero-pads: the original
        # '%32x' padded with SPACES for small values, producing file names
        # containing spaces.
        imgname = '%032x' % random.getrandbits(128)
        imgpath = os.path.join(filepath, '%s.jpg' % imgname)
        f = request.files['file']
        f.save(imgpath)
    except Exception as e:
        logger.error(e)
        return {'message': 'File error'}, 400
    # Per-request reply queue the recognition worker answers on.
    que = Queue.Queue()
    # Recognition parameter dict; worker fills in 'coord'.
    r = {'coord': []}
    app.config['RECGQUE'].put((9, r, que, imgpath))
    try:
        recginfo = que.get(timeout=app.config['TIMEOUT'])
    except Queue.Empty:
        return {'message': 'Timeout'}, 408
    except Exception as e:
        # Previously fell through and returned None (framework 500 with no
        # body); return an explicit error response instead.
        logger.error(e)
        return {'message': 'Recognition error'}, 500
    else:
        return {'coord': r['coord'], 'recginfo': recginfo}, 201
def on_message(self, headers, message):
    """ Handler on message """
    LOGGER.info("=" * 72)
    LOGGER.info('Message headers:\n%s', headers)
    LOGGER.info('Message body:\n%s', message)

    dashboard_build = headers.get('libvirt_dashboard_build')
    dashboard_id = headers.get('libvirt_dashboard_id')
    testrun = headers.get('polarion_testrun')

    body = json.loads(message)
    status = body.get('status')
    log_url = body.get('log-url')

    # Only a passed submission records a submit date.
    changes = {
        "submit_status": status,
        "submit_log": log_url,
    }
    if status == "passed":
        changes["submit_date"] = datetime.datetime.now()

    with app.app_context():
        count = Run.query.filter(Run.id == dashboard_id).update(changes)
        Run.query.session.commit()
        if not count:
            LOGGER.error("No matching test run for ID: {}".format(dashboard_id))
        else:
            LOGGER.info("Updated Test run ID: {}".format(dashboard_id))
def register():
    """Handle the registration form POST; returns a JSON status.

    Returns jsonify(True) on success, jsonify(False) for invalid input or
    non-POST requests, jsonify('Error') when persisting the user fails.
    """
    if request.method == 'POST':
        logger.info('Registration POST: %s%s%s'
                    % (request.form['email'],
                       request.form['name'],
                       request.form['tel']))
        user = User()
        user.name = request.form.get('name', None)
        user.email = request.form.get('email', None)
        user.tel = request.form.get('tel', None)
        user.msg = request.form.get('message', None)
        if not user.is_valid:
            logger.error('No valid form. Request:%s' % request)
            return jsonify(False)
        try:
            user.save()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; the failure is still logged.
            logger.error('Don\'t save in base. Request:%s' % request)
            return jsonify('Error')
        logger.info('Register:Done!')
        send_email(user)
        return jsonify(True)
    else:
        return jsonify(False)
def generateSSLCert():
    """Generate a self-signed RSA certificate/key pair if none exists.

    Writes plexivity.crt and plexivity.key into config.DATA_DIR.
    Best-effort: failures are logged, never raised.
    """
    key_path = os.path.join(config.DATA_DIR, 'plexivity.key')
    crt_path = os.path.join(config.DATA_DIR, 'plexivity.crt')
    if not os.path.exists(key_path) or not os.path.exists(crt_path):
        logger.warning("plexivity was started with ssl support but no cert was found, trying to generating cert and key now")
        try:
            from OpenSSL import crypto, SSL
            from socket import gethostname
            # create a key pair
            k = crypto.PKey()
            k.generate_key(crypto.TYPE_RSA, 1024)
            # create a self-signed cert
            cert = crypto.X509()
            subject = cert.get_subject()
            subject.C = "US"
            subject.ST = "plex land"
            subject.L = "plex land"
            subject.O = "plexivity"
            subject.OU = "plexivity"
            subject.CN = gethostname()
            cert.set_serial_number(1000)
            cert.gmtime_adj_notBefore(0)
            cert.gmtime_adj_notAfter(10*365*24*60*60)
            cert.set_issuer(cert.get_subject())
            cert.set_pubkey(k)
            cert.sign(k, 'sha1')
            # with-blocks close the handles; the original open(...).write(...)
            # calls leaked them. NOTE(review): "wt" + dump_* output implies
            # Python 2 (dump_* returns bytes on Python 3) — confirm.
            with open(crt_path, "wt") as crt_file:
                crt_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
            with open(key_path, "wt") as key_file:
                key_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
            logger.info("ssl cert and key generated and saved to: %s" % config.DATA_DIR)
        except Exception as e:
            # Include the cause; the original message gave no diagnostic.
            logger.error("unable to generate ssl key and cert: %s" % e)
def save_feedback_response(bound_logger, survey_feedback_response):
    """Persist a survey feedback response.

    :param bound_logger: request-scoped structured logger
    :param survey_feedback_response: feedback payload dict; its 'invalid'
        key (if truthy) is popped before storage
    :return: the 'invalid' flag that was stored
    :raises IntegrityError, SQLAlchemyError: after rolling back
    """
    bound_logger.info("Saving feedback response")
    survey = survey_feedback_response.get("survey_id")
    period = survey_feedback_response.get("collection", {}).get("period")
    invalid = survey_feedback_response.get("invalid")
    if invalid:
        survey_feedback_response.pop("invalid")
    feedback_response = FeedbackResponse(invalid=invalid,
                                         data=survey_feedback_response,
                                         survey=survey,
                                         period=period)
    try:
        db.session.add(feedback_response)
        db.session.commit()
    except IntegrityError as e:
        # Use the bound logger consistently (the original switched to the
        # module-level logger here, dropping the request context).
        bound_logger.error("Integrity error in database. Rolling back commit", error=e)
        db.session.rollback()
        raise
    except SQLAlchemyError as e:
        bound_logger.error("Unable to save response", error=e)
        db.session.rollback()
        # bare `raise` preserves the original traceback (was `raise e`)
        raise
    else:
        bound_logger.info("Feedback response saved")
        return invalid
def _error(request, message, code=None, err=None):
    """Log an error tagged with the caller's address; abort when a code is given."""
    if err:
        entry = '%s;{remote_addr:%s};%s' % (message, request.remote_addr, err)
    else:
        entry = '%s;{remote_addr:%s}' % (message, request.remote_addr)
    logger.error(entry)
    if code:
        abort(code, message)
def startScheduler():
    """Create DB tables/default roles and start the background notify scheduler."""
    db.create_all()
    # create default roles!
    if not db.session.query(models.Role).filter(models.Role.name == "admin").first():
        admin_role = models.Role(name='admin', description='Administrator Role')
        user_role = models.Role(name='user', description='User Role')
        db.session.add(admin_role)
        db.session.add(user_role)
        db.session.commit()
    try:
        import tzlocal
        tz = tzlocal.get_localzone()
        logger.info("local timezone: %s" % tz)
    except Exception:
        # Narrowed from a bare except: tzlocal may be missing or the zone
        # undeterminable; fall through to the UTC default below.
        tz = None
    if not tz or tz.zone == "local":
        # (fixed the missing space between 'log' and 'messages')
        logger.error('Local timezone name could not be determined. Scheduler will display times in UTC for any log '
                     'messages. To resolve this set up /etc/timezone with correct time zone name.')
        tz = pytz.utc
    # in debug mode this is executed twice :(
    # DONT run flask in auto reload mode when testing this!
    scheduler = BackgroundScheduler(logger=sched_logger, timezone=tz)
    scheduler.add_job(notify.task, 'interval', seconds=config.SCAN_INTERVAL,
                      max_instances=1,
                      start_date=datetime.datetime.now(tz) + datetime.timedelta(seconds=2))
    scheduler.start()
    # NOTE(review): this binds a LOCAL name only and is discarded on return;
    # if a module-level `sched` should hold the scheduler, `global sched`
    # is required — confirm intent.
    sched = scheduler
def imgrecg(self, path, coord=None):
    """Recognize vehicle information in an image via the native engine.

    :param path: image file path
    :param coord: optional crop rectangle; when given the image is cropped
        first and the temporary crop is deleted afterwards
    :return: parsed recognition result dict, or None on failure
    """
    # Mutable default `coord=[]` replaced with None (shared-state pitfall);
    # `if coord:` treats both None and [] as "no crop", same as the old
    # `coord != []` check for all real inputs.
    if coord is None:
        coord = []
    try:
        recg_path = None
        if coord:
            path = self.crop_img(path, coord)
            recg_path = path
        p_str_url = create_string_buffer(path)
        sz_result = create_string_buffer("\0" * 1024)
        ret = self.dll.doRecg(self.engineID, byref(p_str_url), byref(sz_result), 1024)
        # Engine replies in GBK; ignore undecodable bytes.
        res = sz_result.value.decode(encoding="gbk", errors="ignore")
        return json.loads(res)
    except Exception as e:
        logger.error(e)
        return None
    finally:
        # Best-effort cleanup of the temporary cropped image.
        try:
            if recg_path is not None:
                os.remove(recg_path)
        except Exception as e:
            logger.error(e)
def delete_user(id):
    """Admin-only view deleting a user after form confirmation."""
    if not g.user.is_admin():
        logger.error("%s tried to access /delete-user/%d", g.user.email, id)
        abort(403)
    user = User.query.get_or_404(id)
    if user.is_admin():
        flash("Cannot delete the admin")
        return redirect(url_for('user_list'))
    form = DeleteUserForm()
    if not form.validate_on_submit():
        return render_template('admin_delete_user.html',
                               Title="Delete user",
                               form=form,
                               user=user)
    if request.form['button'] == 'Cancel':
        return form.redirect(url_for('user_list'))
    logger.info("%s was deleted", user.email)
    db.session.delete(user)
    db.session.commit()
    flash("User deleted successfully")
    return redirect(url_for('user_list'))
def load_user_from_request(request):
    """Resolve the current user from a slack service token or an Authorization header."""
    # slack login
    auth_key = request.args.get('token') or request.form.get('token')
    if auth_key:
        servictoken = ServiceToken.query.filter_by(key=auth_key).first()
        if servictoken:
            user_id = request.args.get('user_id') or request.form.get('user_id')
            user = request_user_slack(user_id)
            if user and user.is_allowed():
                return user
            logger.error(
                "User ID %s is not in the database" % user_id)
    # try token login
    header_token = request.headers.get('Authorization')
    if header_token:
        token_row = Token.query.filter_by(token=header_token).first()
        if token_row:
            user = token_row.user
            if user.is_authenticated():
                return user
    # finally, return None if both methods did not login the user
    return None
def get_pdf_url_status(pdf_url):
    """Probe a PDF URL and update its is_pdf / http_status / last_checked.

    :param pdf_url: record with .url and .publisher, mutated in place
    :return: the same pdf_url record
    """
    worker = current_process()
    logger.info(u'{} checking pdf url: {}'.format(worker, pdf_url))
    is_pdf = False
    http_status = None
    try:
        response = http_get(
            url=pdf_url.url,
            ask_slowly=True,
            stream=True,
            publisher=pdf_url.publisher,
            session_id=get_session_id()
        )
    except Exception as e:
        # str(e) instead of e.message: `.message` is absent on most
        # exception classes and removed entirely in Python 3.
        logger.error(u"{} failed to get response: {}".format(worker, str(e)))
    else:
        with response:
            try:
                is_pdf = is_a_pdf_page(response, pdf_url.publisher)
                http_status = response.status_code
            except Exception as e:
                logger.error(u"{} failed reading response: {}".format(worker, str(e)))
    pdf_url.is_pdf = is_pdf
    pdf_url.http_status = http_status
    pdf_url.last_checked = datetime.utcnow()
    logger.info(u'{} updated pdf url: {}'.format(worker, pdf_url))
    return pdf_url
def file_to_image(file):
    """Decode raw image bytes into an OpenCV BGR image; None on failure."""
    log.debug("Converting file to image")
    try:
        # np.frombuffer replaces np.fromstring, whose binary mode is
        # deprecated (DeprecationWarning since NumPy 1.14).
        nparr = np.frombuffer(file, np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        return image
    except Exception as exc:
        log.error(exc)
        return None
def compute_similarity(embedding1, embedding2):
    """Return the cosine similarity of two embedding vectors, or -99 on failure."""
    log.debug("Calculating similarity.")
    try:
        norm_product = np.linalg.norm(embedding1) * np.linalg.norm(embedding2)
        return np.dot(embedding1, embedding2) / norm_product
    except Exception as exc:
        log.error(exc)
        return -99
def _make_request_and_cache(self, url, timeout_sec):
    """GET the url, caching the body with a timestamp on success; None otherwise."""
    r = requests.get(url, timeout=timeout_sec)
    if r.status_code != 200:
        logger.error("Error while making http request. code={}".format(
            r.status_code))
        return None
    self.cache[url] = {'timestamp': time.time(), 'payload': r.text}
    return r.text
def persist_auth_code(redis_connection, name: str, value: str, expire: int, db: int) -> List:
    """Store an auth code in the selected redis db with an expiry.

    :return: pipeline execution results, or None when a WatchError occurs
    """
    with redis_connection.pipeline() as pipe:
        try:
            pipe.execute_command('SELECT', db)
            pipe.set(name=name, value=value, ex=expire)
            pipe_response = pipe.execute()
            return pipe_response
        except WatchError:
            # logging requires a message argument: the original
            # logger.error(exc_info=True) raised TypeError itself.
            logger.error("WatchError while persisting auth code", exc_info=True)
def _get_real_url(self):
    """Resolve the real direct stream URL (m3u8) behind self.raw_url.

    Falls back to self.raw_url whenever the page cannot be fetched or the
    URL cannot be extracted.
    """
    ret = Engine.get(self.raw_url)
    if not ret or ret.status_code != 200:
        logger.error(f"{__name__} 处理失败: {self.raw_url}")
        return self.raw_url
    match = re.search(r"url:\s*'(http.+?)',", ret.text)
    if not match:
        logger.error(f"{__name__} 处理失败: {self.raw_url}")
        # Previously this branch fell through to .group(1) on None and
        # crashed with AttributeError; fall back to the raw url instead.
        return self.raw_url
    return match.group(1)  # the real direct link (m3u8)
def on_receive(self, payload):
    """Process one subscriber event: forward it to the SSP if the subscriber
    still has push quota and its bid interval has elapsed.

    Reads quota/interval settings from the (redis-cached) cabinet service;
    push counts and last-bid timestamps live in redis. All failures are
    caught and logged — this handler never raises.

    NOTE(review): assumes payload is a dict containing
    payload['subscriber']['_id'] — confirm against the message producer.
    """
    try:
        # logger.debug(
        #     "SubscriberProcessorService.process_subscriber: "
        #     f"processing subscriber: {payload['subscriber']}"
        # )
        # start_time = time.time()
        redis_client = redis.Redis(connection_pool=REDIS_POOL, socket_timeout=1)
        cab = Cabinet(settings.CABINET_URL)
        # Cabinet settings are cached in redis for 30s to avoid a round trip
        # per event.
        cached_cabinet = CachedCabinet(
            cab, RedisEngine(redis_client, prefix="CABINET_CACHE", ttl=30))
        general_settings = cached_cabinet.general()
        limit = general_settings["push_limit_per_token"]
        bid_interval = general_settings["token_bid_interval"]
        token = payload["subscriber"]["_id"]
        subscriber_pushes = redis_client.get(
            f"subscriber.pushes.count:{token}")
        # redis returns None for a missing key; int(None) -> TypeError,
        # treated as zero pushes so far.
        try:
            subscriber_pushes = int(subscriber_pushes)
        except TypeError:
            subscriber_pushes = 0
        # subscriber_pushes = (self.counter_service
        #                      .get_pushes_count(token))
        has_quota = subscriber_pushes < limit
        last_bid_key = f"subscriber:{token}:last-bid-at"
        # Same None -> TypeError trick: no stored timestamp means no prior bid.
        try:
            last_bid_time = int(redis_client.get(last_bid_key))
        except TypeError:
            last_bid_time = None
        if last_bid_time:
            time_passed = time.time() - last_bid_time
            # bid_interval is in minutes; timestamps are in seconds.
            time_passed_enough = time_passed > (bid_interval * 60)
            logger.debug("SubscriberProcessorService.process_subscriber: "
                         f"passed time since last bid {time_passed}")
        else:
            time_passed_enough = True
        if has_quota and time_passed_enough:
            send_to_ssp(payload)
            # self.queue.publish.call_async(payload)
            # Record this bid; the key expires after a day.
            redis_client.set(last_bid_key, int(time.time()), ex=DAY_SECONDS)
        else:
            logger.debug("SubscriberProcessorService.process_subscriber: "
                         f"for subscriber: {payload['subscriber']['_id']} "
                         f"has_quota={has_quota} "
                         f"time_passed_enough={time_passed_enough}")
        # finish_time = time.time()
        # logger.debug(
        #     "SubscriberProcessorService.process_subscriber: "
        #     "total execution time "
        #     f"{(finish_time - start_time) * 1000}ms"
        # )
    except Exception as e:
        logger.error(f"SubscriberProcessor: exception {e}")
def render_dashboard_page(uid, priv, inst_id):
    """Collect dashboard data for a server instance owned by `uid`.

    Returns rtn.success(...) with motd, logo URL, MC version, port, FTP
    account info and the parsed server.properties; rtn.error(403) when the
    requester does not own the instance, rtn.error(404) when the instance
    does not exist, rtn.error(500) on unexpected failure.

    NOTE(review): `priv` is accepted but unused here — presumably consumed
    by a decorator or kept for signature parity; confirm.
    """
    try:
        # get info
        serv_core_obj = db.session.query(ServerInstance).join(
            ServerCORE).filter(ServerInstance.inst_id == int(inst_id)).first()
        # first, make sure this operation is only allowed by its owner
        if serv_core_obj != None:
            if serv_core_obj.owner_id == uid:
                mc_version = serv_core_obj.ob_server_core.minecraft_version
                # get server properties and motd
                file_server_properties = os.path.join(serv_core_obj.inst_dir,
                                                      "server.properties")
                motd_string = ""
                server_properties = {}
                if os.path.exists(file_server_properties):
                    parser = KVParser(file_server_properties)
                    server_properties = parser.conf_items
                    motd_string = server_properties.get("motd")
                # LOGO src: only exposed when a server-icon.png exists
                image_source = ""
                inst_dir = serv_core_obj.inst_dir
                logo_file_name = os.path.join(inst_dir, "server-icon.png")
                if os.path.exists(logo_file_name):
                    image_source = "/server_inst/dashboard/logo_src/%s" % inst_id
                # ftp account name (defaults indicate "no FTP account yet")
                ftp_account_name = ""
                default_ftp_password = True
                ftp_obj = db.session.query(FTPAccount).filter(
                    FTPAccount.inst_id == inst_id).first()
                if ftp_obj != None:
                    ftp_account_name = ftp_obj.username
                    default_ftp_password = ftp_obj.default_password
                properties_params = {
                    "motd": motd_string,
                    "image_source": image_source,
                    "mc_version": mc_version,
                    "listen_port": serv_core_obj.listening_port,
                    "ftp_account_name": ftp_account_name,
                    "default_ftp_password": default_ftp_password,
                    "server_properties": server_properties
                }
                return rtn.success(properties_params)
            else:
                # instance exists but belongs to someone else
                return rtn.error(404)
        else:
            return rtn.error(403)
    except:
        logger.error(traceback.format_exc())
        return rtn.error(500)
def run(self):
    """Poll the Slack RTM stream and dispatch each event until stopped."""
    if self.stopped is not False:
        return
    self.stopped = False
    if not slack.rtm_connect(auto_reconnect=True):
        logger.error("SlackRTM connection failed")
        return
    while self.stopped is False and slack.server.connected is True:
        for event in slack.rtm_read():
            received(event)
        sleep(0.2)
def decorated_function(*args, **kwargs):
    """Scope gate: run `f` only when g.scope contains 'all' or the
    '<path>_<method>' scope derived from the current request."""
    try:
        scope = '_'.join([request.path[1:], request.method.lower()])
    except Exception as e:
        # Previously `scope` stayed unbound here and the membership test
        # below raised NameError (unless 'all' short-circuited it);
        # deny the request explicitly instead.
        logger.error(e)
        return {'status': 405, 'error': 'Method Not Allowed'}, 405
    if 'all' in g.scope or scope in g.scope:
        return f(*args, **kwargs)
    return {'status': 405, 'error': 'Method Not Allowed'}, 405
def healthcheck():
    """Report service health by opening a DB connection and running a probe query."""
    try:
        logger.info("Checking database connection")
        connection = db.engine.connect()
        test_sql(connection)
    except SQLAlchemyError:
        logger.error("Failed to connect to database")
        return server_error(500)
    return jsonify({'status': 'OK'})
async def select_labels(self):
    """Fetch all (id, label) rows ordered by label; None on failure."""
    sql = 'select id,label from {0} order by label;'.format(self.table)
    try:
        async with self.conn.cursor() as cur:
            await cur.execute(sql)
            return await cur.fetchall()
    except Exception as e:
        logger.error(e)
        return None
def api_get_all(db_name):
    """Return every row of the named table as a JSON list of column->value dicts."""
    try:
        columns = tables[db_name]
        records = [dict(zip(columns, row)) for row in get_db_data(db_name)]
        return jsonify(records)
    except Exception as e:
        logger.error("Exception with api_get_all(): {}".format(e))
        return "Error", 500
def _get_s3_object(bucket, key):
    """Fetch an object from S3; returns the GetObject response or None."""
    try:
        return s3.get_object(Bucket=bucket, Key=key)
    except Exception as e:
        # Include the underlying error — the original message dropped it,
        # making failures undiagnosable.
        logger.error(
            f'Error getting object {key} from bucket {bucket}: {e}. '
            f'Make sure they exist and your bucket is in the same region as this function.'
        )
        return None
def string_to_nparray(string):
    """Parse a "(x, y, ...)"-style string into a float ndarray; None on failure."""
    log.debug("Converting string to nparray")
    try:
        cleaned = string.replace("(", "").replace(")", "")
        return np.fromstring(cleaned, dtype=float, sep=',')
    except Exception as exc:
        log.error(exc)
        return None
def report_error(api_key):
    """Papertrail alert webhook; rejects calls lacking the Heroku API key."""
    if api_key == os.getenv("HEROKU_API_KEY"):
        handle_papertrail_alert(request)
        return jsonify({'error-data': request.form})
    error = u'wrong heroku API key {} in /admin/report-error/'.format(
        api_key)
    logger.error(error)
    return make_response(error, 403)
def send_custom_message(self, message):
    """Send a custom service message through the WeChat API.

    Returns the API response, or an error dict when WeixinException is raised.
    """
    url = "/cgi-bin/message/custom/send"
    token = self.request.get_access_token()
    params = {"access_token": token}
    try:
        response = self.request.request(url, params, message, 'POST')
        logger.info(str(response))
        return response
    except WeixinException as e:
        logger.error(e)
        return {'errcode': e.errcode, 'message': e.errmsg or e.message}
def home():
    """ Initial method, responsible to render the web application """
    logger.info('start render')
    try:
        return render_template('layout.html')
    except Exception as err:
        logger.error('ERROR' + str(err))
        # str(err): an exception object is not JSON serializable, so the
        # original error response itself failed to serialize.
        return {'message': str(err)}, 500
def url_to_image(url):
    """Download a URL and decode it into an OpenCV BGR image; None on failure."""
    log.debug("Converting url to image")
    try:
        resp = urllib.request.urlopen(url)
        raw = np.asarray(bytearray(resp.read()), dtype="uint8")
        return cv2.imdecode(raw, cv2.IMREAD_COLOR)
    except Exception as exc:
        log.error(exc)
        return None
def createMenu():
    """Create the WeChat menu from the configured JSON definition."""
    menu = get_menus_json()
    logger.info('[WEIXIN] menu=%s' % menu)
    try:
        result = weixin.weixin_helper.create_menu(menu)
        return jsonify(result)
    except Exception as e:
        # getattr guards against exceptions that have no .message attribute
        # (most of them, and all of them on Python 3) — the original crashed
        # inside the handler with AttributeError.
        detail = getattr(e, 'message', str(e))
        logger.error(detail)
        return jsonify({'result': 255, 'errmsg': detail})
def parse_params(self, url):
    """Parse and validate the url's query string.

    Returns the error dict from check_params on failure, otherwise the
    dict built by form_dict_for_query.
    """
    raw_params = parse.parse_qs(urlparse(url).query)
    error = self.check_params(raw_params)
    if error:
        logger.error(error.get('error'))
        return error
    return self.form_dict_for_query(raw_params)
def parse_params(self, url):
    """Parse and validate the url's stock-quantity query parameter.

    Returns the error dict from check_params on failure, otherwise
    {'quantity_in_stock': <int>}.
    """
    raw_params = parse.parse_qs(urlparse(url).query)
    error = self.check_params(raw_params)
    if error:
        logger.error(error.get('error'))
        return error
    quantity = int(raw_params.get('num')[0])
    return {'quantity_in_stock': quantity}
async def delete(self, id):
    """Delete the row with the given id; returns the affected count, or -1 on error."""
    sql = "delete from {0} where id=%s".format(self.table)
    try:
        async with self.conn.cursor() as cur:
            affected = await cur.execute(sql, (id, ))
            await self.conn.commit()
            return affected
    except Exception as e:
        logger.error(e)
        return -1
def get_today_weather_info(city_name, lang, cur_timestamp):
    """basic function to get weather info for today

    Builds a localized HTML message: header + current conditions, one line
    per day part (morning/day/evening/night), then daylight info.
    Returns the localized error string info[lang][0] when the city name
    cannot be resolved.
    """
    transliterated_city = transliterate_name(city_name)
    try:
        weather_info = get_weather_info(transliterated_city, lang)
    except AttributeError as e:
        # get_weather_info raises AttributeError when scraping fails,
        # which is treated as "wrong city name".
        logger.error(f'Wrong city name\n{e}')
        return info[lang][0]
    else:
        weather_rest_info = get_extended_info(transliterated_city, 'today', lang)  # type: Dict
        daypart_message = ''
        # Four day parts: keys "part1".."part4".
        for i in range(1, 5):
            daypart_info = weather_rest_info["part" + str(i)]  # type: Dict[str, str]
            daypart = daypart_info["weather_daypart"]
            daypart_temp = daypart_info["weather_daypart_temp"]
            daypart_cond = daypart_info["weather_daypart_condition"]
            daypart_cond_emoji = get_condition(daypart_cond, daypart)
            wind_speed_and_direction = daypart_info["wind_speed_and_direction"]
            # info[lang][7] is the "calm / no wind" marker; otherwise keep
            # only the speed portion before the comma.
            if wind_speed_and_direction != info[lang][7]:
                wind_speed_and_direction = wind_speed_and_direction.split(', ')[0]
            daypart_message += f'{daypart.title()}: {daypart_temp}; {info[lang][2]}: {wind_speed_and_direction} ' \
                               f'{daypart_cond_emoji}\n\n'
        header = weather_info["header"]
        temp = weather_info["temperature"]
        wind_speed_and_direction = weather_info["wind_speed_and_direction"]
        humidity = weather_info["humidity"]
        cond = weather_info["condition"]
        feels_like = weather_info["feels_like"]
        daylight_hours = weather_info["daylight_hours"]
        sunrise = weather_info["sunrise"]
        sunset = weather_info["sunset"]
        # Day/night flag picks the matching condition emoji.
        day_time = get_day_part(cur_timestamp, sunrise, sunset)
        weather_cond = get_condition(cond, day_time)
        message_part1 = f'<i>{header}</i>\n\n' \
                        f'<b>{info[lang][1]}: {temp}°; {info[lang][3]}: {feels_like}\n' \
                        f'{info[lang][2]}: {wind_speed_and_direction}; {phenomenon_button_names["humidity"][lang]}:' \
                        f' {humidity}\n' \
                        f'{cond} {weather_cond}</b> \n\n'
        message_part2 = f'{info[lang][4]}: {daylight_hours}\n' \
                        f'{info[lang][5]}: {sunrise} - {sunset}\n'
        response_message = message_part1 + daypart_message + message_part2
        return response_message
def check_newest_release(self):
    """Return the newest release of obsidian-panel, or None.

    Source depends on self.zhao: 0 -> GitHub API, 1 -> coding.net OpenAPI,
    anything else -> None. The result dict has keys 'version',
    'publish_date' (YYYY-MM-DD) and 'release_note'.
    """
    if self.zhao == 0:
        # use GitHub API
        # https://developer.github.com/v3/repos/releases/
        _url = "https://api.github.com/repos/DemoHn/obsidian-panel/releases"
        req = Request(url=_url)
        try:
            resp = urlopen(req, timeout=15)
            r_json = json.loads(resp.read().decode())
            # GitHub itself has arranged all release by the order of publish time.
            # Thus, the first element must be the newest version
            release = r_json[0]
            return {
                "version": release["tag_name"],
                "publish_date": datetime.strptime(
                    release["published_at"],
                    "%Y-%m-%dT%H:%M:%SZ").strftime("%Y-%m-%d"),
                "release_note": release["body"]
            }
        except Exception:
            # Narrowed from a bare except (still broad on purpose:
            # network, JSON and schema errors all land here).
            logger.error(traceback.format_exc())
            return None
    elif self.zhao == 1:
        # use coding.net OpenAPI; tags are likewise listed newest first
        _url = "https://coding.net/api/user/DemoHn2016/project/obsidian-panel/git/tags"
        req = Request(url=_url)
        try:
            resp = urlopen(req, timeout=15)
            r_json = json.loads(resp.read().decode())
            release = r_json["data"]["list"][0]
            # created_at is in milliseconds since the epoch.
            created_time = datetime.fromtimestamp(release["created_at"] / 1e3)
            return {
                "version": release["name"],
                "publish_date": created_time.strftime("%Y-%m-%d"),
                "release_note": release["message"]
            }
        except Exception:
            logger.error(traceback.format_exc())
            return None
    else:
        return None
def startMonitor(self):
    """Remotely start nmon on the machine under test to produce monitoring data.

    Kills any running nmon first, then launches a capture named
    <name>_<missionid>.nmon with the configured sample interval
    (self.frequency) and sample count (self.count).
    """
    # Kill any leftover nmon so only one capture runs at a time.
    stdin, stdout, stderr = self.exec_command(
        "/usr/sbin/lsof|grep nmon|awk '{print $2}'|xargs kill -9")
    # NOTE(review): on Python 3 stderr.read() returns bytes, so this
    # str + bytes concatenation would raise TypeError — presumably this
    # runs under Python 2; confirm.
    logger.info("kill -9 执行输出:" + stderr.read() + stdout.read())
    stdin, stdout, stderr = self.exec_command(
        "export TERM=xterm&chmod a+x nmon&export NMON=dmn&./nmon -F %s_%s.nmon -t -s %s -c %s"
        % (self.name, self.missionid, self.frequency, self.count))
    # Anything on stderr means the launch failed; surface it with stdout.
    error_info = stderr.read()
    if error_info:
        logger.error(error_info + stdout.read())
def get_department_info(self, open_department_id):
    """Fetch department info from the Feishu contact API.

    :raises FeishuException: when the underlying request fails
    """
    try:
        endpoint = self.__opes_url + '/open-apis/contact/v1/department/info/get'
        payload = {'open_department_id': open_department_id}
        return self._get(endpoint, payload)
    except Exception as ex:
        logger.error(
            "Feishu get department info fail! open_department_id={0} error by {1}"
            .format(open_department_id, ex))
        raise FeishuException(ex)
def get_id_by_phone(phone):
    """Look up a user's id by phone number.

    :return: the id (first column), or None when absent or on DB error
    """
    try:
        # Parameterized query: the original interpolated `phone` straight
        # into the SQL string (injection risk). %s is the MySQLdb/pymysql
        # paramstyle suggested by `conn.Error`; switch to '?' if the driver
        # is sqlite3 — confirm.
        c.execute("SELECT * FROM users WHERE phone=%s", (phone,))
        result = c.fetchone()
    except conn.Error as e:
        logger.error("Database Error %s", e)
        return None
    if not result:
        logger.warning("There isn't any client with such phone %s", phone)
        return None
    return result[0]
def server_error(error=None):
    '''Handles the building and returning of a response in the case of an error'''
    logger.error(error, status=500)
    body = {
        'status': 500,
        'message': error,
    }
    resp = jsonify(body)
    resp.status_code = 500
    return resp
def _validate(self, proxy):
    """Probe one HTTP proxy against the target; returns its latency or None."""
    try:
        proxy_map = {"http": "http://%s" % proxy}
        r = requests.get(self.target, proxies=proxy_map, timeout=self.timeout)
        if r.status_code == requests.codes.ok and r.text == 'true':
            logger.info("Valid proxy: %r, latency:%s", proxy, r.elapsed)
            return r.elapsed
    except Exception as e:
        logger.error("Exception: %s", e)
    return None
def __init__(self, mac, host, username, password):
    """Open a telnet session to the device at `host`, log in and enter
    config mode.

    NOTE(review): the original source is garbled around the password
    prompt (redacted as `b'password:'******'input password'`); the
    expect/print pair below reconstructs the evident intent — confirm
    against the device's actual prompts.
    """
    self.mac = mac
    self.timeout = 4
    self.command_timeout = 0.5
    self.command_interval = 0.5
    try:
        self.tn = telnetlib.Telnet(host, 23, self.timeout)
        self.tn.set_debuglevel(0)
        print('login')
        time.sleep(self.command_interval)
        # wait for the username prompt
        self.tn.expect([re.compile(b'name:')], self.timeout)
        print('input username')
        self.tn.write(username.encode('utf-8') + b'\n')
        time.sleep(self.command_interval)
        # wait for the password prompt (reconstructed span)
        self.tn.expect([b'password:'], self.timeout)
        print('input password')
        self.tn.write(password.encode('utf-8') + b'\n\n\n')
        # page through any 'More' banner
        self.tn.expect([b'More'], self.command_timeout)
        time.sleep(self.command_interval)
        self.tn.write(b'\n')
        time.sleep(self.command_interval)
        self.tn.write(b'\n')
        time.sleep(self.command_interval)
        self.tn.expect([b'>'], self.command_timeout)
        time.sleep(self.command_interval)
        self.tn.write(b'enable\n')
        time.sleep(self.command_interval)
        self.tn.expect([b'#'], self.command_timeout)
        time.sleep(self.command_interval)
        self.tn.write(b'conf\n')
        self.tn.expect([re.compile(rb'\(config\)\#')], self.command_timeout)
        logger.info('Login device {}'.format(host))
    except Exception as e:
        logger.error('Error: {}, telnet {} fail'.format(e, host))
async def update(self, id, fields: dict):
    """Update the given columns on the row with `id`.

    :return: number of affected rows, or -1 on error
    """
    sql = 'update {0} set {1} where id=%s;'.format(
        self.table, BaseModel.set_fields(fields))
    params = tuple(fields.values()) + (id, )
    try:
        async with self.conn.cursor() as cur:
            affected = await cur.execute(sql, params)
            await self.conn.commit()
            return affected
    except Exception as e:
        logger.error(e)
        return -1
def register_member(member_id, access_token):
    """Store a member's access token in redis with the standard TTL.

    :return: True on success, False when the member redis is unreachable
    """
    r = redis_member(member_id)
    if not r:
        logger.error('Can not access member redis')
        return False
    pipeline = r.pipeline()
    pipeline.set(member_id, to_utf8(access_token))
    pipeline.expire(member_id, app.EXPIRE_TIME)
    pipeline.execute()
    return True
def user_list():
    """Admin-only page listing all users ordered by name."""
    if not g.user.is_admin():
        logger.error("%s tried to access the /users page", g.user.email)
        abort(403)
    all_users = User.query.order_by(User.name).all()
    logger.info("%s viewed the /users page", g.user.email)
    return render_template("admin_users.html",
                           title="Users",
                           users=all_users)
def get_file(name, session):
    """Download `name` from rossvyaz.ru, backing up the current copy first.

    Returns 0 when the download fails (the previous file is kept);
    returns None on success.
    """
    response = session.get(f'https://rossvyaz.ru/data/{name}')
    if not response.ok:
        logger.error(
            f'Unable to download {name}. Using the previous version instead.')
        return 0
    # NOTE(review): assumes a previous local copy of `name` exists —
    # copy() raises FileNotFoundError otherwise; confirm.
    copy(name, f'backup/{name}')
    logger.info(f'File {name} backed up.')
    with open(name, 'w') as out:
        out.write(response.text)
    logger.info(f'File {name} downloaded successfully.')
def server_error(error):
    """Handles the building and returning of a response in the case of an error"""
    logger.error(error, status=500)
    resp = jsonify({'status': 500, 'message': error})
    resp.status_code = 500
    return resp
def get(self, jgsj, hphm, kkdd):
    """Look up one Hbc record by exact capture time, plate and checkpoint.

    Always returns HTTP 200: the record's fields when found, {} otherwise.
    """
    # Previously `hbc` stayed unbound when the query raised, so the
    # `if hbc` below crashed with NameError.
    hbc = None
    try:
        hbc = Hbc.query.filter(Hbc.date == jgsj[:10],
                               Hbc.hphm == hphm,
                               Hbc.jgsj == jgsj,
                               Hbc.kkdd_id == kkdd).first()
    except Exception as e:
        logger.error(e)
    if hbc:
        return {'id': hbc.id, 'jgsj': str(hbc.jgsj), 'hphm': hbc.hphm,
                'kkdd_id': hbc.kkdd_id, 'imgpath': hbc.imgpath}, 200
    return {}, 200
def parse(self, soup):
    """Collect (ip, port) proxies from every table body in the page."""
    proxies = []
    for tbody in soup.find_all("tbody"):
        for row in tbody.find_all("tr"):
            try:
                cells = row.find_all("td")
                proxies.append(Proxy(cells[0].string, cells[1].string, 0))
            except Exception as e:
                logger.error("Exception: %s", e)
    return proxies
def safefilename(filename):
    """Rename the file, support unicode.

    :param filename: original filename
    :return: a random 16-character name keeping the original extension

    Fixes: a name without any '.' previously logged an error and then
    crashed with NameError on the unbound `extname`; multi-dot names like
    'a.tar.gz' kept the wrong segment ('tar' instead of 'gz').
    """
    try:
        # rsplit keeps the LAST segment as the extension
        extname = filename.rsplit('.', 1)[1]
    except IndexError:
        logger.error('illegal filename')
        # fall back to a generic extension instead of crashing
        extname = 'bin'
    random_str = ''.join(random.sample(string.ascii_letters + string.digits, 16))
    return '{}.{}'.format(random_str, extname)
def send_message(gcm_url, server_api_key, token, message):
    """POST a data message to GCM for the given device token and log the outcome."""
    data = '{"to":"%s","data":%s}' % (token, message)
    r = requests.post(gcm_url, headers=_headers(server_api_key), data=to_utf8(data))
    logger.debug(r.content)
    if r.status_code != 200:
        logger.error('Error to GCM Message')
        return
    response = json.loads(r.content)
    if response['success'] == 1:
        logger.info('Success to GCM Message')
    else:
        logger.error('Failed to GCM Message')
def parse(self, soup):
    """Parse the ip_list table into Proxy objects, skipping the header row."""
    proxies = []
    rows = soup.find(id="ip_list").find_all("tr")[1:]
    for row in rows:
        try:
            cells = row.find_all("td")
            anonymity = Proxy.anonymity.get(cells[4].string, 0)
            proxies.append(Proxy(cells[1].string, cells[2].string, anonymity))
        except Exception as e:
            logger.error("Exception: %s", e)
    return proxies
def get(self, date, hphm, kkdd):
    """Find the first Hbc record for date/plate with a checkpoint prefix
    and a non-empty image path.

    Always returns HTTP 200: the record's fields when found, {} otherwise.
    """
    # Previously `hbc` stayed unbound when the query raised, so the
    # `if hbc` below crashed with NameError.
    hbc = None
    try:
        hbc = Hbc.query.filter(Hbc.date == date,
                               Hbc.hphm == hphm,
                               Hbc.kkdd_id.startswith(kkdd),
                               Hbc.imgpath != '').first()
    except Exception as e:
        logger.error(e)
    if hbc:
        return {'id': hbc.id, 'jgsj': str(hbc.jgsj), 'hphm': hbc.hphm,
                'kkdd_id': hbc.kkdd_id, 'imgpath': hbc.imgpath}, 200
    return {}, 200
def verify_member(member_id, access_token):
    """Check access_token against the one stored for member_id.

    Refreshes the key's TTL on success. Returns True/False.
    """
    r = redis_member(member_id)
    if not r:
        logger.error('Can not access member redis')
        return False
    # matching access_token by member_id
    stored_token = r.get(member_id)
    if access_token != stored_token:
        logger.error('Mismatch access_token and member;{member_id:%d}'
                     % (member_id))
        return False
    # update expire time
    r.expire(member_id, app.EXPIRE_TIME)
    return True
def getData(url, token):
    '''!Fetch data from data source

    :param url: endpoint to fetch
    :param token: bearer token placed in the Authorization header
    :return data: raw response body, or None when the fetch fails
    '''
    try:
        request = urllib2.Request(url)
        request.add_header('Authorization', 'bearer '+token)
        logger.info('Fetching data from:'+url)
        data = urllib2.urlopen(request, timeout=20).read()
    except Exception:
        # Narrowed from a bare except. The original also fell through to
        # `return data` with `data` unbound, raising NameError on failure;
        # return None explicitly instead.
        logger.error('Failed fetch data from :' + url)
        return None
    return data
def get_uid():
    """Authenticate the request's username/password.

    On success sets g.uid/g.scope and returns the uid as a string;
    otherwise returns the client's remote address. DB errors are logged
    and re-raised.
    """
    g.uid = -1
    g.scope = ''
    username = request.json.get('username', '')
    try:
        user = Users.query.filter_by(username=username, banned=0).first()
    except Exception as e:
        logger.error(e)
        raise
    if user and sha256_crypt.verify(request.json.get('password', ''), user.password):
        g.uid = user.id
        g.scope = user.scope
        return str(g.uid)
    return request.remote_addr
def verify(self):
    """Validate proxies per configured anonymity level; optionally persist results."""
    result = {}
    for level in config["anonymity"]:
        candidates = self.classify().get(level, [])
        logger.info("Validate [%s], total: %s",
                    Proxy.anonymity.get(level), len(candidates))
        result[level] = self.validator.run(candidates)
        logger.info("Validate [%s], valid: %s",
                    Proxy.anonymity.get(level), len(result[level]))
    if config["save"]:
        try:
            self.save(config["save"], result)
        except Exception as e:
            logger.error("Exception: %s", e)
def get(self, user_name):
    """Return info for user_name and refresh their last_seen timestamp.

    404 when the user is unknown; 403 when the authenticated user does not
    match the requested one.
    """
    account = session.query(User).filter(User.name == user_name).first()
    if account is None:
        logger.error('GET User "{}" not found in DB'.format(user_name))
        return {'error': 'Specified user does not exist'}, 404
    if account != g.user:
        logger.error('GET Auth user {} does not match specified user {}'.format(account, g.user))
        return {'error': 'Authenticated user does not match specified user'}, 403
    assert isinstance(account, User)
    logger.info('GET User {}; username {}'.format(account, user_name))
    # Update when User last seen
    account.last_seen = datetime.utcnow()
    session.add(account)
    session.commit()
    return UserAPI.user_info(account)
def merge(response):
    """Merge `response` into the session and commit, rolling back on DB errors.

    :raises IntegrityError, SQLAlchemyError: re-raised after rollback
    """
    try:
        db.session.merge(response)
        db.session.commit()
    except IntegrityError as e:
        logger.error("Integrity error in database. Rolling back commit", error=e)
        db.session.rollback()
        raise e
    except SQLAlchemyError as e:
        logger.error("Unable to save response", error=e)
        db.session.rollback()
        raise e
    logger.info("Response saved", tx_id=response.tx_id)
def update_server():
    """Publish this push service's ip/port to the monitor redis with a TTL."""
    logger.info('Update server information')
    r = redis_monitor()
    if not r:
        logger.error('Can not connect monitor redis')
        return
    key = 'PUSH:' + app.SERVICE_NAME
    pipe = r.pipeline()
    # NOTE(review): zadd(name, member, score) is the redis-py 2.x calling
    # convention; redis-py >= 3 expects a mapping — confirm client version.
    pipe.zadd('AVAIL:PUSH', app.SERVICE_NAME, 0)
    pipe.hmset(key, {'ip': app.HOST, 'port': app.PORT})
    pipe.expire(key, app.UPDATE_INTERVAL)
    pipe.execute()