def query_latest_person_pass_info(min_id=1, limit=''):
    """Return the latest person-pass records joined with their portal info.

    :param min_id: only rows with PersonLatest.pid strictly greater than
        this value are returned (newest first).
    :param limit: maximum number of rows; applied only when it is an int
        (the historical default '' means "no limit").
    :return: list of result rows, or None on connection failure / DB error.
    """
    db_session = None
    try:
        db_session = DBConnection().connect()
        if db_session is None:
            return None
        query = db_session\
            .query(PersonLatest.pic_name.label('pic_path'),
                   PersonLatest.pid.label('id'),
                   PersonLatest.collect_time.label('collect_time'),
                   PersonLatest.name.label('person_name'),
                   InfoPortal.portal_id.label('portal_id'))\
            .join(InfoPortal, InfoPortal.portal_id == PersonLatest.device_code)\
            .filter(PersonLatest.pid > min_id)\
            .order_by(PersonLatest.pid.desc())
        if isinstance(limit, int):
            query = query.limit(limit)
        return query.all()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        logging.getLogger(GlobalInfo.logger_main).error(traceback.format_exc())
        return None
    finally:
        # BUG FIX: the original ended the finally block with `return None`,
        # which unconditionally overrode the value returned from the `try`
        # block, so the function could never return any data. The finally
        # block now only releases the session.
        if db_session:
            db_session.close()
def mutate(self, args, context, info):
    """GraphQL mutation: create an Experiment together with its sample
    groups and their plants in a single transaction.

    Any DB failure rolls the whole transaction back and is surfaced to the
    client as UnknownDataError.
    """
    exp_data = args.get('experiment_data')
    sample_group_data = exp_data.get('sample_group_data')
    try:
        experiment = ExperimentModel(
            exp_data.get('name'), exp_data.get('description'),
            exp_data.get('scientist'), exp_data.get('group_name'),
            exp_data.get('start_date'),
            exp_data.get('start_of_experimentation'))
        db.session.add(experiment)
        # flush (not commit) so experiment.id is assigned for the groups below
        db.session.flush()
        for group_data in sample_group_data:
            group = SampleGroupModel.fromdict(group_data, experiment.id)
            db.session.add(group)
            # flush so group.id is assigned for the plants below
            db.session.flush()
            for plant_data in group_data.get('plants'):
                plant = PlantModel(plant_data.get('index'),
                                   plant_data.get('name'), group.id)
                db.session.add(plant)
    except DBAPIError as err:
        #TODO add unique constraint checks
        logging.getLogger(__name__).exception(
            "An unexpected DB error occured")
        db.session.rollback()
        raise UnknownDataError("An unexpected DB error occured")
    db.session.commit()
    return ConstructExperiment(
        id=to_global_id('Experiment', experiment.id))
def mutate(self, args, context, info):
    """GraphQL mutation: edit an experiment ("project").

    Only the owning scientist or an admin may edit. Known integrity
    violations are translated into user-facing ConstraintViolationErrors;
    any other DB failure is rolled back and raised as UnknownDataError.
    """
    identity = get_jwt_identity()
    proj_data = args.get('project_data')
    _, exp_id = from_global_id(proj_data.pop('id'))
    # no_autoflush: presumably to avoid premature flushes while the
    # experiment graph is being modified -- TODO confirm
    with db.session.no_autoflush:
        try:
            experiment = db.session.query(ExperimentModel).get(exp_id)
            if experiment.scientist == identity.get(
                    'username') or is_admin(identity):
                EditProject._edit_experiment(proj_data, experiment)
            else:
                raise ForbiddenActionError(
                    "Unable to edit experiment {} by user {}. Insufficient privileges."
                    .format(experiment.name, identity.get('username')),
                    identity.get('username'))  # User is not allowed to edit this project
            db.session.commit()
            return EditProject(experiment=experiment)
        except IntegrityError as err:
            db.session.rollback()
            #TODO add experiment name constraint
            # map the violated DB constraint to a readable client error
            if err.orig.diag.constraint_name == u'uq_sample_groups_treatment_experiment_id':
                raise ConstraintViolationError(
                    'It is not allowed to have two sample groups with the same '
                    'treatment.')
            elif err.orig.diag.constraint_name == u'sample_group_name_experiment_id_key':
                raise ConstraintViolationError(
                    'It is not allowed to have two sample groups with the same '
                    'name.')
        except DBAPIError as err:
            logging.getLogger(__name__).exception(
                "An unexpected DB error occured")
            db.session.rollback()
            raise UnknownDataError("An unexpected DB error occured")
def _get_next_passage_for_route_point(self, route_point, count=None, from_dt=None):
    """Query the Synthese service and return the passages for *route_point*.

    Returns None when no URL can be built, the call fails or the service
    answers with a non-200 status; otherwise returns the passages found for
    the route point (None when Synthese knows nothing about it, in which
    case the caller keeps the base schedule).
    """
    logger = logging.getLogger(__name__)
    url = self._make_url(route_point, count, from_dt)
    if not url:
        return None
    response = self._call_synthese(url)
    if not response:
        return None
    if response.status_code != 200:
        # TODO better error handling, the response might be in 200 but in error
        logger.error(
            'Synthese RT service unavailable, impossible to query : {}'.
            format(response.url))
        return None
    logger.debug("synthese response: {}".format(response.text))
    stop_code = str(route_point.fetch_stop_id(self.object_id_tag))
    route_code = str(route_point.fetch_route_id(self.object_id_tag))
    synthese_key = SyntheseRoutePoint(route_code, stop_code)
    passages = self._get_synthese_passages(response.content)
    # if there is nothing from synthese, we keep the base schedule
    return passages.get(synthese_key)
def _get_passages(self, xml, route_point):
    """Parse a SIRI stop-monitoring XML document and return the passages
    whose stop, line and direction all match *route_point*.

    Raises RealtimeProxyError when the XML cannot be parsed.
    """
    ns = {'siri': 'http://www.siri.org.uk/siri'}
    try:
        root = et.fromstring(xml)
    except et.ParseError:
        logging.getLogger(__name__).exception("invalid xml")
        raise RealtimeProxyError('invalid xml')
    stop = route_point.fetch_stop_id(self.object_id_tag)
    line = route_point.fetch_line_id(self.object_id_tag)
    route = route_point.fetch_route_id(self.object_id_tag)

    def text_of(visit, tag):
        # text content of the first matching SIRI element under this visit
        return visit.find('.//siri:' + tag, ns).text

    passages = []
    for visit in root.findall('.//siri:MonitoredStopVisit', ns):
        # check stop, then line, then direction -- sequentially, like the
        # original, so non-matching visits are skipped as early as possible
        if text_of(visit, 'StopPointRef') != stop:
            continue
        if text_of(visit, 'LineRef') != line:
            continue
        if text_of(visit, 'DirectionName') != route:
            continue
        destination = text_of(visit, 'DestinationName')
        departure = aniso8601.parse_datetime(
            text_of(visit, 'ExpectedDepartureTime'))
        passages.append(RealTimePassage(departure, destination))
    return passages
def get_stop_point(self, line_uri, code_key, code_value):
    """Look up a unique stop_point by external code, optionally restricted
    to a line.

    Returns the stop_point when exactly one matches; otherwise logs the
    problem (none found / ambiguous result) and returns None.
    """
    req = request_pb2.Request()
    req.requested_api = type_pb2.PTREFERENTIAL
    req.ptref.requested_type = type_pb2.STOP_POINT
    req.ptref.count = 1
    req.ptref.start_page = 0
    req.ptref.depth = 1
    filters = ['stop_point.has_code("{code_key}", "{code_value}")'.format(
        code_key=code_key, code_value=code_value)]
    if line_uri:
        filters.append('line.uri="{}"'.format(line_uri))
    req.ptref.filter = ' and '.join(filters)
    result = self.instance.send_and_receive(req)
    nb_found = len(result.stop_points)
    if nb_found == 1:
        return result.stop_points[0]
    logger = logging.getLogger(__name__)
    if nb_found == 0:
        logger.info(
            'PtRef, Unable to find stop_point with filter {}'.format(
                req.ptref.filter))
    else:
        logger.info(
            'PtRef, Multiple stop_points found with filter {}'.format(
                req.ptref.filter))
    return None
def _get_passages(self, route_point, resp):
    """Extract the next departures for *route_point* from a SIRI-lite
    stop-monitoring response.

    :return: list of RealTimePassage for the route point's line, or None
        when the response holds no schedule for that line (so the caller
        falls back to the base schedule).
    """
    logging.getLogger(__name__).debug('sirilite response: {}'.format(resp))
    line_code = route_point.fetch_line_id(self.object_id_tag)
    monitored_stop_visit = resp.get('siri', {})\
        .get('serviceDelivery', {})\
        .get('stopMonitoringDelivery', {})\
        .get('monitoredStopVisit', [])
    # BUG FIX: this used to be a generator expression tested with
    # `if schedules:` -- a generator object is always truthy, so the
    # "no data" branch was unreachable and an empty list was returned
    # instead of None. Materializing a list makes the emptiness test work.
    schedules = [vj for vj in monitored_stop_visit
                 if vj.get('monitoredVehicleJourney', {}).get(
                     'lineRef', {}).get('value', '') == line_code]
    #TODO: we should use the destination to find the correct route
    if not schedules:
        return None
    next_passages = []
    for next_expected_st in schedules:
        # for the moment we handle only the NextStop and the direction
        dt = self._get_dt(next_expected_st['monitoredVehicleJourney']
                          ['monitoredCall']['expectedDepartureTime'])
        direction = next_expected_st.get('destinationName', {}).get('value')
        next_passages.append(RealTimePassage(dt, direction, True))
    return next_passages
def init_cors(app):
    """Enable CORS on *app* so pre-flight "OPTIONS" requests are allowed
    globally."""
    cors_logger = logging.getLogger('flask_cors')
    cors_logger.addHandler(flask.logging.default_handler)
    # Uncomment to debug CORS
    # cors_logger.setLevel(logging.DEBUG)
    CORS(app)
def _get_passages(self, timeo_resp, current_dt, line_uri=None):
    """Build the list of next passages from a decoded Timeo JSON response.

    :param timeo_resp: decoded JSON response from Timeo
    :param current_dt: reference datetime used to resolve 'NextStop' times
    :param line_uri: optional line uri used to resolve the direction name
    :raises RealtimeProxyError: when the response is missing or malformed
    """
    logging.getLogger(__name__).debug(
        'timeo response: {}'.format(timeo_resp),
        extra={'rt_system_id': unicode(self.rt_system_id)})
    st_responses = timeo_resp.get('StopTimesResponse')
    # by construction there should be only one StopTimesResponse
    if not st_responses or len(st_responses) != 1:
        logging.getLogger(__name__).warning(
            'invalid timeo response: {}'.format(timeo_resp),
            extra={'rt_system_id': unicode(self.rt_system_id)})
        raise RealtimeProxyError('invalid response')
    next_st = st_responses[0]['NextStopTimesMessage']
    next_passages = []
    for next_expected_st in next_st.get('NextExpectedStopTime', []):
        # for the moment we handle only the NextStop and the direction
        dt = self._get_dt(next_expected_st['NextStop'], current_dt)
        direction = self._get_direction_name(
            line_uri=line_uri,
            object_code=next_expected_st.get('Terminus'),
            default_value=next_expected_st.get('Destination'))
        next_passage = RealTimePassage(dt, direction)
        next_passages.append(next_passage)
    return next_passages
def _make_url(self, route_point, count=None, from_dt=None):
    """Build the departures URL for a stop point.

    Returns None (and records an internal failure) when the stop has no
    realtime id.
    """
    stop_id = route_point.fetch_stop_id(self.object_id_tag)
    if not stop_id:
        # without a stop code there is no realtime data to query
        logging.getLogger(__name__).debug(
            'missing realtime id for {obj}: stop code={s}'.format(
                obj=route_point, s=stop_id))
        self.record_internal_failure('missing id')
        return None
    count_param = ''
    if count:
        count_param = '&rn={c}'.format(c=count)
    dt_param = ''
    if from_dt:
        # a custom datetime, when provided, is forwarded to the service
        date_str = self._timestamp_to_date(from_dt).strftime('%Y-%m-%d %H:%M')
        dt_param = '&date={dt}'.format(dt=date_str)
    return "{base_url}?SERVICE=tdg&roid={stop_id}{count}{date}".format(
        base_url=self.service_url, stop_id=stop_id,
        count=count_param, date=dt_param)
def _get_value(self, item, xpath, val): value = item.find(xpath) if value is None: logging.getLogger(__name__).debug( "Path not found: {path}".format(path=xpath)) return None return value.get(val)
def mutate(self, args, context, info):
    """GraphQL mutation: create a snapshot for a plant.

    Reuses (or creates) the timestamp of the plant's experiment. A
    duplicate (plant, timestamp) pair raises ConflictingDataError; any
    other DB error is rolled back and surfaced as UnknownDataError.
    """
    plant_id = args.get('plant_id')
    _, plant_db_id = from_global_id(plant_id)
    plant = db.session.query(PlantModel).get(plant_db_id)
    experiment_id = plant.sample_group.experiment_id
    timestamp, created = TimestampModel.get_or_create(experiment_id)
    snapshot = SnapshotModel(plant_id=plant_db_id,
                             timestamp_id=timestamp.id,
                             camera_position=args.get('camera_position'),
                             measurement_tool=args.get('measurement_tool'),
                             phenobox_id=args.get('phenobox_id'))
    try:
        db.session.add(snapshot)
        # force the INSERT now so constraint violations surface here
        db.session.flush()
    except IntegrityError as err:
        db.session.rollback()
        if err.orig.diag.constraint_name == u'uq_snapshot_plant_id_timestamp_id':
            raise ConflictingDataError(
                'There already exists a snapshot for this plant and timestamp'
            )
    except DBAPIError as err:
        logging.getLogger(__name__).exception(
            "An unexpected DB error occured")
        db.session.rollback()
        raise UnknownDataError("An unexpected DB error occured")
    db.session.commit()
    return CreateSnapshot(id=to_global_id('Snapshot', snapshot.id),
                          timestamp_id=to_global_id(
                              'Timestamp', timestamp.id),
                          new_timestamp=created)
def _get_next_passage_for_route_point(self, route_point, count, from_dt,
                                      current_dt, duration=None):
    """Query the SIRI service for the next passages at *route_point*.

    Returns None when no request can be built; raises RealtimeProxyError
    on transport failure, a non-200 answer or unparsable XML.
    """
    monitoring_ref = route_point.fetch_stop_id(self.object_id_tag)
    request = self._make_request(monitoring_ref=monitoring_ref, dt=from_dt,
                                 count=count)
    if not request:
        return None
    response = self._call_siri(request)
    if not response or response.status_code != 200:
        raise RealtimeProxyError('invalid response')
    logging.getLogger(__name__).debug('siri for {}: {}'.format(
        monitoring_ref, response.text))
    ns = {'siri': 'http://www.siri.org.uk/siri'}
    try:
        tree = et.fromstring(response.content)
    except et.ParseError:
        logging.getLogger(__name__).exception("invalid xml")
        raise RealtimeProxyError('invalid xml')
    self._validate_response_or_raise(tree, ns)
    return self._get_passages(tree, ns, route_point)
def mutate(self, args, context, info):
    """GraphQL mutation: create a plant in a sample group.

    Any DB error is rolled back and raised as UnknownDataError.
    """
    plant = PlantModel(args.get('index'), args.get('name'),
                       args.get('sample_group_id'))
    try:
        db.session.add(plant)
        db.session.flush()
    except DBAPIError:
        # IntegrityError is a subclass of DBAPIError; the original had two
        # byte-identical handlers for them, merged into one here (same
        # behavior, no duplication).
        logging.getLogger(__name__).exception(
            "An unexpected DB error occured")
        db.session.rollback()
        raise UnknownDataError("An unexpected DB error occured")
    db.session.commit()
    return CreatePlant(id=plant.id,
                       index=plant.index,
                       name=plant.name,
                       sample_group_id=plant.sample_group_id)
def _make_url(self, route_point, count=None, from_dt=None):
    """
    Build the Timeo request URL for a route point.

    the route point identifier is set with the StopDescription argument;
    this argument is split in 3 arguments (given between '?' and ';'
    symbols):
     * StopTimeoCode: timeo code for the stop
     * LineTimeoCode: timeo code for the line
     * Way: 'A' if the route is forward, 'R' if it is backward

    2 additional args are needed in this StopDescription:
     * NextStopTimeNumber: the number of next departures we want
     * StopTimeType: base schedule data ('TH') or real time ('TR')

    Note: since there are some strange symbols ('?' and ';') in the url we
    can't pass params as a dict to requests.

    Returns None (and records an internal failure) when any of the stop /
    line / route realtime codes is missing.
    """
    base_params = '&'.join(
        [k + '=' + v for k, v in self.service_args.items()])
    stop = route_point.fetch_stop_id(self.object_id_tag)
    line = route_point.fetch_line_id(self.object_id_tag)
    route = route_point.fetch_route_id(self.object_id_tag)
    if not all((stop, line, route)):
        # one of the ids is missing, we'll not find any realtime
        logging.getLogger(__name__).debug(
            'missing realtime id for {obj}: '
            'stop code={s}, line code={l}, route code={r}'.format(
                obj=route_point, s=stop, l=line, r=route),
            extra={'rt_system_id': self.rt_system_id})
        self.record_internal_failure('missing id')
        return None
    # timeo can only handle items_per_schedule if it's < 5
    count = min(count or 5, 5)  # if no value defined we ask for 5 passages
    # if a custom datetime is provided we give it to timeo
    dt_param = '&NextStopReferenceTime={dt}'\
        .format(dt=self._timestamp_to_date(from_dt).strftime('%Y-%m-%dT%H:%M:%S')) \
        if from_dt else ''
    # the "StopDescription=?...;" wrapper is a Timeo API oddity, kept verbatim
    stop_id_url = ("StopDescription=?"
                   "StopTimeoCode={stop}"
                   "&LineTimeoCode={line}"
                   "&Way={route}"
                   "&NextStopTimeNumber={count}"
                   "&StopTimeType={data_freshness}{dt};").format(
                       stop=stop,
                       line=line,
                       route=route,
                       count=count,
                       data_freshness='TR',
                       dt=dt_param)
    url = "{base_url}?{base_params}&{stop_id}".format(
        base_url=self.service_url,
        base_params=base_params,
        stop_id=stop_id_url)
    return url
def _build(xml): try: root = et.fromstring(xml) except et.ParseError as e: logging.getLogger(__name__).error("invalid xml: {}".format(e)) raise for xml_journey in root.findall('journey'): yield xml_journey
def init_portals():
    """Open a ZMQ subscription for every portal IP persisted in the
    serialize store (deduplicated); logs and bails out when none exist."""
    ips = SerializeUtils.get(MyConstant.portal_ips_serialize_data_key)
    if ips is None:
        logging.getLogger(
            GlobalInfo.logger_main).info('no ip to init zmq conn')
        return
    for ip in set(ips):
        PortalMsgManager.add_new_conn(ip)
def add_new_conn(ip):
    """Start a subscription server for *ip* and register the connection
    when the server thread comes up alive; invalid IPs are only logged."""
    if not Regular.verify_ip(ip):
        logging.getLogger(GlobalInfo.logger_main).info(
            'ip: ' + ip + ' is unlawful can not connect to server')
        return
    server = SubsMsgServer(ip, ZMQPort.SUBS)
    server.start()
    if server.isAlive():
        ZMQConns().put(ip, server)
def _fun_execute_time(*args, **kwargs):
    """Wrapper that measures the wrapped function's wall-clock time in
    milliseconds and logs it when debug mode is on."""
    started = datetime.now()
    result = func(*args, **kwargs)
    elapsed = datetime.now() - started
    # milliseconds from the timedelta (seconds + microseconds parts)
    elapsed_ms = elapsed.seconds * 1000 + elapsed.microseconds / 1000
    if GlobalInfo.is_debug:
        logging.getLogger(GlobalInfo.logger_main)\
            .debug(func.func_name + ' execute time is:' + str(elapsed_ms) + ' ms')
    return result
def _get_next_passage_for_route_point(self, route_point, count, from_dt,
                                      current_dt, duration=None):
    """Ask the SIRI endpoint for the next passages at *route_point*.

    Returns None when no request can be built; raises RealtimeProxyError
    on transport failure or a non-200 HTTP answer.
    """
    monitoring_ref = route_point.fetch_stop_id(self.object_id_tag)
    request = self._make_request(monitoring_ref=monitoring_ref, dt=from_dt,
                                 count=count)
    if not request:
        return None
    response = self._call_siri(request)
    if not response or response.status_code != 200:
        raise RealtimeProxyError('invalid response')
    logging.getLogger(__name__).debug('siri for {}: {}'.format(
        monitoring_ref, response.text))
    return self._get_passages(response.content, route_point)
def mutate(self, args, context, info):
    """GraphQL mutation: delete the Postprocess identified by the given
    global id; DB failures are rolled back and raised as UnknownDataError."""
    global_id = args.get('id')
    _, db_id = from_global_id(global_id)  # avoid shadowing builtin `id`
    try:
        entry = db.session.query(PostprocessModel).get(db_id)
        db.session.delete(entry)
        db.session.commit()
    except DBAPIError:
        logging.getLogger(__name__).exception("An unexpected DB error occured")
        db.session.rollback()
        raise UnknownDataError("An unexpected DB error occured")
    return DeletePostprocess(id=global_id)
def _call(self, url):
    """HTTP GET to the SytralRT service, guarded by the circuit breaker.

    Raises RealtimeProxyError when the breaker is open, on timeout or on
    any other failure.
    """
    logger = logging.getLogger(__name__)
    log_extra = {'rt_system_id': unicode(self.rt_system_id)}
    logger.debug(
        'systralRT RT service , call url : {}'.format(url),
        extra=log_extra,
    )
    try:
        return self.breaker.call(requests.get, url, timeout=self.timeout)
    except pybreaker.CircuitBreakerError as e:
        logger.error(
            'systralRT service dead, using base '
            'schedule (error: {}'.format(e),
            extra=log_extra,
        )
        raise RealtimeProxyError('circuit breaker open')
    except requests.Timeout as t:
        logger.error(
            'systralRT service timeout, using base '
            'schedule (error: {}'.format(t),
            extra=log_extra,
        )
        raise RealtimeProxyError('timeout')
    except Exception as e:
        logger.exception(
            'systralRT RT error, using base schedule',
            extra=log_extra)
        raise RealtimeProxyError(str(e))
def _call_cleverage(self, url):
    """HTTP GET to the Cleverage service behind the circuit breaker.

    Raises RealtimeProxyError when the breaker is open, the call times out
    or any other error occurs.
    """
    logger = logging.getLogger(__name__)
    logger.debug('Cleverage RT service , call url : {}'.format(url))
    try:
        return self.breaker.call(requests.get, url, timeout=self.timeout,
                                 headers=self.service_args)
    except pybreaker.CircuitBreakerError as e:
        logger.error('Cleverage RT service dead, using base '
                     'schedule (error: {}'.format(e))
        raise RealtimeProxyError('circuit breaker open')
    except requests.Timeout as t:
        logger.error('Cleverage RT service timeout, using base '
                     'schedule (error: {}'.format(t))
        raise RealtimeProxyError('timeout')
    except Exception as e:
        logger.exception('Cleverage RT error, using base schedule')
        raise RealtimeProxyError(str(e))
def mutate(self, args, context, info):
    """GraphQL mutation: mark the timestamp with the given global id as
    completed; DB failures are rolled back and raised as UnknownDataError."""
    try:
        global_id = args.get('timestamp_id')
        _, db_id = from_global_id(global_id)
        timestamp = db.session.query(TimestampModel).get(db_id)
        timestamp.completed = True  # direct assignment instead of setattr
        db.session.flush()
        db.session.commit()
        return CompleteTimestamp(id=global_id)
    except DBAPIError:
        logging.getLogger(__name__).exception(
            "An unexpected DB error occured")
        db.session.rollback()
        raise UnknownDataError("An unexpected DB error occured")
def setup_log(config_name):
    """Configure root logging for *config_name*: take the level from the
    config and attach a size-rotated file handler writing to logs/log."""
    # Set the logging level (from the selected configuration)
    logging.basicConfig(level=config[config_name].LOG_LEVEL)
    # Create the file handler: log path, max size per file (100 MiB) and
    # the maximum number of rotated backup files to keep
    file_log_handler = RotatingFileHandler("logs/log",
                                           maxBytes=1024 * 1024 * 100,
                                           backupCount=10)
    # Record format: level, source file name, line number, message
    formatter = logging.Formatter(
        '%(levelname)s %(filename)s:%(lineno)d %(message)s')
    # Apply the format to the handler just created
    file_log_handler.setFormatter(formatter)
    # Attach the handler to the global (root) logger used by the flask app
    logging.getLogger().addHandler(file_log_handler)
def login_log(level):
    """Configure root logging at *level* with a size-rotated file handler
    writing to logs/log."""
    # Set the logging level (passed in by the caller)
    logging.basicConfig(level=level)
    # Create the file handler: log path, max size per file (100 MiB) and
    # the maximum number of rotated backup files to keep
    file_log_handler = RotatingFileHandler("logs/log",
                                           maxBytes=1024 * 1024 * 100,
                                           backupCount=10)
    # Record format: level, source file name, line number, message
    formatter = logging.Formatter(
        '%(levelname)s %(filename)s:%(lineno)d %(message)s')
    # Apply the format to the handler just created
    file_log_handler.setFormatter(formatter)
    # Attach the handler to the global (root) logger used by the flask app
    logging.getLogger().addHandler(file_log_handler)
def _call_synthese(self, url):
    """HTTP GET to Synthese, rate-limited and circuit-breaker protected.

    Raises RealtimeProxyError for every failure mode (rate limit reached,
    breaker open, timeout, redis trouble, anything else). Note that the
    rate-limit RealtimeProxyError raised inside the try is re-wrapped by
    the final handler, exactly like the original.
    """
    logger = logging.getLogger(__name__)
    try:
        if not self.rate_limiter.acquire(self.rt_system_id, block=False):
            raise RealtimeProxyError('maximum rate reached')
        return self.breaker.call(requests.get, url, timeout=self.timeout)
    except pybreaker.CircuitBreakerError as e:
        logger.error('Synthese RT service dead, using base '
                     'schedule (error: {}'.format(e))
        raise RealtimeProxyError('circuit breaker open')
    except requests.Timeout as t:
        logger.error('Synthese RT service timeout, using base '
                     'schedule (error: {}'.format(t))
        raise RealtimeProxyError('timeout')
    except redis.ConnectionError:
        logger.exception('there is an error with Redis')
        raise RealtimeProxyError('redis error')
    except Exception as e:
        logger.exception('Synthese RT error, using base schedule')
        raise RealtimeProxyError(str(e))
def _call_siri(self, request):
    """POST the SIRI request body to the service behind the circuit
    breaker.

    Raises RealtimeProxyError when the breaker is open, on timeout or on
    any other failure.
    """
    logger = logging.getLogger(__name__)
    encoded_request = request.encode('utf-8', 'backslashreplace')
    headers = {
        "Content-Type": "text/xml; charset=UTF-8",
        "Content-Length": len(encoded_request)
    }
    logger.debug(
        'siri RT service, post at {}: {}'.format(self.service_url, request))
    try:
        return self.breaker.call(
            requests.post,
            url=self.service_url,
            headers=headers,
            data=encoded_request,
            verify=False,  # NOTE(review): TLS verification disabled -- confirm intentional
            timeout=self.timeout,
        )
    except pybreaker.CircuitBreakerError as e:
        logger.error('siri RT service dead, using base '
                     'schedule (error: {}'.format(e))
        raise RealtimeProxyError('circuit breaker open')
    except requests.Timeout as t:
        logger.error('siri RT service timeout, using base '
                     'schedule (error: {}'.format(t))
        raise RealtimeProxyError('timeout')
    except Exception as e:
        logger.exception('siri RT error, using base schedule')
        raise RealtimeProxyError(str(e))
def mutate(self, args, context, info):
    """GraphQL mutation: purge and delete the snapshot with the given
    global id; returns nothing when it does not exist."""
    global_id = args.get('id')
    _, snapshot_db_id = from_global_id(global_id)
    snapshot = db.session.query(SnapshotModel).get(snapshot_db_id)
    if snapshot is None:
        return  # Exception to indicate this entry does not exist
    try:
        snapshot.purge()
    except DBAPIError as err:
        logging.getLogger(__name__).exception(
            "An unexpected DB error occured")
        db.session.rollback()
        raise UnknownDataError("An unexpected DB error occured")
    return DeleteSnapshot(id=global_id)
def _get_next_passage_for_route_point(self, route_point, count=None,
                                      from_dt=None, current_dt=None):
    """Fetch the next passages from Cleverage for *route_point*.

    Returns None when no URL can be built or the HTTP call yields nothing;
    raises RealtimeProxyError on a non-200 answer.
    """
    url = self._make_url(route_point)
    if not url:
        return None
    response = self._call_cleverage(url)
    if not response:
        return None
    if response.status_code == 200:
        return self._get_passages(route_point, response.json())
    # TODO better error handling, the response might be in 200 but in error
    logging.getLogger(__name__).error(
        'Cleverage RT service unavailable, impossible to query : {}'.format(
            response.url))
    raise RealtimeProxyError('non 200 response')
def _apply_settings(self):
    """Populate self._settings from the configuration, normalize the
    endpoint paths and tune the werkzeug log level according to the
    logging settings.

    Missing settings are reported with a warning and defaults are kept;
    wrong-typed values are reported with an error.
    """
    try:
        self._settings.update({
            "host": self.cfg.get("server.host", check_type=str),
            "port": self.cfg.get("server.port", default=9000, check_type=int),
            "debug": self.cfg.get("server.debug.enabled", default=False, check_type=bool),
            "external_debug": self.cfg.get("server.debug.external_debug", default=False, check_type=bool),
            "debug_level": self.cfg.get("server.debug.debug_level", default=0, check_type=int),
            "enable_jsonp": self.cfg.get("server.api.enable_jsonp", default=False, check_type=bool),
            "api_enabled": self.cfg.get("server.api.enabled", default=True, check_type=bool),
            "static_enabled": self.cfg.get("server.static.enabled", default=False, check_type=bool),
            "static_path": self.cfg.get("server.static.path", default="", check_type=str),
            "static_index": self.cfg.get("server.static.index", default="", check_type=str),
            "storage_api_enabled": self.cfg.get("server.storage-api.enabled", default=False, check_type=bool),
            "storage_api_secret": self.cfg.get("server.storage-api.secret", default="", check_type=str),
            "storage_module": self.cfg.get("server.storage-api.storage-module", default="storage", check_type=str),
            "storage_api_check_token": self.cfg.get("server.storage-api.check_token", default=True, check_type=bool),
            "endpoints": {
                "api": self.cfg.get("server.api.endpoint", default="/api", check_type=str),
                "static": self.cfg.get("server.static.endpoint", default="/", check_type=str),
                "storage": self.cfg.get("server.storage-api.endpoint", default="/storage", check_type=str)
            }
        })
        # normalize endpoints: never-empty storage path, no trailing "/" on api
        if self._settings["endpoints"]["storage"] == "":
            self._settings["endpoints"]["storage"] = "/storage"
        # ToDo: Check endpoints to be not math one location
        if self._settings["endpoints"]["api"][-1:] == "/":
            self._settings["endpoints"]["api"] = self._settings["endpoints"]["api"][:-1]
        # set highest log level for flask to suppress info messages
        if not self.cfg.get("logging.enabled", default=False, check_type=bool):
            from flask import logging as flask_logging
            # BUG FIX: the level name was misspelled "critiacal" in the
            # original, so the intended CRITICAL level was never applied.
            alogger.setLogLevel(flask_logging.getLogger('werkzeug'), "critical")
        else:
            from flask import logging as flask_logging
            # suppress per-request URL logging unless explicitly enabled
            # (was an obscure `cond and call()` expression statement)
            if not self.cfg.get("logging.url_log", default=True, check_type=bool):
                alogger.setLogLevel(flask_logging.getLogger('werkzeug'), "ERROR")
        # for debugging we could ignore json transforms and rest notation
        if self._settings["debug"] and self._settings["debug_level"] >= 100:
            self._settings["output"] = "string"
        if self._settings["output"] != "json":
            # disable jsonp if base filter is not json
            self._settings["enable_jsonp"] = False
    except KeyError as err:
        # BUG FIX: the original passed the KeyError *class* to the format
        # string instead of the caught exception instance.
        self._log.warning("Server settings not found (%s), use default ones", err)
    except ValueError as err:
        self._log.error("Was passed unknown or wrong parameters. Please check configuration items, shutting down.")
import simplejson
import time
from Lib import queue
import threading
from datetime import timedelta
import time  # NOTE(review): duplicate of the earlier `import time`
import sqlite3
from flask import Flask, jsonify, g, redirect, request, url_for, logging
from flask import make_response, request, current_app
from flask.ext.cors import CORS
from functools import update_wrapper

# Flask application with CORS enabled on every route
app = Flask(__name__)
CORS(app)
# verbose flask_cors output (sets the logger's level attribute directly)
logging.getLogger('flask_cors').level = logging.DEBUG

# Python 2/3 compatibility shim: normalize the text/bytes builtins so the
# rest of the module can rely on `unicode` and `basestring` under both
# interpreters.
try:
    unicode = unicode
except NameError:
    # 'unicode' is undefined, must be Python 3
    str = str
    unicode = str
    bytes = bytes
    basestring = (str, bytes)
else:
    # 'unicode' exists, must be Python 2
    str = str
    unicode = unicode
    bytes = str
    basestring = basestring
def handle_error(self, exc, data):
    """Log the validation error messages carried by *exc* on the 'logger'
    logger; *data* is accepted for interface compatibility but unused."""
    logging.getLogger('logger').error(exc.messages)
from flask import Flask, request, render_template, send_file, logging, redirect
from flask.ext.socketio import SocketIO, emit
from flask.ext.uploads import UploadSet, configure_uploads
import os
import services

# Flask app plus the websocket layer
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret'
DEFAULT_PORT = 5000
socketio = SocketIO(app)

# module logger emitting to stderr at DEBUG level
log = logging.getLogger('run')
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler())

# upload handling: accept any file type, store uploads under /tmp
photos = UploadSet()
photos.file_allowed = lambda a, b: True
app.config['UPLOADS_DEFAULT_DEST'] = '/tmp'
configure_uploads(app, photos)


@app.route('/')
def index_demo():
    """Render the demo page with the sorted, comma-joined label names."""
    return render_template('app.html',
                           label_names=','.join(sorted(services.get_labels().keys())))


@app.route('/admin')
def admin():
    """Render the admin page listing all data keys and labels."""
    keys = services.get_data_keys()
    return render_template('admin.html', data_keys=keys,
                           labels=services.get_labels())
from flask import logging from flask.globals import request class BaseRouter(object): def __init__(self, app=None): """ :type app: flask.app.Flask | flask.blueprints.Blueprint """ self.init_app(app) def init_app(self, app): self.app = app LOGGER = logging.getLogger("restframework.router") class DefaultRouter(BaseRouter): """ You should use this class for registering Resource/ModelResource classes. Example:: >>> router = DefaultRouter(app) >>> router.register("/test", ResourceCls, "test") For each register call (url, viewCls, basename) It will add 2 routing rules: * url with methods from viewCls.get_allowed_methods() * url + "/<id>" with methods from viewCls.get_allowed_object_methods()
from util import *
import os
from decimal import Decimal
# mosquitto broker connection -- drop
import mosquitto
from subprocess import PIPE, STDOUT, Popen
# it's Debug... import
from flask.ext.login import AnonymousUserMixin
from flask import logging
import time

# werkzeug request logger (the flask dev-server log)
log = logging.getLogger('werkzeug')
#log = logging.getLogger()
count = 0

# Create application
app = Flask(__name__)

# Create dummy secrey key so we can use sessions
app.config['SECRET_KEY'] = '123456790'

# MongoDB settings
app.config['MONGODB_SETTINGS'] = {'DB': 'am2v'}
db = MongoEngine()
db.init_app(app)
# Hum~~ MongoDB another tools
from wsgi.rr_people.commenting.connection import CommentHandler
from wsgi.rr_people.he_manage import HumanOrchestra
from wsgi.rr_people.human import HumanConfiguration
from wsgi.rr_people.posting.posts import PostsStorage, CNT_NOISE, EVERY
from wsgi.rr_people.posting.posts_sequence import PostsSequenceStore, PostsSequenceHandler
from wake_up.views import wake_up_app
from rr_lib.users.views import users_app, usersHandler

__author__ = '4ikist'

import sys

# Python 2 hack: force utf-8 as the default string encoding process-wide
reload(sys)
sys.setdefaultencoding('utf-8')

log = logging.getLogger("web")

# Flask app with templates/static resolved relative to this file
cur_dir = os.path.dirname(__file__)
app = Flask("Humans", template_folder=cur_dir + "/templates", static_folder=cur_dir + "/static")
app.secret_key = 'foo bar baz'
app.config['SESSION_TYPE'] = 'filesystem'

# sub-applications mounted under their own URL prefixes
app.register_blueprint(wake_up_app, url_prefix="/wake_up")
app.register_blueprint(users_app, url_prefix="/u")

# template helpers
app.jinja_env.filters["tst_to_dt"] = tst_to_dt
app.jinja_env.globals.update(array_to_string=array_to_string)

login_manager = LoginManager()
login_manager.init_app(app)
def __init__(self):
    """Initialise the shared backends (mongo, redis), a module logger and
    the error-message helper alias."""
    super(Base, self).__init__()
    self.logger = logging.getLogger(__name__)
    self.db = MongoBase(config)
    self.redis = RedisBase(config)
    # shorthand used throughout the handlers
    self.ERR = self._error_msg
# from flask_login import LoginManager from flask_restless import APIManager from flask_sqlalchemy import SQLAlchemy from flask import logging __author__ = 'sharp' db = SQLAlchemy() restless = APIManager(app=None, flask_sqlalchemy_db=db) logger = logging.getLogger() # login_manager = LoginManager()
# coding=utf-8 from lakers.models import User from lakers import db from lakers.common import utils from flask import logging logger = logging.getLogger("user") class UserCoreService(object): def get_by_id(self, id): user = User.query.filter_by(id=id).first() return user def get_by_name(self, username): user = User.query.filter_by(username=username).first() return user def get_by_email(self, email): user = User.query.filter_by(email=email).first() return user def add(self, form_data): user = User(form_data['username'], form_data['first_name'], form_data['last_name'], utils.create_password(form_data['password']), form_data['email']) try: db.session.add(user) db.session.commit() return True
""" The script re-parses old sqlite database of the Fall 2014 format and fills conditions according to the run """ import argparse import sqlite3 import sys from flask import logging import rcdb.model from rcdb import ConfigurationProvider from rcdb import coda_parser from rcdb.file_archiver import get_file_sha256, get_string_sha256 import xml.etree.ElementTree as ET # setup logger log = logging.getLogger('rcdb') # create run configuration standard logger log.addHandler(logging.StreamHandler(sys.stdout)) # add console output for logger log.setLevel(logging.DEBUG) if __name__ == "__main__": # hello print ("This program takes sqlite file of RCDB with data taken prior 2015" \ " and add to a database with new format") # parse arguments parser = argparse.ArgumentParser() parser.add_argument("in_sqlite_file", help="Input SQLite file") parser.add_argument("out_con_string", help="Connection string to empty output database. Example: sqlite:////home/john/out.db") args = parser.parse_args()
import random import string from multiprocessing import Process, Lock import requests import time from flask import logging from wsgi.db import DBHandler from wsgi.properties import wake_up_mongo_uri, wake_up_mongo_db_name log = logging.getLogger("wake_up") class WakeUpStorage(DBHandler): def __init__(self, name="?"): super(WakeUpStorage, self).__init__(name="wu %s"%name, uri=wake_up_mongo_uri, db_name=wake_up_mongo_db_name) collections = self.db.collection_names(include_system_collections=False) if "wake_up" not in collections: self.urls = self.db.create_collection("wake_up") self.urls.create_index("url_hash", unique=True) else: self.urls = self.db.get_collection("wake_up") def get_urls(self): return map(lambda x: x.get("url"), self.urls.find({}, projection={'_id': False, "url_hash": False})) def add_url(self, url): hash_url = hash(url) found = self.urls.find_one({"$or":[{"url_hash": hash_url}, {"url": url}]}) if not found:
from flask import abort, render_template from flask.logging import getLogger import httplib from logging import StreamHandler logger = getLogger('fetching') logger.addHandler(StreamHandler()) def http_get(server, path): conn = httplib.HTTPConnection(server) conn.request('GET', path) response = conn.getresponse() if response.status != 200: conn.close() error = '%s returned %d (%s)!' % (server + path, response.status, response.reason) logger.error(error) raise FetcherError(error) r = response.read() conn.close() return r class FetcherError(Exception): pass class BaseOGFetcher(object): def fetch(self):
import fetchers from fetchers.BaseFetcher import BaseOGFetcher, http_get import flask from flask.logging import getLogger import httplib from logging import StreamHandler import re import simplejson as json from urlparse import urlparse REDDIT_SERVER = 'www.reddit.com' REDDIT_URI = 'http://' + REDDIT_SERVER PROXY_URI = 'http://ogproxy.herokuapp.com' logger = getLogger('reddit-api') logger.addHandler(StreamHandler()) @fetchers.cache.memoize(timeout=1800) def cached_http_get(server, path): return http_get(server, path) def is_image(uri): return uri and uri.endswith(('jpg', 'png', 'gif', 'bmp')) def is_imgur_single(uri): if not uri: return False parsed = urlparse(uri) if not (parsed.netloc == 'imgur.com' or parsed.netloc.endswith('.imgur.com')): return False
import flask.logging import logging import argparse import tempfile import requests import subprocess import webbrowser import gdata.youtube import gdata.youtube.service from mutagen.mp3 import MP3 from threading import Thread, Lock DEFAULT_API_KEY_PATH = "apikeys.yml" log = logging.getLogger(__name__) log.addHandler(logging.StreamHandler()) log.setLevel(logging.INFO) logging.getLogger('requests').setLevel(logging.DEBUG) class MP3ToYoutube(object): def __init__(self, api_key_path, token, description, category, keywords, image, private, **kwargs): self.yt = gdata.youtube.service.YouTubeService() try: self.keys = yaml.load(open(api_key_path, 'r')) except IOError: log.critical("Could not open API key file!") if api_key_path == DEFAULT_API_KEY_PATH:
from flask import logging from .records import add_auto_ids, parse_timestamps, validate_record,\ add_period_keys, encode_unicode_records from .validation import validate_record_schema from .nested_merge import nested_merge, flat_merge from .errors import InvalidSortError from backdrop.core.response import (FlatData, GroupedData, PeriodData, PeriodGroupedData, PeriodFlatData, SimpleData) import timeutils import datetime log = logging.getLogger(__name__) DEFAULT_MAX_AGE_EXPECTED = 2678400 class DataSet(object): def __init__(self, storage, config): self.storage = storage self.config = config self._last_updated = None @property def name(self): return self.config['name'] def is_recent_enough(self):
def __init__(self, config):
    """Create the module logger and open the redis connection described by
    *config*."""
    self.logger = logging.getLogger(__name__)
    # connection built by the class's own redis factory
    self.redis = self.init_redis(config)