def pressed(self):
    logger.info('Button has been pressed')
    delta = datetime.now() - self.__last_pressed
    # total_seconds() avoids the day wrap-around of timedelta.seconds
    if delta.total_seconds() >= int(config.get('BUTTON_PRESS_THRESHOLD')):
        chime = Chime()
        chime.start()
        notification = Notification()
        notification.start()
        camera = Camera()
        camera.start()
        self.__last_pressed = datetime.now()
    else:
        logger.debug('Button was pressed too quickly!')
    if not self._led_always_on:
        if Sundial().is_day():
            self.__led.on()
        else:
            self.__led.off()
    else:
        self.__led.off()
    # Stop playing the doorbell wav file (if any)
    Sender.send({
        'action': SoundReceiver.STOP,
        'file': 'doorbell'
    }, SoundReceiver.TYPE)
    # Start playing the doorbell wav file
    Sender.send({
        'action': SoundReceiver.START,
        'file': 'doorbell'
    }, SoundReceiver.TYPE)
def _toggle(self, mode):
    if mode == Sundial.DAY:
        logger.info('Day mode: Turn IR cut-off filter ON')
        self.__ir_cutoff_motor.backward()
    else:
        logger.info('Night mode: Turn IR cut-off filter OFF')
        self.__ir_cutoff_motor.forward()
def eval_all(self, weighting_keys=None, verbose=True, file_early=None, knowledge=True):
    """
    Calculates all custom metrics, if requested during instantiation

    :param weighting_keys: the preferred weighting keys to use for knowledge impact
    :param verbose: whether or not to log progress
    :param file_early: optional filename for filing the metrics before the knowledge calculation
    :param knowledge: whether or not to calculate knowledge impact
    """
    if verbose:
        logger.info("Calculating metrics")
    t = Timer("Metric calculation")
    if self.quality:
        if verbose:
            logger.info("Calculating quality")
        self.eval_quality()
        if verbose:
            t.log()
    if self.h_index:
        if verbose:
            logger.info("Calculating H-index")
        self.eval_h()
        if verbose:
            t.log()
    if self.custom_centrality:
        if verbose:
            logger.info("Calculating centralities")
        self.eval_centrality()
        if verbose:
            t.log()
    if file_early is not None:
        self.file_custom_metrics(file_early)
    if self.knowledge and knowledge:
        if verbose:
            logger.info("Calculating knowledge")
        self.eval_k(self.weighting_methods if weighting_keys is None else weighting_keys)
        if verbose:
            t.log()
def transaction(cls, database=None):
    logger.info('Start transaction...')

    def wrap(func):
        @wraps(func)
        def transWrap(pipeline=None):
            if cls.CLIENT is not None and pipeline is not None:
                logger.info(pipeline)
                # Run the wrapped function inside a MongoDB session transaction
                session = cls.CLIENT.start_session()
                res = None
                try:
                    session.start_transaction()
                    res = func(client=cls.CLIENT[database], session=session)
                    session.commit_transaction()
                except Exception as error:
                    session.abort_transaction()
                    logger.error(error)
                return res
            else:
                return func(client=None, session=None)
        return transWrap
    return wrap
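# A minimal usage sketch for the decorator above. The `Database` class name and
# the decorated function are assumptions for illustration, not original code:
#
# @Database.transaction(database='bookbnb')
# def create_user(client=None, session=None):
#     return client['users'].insert_one({'name': 'demo'}, session=session)
#
# create_user(pipeline='create-user pipeline')  # runs inside a transaction when connected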
def __init__(self, credentials):
    storage_bucket = os.environ.get("FIREBASE_STORAGE_BUCKET")
    self.app = firebase_admin.initialize_app(
        credentials,
        {"storageBucket": storage_bucket},
        name="bookbnb-photouploader")
    logger.info("Authenticated in firebase successfully")
def time_limit_parse(author, rss_url):
    try:
        items = parse_rss(author, rss_url)
        logger.info(rss_url + ' over')
        return items
    except func_timeout.exceptions.FunctionTimedOut as e:
        logger.error(rss_url)
        logger.error(e)
        return None
async def post_with_json(uri: str, **params: Any) -> Any:
    logger.info(f'POST json {uri} {params}')
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            response = await client.post(uri, json=params)
    except httpx.HTTPError:
        raise HTTPException(status_code=500, detail='500 Server error')
    error_handlers(response)
    return response.json()
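# Usage sketch: keyword arguments become the JSON body; the URL below is a
# placeholder for illustration, not from the original code.
# result = await post_with_json('https://api.example.com/v1/items', name='chair', price=10)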
def insert(cls, collection=None, data=None, retry=RETRY):
    # Avoid a mutable default argument for `data`
    if data is None:
        data = {}
    res = None
    try:
        res = cls.CLIENT[collection].insert_one(data, session=cls.SESSION)
        logger.info(f'inserted!!! ObjectId: { ObjectId(res.inserted_id) }')
    except Exception as error:
        logger.error(error)
    return res
def read(cls, message, last_time_received):
    if message['action'] == cls.STOP:
        # Stop `Cron`'s inner thread
        cron = Cron()
        cron.stop()
        # Stop the server and its thread. Must be the last thing to stop
        logger.info('Message broker stops listening')
        return False
    return True
def summary(self):
    custom = ""
    custom += "Connected components: {}\n".format(
        nx.number_connected_components(self.G.to_undirected()))
    # average metrics
    for key, values in {
            attribute: self.G.nodes[self.root][attribute]
            for attribute in self.attributes
    }.items():
        custom += "{}: {} ({})\n".format(key, round(np.average(values), 3),
                                         round(sem(values), 3))
    logger.info("\n== CN Summary ==\n{}\n{}====".format(
        nx.info(self.G), custom))
def load_data_from_file(self, datafile):
    """
    Load data from file for this query (using the unique make_filename function)

    :param datafile: the file to search for
    :return: this instance
    """
    logger.info("Munging data from {}".format(datafile))
    # nrows=None reads the whole file, so a single call covers both cases
    self.df = pd.read_csv(datafile, delimiter='\t', nrows=self.limit)
    logger.info("Loaded {} documents from file {}".format(
        self.df.shape[0], datafile))
    self.ensure_data()
    return self
def connect(cls, retry=RETRY):
    logger.info('Start connection...')
    client = None
    try:
        for trying in range(1, retry + 1):
            client = MongoClient(MongoConf.URL)
            try:
                # server_info() forces a round trip, verifying the connection
                info = client.server_info()
                logger.info(info)
                break
            except errors.ServerSelectionTimeoutError as error:
                logger.info(f' { error } : retry...{ trying }/{ retry }')
    except Exception as error:
        client = None
        logger.error(error)
    finally:
        cls.CLIENT = client
        logger.info('End connection!!!')
    return client
def file_custom_metrics(self, filename):
    """
    Files the calculated metrics in a CSV

    :param filename: the filename
    """
    logger.info("Filing calculated metrics in {}".format(filename))
    t = Timer("Filing metrics")
    with open(filename, 'w') as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerow(['node'] + self.attributes)
        for node in self.G.nodes:
            # keep the column order aligned with the header row
            row = [node] + [self.G.nodes[node].get(attr) for attr in self.attributes]
            writer.writerow(row)
    t.log()
def select(cls, collection=None, cond=None, filter=None, retry=RETRY):
    res = None
    try:
        res = list(cls.CLIENT[collection].find(cond, filter, session=cls.SESSION))
        logger.info(f'selected!!! length: { len(res) }')
    except Exception as error:
        logger.error(error)
    return res
def create_app():
    logger.info(f'Starting app in {config.ENV_APP} environment')
    app = Flask(__name__)
    app.config.from_object(config)
    app.url_map.strict_slashes = False
    for code in default_exceptions:
        app.register_error_handler(code, handle_error)
    db.init_app(app)
    from app.api.resources.v1 import api_v1
    app.register_blueprint(api_v1, url_prefix='/v1')
    return app
def ctx():
    content = cache.get('content')
    if content is None:
        logger.info('cache miss, regenerating content')
        items = generate_all()
        content = {
            'items': items,
            'link': URL,
            'title': TITLE,
            'generator': ADMIN_NAME,
            'lastBuildDate': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
            'ttl': EXPIRE * 60
        }
        cache.set('content', content, timeout=EXPIRE * 60)
    return content
def load_data_roles():
    """This function loads role data from CSV."""
    try:
        with open("data/role.csv", "r") as f:
            reader = csv.reader(f)
            next(reader)  # skip the header row
            for record in reader:
                role = Role(role_id=record[0], role_name=record[1], role_desc=record[2])
                db.session.add(role)
            db.session.commit()
        logger.info("loaded data to role table")
        db.session.close()
    except (Exception, psycopg2.Error) as e:
        logger.error(e)
def get_network(self, metadata=False, limit=None):
    """
    Constructs a citation network from the edge list.

    :param metadata: whether or not to include metadata
    :param limit: a limit to the number of documents to return
    :return: the NetworkX graph
    """
    if self.cache:
        try:
            return nx.read_gpickle(self.make_filename(prefix="GRAPH"))
        except FileNotFoundError:
            logger.warning("No file on record, creating new graph")
    logger.info(
        "Generating network from data (metadata={}, limit={})".format(
            metadata, limit))
    df_edges = self.get_edges()
    if limit is not None:
        df_edges = df_edges.head(limit)
    # for key in self.get_citation_keys():
    #     df_edges[key] = df_edges[key].str.strip()
    G = nx.from_pandas_edgelist(df_edges,
                                source='citation_id',
                                target='patent_id',
                                edge_attr="date",
                                create_using=nx.DiGraph())
    if metadata:
        self.ensure_meta()
        for entry in self.df_meta.to_dict(orient='records'):
            try:
                G.nodes[entry['patent_number']].update({
                    key: val
                    for key, val in entry.items() if key != 'patent_number'
                })
            except KeyError:
                logger.error("Couldn't find network entry for {}".format(
                    entry['patent_number']))
    logger.info("Generated network with {} nodes and {} edges".format(
        len(G.nodes), len(G.edges)))
    self.write_graph_to_file(G, self.make_filename(prefix="GRAPH"))
    return G
def summary(self):
    """
    Prints summary statistics for the graph metrics
    """
    custom = ""
    custom += "Connected components: {}\n".format(
        nx.number_connected_components(self.G.to_undirected()))
    # average metrics
    for key, values in {
            attribute: list(nx.get_node_attributes(self.G, attribute).values())
            for attribute in self.attributes
    }.items():
        custom += "{}: {} ({})\n".format(key, round(np.average(values), 3),
                                         round(sem(values), 3))
    logger.info("\n== CN Summary ==\n{}\n{}====".format(
        nx.info(self.G), custom))
def activate_led(self, cron_mode=False):
    if self._led_always_on:
        if not cron_mode:
            logger.debug('LED should be always on, turn it on')
            self.__led.on()
    else:
        sundial = Sundial()
        is_day = sundial.is_day()
        # Only toggle the LED when the day/night state has changed
        if self.__is_day != is_day:
            if sundial.mode == Sundial.DAY:
                logger.info("Day mode: Turn button's LED off")
                self.__led.off()
            else:
                logger.info("Night mode: Turn button's LED on")
                self.__led.on()
            self.__is_day = is_day
def load_data(self):
    """
    Loads data from query or file to a dataframe.

    :return: the instance
    """
    if self.cache:
        try:
            self.load_data_from_file(self.make_filename())
            return self
        except (FileNotFoundError, DataFormatError) as e:
            if isinstance(e, FileNotFoundError):
                logger.info("Missing data file, querying USPTO")
            if isinstance(e, DataFormatError):
                logger.info("Problem loading data file, querying USPTO")
    self.query_data()
    self.ensure_data()
    if Config.USE_CACHED_QUERIES:
        self.write_data_to_file(self.make_filename())
    return self
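# Usage sketch: load_data() returns self, so calls can be chained. The `Query`
# class name and constructor arguments are assumptions for illustration.
# df = Query(query_json, limit=10000).load_data().df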
def load_data_users():
    """This function loads user data from CSV."""
    try:
        with open("data/user.csv", "r") as f:
            reader = csv.reader(f)
            next(reader)  # This skips the 1st row, which is the header.
            for record in reader:
                user = User(
                    public_id=record[1],
                    user_name=record[2],
                    user_email=record[3],
                    user_address=record[4],
                    user_mobile=record[5],
                    password=record[6],
                )
                db.session.add(user)
            db.session.commit()
        logger.info("loaded data to user table")
    except (Exception, psycopg2.Error) as e:
        logger.error(e)
def k(self, root, node, weighting_keys, depth, verbose=False):
    r"""
    Recursively calculates the knowledge impact for a single node

    :math:`K_i = W_i + \sum_{j=1}^{n_i} \lambda P_j K_j`

    :param root: the root node
    :param node: the current node
    :param weighting_keys: the quality weighting keys to use for knowledge impact
    :param depth: the current search depth
    :param verbose: whether or not to log progress
    :return: a dictionary containing the total knowledge impact score keyed by the weighting metric used
    """
    # base case - exceeded the allowed depth
    if self.k_depth is not None and depth > self.k_depth:
        return {key: 0 for key in weighting_keys}
    # keep track of the sum of child scores in a dictionary keyed by weighting method used
    sum_children = defaultdict(int)
    # generate the list of this node's children, excluding visited nodes
    children = [
        x for x in self.G.successors(node)
        if x is not None and not self.k_search_tracker.is_visited(x)
    ]
    for child in children:
        # recursively evaluate k for this child (returns a dict keyed by weighting key)
        # and add the k score to the sum total by weighting key
        for key, val in self.k(root, child, weighting_keys, depth + 1).items():
            sum_children[key] += val
    # keep track of the total in a dictionary keyed by weighting method
    total_k = defaultdict(int)
    # calculate the persistence index for this node
    p = self.p(root, node)
    # calculate total knowledge contribution by weighting method
    for key in weighting_keys:
        # Note that p is applied to ALL calculations - p for the root is simply 1,
        # and since p is distributive over the children it can be applied to each child separately.
        # Makes it easier to evaluate persistence (would otherwise have to remember each child).
        total_k[key] = (self.G.nodes[node][key] + self.discount * sum_children[key]) * p
    if verbose:
        logger.info('node %s', node)
        logger.info('> w: %s', {key: self.G.nodes[node][key] for key in weighting_keys})
        logger.info('> p: %s', p)
        logger.info('> k: %s', dict(total_k))
    return total_k
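# Usage sketch for the recursion above: start at the root with depth 0, so the
# root's persistence p is 1 and each level discounts its children by lambda
# (self.discount). The instance name, patent id, and 'quality' key below are
# assumptions for illustration.
# scores = cn.k(root='US1234567', node='US1234567', weighting_keys=['quality'], depth=0)
# scores['quality']  # W_root + discount * (sum of child contributions)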
def query_data(self):
    t = Timer("Querying USPTO: {}".format(self.query_json))
    count_patents = self.query_sounding()
    count_to_collect = self.limit if self.limit is not None and self.limit < count_patents else count_patents
    pages = math.ceil(count_to_collect / self.per_page)
    logger.info("Collecting {}/{} docs in {} page{}".format(
        count_to_collect, count_patents, pages, "s" if pages != 1 else ""))
    manager = enlighten.get_manager()
    ticker = manager.counter(total=pages, desc='Ticks', unit='ticks')
    for i in range(pages):
        if Config.ENV_NAME != "local":
            logger.info("{}/{}".format(i, pages))
        page_df = self.query_paginated(i + 1, self.per_page)
        if self.df is None:
            self.df = page_df
        else:
            self.df = self.df.append(page_df, ignore_index=True)
        ticker.update()
    ticker.close()
    self.handle_external()
    t.log()
    logger.info("Collected {} edges".format(self.df.shape[0]))
def load_data_blogs():
    """This function loads blog data from CSV."""
    try:
        with open("data/blog.csv", "r") as f:
            reader = csv.reader(f)
            next(reader)  # skip the header row
            for record in reader:
                blog = Blog(
                    blog_title=record[1],
                    blog_type=record[2],
                    blog_desc=record[3],
                    blog_content=record[4],
                    blog_user_id=record[5],
                )
                db.session.add(blog)
            db.session.commit()
        logger.info("loaded data to blog table")
    except psycopg2.Error as e:
        logger.error(e)
    except Exception as error:
        logger.error(error)
def fit_write(mod, filename, **kwargs):
    file = "data/regression/{}_res.pkl".format(filename)
    try:
        with open(file, 'rb') as pkl_file:
            vars = pickle.load(pkl_file)
    except (FileNotFoundError, EOFError):
        logger.info("Fitting model {}".format(filename))
        pooled_res = mod.fit(**kwargs)
        vars = (
            pooled_res.summary,
            pooled_res.params,
            pooled_res.pvalues,
            pooled_res.resids,
            pooled_res.std_errors,
            pooled_res.df_resid,
            pooled_res.tstats
        )
        with open(file, 'wb') as pkl_file:
            pickle.dump(vars, pkl_file, protocol=4)
        # swap the .pkl extension for .txt for the human-readable summary
        with open(file[:-len(".pkl")] + ".txt", 'w') as f:
            f.write(str(pooled_res))
    Results = namedtuple('Results', 'summary params pvalues resids std_errors df_resid tstats')
    return Results(*vars)
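# Usage sketch: fit_write caches fitted results on disk, so repeated runs reuse
# the pickle instead of refitting. The PanelOLS model below is an assumption
# for illustration; any model whose fit() result exposes the attributes pickled
# above would work.
# from linearmodels.panel import PanelOLS
# mod = PanelOLS(panel['y'], panel[['x1', 'x2']], entity_effects=True)
# res = fit_write(mod, "pooled", cov_type="clustered")
# print(res.params)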
def signup():
    """API for signup"""
    try:
        form = SignupForm()
        if form.validate_on_submit():
            hashed_password = generate_password_hash(form.password.data, method="sha256")
            new_user = User(
                public_id=str(uuid.uuid4()),
                user_name=form.user_name.data,
                user_email=form.user_email.data,
                user_address=form.user_address.data,
                user_mobile=form.user_mobile.data,
                password=hashed_password,
            )
            db.session.add(new_user)
            db.session.commit()
            flash("Congratulations {}!! happy blogging".format(new_user.user_name))
        # Render the form for GET requests and failed validation too,
        # so the view always returns a response
        return render_template("signup.html", title="Sign In", form=form)
    except (Exception, TypeError) as e:
        logger.error(e)
def print_custom_metrics(self):
    """
    Summarize the calculated metrics
    """
    logger.info("== Calculated Metrics ==")
    for node in self.G.nodes:
        logger.info(self.G.nodes[node])
    logger.info("====")
async def startup_event():
    logger.info(f"Auth mode: {os.getenv('AUTH_MODE')}")
def refresh_handler():
    current_user.name = 'refresh_wxnacy'
    logger.info('refresh')
    logger.info(current_user)
    logger.info(current_user.name)
    return redirect('iindex')