def app_factory():
    configure_logger()
    app = FastAPI(title="BooksAPI")
    from src.api.api_v1.api import api_router
    app.include_router(api_router)
    return app
def main():
    known_args, unknown_args = parse_arguments()
    logger.configure_logger()
    model = GenaModel(SAMPLE_SIZE)
    model.load_weights(known_args.checkpoint_path)
    generate_samples_seconds = SAMPLING_RATE * known_args.duration
    model.generate_wav(generate_samples_seconds, "gen.wav")
    print('generated')
def __init__(self, library):
    self.library = library
    self._total = len(library.collection)
    self._duplicates = 0
    self._playlist_entries = {}
    self._removed_duplicates = []
    self.logger = configure_logger(logging.getLogger(__name__))
def __init__(self, build_number, locale='zhCN'):
    self.build_number = build_number
    self.locale = locale
    self.con = mysql.connector.connect(**db_config)
    self.logger = configure_logger()
    # return rows as dicts
    self.cur = self.con.cursor(dictionary=True, buffered=True)
def main():
    logger.configure_logger()
    log = logging.getLogger("Bot")
    btc_prices = []
    auth_client = coinbase.get_auth_client()
    while True:
        try:
            btc_account = coinbase.get_account(auth_client, "BTC")
            eur_account = coinbase.get_account(auth_client, "EUR")
            log.info("Balances: {} {} | {} {}".format(
                eur_account['balance'], eur_account['currency'],
                btc_account['balance'], btc_account['currency']))
            order = coinbase.get_order(auth_client)
            btc_price = coinbase.get_btc_price()
            btc_prices.append(btc_price)
            btc_avg_price = calcul.average(btc_price, btc_prices)
            btc_mv_avg_price = calcul.moving_average(btc_prices, 20)
            log.info("1 BTC={} EUR | Avg={} | Mv. Avg={}".format(
                btc_price, btc_avg_price, btc_mv_avg_price))
            # give the moving average some time before making trading decisions
            if len(btc_prices) > 40:
                make_trading_decision(auth_client, order, btc_price, btc_avg_price,
                                      btc_mv_avg_price, eur_account, btc_account)
        except Exception as e:
            log.error("An error has occurred: {}".format(e))
        time.sleep(60)
def __init__(self, root, locale_flags='zhCN', content_flags=None):
    """
    :param root         : client root directory, e.g. D:/World of Warcraft Beta
    :param locale_flags : locale, e.g. zhCN
    :param content_flags: content flags
    """
    self.root = root
    self.locale_flags = locale_flags
    self.content_flags = content_flags
    self.logger = configure_logger()
    self.get_build_info()
def main():
    known_args, unknown_args = parse_arguments()
    create_checkpoint_dir(CHECKPOINT_DIR)
    logger.configure_logger()
    dataset = tf.data.Dataset.list_files(known_args.data_dir + '*.wav')
    # decode to float32 samples
    dataset = dataset.map(decode_wav)
    # drop sample rates
    dataset = dataset.map(lambda t: t[0])
    dataset = dataset.unbatch()
    dataset = dataset.batch(SAMPLE_SIZE, drop_remainder=True).batch(BATCH_SIZE).prefetch(
        tf.data.experimental.AUTOTUNE)
    dataset = dataset.zip((dataset, dataset)).take(2)
    gena = GenaModel(SAMPLE_SIZE)
    gena.train(dataset, CHECKPOINT_DIR + CHECKPOINT_FILENAME, CHECKPOINT_PERIOD)
    print('end')
def __init__(self, library, volume):
    Exporter.instance = self
    self.library = library
    self.volume = volume
    self.destination = os.path.join("/Volumes", volume)
    self.music_dir = os.path.join(self.destination, Exporter.MUSIC_DIR)
    self._entries = {}  # key: filename, value: xml_entry
    self._all_tracks = []  # entries for exporting all tracks (preserves track order)
    self.message_queue = Queue()  # message queue for real-time GUI updating
    self.start_messages = False  # flag indicating when it is OK to start sending GUI messages
    self.workers = []  # worker threads are stored here
    self._cancel = False
    self.logger = configure_logger(logging.getLogger(__name__))
def __init__(self, path):
    Library._instance = self
    self.semaphore = threading.Semaphore()
    self.traktor_path = path
    self.library_path = os.path.join(path, "collection.nml")
    self.logger = configure_logger(logging.getLogger(__name__))
    if os.path.exists(self.library_path):
        self.semaphore.acquire()
        self.tree = etree.parse(self.library_path,
                                parser=etree.XMLParser(encoding="utf-8"))
        self.collection = self.tree.getroot().find("COLLECTION")
        self.playlists = self.tree.getroot().find("PLAYLISTS")
        self.semaphore.release()
    else:
        self.logger.critical("Traktor library does not exist: {}".format(self.library_path))
import zmq
import time
import sys
import logging

# Constants
ip_addr = publisher_address
port = publisher_port
TOT_WORKERS = total_workers
event_list = ("addition", "multiply",)
event_sign = event_list[0]
topic = "even"

log = configure_logger()


def connect():
    ctx = zmq.Context()
    sock = ctx.socket(zmq.PUB)
    sock.bind("tcp://%s:%s" % (ip_addr, port))
    return sock


def control_manager():
    context = zmq.Context()
    control_receiver = context.socket(zmq.PULL)
    control_receiver.bind("tcp://%s:%s" % (controller_ip, controller_port))
def main(args):
    if len(args) != 4:
        print("Usage: " + sys.argv[0] +
              " [language] [working directory] [threshold value] [threshold change step value]")
        sys.exit(1)
    lang = args[0]
    working_dir = os.path.join("/Users/authorofnaught/Projects/LORELEI/NER/WORKING/",
                               os.path.basename(args[1]))
    if not os.path.exists(working_dir):
        os.mkdir(working_dir)
    threshold = args[2]
    threshold_step = args[3]
    logger = logging.getLogger()
    configure_logger(logger)
    temp_dir = tempfile.mkdtemp()

    """These directories and files are not updated"""
    # directories containing gold standard LAF files
    GAZ_LAF_DIR = "/Users/authorofnaught/Projects/LORELEI/NER/REF-LAF/" + lang + "/"
    REF_LAF_DIR = "/Users/authorofnaught/Projects/LORELEI/NER/REF-LAF/" + lang + "/"
    # directory containing LTF files with uhhmm features
    LTF_DIR_ABG = "/Users/authorofnaught/Projects/LORELEI/NER/LTF-ABG/" + lang + "/"
    # file with paths to LTF files for tagging, one per line
    TEST_SCP = "/Users/authorofnaught/Projects/LORELEI/NER/TEST-SCP/" + lang + "ALL.txt"

    """These directories and files are updated with each iteration"""
    iteration = 0
    MODEL_DIR = os.path.join(working_dir, str(iteration), 'model')      # directory for trained model
    SYS_LAF_DIR = os.path.join(working_dir, str(iteration), 'sys_laf')  # directory for tagger output (LAF files)
    TRAIN_SCP = os.path.join(temp_dir, 'trainingfiles')  # file with paths to LAF files for training, one per line

    # initialize TRAIN_SCP to contain paths to all gazetteer-generated LAFs
    updateTrainingScript(GAZ_LAF_DIR, TRAIN_SCP)

    traincmd = ["./train.py",
                "--displayprogress",  # display crfsuite output of model iterations, if desired
                "-t", threshold,      # subprocess args must be strings; use the parsed threshold
                "-S", TRAIN_SCP,
                MODEL_DIR,
                LTF_DIR_ABG]
    tagcmd = ["./tagger.py",
              "-S", TEST_SCP,
              "-L", SYS_LAF_DIR,
              MODEL_DIR]
    scorecmd = ["./score.py",
                REF_LAF_DIR,
                SYS_LAF_DIR,
                LTF_DIR_ABG]

    changeinNEs = True
    while changeinNEs:
        subprocess.call(traincmd)
        subprocess.call(tagcmd)
        if iteration != 0:
            SYS_LAF_DIR, changeinNEs = updateNEdirs(PREV_SYS_LAF_DIR, SYS_LAF_DIR)
        iteration += 1
        PREV_SYS_LAF_DIR = SYS_LAF_DIR
        MODEL_DIR = os.path.join(working_dir, str(iteration), 'model')
        SYS_LAF_DIR = os.path.join(working_dir, str(iteration), 'sys_laf')
        updateTrainingScript(PREV_SYS_LAF_DIR, TRAIN_SCP)
        # TODO: update threshold for each iteration

    print("Bootstrapping stopped after {} iterations".format(iteration))
    subprocess.call(scorecmd)
    shutil.rmtree(temp_dir)
from logger import configure_logger
from PlanetWars import Turn


def main():
    map_data = ''
    while True:
        current_line = raw_input()
        if len(current_line) >= 2 and current_line.startswith("go"):
            turn = Turn(map_data)
            turn.go()
            turn.finish()
            map_data = ''
        else:
            map_data += current_line + '\n'


if __name__ == '__main__':
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass
    try:
        configure_logger()
        main()
    except KeyboardInterrupt:
        print 'ctrl-c, leaving ...'
#!/usr/bin/env python
import argparse
import glob
import logging
import os
import sys

from io_ import load_doc, LTFDocument, LAFDocument
from logger import configure_logger
from util import convert_extents

logger = logging.getLogger()
configure_logger(logger)


def calc_stats(sys_laf, ref_dir, ltf_dir):
    """Return hits, false alarms, and misses for system output LAF
    relative to reference LAF located in ref_dir.

    Inputs
    ------
    sys_laf : str
        LAF file containing system output.

    ref_dir : str
        Directory containing reference LAF files.

    ltf_dir : str
        Directory containing LTF files.
    """
    # Check that LTF and system and reference LAF are valid.
        return 0
    except FileNotFoundError as ex:
        self.__logger.error(
            f'Something went wrong with the path "{path}". Error: {ex}.')
        return 1
    except IOError as ex:
        self.__logger.error(
            f'Something went wrong while saving the file "{filename}.json". Error: {ex}.'
        )
        return 1


if __name__ == '__main__':
    from logger import configure_logger

    logger = configure_logger('INFO')
    logger = logging.getLogger(NAME)
    logger.info("Logging OK.")

    # lib = excel2json('library/example.xls')
    # sheet_names = lib.worksheets
    # print(sheet_names, end='\n\n')
    # print(lib)
    # print(lib['Anchors'][1], end='\n\n')
    # lib.write('test', 'library')
    # fd = open('library/test.json', 'r')
    # h = json.load(fd)
    # h = json.loads(lib.__str__())
    # h = lib.toDict()
    # print(h['Releases'], end='\n\n')
    # print(h['Anchors']['1'], end='\n\n')
import logging
from http.server import HTTPServer

# App
from logger import configure_logger
from handler import ServerHandler

logger = logging.getLogger("server")


def server_thread(port: int = 8000) -> None:
    server_address = ("0.0.0.0", port)
    httpd = HTTPServer(server_address, ServerHandler)
    try:
        logger.info(f"Starting server at port {port}")
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()


if __name__ == "__main__":
    configure_logger(logging.INFO)
    server_thread()
    os.chdir(wd)


### MAIN PROGRAM ###
if __name__ == '__main__':
    from analysis_parms import all_parms

    # Avoid relative paths; they confuse output scripts/macros
    inroot = op.abspath('testdata')
    outroot = op.abspath('RunResults')
    logfile = op.join(outroot, 'runlog.txt')

    # Haven't included anything special for debugging; need argparse/CLI
    if '-v' in sys.argv:
        configure_logger(screen_level='info', logfile=logfile, name=__name__)
    else:
        configure_logger(screen_level='warning', logfile=logfile, name=__name__)

    walker = os.walk(inroot, topdown=True, onerror=None, followlinks=False)

    ### Walk subdirectories
    (rootpath, rootdirs, rootfiles) = walker.next()

    # Error if directories are empty
    if not rootdirs:
        logger.error("VALID DIRECTORY STRUCTURE MUST BE: RUN --> MAG --> IMAGES")

    for folder in rootdirs:
        indir = op.join(rootpath, folder)
'''Mooring simulator program entry point'''

# Create the application handler
app = QApplication([])
appName = Path(__file__).with_suffix('').stem

# Recover and process optional command line arguments
parser = processArgs()
args = parser.parse_args()

# Start logging
logger = configure_logger(
    stream_level='DEBUG' if args.debug else 'INFO',
    debug_file=Path(appName).with_suffix('.log') if args.log else None)
logger.info("The program starts")

# Create and show the main application window
mainAppWindow = MainAppWindow()

# Load the library given on the command line, falling back to the config file
if args.lib is None:
    mainAppWindow.library = path.normpath(
        mainAppWindow.cfg['config']['library'])
else:
    mainAppWindow.library = args.lib

# Reset config file
from time import perf_counter, sleep
from uuid import uuid4

from simple_producer import SimpleProducer
from queue_producer import QueueProducer
from settings import settings
from logger import configure_logger, get_logger

configure_logger()

N_MSGS = 10_000


def main():
    logger = get_logger("MAIN")

    # -----------------------
    # Simple Producer
    # -----------------------
    simple_producer = SimpleProducer(settings.KAFKA_URL, "simple_prod",
                                     log_name="SimpleProducer")
    t0 = perf_counter()
    for _ in range(N_MSGS):
        simple_producer.send(str(uuid4()))
    t1 = perf_counter()
    logger.warning(f"Simple Time: {t1 - t0}")

    # -----------------------
    # Simple Producer w/ Flush
    # -----------------------
    simple_producer = SimpleProducer(
        settings.KAFKA_URL,
import os

import yaml
from flask_restful import Resource, reqparse

from db_connection import database_access
from logger import configure_logger

my_path = os.path.abspath(os.path.dirname(__file__))
file = yaml.safe_load(
    open(os.path.join(my_path, "config", "application_properties.yaml")))
logging = configure_logger('default', 'logs/app.log')
db_table = file['db_table']


class User():
    TABLE_NAME = db_table

    def __init__(self, _id, username, password):
        self.id = _id
        self.username = username
        self.password = password

    @classmethod
    def find_by_username(cls, username):
        logging.info("finding user from datastore")
        connection = database_access()
        cursor = connection.cursor()
        query = 'SELECT * FROM {table} WHERE username=%s'.format(
            table=cls.TABLE_NAME)
        cursor.execute(query, (username,))
        row = cursor.fetchone()
import os
import json

import config
from flask import Blueprint, request, jsonify
from dotenv import load_dotenv
from marshmallow import ValidationError

from stocks.stocks import (get_stock_quote, calculate_percent_change,
                           get_stock_name, insert_stock_tracker,
                           get_tracked_stocks_details,
                           get_tracked_stocks_news_details)
from database.database import Database
from api.schema import (StockDifferenceSchema, AddStocksSchema,
                        TrackedStocksSchema, TrackedStocksNews)
from logger import configure_logger, get_logger_with_context

load_dotenv()
logger = configure_logger()

api_bp = Blueprint("api_bp", __name__)


@api_bp.route("/healthcheck")
def healthcheck():
    logger = get_logger_with_context("")
    logger.info("Health check status: OK")
    return jsonify({"status": "ok"}), 200


@api_bp.route("/stocks/difference", methods=["POST"])
def check_stock_difference():
    """
    POST request
    Body
    {
        "symbol": "F",
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
import datetime
import os
import tracemalloc

import logger
from jenkins_connector import Jenkins
from postgre_connector import PostgresSql
from settings import build_allowed_configs

log = logger.configure_logger()


def main():
    global_start_time = datetime.datetime.now()
    tracemalloc.start()
    available_list = ['MDA']
    configurations = build_allowed_configs(available_list)
    config = configurations[0]
    jenkins = Jenkins(config.jenkins_url, config.jenkins_username,
                      config.jenkins_password)
    postgres = PostgresSql(host=config.postgres_address,
                           port=config.postgres_port,
                           user=config.postgres_username,
                           password=config.postgres_password,
                           database=config.db_name)
    pipeline_name = os.environ.get("BUILD_NAME")
    branch_name = None
    run_id = os.environ.get("BUILD_NUMBER")
urls = (
    "/", "Landing",
    "/init", "Initialize",
    "/check/traktor", "CheckTraktor",
    "/export", "Export",
    "/export/scanvolumes", "ExportVolumeScan",
    "/export/status", "ExportStatus",
    "/export/cancel", "ExportCancel",
    "/clean", "Clean",
    "/clean/confirm", "CleanConfirm",
    "/choose/path", "ChoosePath",
)

render = web.template.render('templates/')
logger = configure_logger(logging.getLogger(__name__))


class Landing:
    def GET(self):
        conf.filelog = True  # enable file logging
        conf.debug = True  # enable verbose messages
        traktor_dir = librarian.get_traktor_dir()
        if librarian.library_exists(traktor_dir):
            conf.library_dir = traktor_dir
        web.header("Cache-Control", "no-cache")
        return render.index(traktor_dir.replace("\\", "\\\\"), sys.platform)
import logging
import os
import shutil
import subprocess
import sys
import tempfile

from joblib.parallel import Parallel, delayed

from align import Aligner
from chunk import BILOUChunkEncoder
from features import OrthographicEncoder
from io_ import load_doc, LTFDocument, LAFDocument, write_crfsuite_file
from logger import configure_logger

logger = logging.getLogger()
configure_logger(logger)


def tag_file(ltf, aligner, enc, chunker, modelf, tagged_dir, tagged_ext):
    """Extract features for tokenization in LTF file and tag named entities.

    Inputs
    ------
    ltf : str
        LTF file.

    aligner : align.Aligner
        Aligner instance used to obtain character onsets/offsets of
        discovered mentions.

    enc : features.Encoder
import os

import boto3

from s3 import S3
from pipeline import Pipeline
from repo import Repo
from target import Target, TargetStructure
from logger import configure_logger
from errors import ParameterNotFoundError
from deployment_map import DeploymentMap
from cloudformation import CloudFormation
from organizations import Organizations
from sts import STS
from parameter_store import ParameterStore

LOGGER = configure_logger(__name__)
DEPLOYMENT_ACCOUNT_REGION = os.environ.get("AWS_REGION", 'us-east-1')
DEPLOYMENT_ACCOUNT_ID = os.environ["ACCOUNT_ID"]
MASTER_ACCOUNT_ID = os.environ["MASTER_ACCOUNT_ID"]
S3_BUCKET_NAME = os.environ["S3_BUCKET_NAME"]
ADF_PIPELINE_PREFIX = os.environ["ADF_PIPELINE_PREFIX"]
ADF_VERSION = os.environ["ADF_VERSION"]
ADF_LOG_LEVEL = os.environ["ADF_LOG_LEVEL"]


def clean(parameter_store, deployment_map):
    """
    Remove stale entries from Parameter Store and Deployment Pipelines
    that are no longer in the Deployment Map.
    """
    current_pipeline_parameters = parameter_store.fetch_parameters_by_path(
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT

from security import authenticate, identity
from user import UserRegister
from logger import configure_logger
from dashboard import Dashboard

logging = configure_logger('handlers', 'logs/app.log')

app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
app.secret_key = 'zarurat'
api = Api(app)
jwt = JWT(app, authenticate, identity)

api.add_resource(UserRegister, '/register')
api.add_resource(Dashboard, '/dashboard')

if __name__ == '__main__':
    app.run(debug=True)
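# ---------------------------------------------------------------------------
# NOTE: each snippet above comes from a different project, so each relies on
# its own configure_logger helper with its own signature (some take a logger,
# some a level string, some just a name). For reference only, below is a
# minimal, hypothetical sketch of such a helper; the name, parameters, and
# format string are illustrative assumptions, not any one project's
# implementation. It attaches a stream handler once and returns the logger,
# matching the most common call patterns seen above.

import logging


def configure_logger(name=None, level=logging.INFO):
    """Attach a basic stream handler to ``name``'s logger and return it.

    Illustrative sketch only; real projects above differ in signature.
    """
    log = logging.getLogger(name)
    if not log.handlers:  # avoid adding duplicate handlers on repeated calls
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            "%(asctime)s %(name)s %(levelname)s: %(message)s"))
        log.addHandler(handler)
    log.setLevel(level)
    return log


# Example usage (hypothetical):
# log = configure_logger(__name__)
# log.info("Logging configured.")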