Example No. 1
def app(ctx, alt_config, config_values, data_dir, log_config):

    # configure logging
    log_config = log_config or ':info'
    slogging.configure(log_config)

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # prepare configuration
    # config files only contain required config (privkeys) and config different from the default
    if alt_config:  # specified config file
        config = konfig.load_config(alt_config)
    else:  # load config from default or set data_dir
        config = konfig.load_config(data_dir)

    config['data_dir'] = data_dir

    # add default config
    konfig.update_config_with_defaults(config, konfig.get_default_config([EthApp] + services))

    # override values with values from cmd line
    for config_value in config_values:
        try:
            konfig.set_config_param(config, config_value)
            # check if this is part of the default config
        except ValueError:
            raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                               'specifies the parameter to set and d is a valid yaml value '
                               '(example: "-c jsonrpc.port=5000")')
    ctx.obj = {'config': config}
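The "a.b.c=d" overrides above go through konfig.set_config_param. A rough, hypothetical sketch of what such a helper has to do (split on the first '=', walk the dotted path, and parse the value as YAML); this is an illustration, not konfig's actual implementation:

import yaml

def set_config_param_sketch(config, override):
    """Apply an override like 'jsonrpc.port=5000' to a nested config dict (illustrative only)."""
    path, value = override.split('=', 1)  # unpacking raises ValueError when no '=' is present
    keys = path.strip().split('.')
    node = config
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = yaml.safe_load(value)  # the 'd' part is interpreted as a YAML value
    return config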
Example No. 2
def main():

    config.load_config()

    helloworld_conf = {
        '/': {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.response_headers.on': True,
            'tools.response_headers.headers': [('Content-Type', 'text/html')],
        }
    }

    reload_conf = {
        '/': {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.response_headers.on': True,
            'tools.response_headers.headers': [('Content-Type', 'text/html')],
        }
    }

    # sets the default error page
    cherrypy.config.update({'error_page.404': error_page_404})

    # mounts the two application endpoints
    cherrypy.tree.mount(HelloWorldWebService(), '/', helloworld_conf)
    cherrypy.tree.mount(HelloWorldReloadConfig(), '/reload', reload_conf)
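    # The excerpt ends after the two mounts; a typical CherryPy entry point would
    # then start the engine and block the main thread. Assumed continuation, not
    # part of the original snippet:
    cherrypy.engine.start()
    cherrypy.engine.block()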
Example No. 3
    def __init__(self):

        print '* Becoming self-aware'
        self.settings = settings = load_config('config/settings.yaml')
        self.secrets = secrets = load_config('config/secrets.yaml')

        self.ENABLED = self.settings.plugins.values().pop(0)
        self.active = True
        self.brain = cortex.Cortex(self)

        # The pulse file is set as a measure of how
        # long the bot has been spinning its gears
        # in a process. If it can't set the pulse
        # for too long, a signal kills it and reboots.
        # Note: this has become less of an issue
        # since all the bot's commands became threaded
        print '* Establishing pulse'
        self.setpulse()

        print '* Running monitor'

        while True:
            sleep(0.1)
            self.brain.monitor()
            if mktime(localtime()) - self.lastpulse > 10:
                self.setpulse()
Example No. 4
def main(argv):
    config.load_config()

    args = parse_cmdline(argv)

    if args.func:
        args.func(args)
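parse_cmdline is not shown in this example; the args.func dispatch pattern usually comes from argparse sub-commands registered with set_defaults(func=...). A minimal sketch under that assumption (names are illustrative, not from the original project):

import argparse

def parse_cmdline(argv):
    parser = argparse.ArgumentParser()
    parser.set_defaults(func=None)  # so args.func always exists, even without a sub-command
    sub = parser.add_subparsers()
    run = sub.add_parser('run')
    run.set_defaults(func=lambda args: print('running with', args))
    return parser.parse_args(argv)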
Example No. 5
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(
        log_file='log_train_pixel_link_%d_%d.log' % image_shape,
        log_path=FLAGS.train_dir, stdout=False, mode='a')

    config.load_config(FLAGS.train_dir)

    config.init_config(image_shape,
                       batch_size=FLAGS.batch_size,
                       weight_decay=FLAGS.weight_decay,
                       num_gpus=FLAGS.num_gpus)

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
        
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name('train_pixel_link_on'+ '_' + FLAGS.dataset_name)
    
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
Example No. 6
def index(add_iterable, remove_iterable, config_name=None):
    """
    Takes config_name and data in a form of iterable object, selects apropiate config \n
    and indexes data. If error occurs, recuretly finds error source.     \n
    """

    if config_name:
        config.load_config(config_name)
    errors = []

    def recurently_do(operation, iterable, granulation=1):
        iterables = granulate(iterable, granulation)
        for iterable in iterables:
            response = operation(iterable)
            if not response.status == 200:
                if len(iterable) == 1:
                    if isinstance(iterable[0], dict) and iterable[0].has_key("id"):
                        errors.append(iterable[0]["id"])
                    else:
                        errors.append(iterable[0])
                else:
                    recurently_do(operation, iterable, granulation * 10)

    # go go go
    recurently_do(connection.add, add_iterable)
    recurently_do(connection.delete_multi, remove_iterable)
    return errors
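granulate is referenced but not defined in the excerpt; judging by how recurently_do narrows a failing batch down to a single element, it presumably splits a sequence into a given number of chunks. A hypothetical sketch under that assumption:

def granulate(iterable, granulation=1):
    """Split a sequence into roughly `granulation` equal chunks (illustrative guess)."""
    items = list(iterable)
    if granulation <= 1 or len(items) <= 1:
        return [items]
    size = max(1, len(items) // granulation)
    return [items[i:i + size] for i in range(0, len(items), size)]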
Example No. 7
def main():
    if "--config" in sys.argv:
        index = sys.argv.index("--config")
        configfile = sys.argv[index+1]
        sys.argv = sys.argv[:index] + sys.argv[index+2:]        
        config.load_config(configfile)
    app.run()
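The same effect, stripping --config out of sys.argv before handing control to the web framework, can also be had with argparse.parse_known_args, which leaves unrecognised arguments untouched. A sketch under that assumption (config and app are the same modules as above):

import argparse
import sys

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config')
    args, remaining = parser.parse_known_args()
    sys.argv = [sys.argv[0]] + remaining  # so app.run() never sees --config
    if args.config:
        config.load_config(args.config)
    app.run()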
Example No. 8
    def __init__(self, cortex):

        self.cx = cortex

        self.settings = load_config('config/settings.yaml')
        self.secrets = load_config('config/secrets.yaml')

        self.connect()
Example No. 9
    def ready(self):
        from config import load_config
        load_config()

        import signals

        if not (settings.DUTY_SERVER_MIN == 0 and settings.DUTY_SERVER_MAX == 0) and os.environ.get('UWSGI_RUNNING', '0') == '1':
            import cronjob
Example No. 10
def app(ctx, profile, alt_config, config_values, data_dir, log_config, bootstrap_node, log_json,
        mining_pct, unlock, password):

    # configure logging
    slogging.configure(log_config, log_json=log_json)

    # data dir default or from cli option
    data_dir = data_dir or konfig.default_data_dir
    konfig.setup_data_dir(data_dir)  # if not available, sets up data_dir and required config
    log.info('using data in', path=data_dir)

    # prepare configuration
    # config files only contain required config (privkeys) and config different from the default
    if alt_config:  # specified config file
        config = konfig.load_config(alt_config)
    else:  # load config from default or set data_dir
        config = konfig.load_config(data_dir)

    config['data_dir'] = data_dir

    # add default config
    konfig.update_config_with_defaults(config, konfig.get_default_config([EthApp] + services))

    log.DEV("Move to EthApp.default_config")
    konfig.update_config_with_defaults(config, {'eth': {'block': blocks.default_config}})

    # Set config values based on profile selection
    merge_dict(config, PROFILES[profile])

    # override values with values from cmd line
    for config_value in config_values:
        try:
            konfig.set_config_param(config, config_value)
            # check if this is part of the default config
            if config_value.startswith("eth.genesis"):
                del config['eth']['genesis_hash']
        except ValueError:
            raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
                               'specifies the parameter to set and d is a valid yaml value '
                               '(example: "-c jsonrpc.port=5000")')

    # Load genesis config
    update_config_from_genesis_json(config, config['eth']['genesis'])

    if bootstrap_node:
        config['discovery']['bootstrap_nodes'] = [bytes(bootstrap_node)]
    if mining_pct > 0:
        config['pow']['activated'] = True
        config['pow']['cpu_pct'] = int(min(100, mining_pct))
    if not config['pow']['activated']:
        config['deactivated_services'].append(PoWService.name)

    ctx.obj = {'config': config,
               'unlock': unlock,
               'password': password.read().rstrip() if password else None}
    assert (password and ctx.obj['password'] is not None and len(
        ctx.obj['password'])) or not password, "empty password file"
Example No. 11
def test_faceting():
    from datastructures import Facet
    from query import Query
    import connection
    import config
    config.set_config('dev', '0.0.0.0', 8983)
    config.load_config('dev') # set up addresses and stuff 
    
    response = connection.search(Query(Facet('regions', mincount=1)))
Example No. 12
    def __init__(self, master, cortex):

        self.cx = cortex
        self.master = master

        self.settings = load_config('config/settings.yaml')
        self.secrets = load_config('config/secrets.yaml')

        if master.sock:
            self.sock = master.sock
Example No. 13
def main():
    global src
    global NEW_GAME

    #load config options
    load_config()

    #start the app window
    root = Tk()
    App(root).pack()
    root.mainloop()
Example No. 14
    def __init__(self, params):
        self.params = params
        # load configuration
        config.load_config(params.config, basepath=params.path)
        # get configuration values
        self.wakeupinterval = config.DATA["options"]["wakeupinterval"]
        if config.DATA["plugins"]["wakeup_script_dir"].startswith("/"):
            self.wakeup_script_dir = \
                config.DATA["plugins"]["wakeup_script_dir"]
        else:
            self.wakeup_script_dir = params.path \
                + config.DATA["plugins"]["wakeup_script_dir"]
Example No. 15
def find(*args, **kwargs):
    """
    Wrapper around the API:
    - loads default config (check config module)
    - builds query from *args and **kwargs
    - passes that query into connection for searching

    """
    config.load_config(config.DEFAULT_CONFIG)
    connection_kwargs = {}
    if 'wt' in kwargs:
        connection_kwargs['wt'] = kwargs.pop('wt')
    return connection.search(query.Query(*args, **kwargs), **connection_kwargs)
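A hypothetical call, assuming Solr-style "field:value" query arguments and the optional wt response-writer keyword handled above:

results = find('title:config', wt='json')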
Example No. 16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('command')
    parser.add_argument('command_arg')
    args = parser.parse_args()

    if args.command == 'compile':
        config = cf.load_config(args.command_arg)
        compileblog.compile_all(config)

    elif args.command == 'wpimport':
        import wpimport
        wpimport.main(args.command_arg)

    elif args.command == 'refresh':
        config = cf.load_config(args.command_arg)

        import twitter
        twitter.refresh_tweets(config)

    elif args.command == 'newpost':
        title = args.command_arg
        slug = utils.title_to_slug(title)
        now = datetime.utcnow()
        fname = 'drafts/{}-{}.md'.format(now.strftime('%Y-%m-%d-%H-%M'), slug)
        with open(fname, 'w') as f:
            f.write('---\ntitle: "')
            f.write(title)
            f.write('"\n---\n')

        import subprocess
        subprocess.call(['open', fname])

    elif args.command == 'edit' or args.command == 'editpost':
        # TODO
        pass
    elif args.command == 'publish':
        # TODO
        pass


    elif args.command == 'serve':
        config = cf.load_config(args.command_arg)
        config.is_dynamic = True
        server.serve(config)

    else:
        print("Unknown command " + args.command)
        sys.exit(1)
Example No. 17
    def reload(self, quiet=False):
        if self.brain.values and len(self.brain.values[0]):
            quiet = True

        if not quiet:
            self.brain.act('strokes out.')
        else:
            self.brain.act('strokes out.', False, self.secrets.owner)

        # TODO broken.
        #for channel in self.secrets.channels:
        #    name, attr = channel.popitem()
        #    if attr.primary:
        #        continue
        #    self.brain.brainmeats['channeling'].leave(name)

        self.active = False

        self.settings = settings = load_config('config/settings.yaml')
        self.secrets = secrets = load_config('config/secrets.yaml')

        import datastore
        import util
        import staff
        import autonomic
        import cortex
        import thalamus
        import id

        reload(datastore)
        reload(autonomic)
        reload(util)
        reload(staff)
        reload(cortex)
        reload(thalamus)
        reload(id)

        self.brain = cortex.Cortex(self, True)
        self.thalamus = thalamus.Thalamus(self, self.brain)

        self.brain.thalamus = self.thalamus
        self.active = True

        metacortex.cx = self.brain

        if not quiet:
            self.brain.act('comes to.')
        else:
            self.brain.act('comes to.', False, self.secrets.owner)
Example No. 18
def main(argv):
    args = parse_cmdline(argv)

    config.load_config(args.config)

    apply_cmdline_overwrites(args)

    # TODO: Path not expanded; used before cache sanity-checked it
    back = backend.create(config.config['back'], config.config['cache']['dir'])
    cache = Cache(config.config['cache'], back)

    if config.config.getboolean('main', 'console'):
        CacheFSConsole(cache).cmdloop()

    cache.shutdown()
Example No. 19
def build_test_data(update_id=None, message_id=None, date=None, from_id=None, chat_id=None, text=None):
    config = load_config()
    telegram_config = config.get('telegram')
    if not telegram_config or not telegram_config.get('chat_id'):
        return
    entities = []
    if text and text.startswith('/'):
        entities.append({
            'type': 'bot_command',
            'offset': 0,
            'length': len(text),
        })
    return {
        'update_id': update_id or '123',
        'message': {
            'message_id': message_id or 111,
            'date': date or int(time.time()),
            'from': {
                'id': from_id or 222,
                'is_bot': False,
                'first_name': 'Test',
            },
            'entities': entities,
            'chat': {
                'id': chat_id or config['telegram']['chat_id'],
                'type': 'private',
            },
            'text': text or '/start',
        },
    }
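A hedged usage sketch: in a test, the returned payload would typically be passed to whatever handles incoming Telegram updates (handle_update is a hypothetical name, not from the original code):

update = build_test_data(text='/start')
if update is not None:  # None is returned when no telegram chat_id is configured
    handle_update(update)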
Example No. 20
def set_defaults(docs):

    cfg = config.load_config()
    defaults = {
        '--region' : config.get_item(cfg, ('aws', 'configuration', 'region')),
        '--user' : config.get_item(cfg, ('ssh', 'user')),
        #'--password' : config.get_item(cfg, ('ssh', 'password')),
        '--key' : config.get_item(cfg, ('ssh', 'key_filename')),
        '--ait' : config.get_item(cfg, ('aws', 'configuration', 'instance_type')),
        '--ami' : config.get_item(cfg, ('aws', 'configuration', 'machine_image')),
        '--asg' : ','.join(config.get_item(cfg, ('aws', 'configuration', 'security_groups'))),
        '--akp' : config.get_item(cfg, ('aws', 'configuration', 'key_pair')),
        '--command' : config.get_item(cfg, ('command', 'command')),
    }

    for k, v in defaults.iteritems():
        if v:
            docs = re.sub(r'^(\s*{}=[A-Z]+\s+.+)\s*$'.format(k),
                          r'\1 [default: {}]'.format(v),
                          docs, flags=re.MULTILINE)

    argv = docopt.docopt(docs)

    # split lists
    for k in ['--hosts', '--asg']:
        if argv.has_key(k):
            if argv[k]:
                argv[k] = argv[k].split(',')

    return argv
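The regex above only rewrites lines of the form "--opt=VALUE  description" (an uppercase metavar followed by help text), appending "[default: ...]" before docopt parses the string. An illustrative, hypothetical fragment of such a usage text:

DOCS = """
Usage:
    launch.py [options]

Options:
    --region=REGION    AWS region to launch instances in
    --user=USER        SSH user name
    --key=KEY          SSH private key file
    --command=COMMAND  Command to run on each instance
"""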
Example No. 21
def main():
    try:
        config_file = sys.argv[1]
    except IndexError:
        config_file = "config.ini"

    try:
        config = load_config(config_file)
    except KeyError:
        _log.error("Error al obtener la configuración. Revisar el archivo " + config_file)

    projects = config.projects
    # Load the last report and map it to the new ones
    if config.last_report:
        map_new_to_old_projects(config, projects)

    # Run the plugin commands
    if not config.skip_commands:
        exec_commands(projects)

    # Parse the results
    parse_results(projects)

    # Save the report in JSON format
    result_json_filepath = config.output_folder + "/" + config.output_filename + ".json"
    save_projects(projects, result_json_filepath)

    # Save the report in txt format
    save_report(config, projects)

    # Save the location of the last report in the config
    config.save_config(result_report_path=result_json_filepath)
Example No. 22
def get_master_key():
    master_key = ""
    if os.path.isfile(GLOBAL_CONFIG_FILEPATH):
        global_config_object = config.load_config(GLOBAL_CONFIG_FILEPATH)
        if "MAPAdmin" in global_config_object.sections():
            admin_items = config.load_user(global_config_object, "MAPAdmin")
            if "MAPILLARY_SECRET_HASH" in admin_items:
                master_key = admin_items["MAPILLARY_SECRET_HASH"]
            else:
                create_config = raw_input(
                    "Master upload key does not exist in your global Mapillary config file, set it now?")
                if create_config in ["y", "Y", "yes", "Yes"]:
                    master_key = set_master_key()
        else:
            create_config = raw_input(
                "MAPAdmin section not in your global Mapillary config file, set it now?")
            if create_config in ["y", "Y", "yes", "Yes"]:
                master_key = set_master_key()
    else:
        create_config = raw_input(
            "Master upload key needs to be saved in the global Mapillary config file, which does not exist, create one now?")
        if create_config in ["y", "Y", "yes", "Yes"]:
            config.create_config(GLOBAL_CONFIG_FILEPATH)
            master_key = set_master_key()

    return master_key
Example No. 23
def pairwise_stats():
    cfg = load_config()
    dataset = create_dataset(cfg)
    dataset.set_shuffle(True)
    dataset.set_pairwise_stats_collect(True)

    num_images = dataset.num_images
    all_pairwise_differences = {}

    if cfg.mirror:
        num_images *= 2

    for k in range(num_images):
        print('processing image {}/{}'.format(k, num_images-1))

        batch = dataset.next_batch()
        batch_stats = batch[Batch.data_item].pairwise_stats
        for joint_pair in batch_stats:
            if joint_pair not in all_pairwise_differences:
                all_pairwise_differences[joint_pair] = []
            all_pairwise_differences[joint_pair] += batch_stats[joint_pair]

    stats = {}
    for joint_pair in all_pairwise_differences:
        stats[joint_pair] = {}
        stats[joint_pair]["mean"] = np.mean(all_pairwise_differences[joint_pair], axis=0)
        stats[joint_pair]["std"] = np.std(all_pairwise_differences[joint_pair], axis=0)

    save_stats(stats, cfg)
Example No. 24
def load_base_metadata():
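    # The trailing comma below unpacks a single-element result; load_config here
    # presumably returns a sequence of values for the requested keys.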
    base_metadata_file, = load_config(('base_metadata_file',))

    with open(base_metadata_file, 'r') as jf:
        base_metadata = json.load(jf)

    return base_metadata
Example No. 25
def create_app():
    # init Flask
    app = Flask(__name__)

    # init ext of Flask
    bootstrap.init_app(app)
    login_manager.init_app(app)
    db_property.init_app(app)
    mail.init_app(app)
    moment.init_app(app)

    # Load config
    config = load_config()
    app.config.from_object(config)

    from application.views.auth.account import blueprint as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')

    from application.views.index import blueprint as index_blueprint
    app.register_blueprint(index_blueprint)

    from application.views.pages import blueprint as pages_blueprint
    app.register_blueprint(pages_blueprint)

    return app
Example No. 26
def create_app():
    """创建Flask app"""
    app = Flask(__name__)

    config = load_config()
    app.config.from_object(config)

    # CSRF protect
    CsrfProtect(app)

    if app.debug:
        DebugToolbarExtension(app)
    else:
        from .utils.sentry import sentry

        sentry.init_app(app, dsn=app.config.get('SENTRY_DSN'))

    # register components
    register_db(app)
    register_routes(app)
    register_jinja(app)
    register_error_handle(app)
    register_uploadsets(app)

    # before every request
    @app.before_request
    def before_request():
        g.user = get_current_user()

    return app
Example No. 27
def get_connection():
    c = config.load_config()
    global __conn
    if not __conn:
        __conn = S3Connection(c["credentials"]["access_key"],
                              c["credentials"]["secret_key"])
    return __conn
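get_connection expects the loaded config to carry AWS credentials under a 'credentials' mapping. Sketched in Python terms (the on-disk format of the config file is not shown in the excerpt):

# implied shape of config.load_config()'s return value (assumed)
c = {
    'credentials': {
        'access_key': 'YOUR_ACCESS_KEY',
        'secret_key': 'YOUR_SECRET_KEY',
    },
}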
Example No. 28
def parse_args():

    parser = argparse.ArgumentParser(description="LesStatic needs to be run in a valid LesStatic project folder (with a config.yaml) or with valid arguments")
    parser.add_argument('-i','--init',action='store_true',help='initialize new project')
    parser.add_argument('folder',metavar='FOLDER',nargs="?",help='Folder for project (undefined = current folder)')
    parser.add_argument('-p','--port',nargs='?')
    parser.add_argument('-s','--serve',action='store_true',help='Run server and rebuild on change')
    args=parser.parse_args()
    if args.init:
        init.init_project(args.folder)
        sys.exit(0)

    if args.folder != None:
        os.chdir(args.folder)
    has_config=load_config()
    if not has_config:
        print "Can't find configuration file"
        sys.exit(0)
    config['base_dir']=os.getcwd()
    if args.port!=None:
        config['port']=int(args.port)
    if args.serve:
        serve()
    else:
        build_site(parser)
Example No. 29
def service(yaml_file):
    """
    Initializes a downstream event listener, an upstream event listener,
    and a threadpool to handle events from both. Also sets up a schedule
    if things need to be delayed.

    Runs in infinite loop until killed.
    Each loop iteration consists of checking the schedule, checking
    downstream, then checking upstream. Sleep if no action taken.

    @param yaml_file - String location to configuration

    """
    # Get configuration
    _config = config.load_config(yaml_file)

    numthreads = int(_config['daemon']['numthreads'])
    sleep = int(_config['daemon']['sleep'])

    # Register the signal handler to kill threads
    signal.signal(signal.SIGINT, thread.stop_threads)
    signal.signal(signal.SIGTERM, thread.stop_threads)

    schedule = list()
    pool = thread.WorkerPool(numthreads)

    downstream_remote = gerrit.Remote(_config['gerrit'])
    downstream = downstream_remote.SSHStream()
    downstream.start()

    upstream_remote = gerrit.Remote(_config['upstream'])
    upstream = upstream_remote.SSHStream()
    upstream.start()

    while True:
        downstream_active = False
        upstream_active = False

        # Check schedule and add events to event pool
        if len(schedule) > 0 and time.time() > schedule[0][0]:
            t, func, args, kwargs = schedule.pop(0)
            pool.add_task(func, *args, **kwargs)
            continue

        # Check for new events
        downstream_active = pull_downstream(_config, downstream,
                                            pool, schedule, yaml_file)
        upstream_active = pull_upstream(_config, upstream,
                                        pool, schedule, yaml_file)

        if downstream_active:
            logger.debug("Downstream is active")
        if upstream_active:
            logger.debug("Upstream is active")
        logger.debug("Schedule len: %s" % len(schedule))

        # Sleep if no events received.
        if not downstream_active and not upstream_active:
            time.sleep(sleep)
            continue
Example No. 30
    def setUpClass(cls):
        super(MuranoTestsCore, cls).setUpClass()

        cfg.load_config()
        cls.keystone = keystoneclient.Client(username=CONF.murano.user,
                                             password=CONF.murano.password,
                                             tenant_name=CONF.murano.tenant,
                                             auth_url=CONF.murano.auth_url)
        murano_url = cls.keystone.service_catalog.url_for(
            service_type='application_catalog', endpoint_type='publicURL')
        cls.murano_url = murano_url if 'v1' not in murano_url else "/".join(
            murano_url.split('/')[:murano_url.split('/').index('v1')])
        cls.heat_url = cls.keystone.service_catalog.url_for(
            service_type='orchestration', endpoint_type='publicURL')
        cls.murano_endpoint = cls.murano_url + '/v1/'
        cls.keyname = CONF.murano.keyname
Example No. 31
import twitter
import reddit
from formatting import format_tweets
from config import load_config

# Initialization
config = load_config()
twitter_api = twitter.twitter_api(config["twitter"])
reddit_api = reddit.reddit_api(config["reddit"])
most_recent_id = twitter.last_tweet_id(twitter_api, config["twitter"])


def handler(event, context):
    global most_recent_id

    # In case of test, only get the last tweet and display it, but don't submit
    if "test" in event:
        most_recent_id -= 1

    # Get new tweets and update id of the most recent tweet
    new_tweets, most_recent_id = twitter.last_tweets(twitter_api,
                                                     config["twitter"],
                                                     since=most_recent_id)
    if new_tweets:
        # Format title and URL for reddit
        titles_urls = format_tweets(new_tweets, config["format"])

        # Submit them on reddit, reversed to submit most recent one last
        permalinks = ["None"] * len(new_tweets)
        if "test" not in event:
            permalinks = reddit.submit(reddit_api, config["reddit"],
Example No. 32
    if not os.path.exists(logdir):
        os.makedirs(logdir)


def get_logger(name='gerrit-python-tools'):
    """
    Returns a logger for logging.

    @param name - String name of the logger. gerrit-python-tools by default.
    @returns - logger

    """
    return logging.getLogger(name)

logging_conf_file = '/etc/gerrit-python-tools/logging.yaml'
logging_conf = config.load_config(logging_conf_file, default=DEFAULT_CONFIG)

# Get log path/file from config
logfile = logging_conf['file']
init_logdir(logfile)

# Get log level from config
loglevel = logging_conf['level']
loglevel = getattr(logging, loglevel.upper())

# Create log format from config
logformat = logging_conf['format']
logformatter = logging.Formatter(logformat)

# Create log handler
loghandler = logging.handlers.TimedRotatingFileHandler(
Example No. 33
import gym
from config import load_config
from modules import DDPG, to_scalar
import numpy as np
import imageio


cf = load_config('config/config.py')
env = gym.make('Bet-v0')

cf.state_dim = env.observation_space.shape[0]
cf.action_dim = env.action_space.spaces[0].n + 1 # because action_space is tuple

print('Trying environment Bet-v0')
print('State Dimensions: ', cf.state_dim)
print('Action Dimensions: ', cf.action_dim)
print('Action low: ', env.action_space.spaces[1].low)
print('Action high: ', env.action_space.spaces[1].high)

model = DDPG(cf)
model.load_models()

for epi in range(1):
    s_t = env.reset()
    avg_reward = 0
    while True:
        a_t = model.sample_action(s_t)
        s_t, r_t, done, info = env.step(a_t)
        avg_reward += r_t

        if done:
Example No. 34
    V = vehicle.Vehicle()
    client_ip = client_ip or cfg.CLIENT_IP

    # camA = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM,client_ip=client_ip)
    # camB = RS_D435i(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE,client_ip=client_ip)

    #V.add(camA, outputs=['cam/image_array_a'], threaded=True)
    #V.add(camB, outputs=['cam/image_array_b', 'cam/image_array_c'], threaded=True)

    if to_control:
        V.add(NaiveController(),
              outputs=['throttle', 'steering'],
              threaded=True)
        V.add(Sender(), inputs=['throttle', 'steering'], threaded=True)
    else:
        V.add(Receiver(client_ip), threaded=True)

    #run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)


if __name__ == '__main__':
    cfg = load_config('myconfig.py')

    parser = argparse.ArgumentParser()
    parser.add_argument('--ip', required=False, default=None, type=str)
    parser.add_argument('-c', '--control', required=False, action='store_true')
    args = parser.parse_args()
    print(args.control)
    drive(cfg, args.ip, args.control)
Example No. 35
                                              shuffle=True,
                                              num_workers=0)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=params["batch_size"],
                                             shuffle=True,
                                             num_workers=0)
    return trainloader, testloader


seeds = [
    42, 24365517, 6948868, 96772882, 58236860, 7111973, 5016789, 19469290,
    2384676, 10878630, 26484779, 78421105, 46346829, 65958905, 69757054,
    49361965, 84089155, 85116270, 8707926, 26474437, 46028029
]
if __name__ == '__main__':
    params = config.load_config()

    # select the available device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    Logger.cluster_log("DEVICE: {}".format(device))

    # initialize the logger
    logger = Logger(params=params)

    if params["load"]:
        # store the relevant input parameters
        steps = params["num_steps"]
        exp_folder = params["exp_folder"]
        # load all the parameters from the folder
        params = logger.load_parameters()
        params['num_steps'] = steps
Example No. 36
def main(args):
    # Load the config from the config file:
    config = load_config(args.config_file)
    # Load providers:
    provider = args.provider if args.provider else config['network']['provider']
    provider_timeout_secs = config['network'].get("provider_timeout_secs", 60)
    # Open web3 provider from the arguments provided:
    w3 = Web3(Web3.HTTPProvider(provider, request_kwargs={'timeout': provider_timeout_secs}))
    # Load the pricefeed contract:
    pricefeedcontracts = pricefeed(w3, config)
    # Get account:
    account = config["account"]["address"]
    # Get minimum secs between update requests (for each given feed contract):
    min_secs_between_request_updates = config["account"].get("min_secs_between_request_updates", 15*60)
    # Get gas limit, defaults to 4 million units:
    gas = config["network"].get("gas", 4000000)
    # Get gas price, defaults to "estimate_medium":
    gas_price = config["network"].get("gas_price", "estimate_medium")
    # Get HTTP-JSON-RPC waiting timeout (in secs):
    tx_waiting_timeout_secs = config["network"].get("tx_waiting_timeout_secs", 130)
    # Get HTTP-JSON-RPC polling latency timeout (in secs):
    tx_polling_latency_secs = config["network"].get("tx_polling_latency_secs", 13)
    # Load the WRB contract:
    wrbcontract = wrb(w3, config)

    # Try connecting to JSON-RPC provider and get latest block:
    try:
      current_block = w3.eth.blockNumber
      print(f"Connected to {provider}")
    except Exception as ex:
      print(f"Fatal: connection failed to {provider}: {ex}")
      exit(-1)

    print(f"Current block: {current_block}")

    if not isinstance(gas_price, int):
      if gas_price == "estimate_medium" and w3.eth.chainId == 1:
        from web3 import middleware
        from web3.gas_strategies.time_based import medium_gas_price_strategy

        # Transaction mined within 5 minutes
        w3.eth.setGasPriceStrategy(medium_gas_price_strategy)

        # Setup cache because get price is slow (it needs 120 blocks)
        w3.middleware_onion.add(middleware.time_based_cache_middleware)
        w3.middleware_onion.add(middleware.latest_block_based_cache_middleware)
        w3.middleware_onion.add(middleware.simple_cache_middleware)

        gas_price = None
      else:
        if gas_price == "estimate_medium" and w3.eth.chainId != 1:
          print(f"Invalid gas price: {gas_price}. \"estimate_medium\" can only be used for mainnet (current id: {w3.eth.chainId})")
        else:
          print(f"Invalid gas price: {gas_price}. `gas_price` can only be an integer or \"estimate_medium\".")
        exit(1)

    # Call main loop
    log_loop(
      w3,
      wrbcontract,
      pricefeedcontracts,
      account,
      gas,
      gas_price,
      args.loop_interval_secs,
      tx_waiting_timeout_secs,
      tx_polling_latency_secs,
      min_secs_between_request_updates
    )
Example No. 37
import json
import uuid

from google.appengine.api import urlfetch
from urllib import urlencode

from flask import request
from config import load_config, dict_formation
from services.auth_services import AuthServices
from models.auth_datastore import Tokens

CONFIG = load_config()


class GoogleAuthServices(object):
    @staticmethod
    def get_access_code(scope, prompt, redirect='email'):
        if redirect == 'email':
            redirect_uri = CONFIG.get('redirect_uris')[0]
        elif redirect == 'contact':
            redirect_uri = CONFIG.get('redirect_uris')[2]
        params = dict_formation(
            client_id=CONFIG.get('client_id'),
            redirect_uri=redirect_uri,
            scope=scope,
            access_type='offline',
            include_granted_scopes='true',
            response_type='code',
            prompt=prompt,
        )
Example No. 38
                        metavar='/path/to/coco_class_names.json',
                        help='Path to class json file')
    args = parser.parse_args()

    assert args.threshold >= 0.0 and args.threshold < 1.0, \
        'Invalid threshold value {} given'.format(args.threshold)

    if args.label is not None:
        assert os.path.exists(args.label)
        fp = open(args.label, 'r')
        label = json.load(fp)
        fp.close()
    else:
        label = None

    model = get_model_wrapper(load_config(args.model_cfg))
    if args.weights:
        model.load_weights(args.weights)

    image_files = sorted([
        os.path.join(args.image_dir, x) for x in os.listdir(args.image_dir)
        if x.lower().endswith('.jpg') or x.lower().endswith('.png')
        or x.lower().endswith('.bmp')
    ])

    dst_dir = os.path.join(args.image_dir, 'results')
    if not os.path.exists(dst_dir):
        os.mkdir(dst_dir)

    for image_file in image_files:
        filename = os.path.basename(image_file)
Example No. 39
    log_path.mkdir(parents=True, exist_ok=True)
    logger = logging.getLogger('__name__')
    logger.setLevel(file_log_level)

    logger.debug(f"[start_worker] file log level: {file_log_level}")
    log_file = 'log'
    file_log = {
        "launch_info": dict(),
        "time_start": str(datetime.datetime.utcnow())
    }

    logger_fh = logging.FileHandler(posixpath.join(log_path, log_file))
    formatter_fh = logging.Formatter('%(message)s')
    logger_fh.setFormatter(formatter_fh)
    logger.addHandler(logger_fh)

    worker_type, worker_config, file_log_config = load_config(file_log)
    file_log["launch_info"]["build_version"] = build_version
    file_log["launch_info"]["video_download_dir"] = worker_config.get(
        "video_download_dir", None)

    file_log["launch_info"]["clean_folder"] = worker_config.get(
        "clean_folder", None)
    file_log["launch_info"]["smart_download"] = worker_config.get(
        "smart_download", None)
    file_log["launch_info"]["log_file"] = worker_config.get("log_file", None)

    logger.info(json.dumps(file_log_config))
    worker = use_worker(worker_type)(**worker_config)
    worker.run()
Example No. 40
    #     # have to specify it.
    data, trainIndices, testIndices, __ignore__ = pickle.load(
        f)  # The last parameter (proportion analyzed? is not used )

# loading meta data / i.e. training & test files
Data = pd.read_hdf(
    basefolder + folder + '/data-' + Task + '/CollectedData_' + scorer + '.h5',
    'df_with_missing')

# "/data-"+Task+'-labelledby'+scorer+'/'
for shuffle in Shuffles:
    for trainFraction in TrainingFraction:
        experimentname = Task + date + '-trainset' + str(
            int(trainFraction * 100)) + 'shuffle' + str(shuffle)
        # loading config file:
        cfg = load_config(basefolder + experimentname + '/test/' +
                          "pose_cfg.yaml")
        modelfolder = basefolder + experimentname
        ##################################################
        # Load and setup CNN part detector
        ##################################################

        # Check which snap shots are available and sort them by # iterations
        Snapshots = np.array([
            fn.split('.')[0]
            for fn in os.listdir(basefolder + experimentname + '/train/')
            if "index" in fn
        ])
        increasing_indices = np.argsort(
            [int(m.split('-')[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]
Example No. 41
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Usage: python md2html.py filename

import os, sys
sys.path.append("..")
import markdown
from model.blog_article import Article
import config
cfg = config.load_config("config/config.json")
model_article = Article(cfg)

head = """<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<style type="text/css">
code {
  color: inherit;
  background-color: rgba(0, 0, 0, 0.05);
}
</style>
</head>
<body>
"""

foot = """
</body>
</html>
"""
Example No. 42
class Signal:
    base_config = config.load_config('params.cfg')

    def __init__(self, file_name, data_dir=''):
        # Contains data about the signal, and loads its config
        self.file_name = file_name
        self.file_path = os.path.join(data_dir, file_name)

        # parse signal first - to get errors before config is created.
        self.x_full, self.y_full, self.delta = dio.get_x_y_delta(
            self.file_path, v=False)
        # create config if not created
        if self.file_name not in self.base_config:
            self.base_config.add_section(self.file_name)
        self.config = self.base_config[self.file_name]

        # process data
        self.x, self.y = self.preprocess_xy()

        self.base_config.write()

    def preprocess_xy(self, moving_y_offset_wavelengths=None, use_abs=False):
        """Returns preprocessed x and y, based on the config"""
        x, y = self.x_full, self.y_full
        # remove boundary signals
        x_limits = self.get_x_lims()

        left_lim, right_lim = np.argmax(x > x_limits[0]), np.argmax(
            x > x_limits[1])
        if right_lim == 0:
            self.set_x_lims((x_limits[0], -1))
            print("Error: steps_lim_high > last step. Set to -1.")
        x = x[left_lim:right_lim]
        y = y[left_lim:right_lim]

        # shift to around x axis
        self.y_offset = np.mean(y)
        norm_y = y - self.y_offset
        if use_abs:
            norm_y = np.fabs(norm_y)
        self.config['y_offset'] = str(self.y_offset)
        self.x = x
        self.y = norm_y

        if moving_y_offset_wavelengths is not None:
            self.y = self.y - self.get_moving_y_offset(
                moving_y_offset_wavelengths)

        return x, norm_y

    def get_moving_y_offset(self, wavelengths=5):
        """
        Returns a NumPy array that can be used as a y-offset (e.g. y = y - y_offset).
        Averages out the prior and next n wavelengths, where n is the passed parameter.
        The distance of a wavelength is calculated as the simple mean distance between peaks.
        """
        max_x, max_y = self.get_local_maxes()
        average_steps_between_max = np.mean(np.ediff1d(max_x))

        y_offsets = np.zeros_like(self.y)

        steps_buffer = int(wavelengths * average_steps_between_max)

        for i in range(steps_buffer, len(self.x) - steps_buffer):
            # TODO: Optimise (probably can be done with NumPy)
            _x, _y = self.x[i], self.y[i]
            _steps_buffer = min(i, len(self.x) - i, steps_buffer)
            _x_i_start = np.argmax(self.x > _x - _steps_buffer)
            _x_i_end = np.argmax(self.x > _x + _steps_buffer)

            y_offsets[i] = np.mean(self.y[_x_i_start:_x_i_end + 1])

        return y_offsets

    def get_x_lims(self):
        steps_lim_low = self.config.getint('steps_lim_low')
        steps_lim_high = self.config.getint('steps_lim_high')
        if steps_lim_high == -1:
            steps_lim_high = max(self.x_full) - 1

        return steps_lim_low, steps_lim_high

    def set_x_lims(self, x_lims):
        self.config['steps_lim_low'] = str(x_lims[0])
        self.config['steps_lim_high'] = str(x_lims[1])
        self.base_config.write()

    def get_x_centre(self):
        x_centre = self.config.getint('x_centre')
        return x_centre

    def set_x_centre(self, x_centre):
        self.config['x_centre'] = str(x_centre)
        self.base_config.write()

    def get_y_offset(self):
        y_offset = self.config.getfloat('y_offset')
        if y_offset == 0:
            y_offset = np.mean(self.y)
        return y_offset

    def set_y_offset(self, y_offset):
        self.config['y_offset'] = str(y_offset)
        self.base_config.write()

    def get_local_maxes(self, use_full=False, strict=False, x_y=None):
        """Passing x_y uses those. Else signal's .x and .y attributes will be used (unless use_full is True)"""
        if x_y is None:
            if use_full:
                x, y = self.x_full, self.y_full
                y_offset = 0
            else:
                x, y = self.x, self.y
                y_offset = self.y_offset
        else:
            x, y = x_y
            y_offset = 0

        if strict:
            # take only those greater than both adjacent
            maxes = sps.argrelextrema(y, np.greater)[0]
        else:
            # take all greater/equal to both sides
            maxes = sps.argrelextrema(y, np.greater_equal)[0]

        # check that max_y values > 0
        maxes = maxes[y[maxes] > 0]

        # filter capped values on both sides
        maxes = maxes[y[maxes] != 5 - y_offset]

        max_x = x[maxes]
        max_y = y[maxes]

        return max_x, max_y

    def find_best_fit_gaussian(self,
                               also_use_scipy=True,
                               save=True,
                               fix_mean=False,
                               x_y=None):
        """
        Returns scipy_fit, calculated_fit
            each fit = (amplitude, mean, and sigma).

        SciPy fit generated using scipy.optimise, optimising all of amplitude, mean, and sigma.
        Calculated fit calculates the mean and sigma using formulae, and takes the max(y) as amplitude.

        Uses get_local_maxes to get x and y unless x_y is passed.
        TODO: Create fix_mean
        """
        if x_y is None:
            x, y = self.get_local_maxes()
        else:
            x, y = x_y
        y_max = np.max(y)
        amplitude = np.max(y)
        mean = np.sum(x * y) / np.sum(y)
        sigma = np.sqrt(np.abs(np.sum(y * (x - mean)**2) / np.sum(y)))

        if also_use_scipy:
            if fix_mean is not False:
                # TODO: Allow fixed mean optimisation
                pass
            else:
                # fit_params is (amplitude, mean, sigma)
                gaussian_fit = lambda fit_params, x: fit_params[0] * np.exp(-(
                    x - fit_params[1])**2 / (2 * fit_params[2]**2))
                err_func = lambda fit_params, x, y: gaussian_fit(
                    fit_params, x) - y  # Distance to the target function
                initial_parameters = [y_max, mean, sigma
                                      ]  # Initial guess for the parameters
                fitted_params, success = optimize.leastsq(
                    err_func, initial_parameters[:], args=(x, y))
                # print(fitted_params, success)

                if save:
                    self.config['gaussian_fit_amplitude'] = str(
                        fitted_params[0])
                    self.config['gaussian_fit_mean'] = str(fitted_params[1])
                    self.config['gaussian_fit_sigma'] = str(fitted_params[2])

                return fitted_params, (amplitude, mean, sigma)

        else:
            # calculate gaussian fit from points
            return amplitude, mean, sigma

    def find_best_fit_lorentzian(self, save=True, fix_mean=False, x_y=None):
        """
        Returns scipy_fit: (amplitude, mean, and gamma).

        SciPy fit generated using scipy.optimise, optimising all of amplitude, mean, and gamma.
        Calculated fit calculates the mean and sigma^2 using formulae, and takes the max(y) as amplitude.

        Uses get_local_maxes to get x and y unless x_y is passed.
        TODO: Create fix_mean
        """
        if x_y is None:
            x, y = self.get_local_maxes()
        else:
            x, y = x_y
        y_max = np.max(y)

        mean = np.sum(x * y) / np.sum(y)
        sigma = np.sqrt(np.abs(np.sum(y * (x - mean)**2) / np.sum(y)))

        if fix_mean is not False:
            # TODO: Allow fixed mean optimisation
            pass
        else:
            # fit_params is (amplitude, mean, gamma)
            lorentzian_fit = lambda fit_params, x: fit_params[0] / (1 + (
                (x - fit_params[1]) / fit_params[2])**2)
            err_func = lambda fit_params, x, y: lorentzian_fit(
                fit_params, x) - y  # Distance to the target function
            initial_parameters = [y_max, mean,
                                  sigma]  # Initial guess for the parameters
            fitted_params, success = optimize.leastsq(err_func,
                                                      initial_parameters[:],
                                                      args=(x, y))
            if save:
                self.config['lorentzian_fit_amplitude'] = str(fitted_params[0])
                self.config['lorentzian_fit_mean'] = str(fitted_params[1])
                self.config['lorentzian_fit_gamma'] = str(fitted_params[2])

            return fitted_params

    def find_best_fit_exponential(self, save=True, x_y=None, beating=True):
        """
        Returns scipy_fit: (amplitude, mean, decay_constant[, beating_freq]); beating_freq is only included when beating=True.

        SciPy fit generated using scipy.optimise, optimising all of amplitude, mean, and gamma.
        Calculated fit calculates the mean and sigma^2 using formulae, and takes the max(y) as amplitude.

        Uses get_local_maxes to get x and y unless x_y is passed.
        """
        if x_y is None:
            x, y = self.get_local_maxes()
        else:
            x, y = x_y
        y_max = np.max(y)

        mean = np.sum(x * y) / np.sum(y)
        sigma = np.sqrt(np.abs(np.sum(y * (x - mean)**2) / np.sum(y)))

        if beating:
            # fit_params is (amplitude, mean, decay_constant, beating_freq)
            exp_fit = lambda fit_params, x: fit_params[0] * np.exp(-fit_params[2] * np.fabs(x - fit_params[1])) * \
                                            np.fabs(np.cos(2 * np.pi * (x - fit_params[1]) * fit_params[3]))
            err_func = lambda fit_params, x, y: exp_fit(
                fit_params, x) - y  # Distance to the target function
            initial_parameters = [y_max, mean, 1 / sigma, 1 / (4 * sigma)]
            fitted_params, success = optimize.leastsq(err_func,
                                                      initial_parameters[:],
                                                      args=(x, y))
            # fitted_params = initial_parameters
            if save:
                self.config['exponential_fit_amplitude'] = str(
                    fitted_params[0])
                self.config['exponential_fit_mean'] = str(fitted_params[1])
                self.config['exponential_fit_decay_constant'] = str(
                    fitted_params[2])
                self.config['exponential_fit_beating_freq'] = str(
                    fitted_params[3])

        else:
            # fit_params is (amplitude, mean, decay_constant)
            exp_fit = lambda fit_params, x: fit_params[0] * np.exp(-fit_params[
                2] * np.fabs(x - fit_params[1]))
            err_func = lambda fit_params, x, y: exp_fit(
                fit_params, x) - y  # Distance to the target function
            initial_parameters = [y_max, mean, 1 / sigma]
            fitted_params, success = optimize.leastsq(err_func,
                                                      initial_parameters[:],
                                                      args=(x, y))
            if save:
                self.config['exponential_fit_amplitude'] = str(
                    fitted_params[0])
                self.config['exponential_fit_mean'] = str(fitted_params[1])
                self.config['exponential_fit_decay_constant'] = str(
                    fitted_params[2])

        return fitted_params

    def find_step_size(self, known_wavelength=546.22E-9, bins=1):
        # dt = lambda / 2
        # displacement per step = peaks / steps * lambda / 2
        bin_width = int(np.floor(len(self.x) / bins))
        peaks_list = []
        steps_list = []

        # all remainder is ignored (e.g. data 1000 to 1050 if 1050 split into 100 bins)
        for bin_i in range(bins):
            offset = bin_width * bin_i
            right_lim = offset + bin_width - 1
            # print(offset, right_lim)
            max_x, max_y = self.get_local_maxes(x_y=(self.x[offset:right_lim],
                                                     self.y[offset:right_lim]))
            _peaks = len(max_x)
            _steps = self.x[right_lim] - self.x[offset]
            peaks_list.append(_peaks)
            steps_list.append(_steps)
        # print(peaks_list, steps_list)
        steps_list = np.array(steps_list)
        peaks_list = np.array(peaks_list)
        dps = peaks_list / steps_list * known_wavelength / 2
        # print('DPS: %.4e, pm %.1e '% (dps.mean(), np.std(dps)))

        return dps, bin_width

    @staticmethod
    def as_string(*args, fmt=('%i', '%.18e')):
        # returns a string that can be pasted into Excel/Origin/other stuff (tab as delimiter)
        _s = io.BytesIO()
        data = np.column_stack(args)
        np.savetxt(_s, data, fmt=fmt, delimiter='\t')
        return _s.getvalue().decode()

    def get_steps_between_peaks(self):
        """
        Returns steps_data and dps_data, where
            steps_data = (steps, unique_steps_between_peaks, unique_steps_counts)

        Calculates the number of steps between each peak and the next
        (e.g. if 5 peaks found, there will be 4 "steps between peaks", and the sum of unique steps_count will be 4)
        unique_steps_between_peaks is an array of multiples of self.delta.

        Filters out steps between peaks that are < 0.3 or > 1.7 times the modal steps count.
        1.7 chosen to avoid the small second peak at 2 times (probably due to single missed peaks), and
        0.3 chosen to preserve symmetry about the peak.
        """
        max_x, max_y = self.get_local_maxes()
        full_steps = np.ediff1d(max_x)
        # _full_mean, _full_std = np.mean(full_steps), np.std(full_steps)
        _full_count = len(full_steps)

        unique_steps_between_peaks, unique_steps_counts = np.unique(
            full_steps, return_counts=True)

        _filter = np.logical_and(
            full_steps <
            unique_steps_between_peaks[np.argmax(unique_steps_counts)] * 1.7,
            full_steps >
            unique_steps_between_peaks[np.argmax(unique_steps_counts)] * 0.3)
        # 1.7 chosen as filter, as there seems to be another peak ~2* (probably due to single missed peaks)
        # 1.7 avoids the start of the gaussian at 2*

        if not _filter.all():
            steps = full_steps[_filter]
            # print(unique_steps_between_peaks[np.argmax(unique_steps_counts)])
            _filtered_count = len(steps)
            _counts = (_full_count, _filtered_count,
                       _full_count - _filtered_count)
            # print('Original Count: %s, Filtered Count: %s, Excluded Count: %s' % _counts)
            # print('Filtered:', full_steps[np.invert(_filter)])
            unique_steps_between_peaks, unique_steps_counts = np.unique(
                steps, return_counts=True)
        else:
            steps = full_steps

        return steps, unique_steps_between_peaks, unique_steps_counts

    def get_motor_step_dps_per_peak(self, known_wavelength):
        """
        Takes in a known wavelength (in metres) to find motor step size (calibration).
        Returns steps_data and dps_data, where
            steps_data = (steps, unique_steps_between_peaks, unique_steps_counts)
            dps_data = (_dpses, unique_dpses, unique_dpses_counts, dps_mean, dps_std)

        Finds the number of steps between maxima, then finds the corresponding displacement per step (dps).
        If there are 5 maxima points, there will be 4 data points (4 'steps between maxima' to 4 'dps').
        Returns the mean and std for dps. Other returned information (such as steps_data) is returned for plotting use.
        """
        steps, unique_steps_between_peaks, unique_steps_counts = self.get_steps_between_peaks(
        )

        _dpses = known_wavelength / (2 * steps)
        dps_mean, dps_std = np.mean(_dpses), np.std(_dpses)
        unique_dpses, unique_dpses_counts = np.unique(_dpses,
                                                      return_counts=True)
        print('DPS: %s, DPS std dev: %s' % (dps_mean, dps_std))

        steps_data = (steps, unique_steps_between_peaks, unique_steps_counts)
        dps_data = (_dpses, unique_dpses, unique_dpses_counts, dps_mean,
                    dps_std)
        return steps_data, dps_data

    def get_motor_step_dps_with_fourier(self, known_wavelength):
        frequencies, magnitudes = self.get_frequencies_with_fourier(fit=False)

        # steps between peaks = 1 wavelength = 1 / freq
        # dps = known_wavelength / (2 * 1 wavelength) = freq * known_wavelength / 2
        _dpses = frequencies * known_wavelength / 2

        scipy_fit, calc_fit = self.find_best_fit_gaussian(x_y=(_dpses,
                                                               magnitudes))
        print('DPS: Mean: %.4e, std: %.4e' % (scipy_fit[1], scipy_fit[2]))
        return _dpses, scipy_fit, frequencies, magnitudes

    def get_frequencies_with_fourier(self,
                                     freq_limits=(2e-3, 1.5e-2),
                                     fit=True):
        fourier = np.fft.fft(self.y)
        freqs_full = np.fft.fftfreq(self.y.size, d=self.delta)

        freq_filter = np.where(
            np.logical_and(freqs_full >= freq_limits[0],
                           freqs_full <= freq_limits[1]))
        frequencies = freqs_full[freq_filter]
        magnitudes = np.abs(fourier[freq_filter])

        if fit:
            scipy_fit, calc_fit = self.find_best_fit_gaussian(x_y=(frequencies,
                                                                   magnitudes))
            return frequencies, magnitudes, scipy_fit

        return frequencies, magnitudes

    def get_investigation_data(self,
                               gamma,
                               dps,
                               gamma_err=0,
                               dps_err=0,
                               fit_type='exponential'):
        """Pass the standard deviation of the gaussian fit in terms of motor_steps
        and the displacement per motor step"""
        if fit_type == 'exponential':
            coherence_length = 2 * np.log(2) / (gamma / dps)

            # fourier transform of an exponential decay with decay constant g = lorentzian with hwhm g
            # decay constant = 1/g
            # Fourier(exp(-2pi k0 x)) = (1/pi)(k0 / (k^2 + k0^2))
            # 2 pi k0 = g, k0 = hwhm = g / (2 pi)
            # fwhm = g / pi (*c)
            spectral_width_hz = (gamma / dps) / np.pi * constants.c

        elif fit_type == 'lorentzian':
            coherence_length_in_motor_steps = 2 * gamma
            coherence_length = coherence_length_in_motor_steps * dps  # in metres
            # TODO: change this to be in terms of the exponential decay,
            # though the lorentzian curve fit on the interferogram has no physical meaning anyway.
            spectral_width_hz = constants.c / (np.pi * coherence_length)

        elif fit_type == 'gaussian':
            # gamma is actually sigma
            coherence_length_in_motor_steps = 2 * np.sqrt(
                2 * np.log(2)) * gamma
            coherence_length = coherence_length_in_motor_steps * dps  # in metres

            # spectral_width_hz = constants.c / (np.pi * coherence_length)
            # new sigma is pi / sigma
            # unclear why it is 1 / (pi * new_sigma) rather than pi / new_sigma
            # TODO: Figure out why gaussian spec width is so narrow.
            spectral_width_hz = constants.c * 2 * np.sqrt(
                2 * np.log(2)) / (np.pi * gamma * dps)
            # print("%.4e, %.4e" % (constants.c * 2 * np.sqrt(2 * np.log(2)) * np.pi / (gamma * dps), constants.c / (np.pi * coherence_length)))

        # steps between peaks = 1 wavelength = 1 / freq
        # dps = known_wavelength / (2 * 1 wavelength) = freq * known_wavelength / 2
        frequencies_per_motor_step, magnitudes = self.get_frequencies_with_fourier(
            fit=False)
        wavelengths = 2 * dps / frequencies_per_motor_step
        scipy_fit, calc_fit = self.find_best_fit_gaussian(x_y=(wavelengths,
                                                               magnitudes))
        wavelengths_mean, wavelengths_std = scipy_fit[1], scipy_fit[2]
        mean_wavelength = wavelengths_mean
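        # Δλ ≈ (λ² / c) · Δν: convert the spectral width from frequency units
        # to wavelength units.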
        spectral_width_m = mean_wavelength**2 / constants.c * spectral_width_hz

        frequencies = constants.c / wavelengths

        (amplitude, mean,
         gamma) = self.find_best_fit_lorentzian(x_y=(frequencies, magnitudes))
        scipy_fit = (amplitude, mean, gamma)
        frequencies_mean, frequencies_std = scipy_fit[1], scipy_fit[2]

        ## This section plots the fits to the FT
        # plt.plot(frequencies, magnitudes, '.')
        #
        # x_min, x_max = plt.xlim()
        # lorentzian_fit = lambda fit_params, x: fit_params[0] / (1 + ((x - fit_params[1]) / fit_params[2]) ** 2)
        # fit_x = np.linspace(x_min, x_max, 10000)
        # optimised_lorentzian_fit = lorentzian_fit(scipy_fit, fit_x)
        # fwhm = 2 * scipy_fit[2]
        # plt.plot(fit_x, optimised_lorentzian_fit, 'k', label='SciPy fit (Lorentzian)\nFWHM: %.4e' % fwhm)
        #
        # gaussian_fit = lambda fit_params, x: fit_params[0] * np.exp(-(x - fit_params[1]) ** 2 / (2 * fit_params[2] ** 2))
        # scipy_fit, calc_fit = self.find_best_fit_gaussian(also_use_scipy=True, x_y=(frequencies, magnitudes))
        # optimised_gaussian_fit = gaussian_fit(scipy_fit, fit_x)
        # fwhm = 2 * np.sqrt(2 * np.log(2)) * scipy_fit[2]
        # plt.plot(fit_x, optimised_gaussian_fit, 'g', label='SciPy fit (G)\nFWHM: %.4e' % fwhm)
        # plt.legend()
        #
        # plt.show()

        print('With Fourier:')
        print('Spectral Width (m): %.5e' % spectral_width_m)
        print('Coherence length: %.5e' % coherence_length)
        print('Mean frequencies: %.5e pm %.5e' %
              (frequencies_mean, frequencies_std))
        print('Mean wavelength: %.5e pm %.5e' %
              (mean_wavelength, wavelengths_std))

        steps, unique_steps_between_peaks, unique_steps_counts = self.get_steps_between_peaks(
        )
        distances = dps * steps
        # distances_mean, distances_std = np.mean(distances), np.std(distances)
        wavelengths = distances * 2
        wavelengths_mean, wavelengths_std = np.mean(wavelengths), np.std(
            wavelengths)

        mean_wavelength = wavelengths_mean

        frequencies = constants.c / wavelengths
        frequencies_mean, frequencies_std = np.mean(frequencies), np.std(
            frequencies)

        spectral_width_m = mean_wavelength**2 / constants.c * spectral_width_hz
        print("With Step calculation")
        print('Spectral Width (Hz): %.5e' % spectral_width_hz)
        print('Spectral Width (m): %.5e' % spectral_width_m)

        if gamma_err == 0 and dps_err == 0:
            print('Coherence length: %.5e' % coherence_length)
            print('Spectral width (Hz): %.5e' % spectral_width_hz)
            print('Mean frequencies: %.5e pm %.5e' %
                  (frequencies_mean, frequencies_std))
            print('Mean wavelength: %.5e pm %.5e' %
                  (mean_wavelength, wavelengths_std))
        else:
            print('dps_err: %.5e' % dps_err)
            coherence_length_err = dps_err / dps * coherence_length
            spectral_width_err = np.sqrt(
                (coherence_length_err /
                 (constants.c * mean_wavelength**2))**2 +
                (2 * coherence_length / (constants.c * mean_wavelength**3))**2)
            print('dlambda: %.5e' % (mean_wavelength**2 / coherence_length))
            print('Coherence length: %.5e pm %.5e' %
                  (coherence_length, coherence_length_err))
            print('Spectral width (Hz): %.5e pm %.5e' %
                  (spectral_width_hz, spectral_width_err))
            print('Mean frequencies: %.5e pm %.5e' %
                  (frequencies_mean, frequencies_std))
            print('Mean wavelength: %.5e pm %.5e' %
                  (mean_wavelength, wavelengths_std))

        data = {
            'coherence_length': coherence_length,
            'spectral_width_hz': spectral_width_hz,
            'spectral_width_m': spectral_width_m,
            'mean_wavelength': mean_wavelength,
            'mean_frequency': frequencies_mean,
        }
        return data
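
# A hedged sketch of the find_best_fit_gaussian helper that the methods above
# rely on; the real helper is defined elsewhere in the class, and its behaviour
# is inferred only from the calls above (it appears to return an
# (amplitude, mean, sigma) tuple as scipy_fit, plus a second fit result).
import numpy as np
from scipy.optimize import curve_fit


def _gaussian(x, amplitude, mean, sigma):
    return amplitude * np.exp(-(x - mean) ** 2 / (2 * sigma ** 2))


def find_best_fit_gaussian_sketch(x_y):
    x, y = x_y
    # initial guess: peak height, location of the maximum, quarter of the span
    p0 = (y.max(), x[np.argmax(y)], (x.max() - x.min()) / 4)
    popt, _ = curve_fit(_gaussian, x, y, p0=p0)
    return tuple(popt)  # (amplitude, mean, sigma)
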
from invoke import task, Collection
import os
from config import load_config


_BASE_PATH = os.path.dirname(os.path.abspath(__file__))
env_values = load_config()


def _benchmark_code_exists():
    dir_path = os.path.dirname(os.path.realpath(__file__))
    return os.path.exists(os.path.join(dir_path, "src", "tf_cnn_benchmarks.py"))


@task
def clone_benchmarks(c):
    """Clones the Tensorflow benchmarks from https://github.com/tensorflow/benchmarks.git into the src folder
    """
    if _benchmark_code_exists():
        return None
    c.run(
        "git clone -b cnn_tf_v1.12_compatible  https://github.com/tensorflow/benchmarks.git"
    )
    dir_path = os.path.dirname(os.path.realpath(__file__))
    c.run(
        f"cp -r benchmarks/scripts/tf_cnn_benchmarks/* {os.path.join(dir_path, 'src')}"
    )
    c.run("rm -r benchmarks")


@task(pre=[clone_benchmarks])
Exemplo n.º 44
0
def handler(req, fields = None, profiling = True):
    req.content_type = "text/html; charset=UTF-8"
    req.header_sent = False

    # Create an object that contains all data about the request and
    # helper functions for creating valid HTML. Parse URI and
    # store results in the request object for later usage.
    html = html_mod_python(req, fields)
    html.enable_debug = config.debug
    html.id = {} # create unique ID for this request
    __builtin__.html = html
    response_code = apache.OK

    try:

        # Ajax-Functions want no HTML output in case of an error but
        # just a plain server result code of 500
        fail_silently = html.has_var("_ajaxid")

        # Webservice functions may decide to get a normal result code
        # but a text with an error message in case of an error
        plain_error = html.has_var("_plain_error")

        config.load_config() # load multisite.mk
        if html.var("debug"): # Debug flag may be set via URL
            config.debug = True

        if html.var("screenshotmode") or config.screenshotmode: # Omit fancy background, make it white
            html.screenshotmode = True

        html.enable_debug = config.debug
        html.set_buffering(config.buffered_http_stream)

        # profiling can be enabled in multisite.mk
        if profiling and config.profile:
            import cProfile # , pstats, sys, StringIO, tempfile
            # the profiler loses its memory of all modules. We need to hand over
            # the request object in the apache module.
            # Ubuntu: install python-profiler when using this feature
            profilefile = defaults.var_dir + "/web/multisite.profile"
            retcode = cProfile.runctx(
                "import index; "
                "index.handler(profile_req, profile_fields, False)",
                {'profile_req': req, 'profile_fields': html.fields}, {}, profilefile)
            file(profilefile + ".py", "w").write(
                "#!/usr/bin/python\n"
                "import pstats\n"
                "stats = pstats.Stats(%r)\n"
                "stats.sort_stats('time').print_stats()\n" % profilefile)
            os.chmod(profilefile + ".py", 0755)
            release_all_locks()
            return apache.OK

        # Make sure all plugins are available as early as possible. We need
        # the plugins (i.e. the permissions declared in them) before the first
        # login at the latest, for generating auth.php.
        load_all_plugins()

        # Detect mobile devices
        if html.has_var("mobile"):
            html.mobile = not not html.var("mobile")
        else:
            user_agent = html.req.headers_in.get('User-Agent', '')
            html.mobile = mobile.is_mobile(user_agent)

        # Redirect to mobile GUI if we are a mobile device and
        # the URL is /
        if html.myfile == "index" and html.mobile:
            html.myfile = "mobile"

        # Get page handler.
        handler = pagehandlers.get(html.myfile, page_not_found)

        # First initialization of the default permissions. Needs to be done before the auth_file
        # (auth.php) is written (that happens while showing the login page for the first time).
        # Must be loaded before the "automation" call to have the general.* permissions available
        # during automation action processing (e.g. hooks triggered by restart)
        default_permissions.load()

        # Special handling for automation.py. Sorry, this must be hardcoded
        # here. Automation calls bypass the normal authentication stuff
        if html.myfile in [ "automation", "run_cron" ]:
            try:
                handler()
            except Exception, e:
                html.write(str(e))
                if config.debug:
                    html.write(html.attrencode(format_exception()))
            release_all_locks()
            return apache.OK

        # Prepare output format
        output_format = html.var("output_format", "html")
        html.set_output_format(output_format)

        # Is the user set by the webserver? otherwise use the cookie based auth
        if not html.user or type(html.user) != str:
            config.auth_type = 'cookie'
            # When not authed tell the browser to ask for the password
            html.user = login.check_auth()
            if html.user == '':
                if fail_silently:
                    # During API calls, don't show the login dialog
                    raise MKUnauthenticatedException(_('You are not authenticated.'))

                # Redirect to the login-dialog with the current url as original target
                # Never render the login form directly when accessing urls like "index.py"
                # or "dashboard.py". This results in strange problems.
                if html.myfile != 'login':
                    html.http_redirect(defaults.url_prefix + 'check_mk/login.py?_origtarget=%s' %
                                                html.urlencode(html.makeuri([])))

                # Initialize the i18n for the login dialog. This might be overridden
                # later after user login
                load_language(html.var("lang", config.get_language()))

                # This either displays the login page or validates the information submitted
                # to the login form. After successful login a http redirect to the originally
                # requested page is performed.
                login.page_login(plain_error)
                release_all_locks()
                return apache.OK

        # Call userdb page hooks which are executed on a regular basis to e.g. synchronize
        # information without explicit user-triggered actions
        userdb.hook_page()

        # Set all permissions, read site config, and similar stuff
        config.login(html.user)
        html.load_help_visible()

        # Initialize the multisite i18n. This will be replaced by
        # language settings stored in the user profile after the user
        # has been initialized
        load_language(html.var("lang", config.get_language()))

        # All plugins might have to be reloaded due to a language change
        load_all_plugins()

        # Reload default permissions (maybe reload due to language change)
        default_permissions.load()

        # User allowed to login at all?
        if not config.may("general.use"):
            reason = _("You are not authorized to use Check_MK Multisite. Sorry. "
                       "You are logged in as <b>%s</b>.") % config.user_id
            if len(config.user_role_ids):
                reason += _("Your roles are <b>%s</b>. " % ", ".join(config.user_role_ids))
            else:
                reason += _("<b>You do not have any roles.</b> ")
            reason += _("If you think this is an error, "
                        "please ask your administrator to check the permissions configuration.")

            if config.auth_type == 'cookie':
                reason += _('<p>You have been logged out. Please reload the page to re-authenticate.</p>')
                login.del_auth_cookie()

            raise MKAuthException(reason)

        handler()
Exemplo n.º 45
0
# Loading data and evaluating network on data
####################################################

basefolder = os.path.join('..', 'pose-tensorflow', 'models')
folder = os.path.join('UnaugmentedDataSet_' + Task + date)

for shuffleIndex, shuffle in enumerate(Shuffles):
    for trainFractionIndex, trainFraction in enumerate(TrainingFraction):
        ################################################################################
        # Check which snapshots exist for given network (with training data split).
        ################################################################################

        experimentname = Task + date + '-trainset' + str(
            int(trainFraction * 100)) + 'shuffle' + str(shuffle)
        modelfolder = os.path.join(basefolder, experimentname)
        cfg = load_config(
            os.path.join(basefolder, experimentname, 'test', "pose_cfg.yaml"))
        # Check which snapshots are available and sort them by number of iterations
        Snapshots = np.array([
            fn.split('.')[0] for fn in os.listdir(
                os.path.join(basefolder, experimentname, 'train'))
            if "index" in fn
        ])

        increasing_indices = np.argsort(
            [int(m.split('-')[1]) for m in Snapshots])
        Snapshots = Snapshots[increasing_indices]
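        # e.g. a checkpoint file named "snapshot-100000.index" (illustrative
        # name) becomes the entry "snapshot-100000", and int(m.split('-')[1])
        # pulls out 100000, so snapshots end up ordered by training iteration.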

        if snapshotindex == -1:
            snapindices = [-1]
        elif snapshotindex == "all":
            snapindices = range(len(Snapshots))
Exemplo n.º 46
0
from sqlalchemy.orm import sessionmaker
from protocol import ProtocolError, Connection, ConnectionError, Keepalive
from models import Node, NodeVisitation, Base, CrawlSummary, UserAgent
from config import load_config, DefaultFlaskConfig

try:
    from flask_user_config import SQLALCHEMY_DATABASE_URI
except ImportError:
    try:
        from flask_config import SQLALCHEMY_DATABASE_URI
    except ImportError:
        SQLALCHEMY_DATABASE_URI = DefaultFlaskConfig.SQLALCHEMY_DATABASE_URI

logging.basicConfig(level=logging.INFO)

CONF = load_config()

ASN = Reader("geoip/GeoLite2-ASN.mmdb")
COUNTRY = Reader("geoip/GeoLite2-Country.mmdb")
CITY = Reader("geoip/GeoLite2-City.mmdb")
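# Assuming Reader is geoip2.database.Reader (the import is not shown in this
# snippet), lookups later in the file would look roughly like:
#   COUNTRY.country("8.8.8.8").country.name
#   ASN.asn("8.8.8.8").autonomous_system_organization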

RENAMED_COUNTRIES = {"South Korea": "Republic of Korea"}
USER_AGENTS = {}


def get_user_agent_id(user_agent, _session):
    user_agent = str(user_agent)
    if len(user_agent) > 60:
        user_agent = user_agent[:60]
    if user_agent not in USER_AGENTS:
        u = _session.query(UserAgent).filter(
Exemplo n.º 47
0
import os
import sys

sys.path.append(os.path.dirname(__file__) + "/../")

from scipy.misc import imread

from config import load_config
from nnet import predict
from util import visualize

from dataset.pose_dataset import data_to_input

cfg = load_config("demo/pose_cfg.yaml")

# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)

# Read image from file
file_name = "demo/image.png"
image = imread(file_name, mode='RGB')

image_batch = data_to_input(image)

# Compute prediction with the CNN
outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
scmap, locref = predict.extract_cnn_output(outputs_np, cfg)

# Extract maximum scoring location from the heatmap, assume 1 person
pose = predict.argmax_pose_predict(scmap, locref, cfg.stride)
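
# A hedged sketch of how the predicted pose could be inspected; the project's
# util.visualize helper is imported above but its API is not shown in this
# snippet, so plain matplotlib is used instead. The pose array is assumed to
# hold one (x, y, score) row per joint.
import matplotlib.pyplot as plt

plt.imshow(image)
plt.scatter(pose[:, 0], pose[:, 1], c='r', s=20)  # joint locations
plt.axis('off')
plt.show()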
Exemplo n.º 48
0
from config import load_config, config
from djc_helper import DjcHelper
from log import color
from util import show_head_line

if __name__ == '__main__':
    # Load the configuration
    load_config("config.toml", "config.toml.local")
    cfg = config()

    # 12.30 number of times cards can be sent (re: it seems there is no limit when sending to others?)
    indexes = [4]

    for idx in indexes:  # 1-based index, the i-th account
        account_config = cfg.account_configs[idx - 1]
        show_head_line("开始处理第{}个账户[{}]".format(idx, account_config.name),
                       color("fg_bold_yellow"))

        djcHelper = DjcHelper(account_config, cfg.common)
        djcHelper.check_skey_expired()
        djcHelper.get_bind_role_list()

        lr = djcHelper.fetch_pskey()

        # re: fill in the QQ number first
        # undone: then fill in the cards
        targetQQ = "XXXXXXXXXXX"
        cards_to_send = [
            ("XXXXXXXXXXX", 1),
            ("XXXXXXXXXXX", 1),
            ("XXXXXXXXXXX", 1),
Exemplo n.º 49
0
import discord
import config
from discord.ext import commands
from cogs import status

cfg = config.load_config()
bot = commands.Bot(command_prefix=cfg["command_prefix"])

COGS = [status.Status]


def add_cogs(bot):
    for cog in COGS:
        bot.add_cog(cog(bot, cfg))


def run():
    add_cogs(bot)

    if cfg["token"] == "":
        raise ValueError(
            "No token has been provided. Please ensure that config.toml contains the bot token."
        )
    bot.run(cfg["token"])
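
# A hedged sketch of what config.load_config might look like for this bot,
# assuming the configuration lives in a TOML file with "command_prefix" and
# "token" keys (the real implementation and file name are not shown here).
import toml

def load_config(path="config.toml"):
    with open(path) as f:
        return toml.load(f)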
Exemplo n.º 50
0
    scmap, locref = predict.extract_cnn_output(outputs_np, cfg)
    pose = predict.argmax_pose_predict(scmap, locref, cfg.stride)
    if outall:
        return scmap, locref, pose
    else:
        return pose


####################################################
# Loading data, and defining model folder
####################################################

basefolder = '../pose-tensorflow/models/'  # for cfg file & ckpt!
modelfolder = (basefolder + Task + str(date) + '-trainset' +
               str(int(trainingsFraction * 100)) + 'shuffle' + str(shuffle))
cfg = load_config(modelfolder + '/test/' + "pose_cfg.yaml")

##################################################
# Load and setup CNN part detector
##################################################

# Check which snapshots are available and sort them by number of iterations
Snapshots = np.array([
    fn.split('.')[0] for fn in os.listdir(modelfolder + '/train/')
    if "index" in fn
])
increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
Snapshots = Snapshots[increasing_indices]

print(modelfolder)
print(Snapshots)
Exemplo n.º 51
0
    "--vtt",
    type=parse_bool,
    nargs="?",
    const=True,
    default=False,
    metavar="BOOL",
    help="create video progress thumbnails (default: False)",
)

args = parser.parse_args()
args = vars(args)

if args["config"]:
    args["config"] = Path(args["config"])
    if args["config"].exists() and args["config"].is_file():
        config = load_config(args["config"])
        config = parse_config(config)
        args.update(config)
    else:
        raise ValueError("Cannot find config file: " + str(args["config"]))

if not args["output"]:
    args["output"] = Path(str(args["input"]) + "-copy")
args["output"].mkdir(parents=True, exist_ok=True)

Write.text("Input: {}".format(args["input"]))
Write.text("Output: {}".format(args["output"]))
# TODO: add file count based on above generator options
# TODO: add estimated time based on above generator options * delta
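
# A hedged sketch of the parse_bool helper used as an argparse type above
# (its real definition is not shown in this snippet).
def parse_bool(value):
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("1", "true", "yes", "y", "on")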

Exemplo n.º 52
0
from model import RNNModel
import numpy as np
from config import load_config


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--path', required=True)
    parser.add_argument('-s', '--save_path', default='./')
    parser.add_argument('-l', '--load_path', default=None)
    args = parser.parse_args()
    return args


args = parse_args()
cf = load_config(args.path)
dataset = Corpus()
dataset.process_data()

cf.ntokens_source = len(dataset.source_dict)
cf.ntokens_target = len(dataset.target_dict)

if not os.path.exists(args.save_path):
    os.makedirs(args.save_path)

criterion = nn.CrossEntropyLoss(
    ignore_index=dataset.target_dict.word2idx['<pad>'])

model = RNNModel(cf).cuda()
optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-4)
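
# A hedged sketch of a single training step with the objects built above; the
# real training loop is cut off in this snippet, and the batch shapes, the
# model's forward signature, and the handling of hidden state are assumptions.
def train_step(source_batch, target_batch):
    model.train()
    optimizer.zero_grad()
    logits = model(source_batch)  # assumed output shape: (..., cf.ntokens_target)
    loss = criterion(logits.view(-1, cf.ntokens_target),
                     target_batch.view(-1))
    loss.backward()
    optimizer.step()
    return loss.item()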
Exemplo n.º 53
0
    ds.append(['XOR', (X, Y)])
    """
    from sklearn.datasets import fetch_openml
    https://scikit-learn.org/stable/datasets/index.html#openml
    """

    return ds


# %%

if __name__ == "__main__":
    #X,Y=load_example_dataset(data_set_type='binary') # old test

    import config
    conf = config.load_config()
    ds_abs_path = conf['Paths']['Abs path to Dataset']
    ds_type = conf['Global variables']['DS_type']
    cd_abs_path = conf['Paths']['Abs path to CD']
    cd_type = conf['Global variables']['CD_type']

    #X, y = data.load_X_y(ds_abs_path, ds_type, cd_abs_path, cd_type)
    print(load_CD(cd_abs_path, cd_type))

    print(load_dataset_numpy(ds_abs_path, ds_type))
    print(type(load_dataset_numpy(ds_abs_path, ds_type)))

# %%
"""
sparse svmlight datasets
#        from sklearn.datasets import load_svmlight_files
Exemplo n.º 54
0
from dataset.pose_dataset import data_to_input

from multiperson.detections import extract_detections
from multiperson.predict import SpatialModel, eval_graph, get_person_conf_multicut
from multiperson.visualize import PersonDraw, visualize_detections

import matplotlib.pyplot as plt

from PIL import Image, ImageDraw, ImageFont
font = ImageFont.truetype("./font/NotoSans-Bold.ttf", 12)

import random

####################

cfg = load_config("demo/pose_cfg_multi.yaml")

dataset = create_dataset(cfg)

sm = SpatialModel(cfg)
sm.load()

draw_multi = PersonDraw()

# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)

##########
## Get the source of video

parser = ap.ArgumentParser()
Exemplo n.º 55
0
        help='Verbose output. Changes log level from INFO to DEBUG.')
    parser.add_argument(
        '--config',
        help='Specify a configuration file (defaults to ./config.yml)')
    parser.add_argument('-l',
                        '--logfile',
                        help="Log file to append logs to.",
                        default=None)
    args = parser.parse_args()

    logging.basicConfig(level=logging.DEBUG if args.v else logging.INFO,
                        filename=args.logfile,
                        format="%(asctime)-15s: %(message)s")
    enable_color_logging(debug_lvl=logging.DEBUG if args.v else logging.INFO)
    logger.info(intro())
    CONFIG = load_config(args.config or "./config.yml")
    li = lichess.Lichess(CONFIG["token"], CONFIG["url"], __version__)

    user_profile = li.get_profile()
    username = user_profile["username"]
    is_bot = user_profile.get("title") == "BOT"
    logger.info("Welcome {}!".format(username))

    if args.u and not is_bot:
        is_bot = upgrade_account(li)

    if is_bot:
        engine_factory = partial(engine_wrapper.create_engine, CONFIG)
        start(li, user_profile, engine_factory, CONFIG)
    else:
        logger.error(
Exemplo n.º 56
0
        checkpoints = torch.load(os.path.join('checkpoints', args.checkpoints))
        model.load_state_dict(checkpoints['model_state_dict'])
        optimizer.load_state_dict(checkpoints['optimizer_state_dict'])
        start_epoch = checkpoints['global_epoch']
    else:
        start_epoch = 1

    if args.cuda:
        model = model.cuda()

    if not args.evaluation:
        criterion = nn.CrossEntropyLoss()
        lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2, eta_min=0.0001)

        global_acc = 0.
        for epoch in range(start_epoch, args.epochs + 1):
            _train(epoch, train_loader, model, optimizer, criterion, args)
            best_acc = _eval(epoch, test_loader, model, args)
            if global_acc < best_acc:
                global_acc = best_acc
                save_checkpoint(best_acc, model, optimizer, args, epoch)

            lr_scheduler.step()
            print('Current Learning Rate: {}'.format(lr_scheduler.get_last_lr()))
    else:
        _eval(start_epoch, test_loader, model, args)


if __name__ == '__main__':
    args = load_config()
    main(args)
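
# A hedged sketch of the save_checkpoint helper used in the training loop
# above; the dict keys mirror what the checkpoint-loading code at the top of
# this snippet expects, while the directory and file name are assumptions.
import os      # already used in the original file
import torch   # already used in the original file

def save_checkpoint(best_acc, model, optimizer, args, epoch):
    os.makedirs('checkpoints', exist_ok=True)
    state = {
        'global_epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'best_acc': best_acc,
    }
    torch.save(state, os.path.join('checkpoints',
                                   'checkpoint_epoch_{}.pth'.format(epoch)))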
Exemplo n.º 57
0
    def run(self):
        # read configuration files
        config_root = self.ENV.get("CONFIG_ROOT", "../config")
        config_schemas = config.load_config(config_root + "/schemas.yml",
                                            env=self.ENV)
        config_services = config.load_config(config_root + "/services.yml",
                                             env=self.ENV)

        # print("schemas.yml -->", json.dumps(config_schemas))
        # print("services.yml -->", json.dumps(config_services))

        agent_admin_url = self.ENV.get("AGENT_ADMIN_URL")
        if not agent_admin_url:
            raise RuntimeError(
                "Error AGENT_ADMIN_URL is not specified, can't connect to Agent."
            )
        app_config["AGENT_ADMIN_URL"] = agent_admin_url

        # get public DID from our agent
        response = requests.get(agent_admin_url + "/wallet/did/public",
                                headers=ADMIN_REQUEST_HEADERS)
        result = response.json()
        did = result["result"]
        print("Fetched DID from agent: ", did)
        app_config["DID"] = did["did"]

        # determine pre-registered schemas and cred defs
        existing_schemas = agent_schemas_cred_defs(agent_admin_url)
        print("Existing schemas:", json.dumps(existing_schemas))

        # register schemas and credential definitions
        for schema in config_schemas:
            schema_name = schema["name"]
            schema_version = schema["version"]
            schema_key = schema_name + "::" + schema_version
            if schema_key not in existing_schemas:
                schema_attrs = []
                schema_descs = {}
                if isinstance(schema["attributes"], dict):
                    # each element is a dict
                    for attr, desc in schema["attributes"].items():
                        schema_attrs.append(attr)
                        schema_descs[attr] = desc
                else:
                    # assume it's an array
                    for attr in schema["attributes"]:
                        schema_attrs.append(attr)

                # register our schema(s) and credential definition(s)
                schema_request = {
                    "schema_name": schema_name,
                    "schema_version": schema_version,
                    "attributes": schema_attrs,
                }
                response = agent_post_with_retry(
                    agent_admin_url + "/schemas",
                    json.dumps(schema_request),
                    headers=ADMIN_REQUEST_HEADERS,
                )
                response.raise_for_status()
                schema_id = response.json()
            else:
                schema_id = {
                    "schema_id": existing_schemas[schema_key]["schema"]["id"]
                }
            app_config["schemas"]["SCHEMA_" + schema_name] = schema
            app_config["schemas"]["SCHEMA_" + schema_name + "_" +
                                  schema_version] = schema_id["schema_id"]
            print("Registered schema: ", schema_id)

            if (schema_key not in existing_schemas
                    or "cred_def" not in existing_schemas[schema_key]):
                cred_def_request = {"schema_id": schema_id["schema_id"]}
                response = agent_post_with_retry(
                    agent_admin_url + "/credential-definitions",
                    json.dumps(cred_def_request),
                    headers=ADMIN_REQUEST_HEADERS,
                )
                response.raise_for_status()
                credential_definition_id = response.json()
            else:
                credential_definition_id = {
                    "credential_definition_id":
                    existing_schemas[schema_key]["cred_def"]["id"]
                }
            app_config["schemas"]["CRED_DEF_" + schema_name + "_" +
                                  schema_version] = credential_definition_id[
                                      "credential_definition_id"]
            print("Registered credential definition: ",
                  credential_definition_id)

        # what is the TOB connection name?
        tob_connection_params = config_services["verifiers"]["bctob"]

        # check if we have a TOB connection
        response = requests.get(
            agent_admin_url + "/connections?alias=" +
            tob_connection_params["alias"],
            headers=ADMIN_REQUEST_HEADERS,
        )
        response.raise_for_status()
        connections = response.json()["results"]
        tob_connection = None
        for connection in connections:
            # check for TOB connection
            if connection["alias"] == tob_connection_params["alias"]:
                tob_connection = connection

        if not tob_connection:
            # if no tob connection then establish one
            tob_agent_admin_url = tob_connection_params["connection"][
                "agent_admin_url"]
            if not tob_agent_admin_url:
                raise RuntimeError(
                    "Error TOB_AGENT_ADMIN_URL is not specified, can't establish a TOB connection."
                )

            response = requests.post(
                tob_agent_admin_url + "/connections/create-invitation",
                headers=TOB_REQUEST_HEADERS,
            )
            response.raise_for_status()
            invitation = response.json()

            response = requests.post(
                agent_admin_url + "/connections/receive-invitation?alias=" +
                tob_connection_params["alias"],
                json.dumps(invitation["invitation"]),
                headers=ADMIN_REQUEST_HEADERS,
            )
            response.raise_for_status()
            tob_connection = response.json()

            print("Established tob connection: ", tob_connection)
            time.sleep(5)

        app_config["TOB_CONNECTION"] = tob_connection["connection_id"]
        synced[tob_connection["connection_id"]] = False

        for issuer_name, issuer_info in config_services["issuers"].items():
            # register ourselves (issuer, schema(s), cred def(s)) with TOB
            issuer_config = {
                "name": issuer_name,
                "did": app_config["DID"],
                "config_root": config_root,
            }
            issuer_config.update(issuer_info)
            issuer_spec = config.assemble_issuer_spec(issuer_config)

            credential_types = []
            for credential_type in issuer_info["credential_types"]:
                schema_name = credential_type["schema"]
                schema_info = app_config["schemas"]["SCHEMA_" + schema_name]
                ctype_config = {
                    "schema_name":
                    schema_name,
                    "schema_version":
                    schema_info["version"],
                    "issuer_url":
                    issuer_config["url"],
                    "config_root":
                    config_root,
                    "credential_def_id":
                    app_config["schemas"]["CRED_DEF_" + schema_name + "_" +
                                          schema_info["version"]],
                }
                credential_type['attributes'] = schema_info["attributes"]
                ctype_config.update(credential_type)
                ctype = config.assemble_credential_type_spec(
                    ctype_config, schema_info.get("attributes"))
                if ctype is not None:
                    credential_types.append(ctype)

            issuer_request = {
                "connection_id": app_config["TOB_CONNECTION"],
                "issuer_registration": {
                    "credential_types": credential_types,
                    "issuer": issuer_spec,
                },
            }

            print(json.dumps(issuer_request))
            response = requests.post(
                agent_admin_url + "/issuer_registration/send",
                json.dumps(issuer_request),
                headers=ADMIN_REQUEST_HEADERS,
            )
            response.raise_for_status()
            response.json()
            print("Registered issuer: ", issuer_name)

        synced[tob_connection["connection_id"]] = True
        print("Connection {} is synchronized".format(tob_connection))
Exemplo n.º 58
0
            optimizer.step()

            if step % print_every == 0:
                tqdm.write(
                    f"Epoch: {epoch+1}/{epochs}, Loss: {loss.item():.4f}")
                # save model
                new_path = os.path.join(
                    save_path,
                    f"best_model_epoch_{epoch}_acc_{loss.item():.4f}.pt")

                for filename in glob.glob(os.path.join(save_path, "*.pt")):
                    os.remove(filename)
                torch.save(model.state_dict(), new_path)
                summarywriter.close()

            # print("Epoch: {}/{}".format(e+1, epochs))
            # print("Loss: ", loss.item()) # avg batch loss at this point in training
            # valid_examples, valid_similarities = dataset.cosine_similarity(model.in_embed, device=device)
            # _, closest_idxs = valid_similarities.topk(6)

            # valid_examples, closest_idxs = valid_examples.to('cpu'), closest_idxs.to('cpu')
            # for ii, valid_idx in enumerate(valid_examples):
            #     closest_words = [int_to_vocab[idx.item()] for idx in closest_idxs[ii]][1:]
            #     print(int_to_vocab[valid_idx.item()] + " | " + ', '.join(closest_words))
            # print("...\n")


if __name__ == "__main__":
    hparams = load_config()
    main(hparams)
Exemplo n.º 59
0
# -*- coding: UTF-8 -*-
import sys
import os
from config import load_config

DEFAULT_CONF_DIR = load_config().DEFAULT_CCNET_CONF_DIR
SEAFILE_CONF_DIR = load_config().SEAFILE_CONF_DIR
DEFAULT_DIR = load_config().DEFAULT_DIR
SEASERVER_PY_PACKAGE = load_config().SEASERVER_PY_PACKAGE
os.environ['MCNET_CONF_DIR'] = DEFAULT_CONF_DIR
os.environ['MORPHFILE_CONF_DIR'] = SEAFILE_CONF_DIR
sys.path.append(SEASERVER_PY_PACKAGE)
reload(sys)

from morphserv import morphfile_api


def get_quota_usage(username):
    # https://github.com/haiwen/seahub/blob/master/seahub/api2/views.py#L310
    used = morphfile_api.get_user_self_usage(
        username) + morphfile_api.get_user_share_usage(username)
    total = morphfile_api.get_user_quota(username)
    if used > 0:
        used = float(used)
    else:
        used = 0
    if total > 0:
        total = float(total)
    else:
        total = float(2 * (1 << 30))
    return used, total
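
# Illustrative usage (the email address is a placeholder):
#   used, total = get_quota_usage("user@example.com")
#   print("quota used: %.1f%%" % (100.0 * used / total))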
Exemplo n.º 60
0
def main():
    # show_ask_message_box_only_once()

    if is_daily_first_run():
        logger.info("今日首次运行,尝试上报使用统计~")
        # Report once on the first use of each day (the API quota is only 30k calls, so minimize calls)
        # overall usage count
        increase_counter(this_version_global_usage_counter_name)
        increase_counter(global_usage_counter_name)

        # current user's usage count
        increase_counter(this_version_my_usage_counter_name)
        increase_counter(my_usage_counter_name)
    else:
        logger.info("今日已运行过,不再尝试上报使用统计")

    # Maximize the console window
    logger.info("尝试最大化窗口,打包exe可能会运行的比较慢")
    maximize_console()

    logger.warning("开始运行DNF蚊子腿小助手,ver={} {},powered by {}".format(
        now_version, ver_time, author))
    logger.warning(
        color("fg_bold_cyan") +
        "如果觉得我的小工具对你有所帮助,想要支持一下我的话,可以帮忙宣传一下或打开支持一下.png,扫码打赏哦~")

    check_djc_role_binding()

    # Load the configuration
    load_config("config.toml", "config.toml.local")
    cfg = config()

    if len(cfg.account_configs) == 0:
        logger.error("未找到有效的账号配置,请检查是否正确配置。ps:多账号版本配置与旧版本不匹配,请重新配置")
        exit(-1)

    check_all_skey_and_pskey(cfg)

    show_accounts_status(cfg, "启动时展示账号概览")

    # Try to create and join the fixed team in advance, so the week's first Xinyue task run also counts toward team points
    try_join_xinyue_team(cfg)

    # Run the main workflow
    run(cfg)

    # Try to claim the Xinyue team reward
    try_take_xinyue_team_award(cfg)

    # Try to send Sailiyam out to work
    try_xinyue_sailiyam_start_work(cfg)

    # show_lottery_status("运行完毕展示各账号抽卡卡片以及各礼包剩余可领取信息", cfg, need_show_tips=True)
    # auto_send_cards(cfg)
    # show_lottery_status("卡片赠送完毕后展示各账号抽卡卡片以及各礼包剩余可领取信息", cfg)

    show_accounts_status(cfg, "运行完毕展示账号概览")

    # Show the donation image after each successful run in normal mode
    show_support_pic(cfg)

    # Temporary code
    temp_code(cfg)

    # Show the helper's usage overview
    if cfg.common._show_usage:
        show_usage()

    # After all accounts are processed, check for updates
    check_update(cfg)