Example 1
def test_load_name_order():
    env = {
        'HOME': '/home/user',
        'LOCALAPPDATA': 'C:/Users/user/AppData/Local'
    }

    with patch('confidence.io.path') as mocked_path, patch('confidence.io.environ', env):
        # hard-code user-expansion, unmock join
        mocked_path.expanduser.side_effect = _patched_expanduser
        mocked_path.join.side_effect = path.join
        # avoid actually opening files that might unexpectedly exist
        mocked_path.exists.return_value = False

        assert len(load_name('foo', 'bar')) == 0

    mocked_path.exists.assert_has_calls([
        call('/etc/xdg/foo.yaml'),
        call('/etc/xdg/bar.yaml'),
        call('/etc/foo.yaml'),
        call('/etc/bar.yaml'),
        call('/Library/Preferences/foo.yaml'),
        call('/Library/Preferences/bar.yaml'),
        call('/home/user/.config/foo.yaml'),
        call('/home/user/.config/bar.yaml'),
        call('/home/user/Library/Preferences/foo.yaml'),
        call('/home/user/Library/Preferences/bar.yaml'),
        call('C:/Users/user/AppData/Local/foo.yaml'),
        call('C:/Users/user/AppData/Local/bar.yaml'),
        call('/home/user/.foo.yaml'),
        call('/home/user/.bar.yaml'),
        call('./foo.yaml'),
        call('./bar.yaml'),
    ], any_order=False)
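
Note: this test (and several below) installs a `_patched_expanduser` helper as the `side_effect` of the mocked `expanduser`. Its definition is not part of these examples; a minimal sketch of what such a helper might look like, assuming it only needs to expand `~` against the fake home directory used by the mocked environment:

def _patched_expanduser(a_path):
    # hypothetical stand-in: replace the first '~' with the fake home
    # directory assumed by the mocked environment in these tests
    return a_path.replace('~', '/home/user', 1)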
Example 2
def test_load_name_multiple():
    test_path = path.join(test_files, '{name}.{extension}')

    # bar has precedence over foo
    subject = load_name('foo', 'fake', 'bar', load_order=(test_path,))

    assert len(subject.semi.overlapping) == 2
    assert subject.semi.overlapping.foo is True
    assert subject.semi.overlapping.bar is False
    assert subject.overlapping.fully == 'bar'

    # foo has precedence over bar
    subject = load_name('fake', 'bar', 'foo', load_order=(test_path,))

    assert len(subject.semi.overlapping) == 2
    assert subject.semi.overlapping.foo is True
    assert subject.semi.overlapping.bar is False
    assert subject.overlapping.fully == 'foo'
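
The `foo.yaml` and `bar.yaml` fixtures themselves are not shown in these examples. For the assertions above to hold they would need to overlap roughly as follows, expressed here as Python dicts purely for illustration (the real fixture files may contain more keys):

# hypothetical fixture contents, illustrative only: both names define
# semi.overlapping and overlapping.fully, so whichever name is loaded
# last wins for the fully overlapping key
foo_yaml = {
    'semi': {'overlapping': {'foo': True}},
    'overlapping': {'fully': 'foo'},
}
bar_yaml = {
    'semi': {'overlapping': {'bar': False}},
    'overlapping': {'fully': 'bar'},
}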
Example 3
def test_load_name_envvar_file():
    env = {
        'FOO_CONFIG_FILE': path.join(test_files, 'foo.yaml'),
        'BAR_CONFIG_FILE': path.join(test_files, 'bar.yaml'),
    }

    with patch('confidence.io.environ', env):
        subject = load_name('foo', 'bar', load_order=(read_envvar_file,))

    assert len(subject.semi.overlapping) == 2
    assert subject.semi.overlapping.foo is True
    assert subject.semi.overlapping.bar is False
    assert subject.overlapping.fully == 'bar'
Example 4
def test_load_name_xdg_config_dirs_fallback():
    with patch('confidence.io.path') as mocked_path, patch('confidence.io.loadf') as mocked_loadf, patch('confidence.io.environ', {}):
        # hard-code path separator, unmock join
        mocked_path.expanduser.side_effect = _patched_expanduser
        mocked_path.pathsep = ':'
        mocked_path.join.side_effect = path.join
        mocked_path.exists.return_value = True

        assert len(load_name('foo', 'bar', load_order=(read_xdg_config_dirs,))) == 0

    mocked_loadf.assert_has_calls([
        call('/etc/xdg/foo.yaml', default=NotConfigured),
        call('/etc/xdg/bar.yaml', default=NotConfigured),
    ], any_order=False)
Example 5
def main():
    client = BlackVueClient(config=confidence.Configuration(
        confidence.Configuration({'endpoint': '10.99.77.1'}),
        confidence.load_name('blackvue'),
        confidence.Configuration(docopt(__doc__))
    ))
    options = docopt(__doc__)
    if options['--download-all-recordings']:
        client.download_all_recordings(out=options['<foldername>'])
    if options['--download-missing-recordings']:
        if options['--continuously']:
            print("Continous syncing will be included in the next version")
        else:
            print("Incremental downloads will be included in the next version")
Example 6
def test_load_name_envvars():
    env = {
        'FOO_KEY': 'foo',
        'FOO_NS_KEY': 'value',
        'BAR_KEY': 'bar',
        'BAR_N__S_KEY': 'space',
    }

    with patch('confidence.io.environ', env):
        subject = load_name('foo', 'bar', load_order=(read_envvars,))

    assert subject.key == 'bar'
    assert subject.ns.key == 'value'
    assert subject.n_s.key == 'space'
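
This test pins down how environment variable names appear to map onto configuration keys: the `FOO_`/`BAR_` prefix selects the configuration name, single underscores separate namespace levels, and a doubled underscore survives as a literal underscore inside one key. A rough illustration of that mapping (not the library's actual implementation):

def envvar_to_key(name, prefix):
    # illustrative only: strip the prefix, treat '__' as a literal
    # underscore and a single '_' as a namespace separator
    rest = name[len(prefix):].lower().replace('__', '\0')
    return '.'.join(part.replace('\0', '_') for part in rest.split('_'))

assert envvar_to_key('FOO_NS_KEY', 'FOO_') == 'ns.key'
assert envvar_to_key('BAR_N__S_KEY', 'BAR_') == 'n_s.key'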
Example 7
def test_load_name_xdg_config_home_fallback():
    env = {
        'HOME': '/home/user'
    }

    with patch('confidence.io.path') as mocked_path, patch('confidence.io.loadf') as mocked_loadf, patch('confidence.io.environ', env):
        # hard-code user-expansion, unmock join
        mocked_path.expanduser.side_effect = _patched_expanduser
        mocked_path.join.side_effect = path.join
        mocked_path.exists.return_value = True
        mocked_loadf.return_value = NotConfigured

        assert len(load_name('foo', 'bar', load_order=(read_xdg_config_home,))) == 0

    mocked_loadf.assert_has_calls([
        call('/home/user/.config/foo.yaml', default=NotConfigured),
        call('/home/user/.config/bar.yaml', default=NotConfigured),
    ], any_order=False)
Example 8
def test_load_name_xdg_config_home():
    env = {
        'XDG_CONFIG_HOME': '/home/user/.not-config',
        'HOME': '/home/user'
    }

    with patch('confidence.io.path') as mocked_path, patch('confidence.io.environ', env):
        # hard-code user-expansion, unmock join
        mocked_path.expanduser.side_effect = _patched_expanduser
        mocked_path.join.side_effect = path.join
        # avoid actually opening files that might unexpectedly exist
        mocked_path.exists.return_value = False

        assert len(load_name('foo', 'bar', load_order=(read_xdg_config_home,))) == 0

    mocked_path.exists.assert_has_calls([
        call('/home/user/.not-config/foo.yaml'),
        call('/home/user/.not-config/bar.yaml'),
    ], any_order=False)
Example 9
def run(event, context):
    config = confidence.load_name('polarflowtorunkeeper')
    current_time = datetime.datetime.now().time()
    name = context.function_name
    database = pymongo.MongoClient(config.mongodb)
    synced_runs = database['polar-flow-to-runkeeper']['synced-runs'].find_one(
    ) or {
        'synced': []
    }
    synced_runs = synced_runs['synced']
    logging.info(json.dumps(synced_runs))
    logger.info("Function " + name + " runs at " + str(current_time))
    flow = PolarFlowClient()
    flow.login(config.polarflow.username, config.polarflow.password)
    runkeeper = RunkeeperClient()
    runkeeper.login(config.runkeeper.username, config.runkeeper.password)
    year = datetime.datetime.now().year
    activities = flow.get('https://flow.polar.com/training/getCalendarEvents',
                          params={
                              'start': f'01.01.{year}',
                              'end': f'31.12.{year}'
                          }).json()
    logging.info(f'{len(activities)} retrieved from Polar Flow')
    activities = list(
        filter(lambda x: x['listItemId'] not in synced_runs, activities))
    logging.info(f'{len(activities)} not yet in Runkeeper')
    for activity in activities:
        tcx_export = flow.get(
            'https://flow.polar.com/api/export/training/tcx/' +
            str(activity['listItemId']))
        response = runkeeper.post(
            'https://runkeeper.com/trackMultipleFileUpload',
            data={'handleUpload': 'handleUpload'},
            files={
                'trackFiles':
                ('import.tcx', tcx_export.text, 'application/octet-stream')
            })
        logger.info(f'{str(activity["listItemId"])} returned {response.text}')
        synced_runs.append(activity['listItemId'])
        database['polar-flow-to-runkeeper']['synced-runs'].delete_one({})
        database['polar-flow-to-runkeeper']['synced-runs'].insert_one(
            {'synced': synced_runs})
Example 10
def test_load_name_overlapping_envvars():
    env = {
        'FOO_KEY': 'foo',
        'FOO_NS_KEY': 'value',
        'BAR_KEY': 'bar',
        'FOO_CONFIG_FILE': path.join(test_files, 'foo.yaml'),
        'BAR_CONFIG_FILE': path.join(test_files, 'bar.yaml'),
    }

    with patch('confidence.io.environ', env):
        subject = load_name('foo', 'bar', load_order=loaders(Locality.environment))

    assert subject.key == 'bar'
    assert subject.ns.key == 'value'
    assert subject.foo.config.file is NotConfigured
    assert subject.bar.config.file is NotConfigured
    assert subject.config.file is NotConfigured
    assert len(subject.semi.overlapping) == 2
    assert subject.semi.overlapping.foo is True
    assert subject.semi.overlapping.bar is False
    assert subject.overlapping.fully == 'bar'
Example 11
def test_load_name_xdg_config_dirs():
    env = {
        'XDG_CONFIG_DIRS': '/etc/xdg-desktop/:/etc/not-xdg',
    }

    with patch('confidence.io.path') as mocked_path, patch('confidence.io.environ', env):
        # hard-code path separator, unmock join
        mocked_path.expanduser.side_effect = _patched_expanduser
        mocked_path.pathsep = ':'
        mocked_path.join.side_effect = path.join
        # avoid actually opening files that might unexpectedly exist
        mocked_path.exists.return_value = False

        assert len(load_name('foo', 'bar', load_order=(read_xdg_config_dirs,))) == 0

    mocked_path.exists.assert_has_calls([
        # this might not be ideal (/etc/not-xdg should maybe show up twice first), but also not realistic…
        call('/etc/not-xdg/foo.yaml'),
        call('/etc/xdg-desktop/foo.yaml'),
        call('/etc/not-xdg/bar.yaml'),
        call('/etc/xdg-desktop/bar.yaml'),
    ], any_order=False)
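
The expected call order implies that the loader splits `XDG_CONFIG_DIRS` on `path.pathsep` and probes the entries in reverse, so that directories listed first are loaded last and thus take precedence. A rough reconstruction of that probe order (illustrative only, not the library's actual code):

# illustrative only: reproduce the probe order the assertions expect
xdg_config_dirs = '/etc/xdg-desktop/:/etc/not-xdg'
for name in ('foo', 'bar'):
    for config_dir in reversed(xdg_config_dirs.split(':')):
        print('{}/{}.yaml'.format(config_dir.rstrip('/'), name))
# prints /etc/not-xdg/foo.yaml, /etc/xdg-desktop/foo.yaml,
#        /etc/not-xdg/bar.yaml, /etc/xdg-desktop/bar.yaml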
Example 12
def test_load_name_envvar_dir():
    env = {
        'PROGRAMDATA': 'C:/ProgramData',
        'APPDATA': 'D:/Users/user/AppData/Roaming'
    }

    # only the envvar dir loaders are partials in DEFAULT_LOAD_ORDER
    load_order = [loader for loader in DEFAULT_LOAD_ORDER if isinstance(loader, partial)]

    with patch('confidence.io.path') as mocked_path, patch('confidence.io.loadf') as mocked_loadf, patch('confidence.io.environ', env):
        # hard-code user-expansion, unmock join
        mocked_path.expanduser.side_effect = _patched_expanduser
        mocked_path.join.side_effect = path.join
        mocked_path.exists.return_value = True
        mocked_loadf.return_value = NotConfigured

        assert len(load_name('foo', 'bar', load_order=load_order)) == 0

    mocked_loadf.assert_has_calls([
        call('C:/ProgramData/foo.yaml', default=NotConfigured),
        call('C:/ProgramData/bar.yaml', default=NotConfigured),
        call('D:/Users/user/AppData/Roaming/foo.yaml', default=NotConfigured),
        call('D:/Users/user/AppData/Roaming/bar.yaml', default=NotConfigured),
    ], any_order=False)
Example 13
        if len(arguments) == 0:
            return self.show_aliases()

        if len(arguments) == 2:
            return self.create_alias(*arguments)

    def run(self, arguments):
        actions = {
            'log': self.run_log,
            'show': self.run_show,
            'alias': self.run_alias,
        }

        # determine default action: no arguments at all = show, any arguments = log or explicit action
        if arguments:
            action = 'log'
            if arguments[0] in actions:
                action = arguments.pop(0)
        else:
            action = 'show'

        with self.database:
            ensure_db(self.database)

            actions[action](arguments)


if __name__ == '__main__':
    Session(confidence.load_name('hours')).run(sys.argv[1:])
Example 14
    with app.pgcon.cursor() as cur:
        cur.execute('SELECT SQRT(%s)', (num,))
        output = cur.fetchone()[0]

    print(f'the square root of {num} is {output}')


def get_operations(app):
    return [
        Operation(
            "create_database",
            app.setup_database,
            run_by_default=False,
        ),
        Operation(
            "clean",
            app.clear_results,
        ),
        Operation(
            "calculate_square_root",
            partial(calculate_square_root, app, 16),
        ),
    ]


if __name__ == '__main__':
    cfg = confidence.load_name('project', 'local')

    app = PostgresApp('my_project', get_operations, cfg.database.credentials, cfg.database.schema, cfg.resultdir)
    app.run()
Example 15
                experiment.get_scores_from_file(
                    'results_cal_pairs.txt',
                    ((pair.first.path, pair.second.path)
                     for pair in calibration_pairs)))
        else:
            p = lr_systems[category].scorer.predict_proba(calibration_pairs)
        assert len(p[0]) == 2
        # Remove invalid scores (-1) where no face was found on one of the images in the pair
        p_valid, calibration_pairs_valid = get_valid_scores(
            p[:, 1], calibration_pairs)
        y_cal = [int(pair.same_identity) for pair in calibration_pairs_valid]
        if 0 < np.sum(y_cal) < len(calibration_pairs_valid):
            lr_systems[category].calibrator.fit(X=p_valid, y=y_cal)
            cal_fraction_valid[category] = len(calibration_pairs_valid) / len(
                calibration_pairs)
        else:
            del lr_systems[category]

    return evaluate(experiment=experiment,
                    lr_systems=lr_systems,
                    test_pairs_per_category=test_pairs_per_category,
                    make_plots_and_save_as=make_plots_and_save_as,
                    cal_fraction_valid=cal_fraction_valid)


if __name__ == '__main__':
    config = confidence.load_name('lr_face')
    parser = parser_setup()
    args = parser.parse_args()
    run(**vars(args))
Example 16
def test_load_name_single():
    test_path = path.join(test_files, '{name}.{extension}')

    _assert_values(load_name('config', load_order=(test_path,)))
    _assert_values(load_name('config', load_order=(test_path,), extension='json'))
Example 17
    # log model
    # logger.info("==Model==")
    # model.summary(print_fn=lambda x: logger.info(x))
    # create callbacks
    # callbacks = create_callbacks(int(arguments['--batch']), validation_gen, logdir)

    # fit model
    model.fit(x, y, batch_size)
    # model.fit_generator(train_gen, epochs=int(arguments["--epochs"]), validation_data=validation_gen,
    #                     callbacks=callbacks, verbose=1, shuffle=False)
    model.predict()
    # store final model.
    model.save(join(LOGDIR, 'model.hdf5'))


if __name__ == '__main__':
    # Parse command line arguments
    arguments = docopt(__doc__, version='rna 0.2')
    # Add to logging
    logger.info('==Command line arguments==')
    logger.info(yaml.dump(arguments, default_flow_style=False))

    # Read config
    config = load_name('rna')
    # Add to logging
    logger.info('==Configuration==')
    logger.info(yaml.dump(config._source, default_flow_style=False))

    # run main function
    main(arguments, config, LOGDIR)
Example 18
    exp.parameter('repeats', 10)

    try:
        exp.runDefaults()
        # exp.runSearch('max_n_of_pairs_per_class')
        # exp.runFullGrid(['n_frequent_words', 'max_n_of_pairs_per_class'])

    except Exception as e:
        LOG.fatal(e.args[1])
        LOG.fatal(e.args[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, file=sys.stdout)


if __name__ == '__main__':
    config = confidence.load_name('authorship', 'local')
    warnings.filterwarnings("error")
    np.random.seed(0)

    parser = argparse.ArgumentParser()
    parser.add_argument('-v', help='increases verbosity', action='count', default=0)
    parser.add_argument('-q', help='decreases verbosity', action='count', default=0)
    parser.add_argument('--data', metavar='FILENAME',
                        help=f'dataset to be used; index file as generated by `sha256sum` (default: {config.data})',
                        default=config.data)
    parser.add_argument('--vocalise-data', metavar='FILENAME',
                        help=f'vocalize output to be used; (default: {config.vocalise_data})',
                        default=config.vocalise_data)
    parser.add_argument('--output-directory', '-o', metavar='DIRNAME',
                        help=f'path to generated output files (default: {config.resultdir})', default=config.resultdir)
    args = parser.parse_args()
Example 19
        games = await games.json()
        return {
            game['appid']: {
                'name': game.get('name'),
                'logo': game.get('img_logo_url')
            }
            for game in games['response']['games']
        }

    async def games(self, request, steam_id):
        return json(await self.get_games(steam_id))

    async def get_intersecting_games(self, steam_id, *friends):
        games = await self.get_games(steam_id)
        appids = set(games.keys())
        for friend in friends:
            appids &= set((await self.get_games(friend)).keys())

        return {
            appid: info
            for appid, info in games.items() if appid in appids
        }

    async def intersect_games(self, request, steam_id1, steam_id2):
        return json(await self.get_intersecting_games(steam_id1, steam_id2))


if __name__ == '__main__':
    config = confidence.load_name('stoom')
    Stoom(config).run()