Example 1
def main():
    args = parse_cli()

    with open(args.configure, 'r') as f:
        config = yaml.safe_load(f)

    for clf_name, db_collection in config['experiment_name'].items():
        clf = user.clfs[clf_name]
        w = Worker(config['project_name'], db_collection,
                   objective_func, host=config['MongoDB']['host'], port=config['MongoDB']['port'],
                   loop_forever=args.loop)
        w.start_worker(clf=clf, X=user.X, y=user.y)
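
All of these examples read the same few keys from the YAML configuration: a project name, an experiment_name mapping from classifier name to MongoDB collection, and the MongoDB host and port. The layout below is inferred from those accesses, not a documented schema; a minimal, hypothetical sketch:

import yaml

# Hypothetical config layout, reconstructed from the keys the examples access.
SAMPLE_CONFIG = """
project_name: my_project
experiment_name:            # classifier name -> MongoDB collection
  random_forest: rf_experiment
  svm: svm_experiment
MongoDB:
  host: localhost
  port: 27017
"""

config = yaml.safe_load(SAMPLE_CONFIG)
for clf_name, db_collection in config['experiment_name'].items():
    print(clf_name, '->', db_collection)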
Example 2
def main():
    args = parse_cli()
    config = yread(args.conf)

    w = Worker(config['project_name'],
               config['experiment_name'],
               objective_random_sleep,
               host=config['MongoDB']['host'],
               port=config['MongoDB']['port'],
               loop_forever=args.loop)

    # do the tasks
    w.start_worker()
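
parse_cli() itself is not shown in any of these snippets. A minimal sketch of what it might look like, assuming a positional config-file argument (args.conf; Examples 1 and 6 use args.configure instead) and a --loop flag feeding loop_forever; the exact options are an assumption:

import argparse

def parse_cli():
    # Hypothetical parser matching the attributes used above (args.conf, args.loop).
    parser = argparse.ArgumentParser(description='Start a Worker from a YAML config.')
    parser.add_argument('conf', help='path to the YAML configuration file')
    parser.add_argument('--loop', action='store_true',
                        help='keep the worker alive and keep polling for new jobs (loop_forever)')
    return parser.parse_args()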
Example 3
def main():
    args = parse_cli()

    with open(args.conf, 'r') as f:
        config = yaml.safe_load(f)

    for clf_name, db_collection in config['experiment_name'].items():
        clf = user.clfs[clf_name]
        w = Worker(config['project_name'],
                   db_collection,
                   user.objective_func,
                   host=config['MongoDB']['host'],
                   port=config['MongoDB']['port'],
                   loop_forever=False)
        w.start_worker(clf=clf, X=user.X, y=user.y)
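
The objective passed to Worker is user code. Its exact signature depends on how Worker invokes it, which these snippets do not show; one plausible shape, assuming the keyword arguments given to start_worker (clf, X, y) are forwarded to the objective along with the sampled hyperparameters, is sketched below. Everything here is hypothetical, including the parameter names:

from sklearn.base import clone
from sklearn.model_selection import cross_val_score

def objective_func(params, clf=None, X=None, y=None):
    # Hypothetical objective: apply the candidate hyperparameters to a copy
    # of the classifier and score it with cross-validation.
    model = clone(clf).set_params(**params)
    return cross_val_score(model, X, y, cv=3).mean()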
Example 4
def main():
    args = parse_cli()

    with open(args.conf, 'r') as f:
        config = yaml.safe_load(f)

    for clf_name, db_collection in config['experiment_name'].items():
        clf = user.clfs[clf_name]

        # Create the Worker with loop_forever=False for multi-experiment mode:
        # the worker exits once all jobs in the current experiment have been
        # completed, so the loop can move on to the next experiment.
        w = Worker(config['project_name'], db_collection,
                   user.objective, host=config['MongoDB']['host'],
                   port=config['MongoDB']['port'],
                   loop_forever=False)
        w.start_worker(clf=clf, X=user.X, y=user.y)
Example 5
def main():
    args = parse_cli()

    with open(args.conf, 'r') as f:
        config = yaml.safe_load(f)

    for clf_name, db_collection in config['experiment_name'].items():
        clf = user.clfs[clf_name]

        # Create the Worker with loop_forever=True for single-experiment mode:
        # the worker never exits and keeps polling the current experiment
        # (db_collection) for new jobs. Single-experiment mode is therefore
        # not (yet) suitable for running jobs from multiple experiments.
        w = Worker(config['project_name'], db_collection,
                   user.objective, host=config['MongoDB']['host'],
                   port=config['MongoDB']['port'],
                   loop_forever=True)
        w.start_worker(clf=clf, X=user.X, y=user.y)
Example 6
def main():
    args = parse_cli()

    with open(args.configure, "r") as f:
        config = yaml.safe_load(f)

    for clf_name, db_collection in config["experiment_name"].items():
        # clf = user.clfs[clf_name]
        # w = Worker(config['project_name'], db_collection,
        #            objective_func, host=config['MongoDB']['host'], port=config['MongoDB']['port'],
        #            loop_forever=args.loop)
        w = Worker(
            config["project_name"],
            db_collection,
            objective_func,
            host=config["MongoDB"]["host"],
            port=config["MongoDB"]["port"],
            loop_forever=False,
        )

        w.start_worker()
Example 7
logger.info('Running in multi-experiment mode (reads multiple experiment names from *.experiment files)...')
loop = False


# Loop over the experiment files (generated by coordinator.py):
# read each one, extract the project and experiment names,
# then launch a Worker instance on that experiment.
for expt_file in expt_files:

    logger.info('reading experiment definition file {}'.format(expt_file))
    dat = load_experiment_file(expt_file)

    project_name = dat['project']
    experiment_name = dat['experiment']

    # The classifier name is stored as a colon-delimited prefix of the
    # experiment name: extract it and use it to choose which clf object
    # to pass to the Worker.
    clf_name = experiment_name.split(':')[0]
    clf = user.clfs[clf_name]

    logger.info("starting worker on project {}, experiment {}".format(project_name, experiment_name))
    
    # Note: we use loop_forever=False (via loop) so this worker works through
    # the jobs currently queued in the experiment and then exits, allowing a
    # new Worker object to handle the next experiment (if there is one).
    w = Worker(project_name, experiment_name, clf, user.X, user.y, user.objective,
               host=args.host, port=args.port, loop_forever=loop)
    w.start_worker()
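
load_experiment_file() is also user/utility code not shown here. The loop above only requires it to return a mapping with 'project' and 'experiment' keys, with the classifier name stored as a colon-delimited prefix of the experiment name. A minimal sketch under the assumption that the *.experiment files written by coordinator.py are plain YAML (the file format itself is a guess):

import yaml

def load_experiment_file(path):
    # Hypothetical reader for a *.experiment file, assumed to be YAML such as:
    #   project: my_project
    #   experiment: random_forest:rf_sweep_01
    with open(path, 'r') as f:
        return yaml.safe_load(f)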