Code Example #1
def run_testcase(testcase, args):
    testcase = path(testcase).name
    config = ConfigParser()
    config.read(['default.cfg', 'main.cfg', args.config])
    # override config values if they were provided on the command line
    update_config(config, 'score', args.score)
    update_config(config, 'solve', args.solve)

    sc_fn = get_function('score', config)  # get the scorer function
    run_folder = setup_run_folder(sys.argv,
                                  config)  # where to save run information
    init_log(testcase)
    tear_down_streams = setup_stdstreams(
        run_folder)  # get the teardown function

    log.debug(f'Running testcase {testcase}')

    with open('in/' + testcase + '.in') as f:
        inp = f.read()
    solve_args = {}
    for kv in args.solve_args.split(','):
        if '=' in kv:
            k, v = kv.split('=')
            solve_args[k] = v
    solve_args['log'] = log
    solve_args['testcase'] = testcase
    solve_args['folder'] = run_folder

    get_ans = get_ans_fn(config, inp)  # get the solver function

    def run(solve_args):
        solve_args_orig = dict(solve_args)
        ans = get_ans(solve_args)  # run the solver function and get the answer
        process(inp, ans, solve_args_orig,
                sc_fn)  # add to best submission or not

    rounds = 1 if args.seed else args.iterations
    for i in range(rounds):
        seed = args.seed if args.seed else ri(0, 10**6 - 1)
        log.debug('seed:  {:<6}, test#: {}'.format(seed, i))
        sa = dict(solve_args)
        sa['iter'] = i
        sa['seed'] = seed
        run(sa)

    tear_down_streams()
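
The update_config helper called near the top of this example is not part of the listing. A minimal sketch of what it could look like, assuming it only overrides the section's 'module' entry when a command-line value was actually given (the key name is a guess, based on config.get('solve', 'module') in the next example):

# Hypothetical sketch of the update_config(config, section, value) helper
# called above; the real implementation is not shown in this listing.
def update_config(config, section, value):
    # Assumption: a command-line value, when given, replaces the section's
    # 'module' entry (the key name is a guess).
    if value:
        config.set(section, 'module', value)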
Code Example #2
def run_testcase(testcase, args):
    testcase = path(testcase).name
    config = ConfigParser()
    config.read(['default.cfg', 'main.cfg', args.config])

    update_config(config, 'score', args.score)
    update_config(config, 'solve', args.solve)

    sc_fn = get_function('score', config)
    run_folder = setup_run_folder(sys.argv, config, testcase)
    init_log(testcase)
    tear_down_streams = setup_stdstreams(run_folder)

    log.debug('Running testcase {}'.format(testcase))

    with open('in/' + testcase + '.in') as f:
        inp = f.read()
    solve_args = {}
    for kv in args.solve_args.split(','):
        if '=' in kv:
            k, v = kv.split('=')
            solve_args[k] = v
    solve_args['log'] = log
    solve_args['testcase'] = testcase
    solve_args['folder'] = run_folder
    orig_module_name = config.get('solve', 'module')
    solve_args['module_name'] = sanitize_module_name(orig_module_name)

    get_ans = get_ans_fn(config, inp)

    def run(solve_args):
        solve_args_orig = dict(solve_args)
        ans = get_ans(solve_args)
        process(inp, ans, solve_args_orig, sc_fn)

    rounds = 1 if args.seed else args.iterations
    for i in range(rounds):
        seed = args.seed if args.seed else ri(0, 10**6 - 1)
        log.debug('seed:  {:<6}, test#: {}'.format(seed, i))
        sa = dict(solve_args)
        sa['iter'] = i
        sa['seed'] = seed
        run(sa)

    tear_down_streams()
Code Example #3
File: unibox.py  Project: iwisunny/syncMon
def get_app_version():
    # generate the version number automatically
    ver = util.parse_config(ini_file, 'UNIBOX')

    if ver and ver['version'] != '':
        return ver['version']
    else:
        try:
            git_tags = os.popen('git tag').read().strip('\n')

            if git_tags:
                # take the last (most recent) tag
                lastest_ver = git_tags.split('\n')[-1]
                # persist the latest version to the config file
                util.update_config(ini_file, {'version': lastest_ver}, 'UNIBOX')
                return lastest_ver
        except Exception:
            # fall back to a fake initial version
            return 'v0.0.1'
Code Example #4
File: unibox.py  Project: iwisunny/syncMon
def set_app_version():
    try:
        git_tags = os.popen('git tag').read().strip('\n')
        if git_tags:
            # take the last (most recent) tag
            lastest_ver = git_tags.split('\n')[-1]
            # persist the latest version to the config file
            return util.update_config(ini_file, {'version': lastest_ver}, 'UNIBOX')

    except Exception:
        return False
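
Both unibox.py examples rely on util.update_config(ini_file, {...}, section) to write a dict of values into one section of an ini file. That util module is not shown here; a minimal sketch of such a helper, assuming Python 3's configparser and a truthy return value on success (set_app_version returns the result directly):

# Hypothetical sketch of util.update_config as called above; the actual util
# module of iwisunny/syncMon is not shown in this listing.
import configparser

def update_config(ini_file, values, section):
    config = configparser.ConfigParser()
    config.read(ini_file)
    if not config.has_section(section):
        config.add_section(section)
    for key, value in values.items():
        config.set(section, key, str(value))
    with open(ini_file, 'w') as f:
        config.write(f)
    return True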
Code Example #5
def get_app_version():
    # generate the version number automatically
    ver = util.parse_config(ini_file, 'UNIBOX')

    if ver and ver['version'] != '':
        return ver['version']
    else:
        try:
            git_tags = os.popen('git tag').read().strip('\n')

            if git_tags:
                # take the last (most recent) tag
                lastest_ver = git_tags.split('\n')[-1]
                # persist the latest version to the config file
                util.update_config(ini_file, {'version': lastest_ver},
                                   'UNIBOX')
                return lastest_ver
        except Exception:
            # fall back to a fake initial version
            return 'v0.0.1'
Code Example #6
def set_app_version():
    try:
        git_tags = os.popen('git tag').read().strip('\n')
        if git_tags:
            # take the last (most recent) tag
            lastest_ver = git_tags.split('\n')[-1]
            # persist the latest version to the config file
            return util.update_config(ini_file, {'version': lastest_ver},
                                      'UNIBOX')

    except Exception:
        return False
Code Example #7
    return (m.group(1) if m else path(ans).name).join(in_f)


sub_f = ('submission/', '.ans')
ans_f = ('ans/', '.ans')
in_f = ('in/', '.in')
fname_re = re.compile(r'([A-Za-z0-9_]+)_(\d+)_(\d+|None)')

if __name__ == '__main__':
    args = get_args()
    if args.rescore:
        clean_max()
    config = ConfigParser()
    config.read(['default.cfg', 'main.cfg', args.config])
    update_config(config, 'score', args.score)

    sc_fn = get_function('score', config)

    if not (args and (args.inp or args.ans)):
        file_lst = glob('*'.join(ans_f if args.rescore else sub_f))
        files = [(ans2in(ans), ans) for ans in file_lst]
    else:
        if not args.ans:
            pth = path(args.inp)
            args.inp = pth.name.join(in_f)
            args.ans = pth.name.join(sub_f)
        files = [(args.inp, args.ans)]

    for inpf, ansf in files:
        ipth, apth = path(inpf), path(ansf)
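
This snippet starts mid-function: only the final return line of the answer-to-input mapping is visible. A hypothetical reconstruction of the full function, under the assumption that fname_re is matched against the answer file name to recover the testcase it belongs to:

# Hypothetical reconstruction: only the return line appears in the listing,
# so the name ans2in and the matching step are assumptions.
def ans2in(ans):
    # Answer files look like '<testcase>_<iter>_<seed>.ans' (see fname_re);
    # recover the testcase name and map it back to 'in/<testcase>.in'.
    m = fname_re.match(path(ans).name)
    return (m.group(1) if m else path(ans).name).join(in_f)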
Code Example #8
File: score.py  Project: ahnlabb/hashcode-template
    return (m.group(1) if m else path(ans).name).join(in_f)


sub_f = ('submission/', '.ans')
ans_f = ('ans/', '.ans')
in_f = ('in/', '.in')
fname_re = re.compile(r'([A-Za-z0-9_]+)_(\d+)_(\d+|None)')

if __name__ == '__main__':
    args = get_args()
    if args.rescore:
        clean_max()
    config = ConfigParser()
    config.read(['main.cfg', args.config])
    update_config(config, args.score)

    sc_fn = get_function('score', config)

    if not (args and (args.inp or args.ans)):
        file_lst = glob('*'.join(ans_f if args.rescore else sub_f))
        files = [(ans2in(ans), ans) for ans in file_lst]
    else:
        if not args.ans:
            pth = path(args.inp)
            args.inp = pth.name.join(in_f)
            args.ans = pth.name.join(sub_f)
        files = [(args.inp, args.ans)]

    for inpf, ansf in files:
        ipth, apth = path(inpf), path(ansf)
Code Example #9
def main():
    # parse args and get configs
    args = parser.parse_args()
    logging.set_verbosity(logging.INFO)
    random.seed(args.seed)
    # reload model from checkpoint or train from scratch
    if args.reload_ckpt != "None":
        checkpoint_path = os.path.join(local_settings.MODEL_PATH,
                                       args.all_checkpoints_folder)
        checkpoint_folders = os.listdir(checkpoint_path)
        checkpoint_folder = [
            f for f in checkpoint_folders if args.reload_ckpt in f
        ]
        if len(checkpoint_folder) == 0:
            raise Exception("No matching folder found.")
        elif len(checkpoint_folder) > 1:
            logging.info(checkpoint_folder)
            raise Exception("More than one matching folder found.")
        else:
            checkpoint_folder = checkpoint_folder[0]
            logging.info("Restoring from {}".format(checkpoint_folder))
        checkpoint_dir = os.path.join(checkpoint_path, checkpoint_folder)

        if not args.overwrite_configs:
            # reload configs from file
            with open(os.path.join(checkpoint_dir, "hparams.pkl"), 'rb') as f:
                config_dict = pickle.load(f)
        else:
            # get configs
            config_dict = util.get_config(args.config)
            config_dict = util.update_config(config_dict, args)
    else:
        # get configs
        config_dict = util.get_config(args.config)
        config_dict = util.update_config(config_dict, args)

    config_dict_copy = copy.deepcopy(config_dict)
    config = util.config_to_namedtuple(config_dict)

    # Initialize the repo
    logging.info("==> Creating repo..")
    exp_repo = repo.ExperimentRepo(local_dir_name=config.local_json_dir_name,
                                   root_dir=local_settings.MODEL_PATH)

    if args.reload_ckpt != "None":
        exp_id = config_dict["id"]
    else:
        exp_id = None

    # Create new experiment
    exp_id = exp_repo.create_new_experiment(config.dataset, config_dict_copy,
                                            exp_id)
    config_dict_copy["id"] = exp_id

    # Set up model directory
    current_time = datetime.datetime.now().strftime(r"%y%m%d_%H%M")
    ckpt_dir_name = args.all_checkpoints_folder if not DEBUG else 'checkpoints_tmp'
    ckpt_dir = os.path.join(local_settings.MODEL_PATH, ckpt_dir_name)
    os.makedirs(ckpt_dir, exist_ok=True)
    if args.reload_ckpt != "None":
        model_dir = checkpoint_dir
    else:
        model_dir = os.path.join(ckpt_dir,
                                 "ckpt_{}_{}".format(current_time, exp_id))

    # Save hyperparameter settings
    os.makedirs(model_dir, exist_ok=True)
    if not os.path.exists(os.path.join(model_dir, "hparams.json")):
        with open(os.path.join(model_dir, "hparams.json"), 'w') as f:
            json.dump(config_dict_copy, f, indent=2, sort_keys=True)
        with open(os.path.join(model_dir, "hparams.pkl"), 'wb') as f:
            pickle.dump(config_dict_copy, f)

    # Set optimizers
    # learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
    #     config.learning_rate, config.decay_every,
    #     config.decay_base, staircase=True)

    # learning rate = 0.02 in paper
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.02)

    if args.reload_ckpt != "None":
        # TODO: fix this hack
        epoch_start = int(
            sorted([f for f in os.listdir(checkpoint_dir)
                    if 'ckpt-' in f])[-1].split('ckpt-')[1].split('.')[0])
        init_gs = 0
    else:
        epoch_start = 0
        init_gs = 0

    global_step = tf.Variable(initial_value=init_gs,
                              name="global_step",
                              trainable=False,
                              dtype=tf.int64)

    # Get model
    model_domain = get_model(config.name_classifier_domain, config)
    model_label = get_model(config.name_classifier_label, config)

    # Get datasets
    if DEBUG:
        num_batches = 5
    else:
        num_batches = None

    ds_train1 = _get_dataset(
        config.dataset,
        model_label,
        split=tfds.Split.TRAIN.subsplit(tfds.percent[:50]),
        batch_size=tf.cast(config.batch_size / 2, tf.int64),
        num_batches=num_batches,
        domain=tf.constant(0),
        e=0.2)

    ds_train2 = _get_dataset(
        config.dataset,
        model_label,
        split=tfds.Split.TRAIN.subsplit(tfds.percent[-50:]),
        batch_size=tf.cast(config.batch_size / 2, tf.int64),
        num_batches=num_batches,
        domain=tf.constant(1),
        e=0.1)

    ds_val = _get_dataset(config.dataset,
                          model_label,
                          split=tfds.Split.TEST,
                          batch_size=config.batch_size,
                          num_batches=num_batches,
                          domain=tf.constant(2),
                          e=0.9)

    # TODO: add test set - done

    show_inputs = iter(ds_train1)
    _ = model_label(next(show_inputs)["image"])

    # Set up checkpointing
    if args.reload_ckpt != "None":
        ckpt = tf.train.Checkpoint(model=model_label, global_step=global_step)
        manager = tf.train.CheckpointManager(ckpt,
                                             checkpoint_dir,
                                             max_to_keep=3)
        status = ckpt.restore(manager.latest_checkpoint)
        status.assert_consumed()
    else:
        ckpt = tf.train.Checkpoint(model=model_label, global_step=global_step)
        manager = tf.train.CheckpointManager(ckpt, model_dir, max_to_keep=3)

    writer = tf.summary.create_file_writer(manager._directory)
    with writer.as_default(), tf.summary.record_if(
            lambda: int(global_step.numpy()) % 100 == 0):
        for epoch in range(epoch_start, config.num_epochs):

            start_time = time.time()

            # random = np.array([0, 1, 2])
            # np.random.shuffle(random)
            # rand_inputs = [ds_train1, ds_train2, ds_train3]

            train_one_epoch(model_domain=model_domain,
                            model_label=model_label,
                            train_input1=ds_train1,
                            train_input2=ds_train2,
                            optimizer=optimizer,
                            global_step=global_step,
                            config=config)

            train1_metr = eval_one_epoch(model_label=model_label,
                                         dataset=ds_train1,
                                         summary_directory=os.path.join(
                                             manager._directory, "train1"),
                                         global_step=global_step,
                                         config=config,
                                         training=False)

            train2_metr = eval_one_epoch(model_label=model_label,
                                         dataset=ds_train2,
                                         summary_directory=os.path.join(
                                             manager._directory, "train2"),
                                         global_step=global_step,
                                         config=config,
                                         training=False)

            val_metr = eval_one_epoch(model_label=model_label,
                                      dataset=ds_val,
                                      summary_directory=os.path.join(
                                          manager._directory, "val"),
                                      global_step=global_step,
                                      config=config,
                                      training=False)

            # if epoch == (config.num_epochs - 1):
            #     # full training set
            #     train_metr = eval_one_epoch(model_classifier=model_classifier, dataset=ds_train_complete,
            #         summary_directory=os.path.join(manager._directory, "train"),
            #         global_step=global_step, config=config, training=False)
            #     # full test_out set
            #     test_out_metr = eval_one_epoch(model_classifier=model_classifier, dataset=ds_val_out,
            #         summary_directory=os.path.join(manager._directory, "val_out"),
            #         global_step=global_step, config=config, training=False)
            #     # full test_in set
            #     test_in_metr = eval_one_epoch(model_classifier=model_classifier, dataset=ds_val_in,
            #         summary_directory=os.path.join(manager._directory, "val_in"),
            #         global_step=global_step, config=config, training=False)

            manager.save()

            logging.info("\n #### \n epoch: %d, time: %0.2f" %
                         (epoch, time.time() - start_time))
            logging.info("Global step: {}".format(global_step.numpy()))
            logging.info("train1_accuracy: {:2f}, train1_loss: {:4f}".format(
                train1_metr['accuracy'], train1_metr['loss']))
            logging.info("train2_accuracy: {:2f}, train2_loss: {:4f}".format(
                train2_metr['accuracy'], train2_metr['loss']))
            logging.info("val_accuracy: {:2f}, val_loss: {:4f}".format(
                val_metr['accuracy'], val_metr['loss']))

            if epoch == epoch_start:
                dir_path = os.path.dirname(os.path.realpath(__file__))
                copy_source(dir_path, manager._directory)

    # Mark experiment as completed
    # TODO: add other metrics - done
    exp_repo.mark_experiment_as_completed(
        exp_id,
        train1_accuracy=train1_metr['accuracy'],
        train2_accuracy=train2_metr['accuracy'],
        val_accuracy=val_metr['accuracy'])
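
In this last example, util.update_config(config_dict, args) merges command-line arguments into the plain config dict loaded by util.get_config. That helper is not shown either; a minimal sketch under the assumption that an argparse attribute overrides a config entry only when it names an existing key and was actually set:

# Hypothetical sketch of util.update_config for the dict-based configs in
# Code Example #9; the project's actual util module is not part of this listing.
def update_config(config_dict, args):
    for key, value in vars(args).items():
        # Override only keys that already exist in the config and were
        # explicitly provided on the command line.
        if key in config_dict and value is not None:
            config_dict[key] = value
    return config_dict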