def sweep_core(config, graph_path, res_dir):
    """Run one sweep iteration: load data, build/train a model, analyse and
    evaluate it.

    Args:
        config: experiment configuration object consumed by the helpers.
        graph_path: directory where performance graphs are written.
        res_dir: directory name passed to the analysis step.

    Returns:
        tuple: ``(res_dict, full_history)`` where ``res_dict`` maps each
        tracked metric to its final-epoch value and ``full_history`` is the
        complete Keras-style ``history.history`` dict.
    """
    # load the data
    data = load_data(config)

    # create a model
    model = build_model(config)

    # create trainer
    trainer = build_trainer(model, data, config)

    # train the model
    history = trainer.train()

    analyse_model_performance(model,
                              data,
                              history,
                              config,
                              graph_path=graph_path,
                              res_dir=res_dir)

    # evaluate model (called for its side effects; the result was unused)
    trainer.evaluate()
    # NOTE(review): this reassignment was dead in the original (nothing below
    # uses `model`); kept in case `trainer.model_train` is a property with
    # side effects — confirm and remove if not.
    model = trainer.model_train

    # keep only the last-epoch value of every tracked metric
    res_dict = OrderedDict(
        (key, values[-1]) for key, values in history.history.items())
    return res_dict, history.history
# --- Code example #2 (scraped-snippet separator) ---
def main():
    """Entry point: configure, train, visualise training performance,
    evaluate, then run the model on the MAFAT test set."""
    # the working directory doubles as the source root for artefacts
    SRC_DIR = os.getcwd()
    # parse the run arguments / configuration file
    config = preprocess_meta_data()

    # data pipeline
    data = load_data(config)

    # echo the configuration unless running quietly
    if not config.quiet:
        config.print()

    # model and trainer wiring
    model = build_model(config)
    trainer = build_trainer(model, data, config)

    # fit the model; keep the history for the graphs below
    history = trainer.train()

    # visualize training performance
    graph_path = os.path.join(SRC_DIR, 'graphs')
    analyse_model_performance(model, data, graph_path, history)

    # evaluate model
    trainer.evaluate()

    # run on MAFAT test
    model = trainer.model_train
    test_model(model, SRC_DIR, config)
def main():
    """Entry point: build experiment paths, optionally adjust the model input
    size for MTI / TCN runs, then train, evaluate and write a submission
    file.
    """
    # capture the config path from the run arguments
    # then process configuration file
    SRC_DIR = os.getcwd()
    RADAR_DIR = os.path.join(SRC_DIR, os.pardir)
    config = preprocess_meta_data()
    exp_name = config.exp_name

    # timestamped experiment name shared by graph dir, log file and submission
    now = datetime.now()
    date = now.strftime("%Y_%m_%d_%H_%M_%S")
    exp_name_time = '{}_{}'.format(exp_name, date)
    # visualize training performance
    graph_path = os.path.join(RADAR_DIR, 'graphs', exp_name_time)
    LOG_DIR = os.path.join(RADAR_DIR, 'logs')
    # exist_ok replaces the original exists()/makedirs() pair
    os.makedirs(LOG_DIR, exist_ok=True)
    log_path = '{}/{}.log'.format(LOG_DIR, exp_name_time)

    # MTI pre-processing changes the spectrogram dimensions.
    # Plain attribute assignment is equivalent to the original
    # config.__setattr__(...) call.
    if config.use_mti_improvement:
        config.model_input_dim = [125, 32, 1]

    # TCN models expect the transposed input layout
    if bool(re.search('tcn', config.exp_name,
                      re.IGNORECASE)) and config.use_mti_improvement:
        config.model_input_dim = [32, 125, 1]

    # load the data
    data = load_data(config)

    # create a model
    model = build_model(config)

    # create trainer
    trainer = build_trainer(model, data, config)

    # train the model
    history = trainer.train()

    analyse_model_performance(model,
                              data,
                              history,
                              config,
                              graph_path=graph_path,
                              res_dir=exp_name_time)

    # evaluate model (side effects only; the return value was unused)
    trainer.evaluate()

    SUB_DIR = os.path.join(RADAR_DIR, 'submission_files')
    os.makedirs(SUB_DIR, exist_ok=True)
    sub_path = "{}/submission_{}.csv".format(SUB_DIR, exp_name_time)
    # model is subscriptable; 'train' presumably holds the trained network —
    # TODO confirm against build_model
    test_model(model['train'], sub_path, SRC_DIR, config)

    print('#' * 70)
    print('log file is located at {}'.format(log_path))
    print('graphs are located at {}'.format(graph_path))
    print('submission file is at: {}'.format(sub_path))
    print('')
# --- Code example #4 (scraped-snippet separator) ---
def sweep_core(config, graph_path, res_dir, best_preformance_dir,
               current_sweep, param_value):
    """Run one sweep iteration and record it against the best known results.

    Args:
        config: experiment configuration object consumed by the helpers.
        graph_path: directory where performance graphs are written.
        res_dir: directory name passed to the analysis step.
        best_preformance_dir: directory for best-performance bookkeeping
            (misspelling of "performance" kept — public parameter name).
        current_sweep: name/id of the sweep being run (used in labels only).
        param_value: value of the swept parameter (used in labels only).

    Returns:
        tuple: ``(res_dict, full_history, pred_dict)`` — final-epoch metric
        values, the complete ``history.history`` dict, and the per-model
        prediction dict for later sweep visualisations.
    """
    # load the data
    data = load_data(config)

    # create a model
    model = build_model(config)

    # create trainer
    trainer = build_trainer(model, data, config)

    # train the model
    history = trainer.train()

    # BUG FIX: the original invoked analyse_model_performance twice with
    # identical arguments whenever config.learn_background was False (once
    # inside an `if`, once unconditionally), duplicating graph/report side
    # effects. A single call produces the same result_data.
    result_data = analyse_model_performance(model,
                                            data,
                                            history,
                                            config,
                                            graph_path=graph_path,
                                            res_dir=res_dir)
    result_data['Log path'] = res_dir
    result_data['Graph path'] = graph_path
    result_data[
        'Submission path'] = "None - Sweep {} results with value {}".format(
            current_sweep, param_value)
    result_data['Model name'] = config.model_name
    result_data['Exp name'] = config.exp_name
    result_data['Snr type'] = config.snr_type

    # compare model performance (exist_ok replaces exists()/makedirs())
    os.makedirs(best_preformance_dir, exist_ok=True)
    compare_to_best_model_performance(result_data, model, best_preformance_dir,
                                      config)

    # evaluate model (side effects only; the return value was unused)
    trainer.evaluate()
    model = trainer.model_train
    # predictions for later use in sweep visualizations
    pred_dict = get_predictions_dict_per_model(model, data)

    # keep only the last-epoch value of every tracked metric
    res_dict = OrderedDict(
        (key, values[-1]) for key, values in history.history.items())
    return res_dict, history.history, pred_dict
# --- Code example #5 (scraped-snippet separator) ---
def main():
    """Minimal entry point: parse configuration, load data, build the model
    and trainer, then train."""
    # process the configuration file given via the run arguments
    config = preprocess_meta_data()

    # data pipeline
    data = load_data(config)

    # echo the configuration unless running quietly
    if not config.quiet:
        config.print()

    # model and trainer wiring
    model = build_model(config)
    trainer = build_trainer(model, data, config)

    # fit the model
    trainer.train()
# --- Code example #6 (scraped-snippet separator) ---
def main():
    """Entry point: set up experiment directories, configure multi-replica
    training, train and evaluate the model, write a submission file, and
    optionally persist the trained model.
    """
    # capture the config path from the run arguments
    # then process configuration file
    SRC_DIR = os.getcwd()
    RADAR_DIR = os.path.join(SRC_DIR, os.pardir)
    config = preprocess_meta_data(SRC_DIR)
    exp_name = config.exp_name

    # timestamped experiment name shared by graphs, log and submission files
    now = datetime.now()
    date = now.strftime("%Y_%m_%d_%H_%M_%S")
    exp_name_time = '{}_{}'.format(exp_name, date)
    # visualize training performance
    graph_path = os.path.join(RADAR_DIR, 'graphs', exp_name_time)
    # exist_ok replaces the original exists()/makedirs() pairs throughout
    os.makedirs(graph_path, exist_ok=True)
    LOG_DIR = os.path.join(RADAR_DIR, 'logs')
    os.makedirs(LOG_DIR, exist_ok=True)
    log_path = '{}/{}.log'.format(LOG_DIR, exp_name_time)

    # configure multi-replica training; scale the batch size with the number
    # of replicas (plain attribute assignment is equivalent to the original
    # config.__setattr__ call)
    strategy = tf.distribute.MirroredStrategy()
    if strategy.num_replicas_in_sync != 1:
        config.batch_size = config.batch_size * strategy.num_replicas_in_sync

    config = adjust_input_size(config)

    # assert configurations
    # NOTE(review): assert is stripped under `python -O`; convert to an
    # explicit `raise ValueError` if this check must always run.
    assert not (config.learn_background
                and (config.with_rect_augmentation
                     or config.with_preprocess_rect_augmentation))

    # load the data
    data = load_data(config)

    # build and train under the distribution strategy scope
    with strategy.scope():
        # create a model
        model = build_model(config)

        # create trainer
        trainer = build_trainer(model, data, config)

        # train the model
        history = trainer.train()

    # evaluate model (side effects only; the return value was unused)
    trainer.evaluate()

    SUB_DIR = os.path.join(RADAR_DIR, 'submission_files')
    BEST_RESULT_DIR = os.path.join(RADAR_DIR, 'best_preformance_history')
    os.makedirs(SUB_DIR, exist_ok=True)
    sub_path = "{}/submission_{}.csv".format(SUB_DIR, exp_name_time)
    # model is subscriptable; 'train' presumably holds the trained network —
    # TODO confirm against build_model
    test_model(model['train'], sub_path, SRC_DIR, config, BEST_RESULT_DIR)

    # performance analysis / best-model bookkeeping, skipped for
    # learn-background runs
    if not config.learn_background:
        result_data = analyse_model_performance(model,
                                                data,
                                                history,
                                                config,
                                                graph_path=graph_path,
                                                res_dir=exp_name_time)
        result_data['Log path'] = log_path
        result_data['Graph path'] = graph_path
        result_data['Submission path'] = sub_path
        result_data['Model name'] = config.model_name
        result_data['Exp name'] = config.exp_name
        result_data['Snr type'] = config.snr_type

        # compare model performance
        os.makedirs(BEST_RESULT_DIR, exist_ok=True)
        compare_to_best_model_performance(result_data, model, BEST_RESULT_DIR,
                                          config)

    # optionally persist the trained model
    PREVIOUS_MODELS_DIR = os.path.join(RADAR_DIR, 'previous_models_files')
    if config.save_model is True:
        os.makedirs(PREVIOUS_MODELS_DIR, exist_ok=True)
        # NOTE(review): chdir is a process-wide side effect and is never
        # restored afterwards — confirm downstream code tolerates it.
        os.chdir(PREVIOUS_MODELS_DIR)
        save_model(name='{}_{}_{}'.format(config.model_name, config.exp_name,
                                          exp_name_time),
                   model=model['train'])

    print('#' * 70)
    print('log file is located at {}'.format(log_path))
    print('graphs are located at {}'.format(graph_path))
    print('submission file is at: {}'.format(sub_path))
    print('')