Code Example #1
def experiment_with(dataset_name):
    config = load_config(FOLDER_PATH, dataset_name)
    setupper = set_up_learner(dataset_name)

    config['active_learning']['output_dir'] = OUTPUT_DIR
    config['experiment']['logger_name'] = logger_name
    model_name = config['experiment']['model']
    iterations_per_labeled_sample = config['experiment'][
        'iterations_per_labeled_sample']
    size_to_label = config['experiment']['size_to_label']

    score_data = {}
    logger.info('---------------------------------------')
    logger.info(f'--LAUNCHING EXPERIMENTS ON {dataset_name}--')
    logger.info('---------------------------------------')
    for i in range(config['experiment']['repeats']):
        logger.info('---------------------------')
        logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------')
        logger.info('---------------------------')
        for query_size in config['experiment']['query_sizes']:
            config['active_learning']['assets_per_query'] = query_size
            config['active_learning']['n_iter'] = np.ceil(
                size_to_label / query_size).astype(int)
            dataset, learner = setupper(
                config,
                OUTPUT_DIR,
                logger,
                queries_name=f'queries-{query_size}-{i}-{model_name}.txt')
            logger.info('---------------------------')
            logger.info(f'----QUERY SIZE : {query_size}----')
            logger.info('---------------------------')
            trainer = ActiveTrain(learner, dataset,
                                  config['experiment']['strategy'],
                                  logger_name)
            scores = trainer.train(config['train_parameters'],
                                   **config['active_learning'])
            score_data[(query_size, i)] = scores
            logger.info('----DONE----\n')
        logger.info('---------------------------')
        logger.info('--------DONE--------')
        logger.info('---------------------------\n\n\n')
    if config['experiment']['save_results']:
        with open(f'{OUTPUT_DIR}/scores-{dataset_name}-{model_name}.pickle',
                  'wb') as f:
            pickle.dump(score_data, f)
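
A minimal driver for experiment_with() might look like the sketch below. It is not part of the project: the __main__ guard and the dataset names are assumptions, and each name must match a config file that load_config() knows about ('pascalvoc_segmentation' appears in Code Example #6; 'mnist' is only a guess based on the mnist.py file).

if __name__ == '__main__':
    # Hypothetical dataset names; load_config(FOLDER_PATH, name) must resolve them.
    for name in ['mnist', 'pascalvoc_segmentation']:
        experiment_with(name)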
Code Example #2
File: main.py  Project: mDuval1/active-learning
def run_single_experiment(dataset_name, init_size):
    logger.info(f'INITIAL SIZE : {init_size}')
    config = load_config(FOLDER_PATH, dataset_name)
    setupper = set_up_learner(dataset_name)
    config['active_learning']['output_dir'] = OUTPUT_DIR
    config['active_learning']['init_size'] = init_size
    config['experiment']['logger_name'] = logger_name
    logger.debug('Getting dataset and learner')
    dataset, learner = setupper(config, OUTPUT_DIR, logger)
    logger.debug('Getting trainer')
    trainer = ActiveTrain(learner, dataset, config['experiment']['strategy'],
                          logger_name)
    logger.debug('Training...')
    scores = None
    # scores = trainer.train(config['train_parameters'], **config['active_learning'])
    logger.debug('Done training...')
    logger.info('-------------------------')
    return scores
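
A plausible way to use run_single_experiment() is to sweep the size of the initial labelled pool. The sketch below is an assumption, not project code: the init_size values and the 'mnist' dataset name are illustrative, and every result stays None until the commented-out trainer.train(...) call above is re-enabled.

# Hypothetical sweep over initial pool sizes (values are illustrative only).
init_sizes = [100, 250, 500]
results = {size: run_single_experiment('mnist', size) for size in init_sizes}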
Code Example #3
score_data = {}

for i in range(config['experiment']['repeats']):
    logger.info('---------------------------')
    logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------')
    logger.info('---------------------------')
    for size in config['experiment']['sizes']:
        logger.info(f'SIZE : {size}')
        config['dataset']['train_size'] = size
        for strategy in config['experiment']['strategies']:
            dataset, learner = setupper(
                config,
                OUTPUT_DIR,
                logger,
                queries_name=f'queries-{strategy}-{i}-{model_name}.txt')
            logger.info('---------------------------')
            logger.info(f'----STRATEGY : {strategy}----')
            logger.info('---------------------------')
            trainer = ActiveTrain(learner, dataset, strategy, logger_name)
            scores = trainer.train(config['train_parameters'],
                                   **config['active_learning'])
            score_data[(strategy, i, size)] = scores
            logger.info('----DONE----\n')
    logger.info('---------------------------')
    logger.info('--------DONE--------')
    logger.info('---------------------------\n\n\n')

if config['experiment']['save_results']:
    with open(f'{OUTPUT_DIR}/scores-{model_name}.pickle', 'wb') as f:
        pickle.dump(score_data, f)
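
Reading the results back for analysis could look like the following sketch. It relies only on the pickle written above and on the (strategy, repeat, size) key layout of score_data; it makes no assumption about the structure of the per-run scores themselves.

import pickle

# Reload the pickled results written above; keys are (strategy, repeat, size).
with open(f'{OUTPUT_DIR}/scores-{model_name}.pickle', 'rb') as f:
    score_data = pickle.load(f)

for (strategy, repeat, size), scores in sorted(score_data.items()):
    print(strategy, repeat, size)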
Code Example #4
File: mnist.py  Project: mDuval1/active-learning
    return dataset, learner


# method = 'uncertainty_sampling'
# trainer = ActiveTrain(learner, dataset, method)
# trainer.train(train_parameters, **active_parameters)

logger.info('Launching trainings...')

score_data = {}

for i in range(experiment_parameters['n_repeats']):
    logger.info('---------------------------')
    logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------')
    logger.info('---------------------------')
    for strategy in experiment_parameters['strategies']:
        dataset, learner = set_up()
        logger.info('---------------------------')
        logger.info(f'----STRATEGY : {strategy}----')
        logger.info('---------------------------')
        trainer = ActiveTrain(learner, dataset, strategy, logger_name)
        scores = trainer.train(train_parameters, **active_parameters)
        score_data[(strategy, i)] = scores
        logger.info('----DONE----\n')
    logger.info('---------------------------')
    logger.info('--------DONE--------')
    logger.info('---------------------------\n\n\n')

if experiment_parameters['save_results']:
    with open(f'{OUTPUT_DIR}/scores.pickle', 'wb') as f:
        pickle.dump(score_data, f)
Code Example #5
    logger.info('Setting up models...')

    learner = SSDLearner(model=model, cfg=cfg, logger_name=logger_name)
    return dataset, learner


logger.info('Launching trainings...')

dataset, learner = set_up()

strategy = 'al_for_deep_object_detection'
# strategy='random_sampling'
trainer = ActiveTrain(learner,
                      dataset,
                      strategy,
                      logger_name,
                      strategy_params={
                          'agregation': 'sum',
                          'weighted': True
                      })
scores = trainer.train(train_parameters, **active_parameters)

# score_data = {}

# for i in range(experiment_parameters['n_repeats']):
#     logger.info('---------------------------')
#     logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------')
#     logger.info('---------------------------')
#     for strategy in experiment_parameters['strategies']:
#         dataset, learner = set_up()
#         logger.info('---------------------------')
#         logger.info(f'----STRATEGY : {strategy}----')
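
To compare the detection-specific strategy with a baseline, one could rerun the same set-up with the 'random_sampling' strategy that is commented out above. The lines below are a sketch under that assumption; they reuse only names already defined in this file (set_up, ActiveTrain, logger_name, train_parameters, active_parameters).

# Hypothetical baseline run: same learner set-up, default strategy parameters.
baseline_dataset, baseline_learner = set_up()
baseline_trainer = ActiveTrain(baseline_learner, baseline_dataset,
                               'random_sampling', logger_name)
baseline_scores = baseline_trainer.train(train_parameters, **active_parameters)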
Code Example #6
File: pascal_voc.py  Project: mDuval1/active-learning
OUTPUT_DIR, FIGURE_DIR, logger, logger_name = set_up_experiment(EXPERIMENT_NAME)
DATASET = 'pascalvoc_segmentation'

logger.info('-------------------------')
logger.info('--LAUNCHING EXPERIMENTS--')
logger.info('-------------------------')

config = load_config(FOLDER_PATH, DATASET)
setupper = set_up_learner(DATASET)

config['active_learning']['output_dir'] = OUTPUT_DIR
config['experiment']['logger_name'] = logger_name
logger.debug('Getting dataset and learner')
dataset, learner = setupper(config, OUTPUT_DIR, logger)
logger.debug('Getting trainer')
trainer = ActiveTrain(learner, dataset, config['experiment']['strategies'][0],
                      logger_name)
logger.debug('Training...')



scores = trainer.train(config['train_parameters'], **config['active_learning'])
# logger.debug('Done training...')
# logger.info('-------------------------')

# index_train = np.arange(TRAIN_SIZE)

# config_file = 'al/model/configs/unet.yaml'

# def get_model_config(config_file):
#     cfg.merge_from_file(config_file)
#     if 'unet' in config_file: