# --- Example 1 (Beispiel #1) ---
def experiment_with(dataset_name):
    """Run the full active-learning experiment grid for one dataset.

    For every repeat and every configured query size, builds a fresh
    dataset/learner pair, trains with the configured strategy, and
    collects the scores keyed by ``(query_size, repeat_index)``.
    Optionally pickles the collected scores into ``OUTPUT_DIR``.

    Relies on module-level globals: ``FOLDER_PATH``, ``OUTPUT_DIR``,
    ``logger``, ``logger_name``.

    :param dataset_name: key used to load the config and learner factory.
    """
    config = load_config(FOLDER_PATH, dataset_name)
    setupper = set_up_learner(dataset_name)

    config['active_learning']['output_dir'] = OUTPUT_DIR
    config['experiment']['logger_name'] = logger_name
    model_name = config['experiment']['model']
    # NOTE(review): read but never used in this function — confirm it is
    # needed (kept so a missing key still fails loudly here).
    iterations_per_labeled_sample = config['experiment'][
        'iterations_per_labeled_sample']
    size_to_label = config['experiment']['size_to_label']

    score_data = {}
    logger.info('---------------------------------------')
    logger.info(f'--LAUNCHING EXPERIMENTS ON {dataset_name}--')
    logger.info('---------------------------------------')
    for i in range(config['experiment']['repeats']):
        logger.info('---------------------------')
        logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------')
        logger.info('---------------------------')
        for query_size in config['experiment']['query_sizes']:
            config['active_learning']['assets_per_query'] = query_size
            # Enough AL iterations to label `size_to_label` samples in
            # chunks of `query_size` (ceiling division).
            config['active_learning']['n_iter'] = np.ceil(
                size_to_label / query_size).astype(int)
            dataset, learner = setupper(
                config,
                OUTPUT_DIR,
                logger,
                queries_name=f'queries-{query_size}-{i}-{model_name}.txt')
            logger.info('---------------------------')
            logger.info(f'----QUERY SIZE : {query_size}----')
            logger.info('---------------------------')
            trainer = ActiveTrain(learner, dataset,
                                  config['experiment']['strategy'],
                                  logger_name)
            scores = trainer.train(config['train_parameters'],
                                   **config['active_learning'])
            score_data[(query_size, i)] = scores
            # Fixed: these messages had a pointless f-string prefix
            # (no placeholders); plain literals now.
            logger.info('----DONE----\n')
        logger.info('---------------------------')
        logger.info('--------DONE--------')
        logger.info('---------------------------\n\n\n')
    if config['experiment']['save_results']:
        with open(f'{OUTPUT_DIR}/scores-{dataset_name}-{model_name}.pickle',
                  'wb') as f:
            pickle.dump(score_data, f)
# --- Example 2 (Beispiel #2) ---
def run_single_experiment(dataset_name, init_size):
    """Prepare a single active-learning run for ``dataset_name``.

    Loads the dataset config, overrides the initial labeled-set size,
    and builds the dataset, learner and trainer.

    NOTE: the actual training call is currently commented out, so the
    return value is always ``None``.

    Relies on module-level globals: ``FOLDER_PATH``, ``OUTPUT_DIR``,
    ``logger``, ``logger_name``.
    """
    logger.info(f'INITIAL SIZE : {init_size}')
    cfg = load_config(FOLDER_PATH, dataset_name)
    build_pair = set_up_learner(dataset_name)
    cfg['active_learning']['output_dir'] = OUTPUT_DIR
    cfg['active_learning']['init_size'] = init_size
    cfg['experiment']['logger_name'] = logger_name
    logger.debug('Getting dataset and learner')
    active_dataset, active_learner = build_pair(cfg, OUTPUT_DIR, logger)
    logger.debug('Getting trainer')
    al_trainer = ActiveTrain(active_learner, active_dataset,
                             cfg['experiment']['strategy'], logger_name)
    logger.debug('Training...')
    results = None
    # results = al_trainer.train(cfg['train_parameters'], **cfg['active_learning'])
    logger.debug('Done training...')
    logger.info('-------------------------')
    return results
# --- Example 3 (Beispiel #3) ---
from al.model.model_zoo.simple_cnn import ConvModel
from al.model.mnist import MnistLearner
from al.dataset.mnist import MnistDataset
from al.train.active_train import ActiveTrain
from al.helpers.experiment import set_up_experiment, load_config
from al.experiments import set_up_learner

# Dataset key used to look up the config and learner factory.
DATASET = 'mnist'

FOLDER_PATH = os.path.dirname(__file__)
# set_up_experiment yields the output/figure directories plus a
# configured logger and its name (logging_lvl=20 is logging.INFO).
OUTPUT_DIR, FIGURE_DIR, logger, logger_name = set_up_experiment(__file__,
                                                                FOLDER_PATH,
                                                                logging_lvl=20)

config = load_config(FOLDER_PATH, DATASET)
setupper = set_up_learner(DATASET)
config['active_learning']['output_dir'] = OUTPUT_DIR
config['experiment']['logger_name'] = logger_name
model_name = config['experiment']['model']
dataset, learner = setupper(config, OUTPUT_DIR, logger)

# File of previously queried sample indices, one row per query step.
# NOTE(review): skiprows=1 assumes the first data row is skippable
# metadata — confirm against the query-file writer.
queried = os.path.join(os.path.dirname(__file__), 'results',
                       'queries-margin_sampling-0-simplenet.txt')
df = pd.read_csv(queried, header=0, skiprows=1)
# print(df)
query_step = 0
plot_size = 32
# Sample indices queried at `query_step`.
indices = df.loc[query_step].values

# NOTE(review): dead code — this branch can never execute.
if False:
    train_dataset = dataset.dataset
# --- Example 4 (Beispiel #4) ---
# Experiment folder (a directory path, despite the _NAME suffix);
# results and figures live underneath it.
EXPERIMENT_NAME = os.path.dirname(__file__)
model_name = 'mobilenet'
OUTPUT_DIR = f'{EXPERIMENT_NAME}/results'
FIGURE_DIR = f'{EXPERIMENT_NAME}/figures'
plot_dir = os.path.join(os.path.dirname(__file__), 'figures')

# Toggles selecting which analysis sections below are executed.
analyze_results = True
analyze_queries = False
analyze_sizes = True
if analyze_queries:
    dataset = 'cifar'
    config = load_config(EXPERIMENT_NAME, dataset)
    logger = setup_logger('analysis', OUTPUT_DIR, logging_lvl=20)
    config['experiment']['logger_name'] = 'analysis'
    dataset, learner = set_up_learner(dataset)(config, OUTPUT_DIR, logger)
    labels = np.array([x[1] for x in dataset.dataset])
    train_distribution = pd.value_counts(labels).sort_values()
    print('train_distribution', train_distribution)
    pbar = tqdm.tqdm(total=config['experiment']['repeats'] *
                     len(config['experiment']['strategies']) *
                     config['active_learning']['n_iter'])
    list_data = []
    for i in range(config['experiment']['repeats']):
        for strategy in config['experiment']['strategies']:
            queries_name = f'queries-{strategy}-{i}-{model_name}.txt'
            queries = pd.read_csv(f'{OUTPUT_DIR}/{queries_name}')
            for j, query in queries.iterrows():
                pbar.update(1)
                new_query = query.dropna().astype(int)
                query_labels = labels[list(new_query)]