Example #1
def train():
    # setting up, options contains all our params
    options = Options(
        library=0,  # use keras
        configs=2,  # use resnet50 model
        transform=1)  # use transform for resnet50

    # set options to your specific experiment
    options.experiment = "fine_tuned_oxford102_model_dropout"
    options.dropout = 0.1
    options.number = options.dropout

    # settings
    options.gpu = 2
    options.save_test_result = True

    # early stopping
    options.early_stopping = True
    options.patience = 20

    # general hyperparameters
    options.lr = 1E-2
    options.batch_size = 128

    # reduce lr on plateau
    options.reduce_lr = 0.5

    for _ in range(9):  # nine runs: dropout 0.1 through 0.9

        # initialize model
        model = options.FlowerClassificationModel(options)

        # fit model
        model.fit()

        # evaluate model
        model.evaluate()

        # reset model for next parameter
        model.reset()

        # step dropout from 0.1 up to 0.9; round to keep floating-point
        # drift out of the checkpoint log number
        options.dropout = round(options.dropout + 0.1, 1)

        # change the log number saved to checkpoints
        options.number = options.dropout
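The accumulation above works, but the same sweep can be driven by an explicit value list, which sidesteps floating-point drift entirely. A minimal sketch reusing the names from the example (the FlowerClassificationModel attribute access is assumed to behave as above):

    for dropout in [round(0.1 * i, 1) for i in range(1, 10)]:  # 0.1 .. 0.9
        options.dropout = dropout
        options.number = dropout  # log number saved to checkpoints
        model = options.FlowerClassificationModel(options)
        model.fit()
        model.evaluate()
        model.reset()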
Example #2
def eval_config(config):
    lr = config['lr']
    embedding_dim = config['embedding_dim']
    train_ws = config['train_ws']
    batch_size = config['batch_size']
    num_sampled = config['num_sampled']
    hidden_units = config['hidden_units']
    shuffle_batch = config['shuffle_batch']
    optimizer_type = config['optimizer_type']
    use_batch_normalization = config['use_batch_normalization']
    train_nce_biases = config['train_nce_biases']

    hidden_units = get_hidden_units(hidden_units, embedding_dim)

    opts = Options()
    opts.train_data_path = '../../data/train_data.in'
    opts.eval_data_path = '../../data/eval_data.in'
    opts.lr = lr
    opts.embedding_dim = embedding_dim
    opts.train_ws = train_ws
    opts.min_count = 30
    opts.t = 0.025
    opts.verbose = 1
    opts.min_count_label = 5
    opts.label = "__label__"
    opts.batch_size = batch_size
    opts.num_sampled = num_sampled
    opts.max_train_steps = None
    opts.epoch = 2
    opts.hidden_units = hidden_units
    opts.model_dir = 'model_dir'
    opts.export_model_dir = 'export_model_dir'
    opts.prefetch_size = 100000
    opts.save_summary_steps = 100
    opts.save_checkpoints_secs = 600
    opts.keep_checkpoint_max = 2
    opts.log_step_count_steps = 1000
    opts.recall_k = 10
    opts.dict_dir = 'dict_dir'
    opts.use_saved_dict = False
    opts.use_profile_hook = False
    opts.profile_steps = 100
    opts.root_ops_path = 'lib/'
    opts.remove_model_dir = 1
    opts.optimize_level = 1
    opts.receive_ws = 100
    opts.use_subset = True
    opts.dropout = 0.0
    opts.ntargets = 1
    opts.chief_lock = 'model_dir/chief.lock'
    opts.max_distribute_train_steps = -1
    opts.train_nce_biases = train_nce_biases
    opts.shuffle_batch = shuffle_batch
    opts.predict_ws = 20
    opts.sample_dropout = 0.0
    opts.optimizer_type = optimizer_type
    opts.tfrecord_file = ''
    opts.num_tfrecord_file = 1
    opts.train_data_format = 'fasttext'
    opts.tfrecord_map_num_parallel_calls = 1
    opts.train_parallel_mode = 'train_op_parallel'
    opts.num_train_op_parallel = 4
    opts.use_batch_normalization = use_batch_normalization

    opts.tf_config = None
    opts.task_type = model_keys.TaskType.LOCAL  # default mode

    result = {}
    try:
        result = train.train(opts, export=False)
    except Exception as e:
        print(e)
    # negate the loss so a maximizing tuner can consume it directly;
    # if training failed, result is empty and the sentinel yields -(-1) = 1
    return -result.get('loss', -1)
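A hypothetical call site; the keys match the lookups at the top of eval_config, but the values (including the hidden_units encoding expected by get_hidden_units) are illustrative. Because the function negates the loss, a higher return value means a lower training loss:

    score = eval_config({
        'lr': 0.025,
        'embedding_dim': 128,
        'train_ws': 5,
        'batch_size': 64,
        'num_sampled': 10,
        'hidden_units': 512,
        'shuffle_batch': True,
        'optimizer_type': 'adagrad',
        'use_batch_normalization': False,
        'train_nce_biases': False,
    })
    print(score)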
Example #3
import time
import copy
import numpy as np
import math

from options import Options
opt = Options().parse()  # set CUDA_VISIBLE_DEVICES before import torch
opt.batch_size = 1

import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import random

from models.classifier import Model
from data.mnist_loader import MNIST_Loader
from util.visualizer import Visualizer
from data import augmentation


def model_state_dict_parallel_convert(state_dict, mode):
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    if mode == 'to_single':
        for k, v in state_dict.items():
            name = k[7:]  # remove 'module.' of DataParallel
            new_state_dict[name] = v
    elif mode == 'to_parallel':
        for k, v in state_dict.items():
            name = 'module.' + k  # add 'module.' prefix for DataParallel
            new_state_dict[name] = v
    return new_state_dict
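A quick round-trip check of the converter, assuming a checkpoint saved from an nn.DataParallel model (the file path is hypothetical):

    state = torch.load('checkpoint.pth', map_location='cpu')  # hypothetical path
    single = model_state_dict_parallel_convert(state, mode='to_single')
    parallel = model_state_dict_parallel_convert(single, mode='to_parallel')
    assert list(parallel) == ['module.' + k for k in single]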
Example #4
def eval_config(run_id, config, f):  # run_id avoids shadowing the id builtin
    opts = Options()
    opts.train_data_path = '../../data/train_data.in'
    opts.eval_data_path = '../../data/eval_data.in'
    opts.lr = get_lr(config)
    opts.embedding_dim = get_embedding_dim(config)
    opts.train_ws = get_train_ws(config)
    opts.min_count = 30
    opts.t = 0.025
    opts.verbose = 1
    opts.min_count_label = 5
    opts.label = "__label__"
    opts.batch_size = get_batch_size(config)
    opts.num_sampled = get_num_sampled(config)
    opts.max_train_steps = None
    opts.epoch = 2
    opts.hidden_units = get_hidden_units(config)
    opts.model_dir = 'model_dir'
    opts.export_model_dir = 'export_model_dir'
    opts.prefetch_size = 100000
    opts.save_summary_steps = 100
    opts.save_checkpoints_secs = 600
    opts.keep_checkpoint_max = 2
    opts.log_step_count_steps = 1000
    opts.recall_k = 10
    opts.dict_dir = 'dict_dir'
    opts.use_saved_dict = False
    opts.use_profile_hook = False
    opts.profile_steps = 100
    opts.root_ops_path = 'lib/'
    opts.remove_model_dir = 1
    opts.optimize_level = 1
    opts.receive_ws = 100
    opts.use_subset = True
    opts.dropout = 0.0
    opts.ntargets = 1
    opts.chief_lock = 'model_dir/chief.lock'
    opts.max_distribute_train_steps = -1
    opts.train_nce_biases = get_train_nce_biases(config)
    opts.shuffle_batch = get_shuffle_batch(config)
    opts.predict_ws = 20
    opts.sample_dropout = 0.0
    opts.optimizer_type = get_optimizer_type(config)
    opts.tfrecord_file = ''
    opts.num_tfrecord_file = 1
    opts.train_data_format = 'fasttext'
    opts.tfrecord_map_num_parallel_calls = 1
    opts.train_parallel_mode = 'train_op_parallel'
    opts.num_train_op_parallel = 4
    opts.use_batch_normalization = get_use_batch_normalization(config)

    opts.tf_config = None
    opts.task_type = model_keys.TaskType.LOCAL  # default mode

    result = {}
    start = int(time.time())
    try:
        result = train.train(opts, export=False)
    except Exception:
        pass  # a failed run is still logged below with -1 sentinels
    end = int(time.time())
    elapsed = end - start
    # one tab-separated row: hyperparameters, metrics, elapsed seconds
    fields = [
        opts.lr, opts.embedding_dim, opts.train_ws, opts.batch_size,
        opts.num_sampled, opts.hidden_units, opts.shuffle_batch,
        opts.optimizer_type, opts.use_batch_normalization,
        opts.train_nce_biases,
        result.get('loss', -1),
        result.get('precision_at_top_10', -1),
        result.get('recall_at_top_10', -1),
        result.get('average_precision_at_10', -1),
        result.get('accuracy', -1),
        elapsed,
    ]
    f.write('\t'.join(str(x) for x in fields) + '\n')
    f.flush()
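A hypothetical driver loop for this logger; each call appends one tab-separated row per evaluated configuration (the output path and sampled_configs are illustrative, not from the original):

    with open('tune_results.tsv', 'w') as f:  # hypothetical output path
        for run_id, config in enumerate(sampled_configs):  # sampled_configs assumed
            eval_config(run_id, config, f)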
Example #5
    scatter_ax(ax, x=y, y=x, fx=fy, c_x='r', c_y='g', c_l='k', data_range=data_range)
    plt.savefig(os.path.join(dir, 'ty_%06d.png' % (step)), bbox_inches='tight')
    plt.clf()

    if config.gen:
        z = to_data(images['ZY'])
        fig, ax = plt.subplots()
        scatter_ax(ax, x=z, y=x, fx=y, c_x='m', c_y='g', c_l='0.5', data_range=data_range)
        plt.savefig(os.path.join(dir, 'gz_%06d.png' % (step)), bbox_inches='tight')
        plt.clf()
    return

gen = 1

config = Options().parse()
config.batch_size = DISPLAY_NUM    # For plotting purposes
config.gen = gen
utils.print_opts(config)

config.solver = 'bary_ot'
#config.solver = 'w1'
#config.solver = 'w2'
#plot_dataset = 'our_checkerboard'
plot_dataset = '8gaussians'
config.data = plot_dataset
config.trial = 3

if config.solver != 'w2':
    dir_string = './{0}_{1}/trial_{2}/'.format(config.solver, config.data, config.trial)
else:
    dir_string = './{0}_gen{2}_{1}/trial_{3}/'.format(config.solver, config.data, config.gen, config.trial)

print(dir_string)
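With the settings above the non-'w2' branch applies, so dir_string resolves to './bary_ot_8gaussians/trial_3/'.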
Example #6
                    random_params = {
                        k: random.choice(v)
                        for k, v in param_grid.items()
                    }
                    curr_params = [
                        float(random_params['lr']),
                        int(random_params['batch_size']),
                        random_params['loss_type'],
                        random_params['loss_formulation'],
                        random_params['image_normalization'],
                        random_params['compensated_target']
                    ]

                all_params.append(curr_params)

            opts.lr = curr_params[0]
            opts.batch_size = curr_params[1]
            opts.loss_type = curr_params[2]
            opts.loss_formulation = curr_params[3]
            opts.image_normalization = curr_params[4]
            opts.compensated_target = curr_params[5]

            # Create data transforms
            if opts.image_normalization:
                data_transforms = transforms.Compose([
                    datatransforms.HorizontalFlip(opts),
                    datatransforms.VerticalFlip(opts),
                    datatransforms.ToTensor(opts),
                    datatransforms.NormalizeImage(opts)
                ])
            else:
                # same pipeline without per-image normalization
                data_transforms = transforms.Compose([
                    datatransforms.HorizontalFlip(opts),
                    datatransforms.VerticalFlip(opts),
                    datatransforms.ToTensor(opts)
                ])
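A small readability sketch (not in the original): the positional indexing into curr_params above can be replaced with a single unpacking assignment, which keeps the parameter order visible in one place:

    # hypothetical alternative to the index-based assignments above
    (opts.lr, opts.batch_size, opts.loss_type, opts.loss_formulation,
     opts.image_normalization, opts.compensated_target) = curr_params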