Example no. 1
def training_pipeline(config, desired_controls, desired_deviation):
    """
    Pretrain the policy with the given control sequence on a simple model,
    then retrain the last layers with a more complex model.
    """

    # ODE model to start with
    model = SimpleModel()

    trainer = Trainer(model, config)

    # pretrain policy with given control sequence
    trainer.pretrain(desired_controls, desired_deviation)

    # on-policy training
    trainer.train()

    # a more complex variation of the same ODE model
    new_model = ComplexModel()
    trainer.set_model(new_model)

    # freeze all policy layers except last ones
    shift_grad_tracking(trainer.policy, False)
    shift_grad_tracking(trainer.policy.out_means, True)
    shift_grad_tracking(trainer.policy.out_sigmas, True)

    # retrain on-policy last layers
    trainer.train(post_training=True)
Example no. 2
def run_mlp_conv_compare_experiment(model_dict_conv, model_dict_mlp,
                                    train_dict, out_dir, test_data):
    np.random.seed(12345)
    results = defaultdict(list)

    for model, model_dict in [(MlpNet(**model_dict_mlp), model_dict_mlp),
                              (ConvNet(**model_dict_conv), model_dict_conv)]:
        label = f'model={model.name}'
        print(f'{label}')
        train_dict['callbacks'][1] = ModelDump(
            output_dir=os.path.join(out_dir, label))
        train_dict['callbacks'][2] = SaveBestModel(
            output_dir=os.path.join(out_dir, label))
        trainer = Trainer(model, **train_dict)

        start_time = time()
        trainer.train_loop()
        time_period = (time() - start_time) / 60
        log_data = trainer.logger.logging_data

        results['model_dict'].append(model_dict)
        results['train_dict'].append(train_dict)
        results['time'].append(time_period)
        results['label'].append(label)
        results['log_data'].append(log_data)

        calc_test_accuracy(model, test_data, train_dict)

    save_results(out_dir, results_dict=results)
    return results
Example no. 3
def run_experiment(experiment_generator, out_dir, test_data):
    np.random.seed(12345)
    results = defaultdict(list)

    for i, (model_dict, train_dict, exp_name,
            value) in enumerate(experiment_generator()):
        label = f'{exp_name}={value}'
        print(f'{i}. {label}')

        train_dict['callbacks'][1] = ModelDump(
            output_dir=os.path.join(out_dir, label))
        train_dict['callbacks'][2] = SaveBestModel(
            output_dir=os.path.join(out_dir, label))
        model = ConvNet(**model_dict)
        trainer = Trainer(model, **train_dict)

        start_time = time()
        trainer.train_loop()
        time_period = (time() - start_time) / 60

        log_data = trainer.logger.logging_data

        results['model_dict'].append(model_dict)
        results['train_dict'].append(train_dict)
        results['time'].append(time_period)
        results['label'].append(label)
        results['log_data'].append(log_data)

        calc_test_accuracy(model, test_data, train_dict)

    save_results(out_dir, results_dict=results)
    return results
Example no. 4
def run_dropout_experiment(model_dict, train_dict, out_dir, test_data):
    np.random.seed(12345)
    results = defaultdict(list)
    model_dict['dropout'] = True
    kernel_size = model_dict['kernel_size']
    padding = model_dict['padding']
    label = f'kernel={kernel_size}x{kernel_size}, pad={padding}'

    model = ConvNet(**model_dict)

    train_dict['callbacks'][1] = ModelDump(
        output_dir=os.path.join(out_dir, label))
    train_dict['callbacks'][2] = SaveBestModel(
        output_dir=os.path.join(out_dir, label))
    trainer = Trainer(model, **train_dict)

    start_time = time()
    trainer.train_loop()
    time_period = (time() - start_time) / 60

    log_data = trainer.logger.logging_data

    results['model_dict'].append(model_dict)
    results['train_dict'].append(train_dict)
    results['time'].append(time_period)
    results['label'].append(label)
    results['log_data'].append(log_data)

    calc_test_accuracy(model, test_data, train_dict)

    save_results(out_dir, results_dict=results)
    return results
Example no. 5
    def learn(self):
        for epsilon, M in self.epsilon_strategy:
            episode = 1
            while episode <= M:
                print('Epsilon:', epsilon, 'Episode:', episode, '/', M)

                sequence = self.sample_network(epsilon)
                sequence_hashed = SequenceGenerator.hash(sequence)
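                # resample until the drawn architecture has not been evaluated before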
                while sequence_hashed in self.memory:
                    sequence = self.sample_network(epsilon)
                    sequence_hashed = SequenceGenerator.hash(sequence)

                trainer = Trainer(sequence)
                history = trainer.train(self.data['X_train'], self.data['y_train'],
                                        self.data['X_val'], self.data['y_val'])
                evaluation = {'val_loss': history.history['val_loss'][-1]}
                for metric in METRICS:
                    evaluation[f'val_{metric}'] = history.history[f'val_{metric}'][-1]

                print('Network:', '--'.join(sequence_hashed.split('\n')))
                print('Evaluation:', evaluation)
                print()

                self.memory.add(sequence_hashed, evaluation, epsilon)

                for sample in self.memory.sample():
                    self._update_q_values_sequence(sample[0], REWARD_FUNCTION(sample[1]))

                episode += 1

            self.q_values.save(epsilon)
            self.memory.save(epsilon)
Example no. 6
def run_experiment(experiment_generator, out_dir, test_data, plot_loss_batch=False):
    np.random.seed(12345)
    results = defaultdict(list)

    for i, (model_dict, train_dict, exp_name, value) in enumerate(experiment_generator()):
        model = MlpNet(**model_dict)
        trainer = Trainer(model, **train_dict)

        label = f'{exp_name}={value}'
        print(f'{i}. {label}')

        start_time = time()
        trainer.train_loop()
        time_period = time() - start_time

        log_data = trainer.logger.logging_data

        if plot_loss_batch:
            # plot train loss per batch in first epoch
            filename = exp_name + str(value) + '_loss_one_batch'
            plot_val_loss_per_batch(log_data['loss_batch']['train'], filename, out_dir)

        results['model_dict'].append(model_dict)
        results['train_dict'].append(train_dict)
        results['time'].append(time_period)
        results['label'].append(label)
        results['log_data'].append(log_data)

        # calculate accuracy on test data
        acc_metric = LabelAccuracy()
        x_test, y_test = test_data
        accuracy = acc_metric(model.predict_classes(x_test), y_test)
        print('Accuracy on test data: {}'.format(accuracy))

    return results
Example no. 7
def train(file_name, net_file=None):
    trainer = Trainer(file_name, net_file)
    trainer.train()
    save_name = input(
        "Do you want to save the model? Specify a file name if so, or leave blank otherwise: "
    )
    if len(save_name) > 0:
        trainer.save(save_name)
Example no. 8
def create_trainer(classifier, batch_size, num_epochs, training_data, test_data):
  training_data_feeder = data.BilexDataFeeder(training_data, batch_size, shuffle=True)
  trainer = Trainer(classifier, num_epochs, training_data_feeder)

  test_data_feeder = data.BilexDataFeeder(test_data, batch_size, shuffle=True)
  training_lexicon = load_lexicon(training_data)
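  # attach logging and evaluation hooks that the trainer runs during training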
  trainer.add_command(EpochLossLogger(classifier, LOG_DIR))
  trainer.add_command(BasicStatsLogger(classifier, training_data_feeder, num_epochs, 10))
  trainer.add_command(Evaluation(classifier, test_data_feeder, training_lexicon, num_epochs, LOG_DIR, ['all']))
  return trainer
Example no. 9
def exp1(opt):
    model = getattr(models.concrete.single, opt.model)(opt).to(device)
    opt.exp_name += opt.model
    vd = VisionDataset(opt, class_order=list(range(10)))

    optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    logger = get_logger(folder=opt.log_dir + '/' + opt.exp_name + '/')
    logger.info(f'Running with device {device}')
    logger.info("==> Opts for this training: " + str(opt))

    trainer = Trainer(opt, logger, device=device)

    # pretraining
    if opt.num_pretrain_classes > 0:
        try:
            logger.info('Trying to load pretrained model...')
            model = load_pretrained_model(opt, model, logger)
            pretrain = False
        except Exception as e:
            logger.info(f'Failed to load pretrained model: {e}')
            pretrain = True

        if pretrain:
            assert opt.num_pretrain_passes > 0
            logger.info(f'==> Starting pretraining')
            for epoch in range(1, opt.num_pretrain_passes + 1):
                trainer.train(loader=vd.pretrain_loader, model=model, optimizer=optimizer, epoch=epoch)
                acc = trainer.test(loader=vd.pretest_loader, model=model, mask=vd.pretrain_mask, epoch_or_phase=epoch)
            logger.info(f'==> Pretraining completed! Acc: [{acc:.3f}]')
            save_pretrained_model(opt, model)

    if opt.num_tasks > 0:
        # TODO: use another optimizer?
        # Class-Incremental training
        # We start with the pretrain mask because in testing we want the pretrained classes included
        logger.info(f'==> Starting Class-Incremental training')
        mask = vd.pretrain_mask.clone() if opt.num_pretrain_classes > 0 else torch.zeros(vd.n_classes_in_whole_dataset)
        dataloaders = vd.get_ci_dataloaders()
        cl_accuracy_meter = AverageMeter()
        for phase, (trainloader, testloader, class_list, phase_mask) in enumerate(dataloaders, start=1):
            trainer.train(loader=trainloader, model=model, optimizer=optimizer, phase=phase)

            # accumulate masks, because we want to test on all seen classes
            mask += phase_mask

            # this is the accuracy for all classes seen so far
            acc = trainer.test(loader=testloader, model=model, mask=mask, epoch_or_phase=phase)
            cl_accuracy_meter.update(acc)

        logger.info(f'==> CL training completed! AverageAcc: [{cl_accuracy_meter.avg:.3f}]')
Example no. 10
def main():
    """Create a trained feature set.
        1. Extract features from a cropped image set
        2. Evaluate features on labelled training data
        3. Remove features with high false positive rate
        4. Evaluate features again on same data
    """

    trainer = Trainer()
    detector = Detector(750)
    trainer.set_detector(detector)
    # feat = load_features('data/gazebo1_932.dat')
    # trainer.set_features(feat)

    trainer.load_data('train/gazebo/pos_info_PreyEmptyWorld.dat')
    trainer.load_data('train/gazebo/pos_info_PurplePrey1.dat')
    trainer.load_data('train/gazebo/pos_info_PurplePrey2.dat')
    trainer.load_data('train/gazebo/sim2.dat')

    # trainer.load_data('train/quadbox/white_courtyard1.dat')
    # trainer.load_data('train/quadbox/white_courtyard2.dat')
    # trainer.load_data('train/quadbox/black_drone_court.dat')
    # trainer.load_data('train/quadbox/pos_info.txt')

    # trainer.load_data('train/idea2/pos_info_red-inclass.dat')
    # trainer.load_data('train/idea2/red2m.dat')

    # trainer.load_data('train/idea2/courtyard323.dat')
    # trainer.load_data('train/idea2/orange10am.dat')
    # trainer.load_data('train/idea2/orange7_30am.dat')
    # trainer.load_data('train/pos_info_Courtyard_multi.dat')
    # trainer.load_data('train/idea2/orange9am.dat')

    # trainer.subsample_data(2)
    trainer.train_and_test(.8)
    trainer.evaluate(1)
    trainer.feature_selection()
    # trainer.evaluate(subsample=0.4)
    # trainer.feature_selection()
    # trainer.evaluate(subsample=0.7)
    trainer.save_features('sim2')
    return
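    # NOTE: the early return above leaves the remaining lines unreachable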

    trainer.train_and_test(show=False)
    trainer.feature_selection()
    trainer.evaluate(show=False)
    trainer.save_features('ir_2')

    return
Example no. 11
def main():
    args = get_args()

    path = args['path']
    min_len = args['min_len']
    max_len = args['max_len']
    n_workers = args['worker']
    voc_n_keep = args['voc_size']
    batch_size = args['batch']
    shuffle = args['shuffle']
    embed_dim = args['embed']
    d_model = args['d_model']
    n_layers = args['layers']
    heads = args['heads']
    d_ff = args['dff']
    dropout = args['dropout']
    trainable = args['trainable']
    epochs = args['epoch']
    save_dir = args['save']
    loss_func = F.cross_entropy

    # Retrieves the dataset, cleans, processes and creates tensors from it
    training_set = Dataset(path, min_len, max_len, n_workers, voc_n_keep)

    vocab_size = training_set.vocab.num_words
    target_pad = training_set.vocab.PAD_token

    # PyTorch's batch generator
    training_iter = DataLoader(training_set,
                               batch_size,
                               shuffle,
                               num_workers=n_workers)

    pretrained = None
    if args['glove']:
        embed_dim = args['glove_size']
        print("Collecting GloVe embeddings size {}".format(embed_dim))
        pretrained = get_glove(embed_dim, training_set.vocab,
                               args['glove_path'])
        print("Successfully collected.")

    # Creates model
    trainer = Trainer(vocab_size, embed_dim, d_model, n_layers, heads, d_ff,
                      max_len, pretrained, trainable, dropout)

    # Train model
    trainer.train(training_iter, loss_func, epochs, target_pad, save_dir,
                  training_set.vocab)
Example no. 12
def restore_model(config, checkpoint_dir=None, checkpoint_file=None):
    if checkpoint_dir is None:
        checkpoint_dir = Trainer(None, None, None,
                                 config["trainer"]).checkpoint_dir()
    if checkpoint_file is None:
        checkpoint_file = checkpoint_dir + "/best"
    model = create_model(config, training=False)
    print("checkpoint_file", checkpoint_file)

    config = tf.ConfigProto()
    # pylint:disable=no-member
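    # let TensorFlow allocate GPU memory on demand instead of reserving it all upfront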
    config.gpu_options.allow_growth = True
    sess = tf.Session(graph=model.graph, config=config)

    with model.graph.as_default():  # pylint:disable=not-context-manager
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(sess, checkpoint_file)

    return sess, model
Example no. 13
 def __init__(self, process_id, gpu='cpu', world_size=4, optimizer=optim.Adam, optimizer_sparse=optim.SparseAdam, optim_params=(1e-3, (0.9, 0.995), 1e-8), model_params=None, tb=None):
   super(Learner, self).__init__()
   print(gpu)
   self.model = Policy_Network(data_parallel=False)
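   # restore the policy weights from a local checkpoint; strict=False ignores mismatched keys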
   saved_checkpoint = torch.load("./checkpoint.pth")
   self.model.load_state_dict(saved_checkpoint['model'], strict=False)
   if process_id == 0:
     optim_params = (self.model.parameters(),) + optim_params
     self.optimizer = optimizer(*optim_params)
   
   self.meta_optimizer = optim.SGD(self.model.parameters(), 0.03)
   self.process_id = process_id
   self.device = 'cuda:' + str(process_id) if gpu != 'cpu' else gpu
   self.model.to(self.device)
   self.num_iter = 0
   self.world_size = world_size
   self.original_state_dict = {}
   self.eps = np.finfo(np.float32).eps.item()
   self.use_ml = False
   self.use_rl = False
   self.trainer = Trainer(self.use_ml, self.use_rl, self.device)
   self.tb = tb
Example no. 14
def save_model(config, input_graph=None, checkpoint_dir=None):
    _sess, model = restore_model(config, checkpoint_dir)

    # the trainer is needed unconditionally below for log_dir()/checkpoint_dir()
    trainer = Trainer(None, None, None, config["trainer"])

    if input_graph is None:
        tf.train.write_graph(model.graph, trainer.log_dir(), "final.pb", False)
        input_graph = trainer.log_dir() + "/final.pb"

    if checkpoint_dir is None:
        checkpoint_dir = trainer.checkpoint_dir()

    freeze_graph(
        input_graph=input_graph,
        input_checkpoint=checkpoint_dir + "/best",
        output_graph=trainer.log_dir() + "/final_frozen.pb",
        output_node_names=model.output_node_names,
        input_binary=True,
        input_saver="",
        restore_op_name="save/restore_all",
        filename_tensor_name="save/Const:0",
        clear_devices=True,
        initializer_nodes="",
        variable_names_blacklist="",
    )
Example no. 15
def synthesize(args):
    device = CUDA(0)
    # Generating Training Data
    gt_paths = [
        f'{const.DATA_ROOT}/{args.train_mesh}/{args.train_mesh}_level{i:02d}.obj'
        for i in range(6)
    ]
    is_generated = all(os.path.isfile(gt_path) for gt_path in gt_paths)
    if (not is_generated) or args.no_cache:
        gen_args = options.GtOptions(tag='demo',
                                     mesh_name=args.train_mesh,
                                     template_name='sphere',
                                     num_levels=6)
        gt_gen = GroundTruthGenerator(gen_args, device)
    print("Finished generating training data with " + args.train_mesh,
          flush=True)

    # Training Synthesizer
    options_path = f'{const.PROJECT_ROOT}/checkpoints/{args.train_mesh}_demo/options.pkl'
    models_path = f'{const.PROJECT_ROOT}/checkpoints/{args.train_mesh}_demo/SingleMeshGenerator.pth'
    is_trained = os.path.isfile(options_path) and os.path.isfile(models_path)
    train_args = options.TrainOption(tag='demo',
                                     mesh_name=args.train_mesh,
                                     template_name='sphere',
                                     num_levels=6)
    if (not is_trained) or args.no_cache:
        trainer = Trainer(train_args, device)
        trainer.train()
    print("Finished training with " + args.train_mesh, flush=True)

    # Synthesizing Input
    m2m = Mesh2Mesh(train_args, CPU)
    mesh = mesh_utils.load_real_mesh(args.input_mesh, 0, True)
    out = m2m(mesh, 2, 5, 0)
    out.export(f'{const.RAW_MESHES}/{args.input_mesh}_hi')
    print("Finished synthesizing input on " + args.input_mesh, flush=True)
Example no. 16
    pretrained_embeddings = load_pretrained_embeddings(embeddings_path,
                                                       train_dataset.word2idx,
                                                       300, is_crf=crf_model)

    name_ = 'LSTM'
    hp = HyperParameters(name_, train_dataset.word2idx,
                         train_dataset.labels2idx,
                         pretrained_embeddings,
                         batch_size)

    # , collate_fn=DatasetParser.pad_collate
    train_dataset_ = DataLoader(dataset=train_dataset, batch_size=batch_size)
    dev_dataset_ = DataLoader(dataset=dev_dataset, batch_size=batch_size)
    test_dataset_ = DataLoader(dataset=test_dataset, batch_size=batch_size)

    model = BaselineModel(hp).to(train_dataset.get_device)
    trainer = Trainer(
        model=model,
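        # padding tokens are excluded from the loss via ignore_index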
        loss_function=CrossEntropyLoss(ignore_index=train_dataset.labels2idx['<PAD>']),
        optimizer=Adam(model.parameters()),
        batch_num=hp.batch_size,
        num_classes=hp.num_classes,
        verbose=True
    )

    save_to_ = join(RESOURCES_PATH, f"{model.name}_model.pt")
    trainer.train(train_dataset_, dev_dataset_, epochs=1, save_to=save_to_)

    evaluator = Evaluator(model, test_dataset_, crf_model)
    evaluator.check_performance(train_dataset.idx2label)
Example no. 17
    # atomic charges are present, so they replace the normal charge loss and nullify the dipole loss
    if data.Qa is not None:
        qloss_train = qaloss_t
        qloss_valid = qaloss_v
        dloss_train = tf.constant(0.0)
        dloss_valid = tf.constant(0.0)

    # define the loss function (used to train the model)
    l2loss = tf.reduce_mean(input_tensor=tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES))
    loss_t = (eloss_train + args.force_weight * floss_train
              + args.charge_weight * qloss_train + args.dipole_weight * dloss_train
              + args.nhlambda * nhloss_t + args.l2lambda * l2loss)
    loss_v = (eloss_valid + args.force_weight * floss_valid
              + args.charge_weight * qloss_valid + args.dipole_weight * dloss_valid
              + args.nhlambda * nhloss_v + args.l2lambda * l2loss)

#create trainer
trainer = Trainer(args.learning_rate,
                  args.decay_steps,
                  args.decay_rate,
                  scope="trainer")
with tf.compat.v1.name_scope("trainer_ops"):
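    # training op plus helper ops for backing up, loading, and restoring the EMA-averaged variables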
    train_op = trainer.build_train_op(loss_t, args.ema_decay, args.max_norm)
    save_variable_backups_op = trainer.save_variable_backups()
    load_averaged_variables_op = trainer.load_averaged_variables()
    restore_variable_backups_op = trainer.restore_variable_backups()


#creates a summary from key-value pairs given a dictionary
def create_summary(dictionary):
    summary = tf.compat.v1.Summary()
    for key, value in dictionary.items():
        summary.value.add(tag=key, simple_value=value)
    return summary
Example no. 18
data_loader, _ = get_mnist_dataloaders(batch_size=64)
#data_loader, _ = get_fashion_mnist_dataloaders(batch_size=64)
#data_loader = get_lsun_dataloader(path_to_data="/ubc/cs/research/plai-scratch/saeid/datasets/lsun", batch_size=64)
img_size = (32, 32, 1)

generator = Generator(img_size=img_size, latent_dim=100, dim=16)
discriminator = Discriminator(img_size=img_size, dim=16)

print(generator)
print(discriminator)

# Initialize optimizers
lr = 1e-4
betas = (.5, .9)
G_optimizer = optim.Adam(generator.parameters(), lr=lr, betas=betas)
D_optimizer = optim.Adam(discriminator.parameters(), lr=lr, betas=betas)

# Train model
epochs = 200
trainer = Trainer(generator,
                  discriminator,
                  G_optimizer,
                  D_optimizer,
                  use_cuda=torch.cuda.is_available())
trainer.train(data_loader, epochs, save_training_gif=True)

# Save models
name = 'mnist_model'
torch.save(trainer.G.state_dict(), './gen_' + name + '.pt')
torch.save(trainer.D.state_dict(), './dis_' + name + '.pt')
Example no. 19
    elements = ["Cu"]
    size = (3, 3, 3)
    temp = 500
    n_train = int(2e4)
    n_test = int(8e3)
    save_interval = 100
    train_traj = "training.traj"
    test_traj = "test.traj"

    max_steps = int(2e3)
    cutoff = Polynomial(6.0, gamma=5.0)
    num_radial_etas = 6
    num_angular_etas = 10
    num_zetas = 1
    angular_type = "G4"
    trn = Trainer(cutoff=cutoff)
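    # build the set of radial/angular symmetry functions (Gs) for the given elements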
    trn.create_Gs(elements, num_radial_etas, num_angular_etas, num_zetas, angular_type)

    trjbd = TrajectoryBuilder()
    calc = EMT()
    train_atoms = trjbd.build_atoms(system, size, temp, calc)
    calc = EMT()
    test_atoms = trjbd.build_atoms(system, size, temp, calc)

    steps, train_traj = trjbd.integrate_atoms(
        train_atoms, train_traj, n_train, save_interval
    )
    steps, test_traj = trjbd.integrate_atoms(
        test_atoms, test_traj, n_test, save_interval
    )
Example no. 20
import pickle

import scipy.stats
from sklearn.metrics import make_scorer
from sklearn.model_selection import RandomizedSearchCV
from sklearn_crfsuite import scorers
from sklearn_crfsuite import metrics

with open("../lib/models/korrespondez_model_stage4.pickle", "rb") as f:
    m = pickle.load(f)

labels = list(m.classes_)
labels.remove('O')

f1_scorer = make_scorer(metrics.flat_f1_score,
                        average='weighted',
                        labels=labels)

t = Trainer("../lib/config/korr_nlp.json")

#train,test = t.split(test_perc=0.20)
#t.training = train
#t.test = test

print("extracting features...")
t.set_feats_labels(template1)

params_space = {
    'c1': scipy.stats.expon(scale=0.5),
    'c2': scipy.stats.expon(scale=0.05),
}

# search
rs = RandomizedSearchCV(t.crf,
Example no. 21
import pathlib

import pandas as pd
import torch
from torch.utils.data import DataLoader

name = 'small_14_adam_1em3_bs128_'
base_path = pathlib.Path('models') / name
state_dict_path = base_path / 'best_model.pth'

model = small_resnet14()
model.load_state_dict(torch.load(state_dict_path))

batch_size = 256

trainer = Trainer(model,
                  None,
                  None,
                  None,
                  None,
                  None,
                  'cuda',
                  batch_size=batch_size,
                  save_path=base_path)

data_path_root_test = pathlib.Path('test/')
test_anno = pd.DataFrame({
    'id': [f'pic{num:06}' for num in range(10699)],
    'category': [0 for num in range(10699)]
})
test_dataset = MyDataset(data_dir=data_path_root_test,
                         data_anno=test_anno,
                         phase='train')
test_dataloader = DataLoader(test_dataset,
                             batch_size=batch_size,
Example no. 22
    x_train = train[train.fold != current_val_fold].id.values
    x_val = train[train.fold == current_val_fold].id.values

    train_dataset = TGSSaltDataset(osp.join(directory, 'train'),
                                   x_train,
                                   is_test=False,
                                   is_val=False,
                                   augment_func=aug)

    val_dataset = TGSSaltDataset(osp.join(directory, 'train'),
                                 x_val,
                                 is_test=False,
                                 is_val=True)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
    trainer = Trainer(myloss, iou_numpy, optimizer, MODEL_NAME, None, DEVICE)

    train_loader = get_loader(train_dataset, 'train', BATCH_SIZE)
    val_loader = get_loader(val_dataset, 'val', BATCH_SIZE)

    for i in range(EPOCHS):
        trainer.train(train_loader, model, i)
        trainer.validate(val_loader, model)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
    trainer = Trainer(lovasz, iou_numpy, optimizer, MODEL_NAME, None, DEVICE)

    EPOCHS = 200

    for i in range(EPOCHS):
        trainer.train(train_loader, model, i)
Example no. 23
        for k, v in pretrained_dict.items()
        if k in model_dict.keys() and v.size() == model_dict[k].size()
    }
    print('matched keys:', len(pretrained_dict))
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

# Move the model to the GPU.
# criterion = model.loss()

if (options["general"]["usecudnn"]):
    torch.cuda.manual_seed(options["general"]['random_seed'])
    torch.cuda.manual_seed_all(options["general"]['random_seed'])

if (options["training"]["train"]):
    trainer = Trainer(options, model)
if (options["validation"]["validate"]):
    if options['general']['mod'] == 'slice':
        validator = Validator2(
            options,
            'validation',
            model,
            savenpy=options["validation"]["saves"],
        )
    else:
        validator = Validator(
            options,
            'validation',
            model,
            savenpy=options["validation"]["saves"],
        )  # TODO:change mod
Example no. 24
        for k, v in pretrained_dict.items()
        if k in model_dict.keys() and v.size() == model_dict[k].size()
    }
    print('matched keys:', len(pretrained_dict))
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)

#Move the model to the GPU.
#criterion = model.loss()

if (options["general"]["usecudnn"]):
    torch.cuda.manual_seed(options["general"]['random_seed'])
    torch.cuda.manual_seed_all(options["general"]['random_seed'])

if (options["training"]["train"]):
    trainer = Trainer(options)
if (options["validation"]["validate"]):
    validator = Validator(options, 'validation')
if (options['test']['test']):
    tester = Validator(options, 'test')

for epoch in range(options["training"]["startepoch"],
                   options["training"]["epochs"]):
    if (options["training"]["train"]):
        trainer(model, epoch)
    if (options["validation"]["validate"]):
        result, re_all = validator(model)
        print('-' * 21)
        print('All acc:' + str(re_all))
        print('{:<10}|{:>10}'.format('Cls #', 'Accuracy'))
        for i in range(len(result)):
Example no. 25
        "energy_rmse": 1e-16,
        "force_rmse": None,
        "max_steps": max_steps
    }
    force_coefficient = None
    overfit = 1e-7
    hidden_layers = [10, 10]
    cutoff = Polynomial(5.0, gamma=5.0)
    num_radial_etas = 7
    num_angular_etas = 11
    num_zetas = 1
    angular_type = "G4"
    trn = Trainer(
        convergence=convergence,
        force_coefficient=force_coefficient,
        overfit=overfit,
        cutoff=cutoff,
        hidden_layers=hidden_layers,
    )
    trn.create_Gs(elements, num_radial_etas, num_angular_etas, num_zetas,
                  angular_type)

    trjbd = TrajectoryBuilder()
    calc = OpenKIMcalculator("SW_StillingerWeber_1985_Si__MO_405512056662_005")
    train_atoms = trjbd.build_atoms(system, size, temp, calc)

    steps, train_traj = trjbd.integrate_atoms(train_atoms,
                                              train_traj,
                                              n_train,
                                              save_interval,
                                              timestep=timestep)
Example no. 26
                             phase="val",
                             valSize=4)
    print("Train Dataset Path :", trainData_path)
    print("Val Dataset Path :", valData_path)

    # make model
    pix2pixHD = Pix2PixHDModel(opt)

    # dataloader
    train_dataloader = DataLoader(
        dataset=train_dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.nThreads,
    )
    val_dataloader = DataLoader(
        dataset=val_dataset,
        batch_size=len(val_dataset),
        shuffle=False,
        num_workers=opt.nThreads,
    )

    # updater
    updater = Updater(dataloader=train_dataloader, model=pix2pixHD)

    # trainer
    trainer = Trainer(updater, opt, val_dataloader=val_dataloader)

    # run
    log = trainer.run()
Example no. 27
import torch
from torch import optim

from model_new import Generator, Discriminator
from training import Trainer
import random
import sys
from torchsummary import summary

img_size = (48, 48, 3)
batch_size = 64
# Hyper parameters
g_lr = 1e-4
d_lr = 4e-4
betas = (0., .99)

data_loader, _, _ = get_STL10_dataloaders(batch_size=batch_size)

generator = Generator(z_size = 128, channel = 3, output_size=48)
discriminator = Discriminator(channel = 3, ssup = True)

# Initialize optimizers
G_optimizer = optim.Adam(generator.parameters(), lr=g_lr, betas=betas)
D_optimizer = optim.Adam(discriminator.parameters(), lr=d_lr, betas=betas)


# Train model
epochs = 200
trainer = Trainer(generator, discriminator, G_optimizer, D_optimizer,
                  weight_rotation_loss_d = 1.0, weight_rotation_loss_g = 0.2, critic_iterations=1,
                  use_cuda=torch.cuda.is_available())
trainer.train(data_loader, epochs, save_training_gif=True)

Example no. 28
    elements = ["Cu"]
    size = (2, 2, 2)
    temp = 500
    n_test = int(2e4)
    save_interval = 100

    max_steps = int(2e3)
    convergence = {"energy_rmse": 1e-16, "force_rmse": None, "max_steps": max_steps}
    force_coefficient = None
    cutoff = Polynomial(6.0, gamma=5.0)
    num_radial_etas = 6
    num_angular_etas = 10
    num_zetas = 1
    angular_type = "G4"
    trn = Trainer(
        convergence=convergence, force_coefficient=force_coefficient, cutoff=cutoff
    )
    trn.create_Gs(elements, num_radial_etas, num_angular_etas, num_zetas, angular_type)

    trjbd = TrajectoryBuilder()
    n_images = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]
    train_trajs = ["training_n{}.traj".format(ni) for ni in n_images]
    test_traj = "test.traj"
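    # generate one training trajectory per entry in n_images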
    for i in range(len(n_images)):
        calc = EMT()
        train_atoms = trjbd.build_atoms(system, size, temp, calc)
        n_train = n_images[i] * save_interval
        steps, train_trajs[i] = trjbd.integrate_atoms(
            train_atoms, train_trajs[i], n_train, save_interval
        )
Example no. 29
        optimizer_D = torch.optim.Adam(model.discriminator.parameters(), lr=configs['d_learning_rate'], weight_decay=configs['weight_decay'])

        print('Model {}, Number of parameters {}'.format(args.model, count_params(model)))
        criterion = torch.nn.BCELoss()
        trainer = GANTrainer(model, optimizer_G, optimizer_D, train_loader, val_loader, test_loader, criterion, configs['epochs'], args.model)
        trainer.fit()
        sys.exit(0)

    elif args.model in ['inn']:
        
        model = INN(configs['ndim_total'], configs['input_dim'], configs['output_dim'], dim_z = configs['latent_dim']).to(DEVICE)
        print('Model {}, Number of parameters {}'.format(args.model, count_params(model)))
        optimizer = torch.optim.Adam(model.parameters(), lr=configs['learning_rate'], weight_decay=configs['weight_decay'])

        criterion = torch.nn.MSELoss()
        trainer = INNTrainer(model, optimizer, train_loader, val_loader, test_loader, criterion, configs['epochs'], args.model)
        trainer.fit()

        sys.exit(0)

    else:
        raise NameError
    
    print('Model {}, Number of parameters {}'.format(args.model, count_params(model)))
    criterion = nn.MSELoss()
    trainer = Trainer(model, optimizer, train_loader, val_loader, test_loader, criterion, configs['epochs'], args.model)
    # train the model 
    trainer.fit()


    
Example no. 30
    optimizer = Adam(model.parameters(), lr=1e-3)

    # Criterion (add weights?)
    criterion = nn.CrossEntropyLoss()

    # Scheduler
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1.5 * 1e-3, steps_per_epoch=len(train_dataloader),
                                                    epochs=num_epochs)

    # Training
    # optim_gen = lambda parameters, lr: SGD(parameters, lr=lr)
    # find_lr(model, optim_gen, min_lr, max_lr, num_epochs, train_dataloader, val_dataloader, criterion, device, batch_size,
    #         batches_per_epoch, comet_experiment)
    save_path = pathlib.Path('models') / name
    save_path.mkdir(parents=True, exist_ok=True)
    trainer = Trainer(model, train_dataloader, val_dataloader, criterion, optimizer, None, device, TRAFFIC_LABELS,
                      num_epochs, batch_size, batches_per_epoch, comet_experiment, save_path)
    try:
        trainer.fit()
    except KeyboardInterrupt:
        pass

    # Prediction
    data_path_root_test = pathlib.Path('test/')
    test_anno = pd.DataFrame({'id': [f'pic{num:06}' for num in range(10699)]})
    test_dataset = MyDataset(data_dir=data_path_root_test, data_anno=test_anno, phase='test')
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=0)
    preds = trainer.predict(test_dataloader)
    submit = pd.DataFrame({'id': [f'pic{num:06}' for num in range(10699)],
                           'category': [TRAFFIC_LABELS[pred] for pred in preds]})
    submit.to_csv(save_path / 'submit.csv')