Example #1
from datetime import datetime
from pathlib import Path

import yaml

import utils  # project-local module providing Trainer


def run(config: dict) -> None:
    experiment_name = (
        f"{config['model']['type']}_"
        f"{config['data']['train']['dataset_name']}_"
        f"{config['model']['loss']}_{datetime.now().strftime('%d%m%Y_%H%M%S')}"
    )
    model_path = Path(config['output_path']) / experiment_name
    model_path.mkdir(parents=True)
    with open(model_path / "config.yaml", "w") as config_file:
        yaml.dump(config, config_file)

    trainer = utils.Trainer(config=config, experiment_path=model_path)
    trainer.run()
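
# A minimal sketch of invoking run(); the config keys mirror those the
# f-string above reads (model.type, model.loss, data.train.dataset_name,
# output_path), though the full schema is an assumption:
if __name__ == "__main__":
    with open("config.yaml") as f:
        run(yaml.safe_load(f))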
Example #2
def __init__(self, args, split=None, trainer=None):
    """
    :param args: a Namespace, from argparse.parse_args
    :param split: an integer between 1 and args.folds inclusive, or None
    :param trainer: a Trainer instance, or None
    """
    self.procdata = None
    # Command-line arguments (Namespace)
    self.args = self._tidy_args(args, split)
    # TODO(dacart): introduce a switch to allow non-GPU use, achieved with:
    # os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    # Utility methods for training a model
    self.trainer = trainer or utils.Trainer(args, add_timestamp=True)
    # Attributes set in other methods:
    # Conditions: the input signals of the data being modelled
    self.conditions = None
    # DatasetPair, built from a training Dataset and a validation Dataset
    self.dataset_pair = None
    # Decoder and encoder networks
    self.decoder = None
    self.encoder = None
    # Number of instances in a batch (int)
    self.n_batch = None
    # Number of "theta" parameters: local, global-conditional and global (int)
    self.n_theta = None
    # Collection of attributes related to the training objective
    self.objective = None
    # Value of spec["params"] from the YAML file (dict)
    self.params_dict = None
    # Collection of placeholder attributes, each a Tensor, fed with new values for each batch
    self.placeholders = None
    # Training feed_dict: dict from placeholder Tensor to np.array
    self.train_feed_dict = None
    # TrainingStepper object
    self.training_stepper = None
    # Validation feed_dict: dict from placeholder Tensor to np.array
    self.val_feed_dict = None
    # Model path for storing the best weights so far
    self.model_path = os.path.join(self.trainer.tb_log_dir, 'saver',
                                   'sess_max_elbo')
Example #3
    if args.visdom:
        # NOTE: this guard is an assumption; the original snippet starts mid-if.
        if not args.visdom_server.startswith("http://"):
            args.visdom_server = "http://" + args.visdom_server
        print(f"after: {args.visdom_server}")

        viz = pt_utils.VisdomViz(server=args.visdom_server,
                                 port=args.visdom_port)
    else:
        viz = pt_utils.CmdLineViz()

    viz.text(pprint.pformat(vars(args)))

    if not osp.isdir("checkpoints"):
        os.makedirs("checkpoints")
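
    # `it`, `start_epoch`, and `best_loss` (used below) are typically restored
    # from a previous checkpoint. A sketch of that pattern; `args.checkpoint`
    # and the checkpoint keys are assumptions, not pt_utils' actual format:
    start_epoch, it, best_loss = 1, -1, 1e10
    if getattr(args, "checkpoint", None) and osp.isfile(args.checkpoint):
        ckpt = torch.load(args.checkpoint)
        model.load_state_dict(ckpt["model_state"])
        optimizer.load_state_dict(ckpt["optimizer_state"])
        start_epoch, best_loss = ckpt["epoch"] + 1, ckpt["best_loss"]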

    trainer = pt_utils.Trainer(
        model,
        model_fn,
        optimizer,
        checkpoint_name="checkpoints/pointnet2_cls",
        best_name="checkpoints/pointnet2_cls_best",
        lr_scheduler=lr_scheduler,
        bnm_scheduler=bnm_scheduler,
        viz=viz,
    )

    trainer.train(
        it, start_epoch, args.epochs, train_loader, test_loader, best_loss=best_loss
    )

    if start_epoch == args.epochs:
        _ = trainer.eval_epoch(test_loader)
Example #4
        indices[li][1].append(cnt)
        cnt += 1

# In[52]:

for i, c in enumerate(classes):
    plt.plot(indices[i][1], label=c)
plt.legend()
plt.show()

# Test for LenNet

# In[58]:

lennet = Life_Universe_and_Everything.LenNet()
fitter1 = utils.Trainer(lennet, max_iter=300)
lennet_loss_over_tr, lennet_loss_val = fitter1.fit(train, valid)

# As the training loss fluctuates strongly, we average over each window of 5 mini-batches.

# In[31]:

#len_loss = [np.mean(lennet_loss_over_tr[5*i:5*i+5]) for i in range(int(len(lennet_loss_over_tr)/5))]
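
# A self-contained sketch of that chunked averaging, equivalent to the
# commented-out line above (assumes NumPy is available as np):
import numpy as np
_tr = np.asarray(lennet_loss_over_tr, dtype=float)
len_loss = _tr[:len(_tr) // 5 * 5].reshape(-1, 5).mean(axis=1)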

# Let's view the last 20 validation losses.

# In[54]:

#plt.plot([i for i in range(len(lennet_loss_over_tr))], lennet_loss_over_tr, label='train')
plt.plot(range(20), lennet_loss_val[-20:], '-x')
#plt.plot([j for j in range(int(len(lennet_loss_val)/4))], [lennet_loss_val[4*k] for k in range(int(len(lennet_loss_val)/4))], '-x', label='valid')
Example #5
import torch
import torchvision as tv

import data   # project-local module providing DogCat
import utils  # project-local module providing Config and Trainer

# The transform definition was truncated in the source; this is a minimal
# stand-in so the snippet runs, not the actual transforms:
transform = {
    x: tv.transforms.Compose([tv.transforms.Resize((224, 224)),
                              tv.transforms.ToTensor()])
    for x in ['train', 'val']
}

config = utils.Config()
dataset = {
    x: data.DogCat(config.data['dogcat'], phase=x, transform=transform[x])
    for x in ['train', 'val']
}
dataloader = {
    x: torch.utils.data.DataLoader(dataset[x], batch_size=32, shuffle=True)
    for x in ['train', 'val']
}

model = tv.models.densenet121(pretrained=True)
model.classifier = torch.nn.Linear(model.classifier.in_features, 2)

model = model.to(config.device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop([{
    'params': model.features.parameters()
}, {
    'params': model.classifier.parameters(),
    'lr': 1e-3,
    'momentum': 0.9
}],
                                lr=0,
                                momentum=0)
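
# Note: lr=0 on the first parameter group effectively freezes `model.features`
# while the classifier trains at lr=1e-3. An equivalent alternative (sketch)
# would be to stop gradients explicitly:
#     for p in model.features.parameters():
#         p.requires_grad = False
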
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5)

trainer = utils.Trainer(model, criterion, optimizer, scheduler)
trainer.fit(dataloader, epochs=100, initial_epoch=0, device=config.device)
Example #6
# Define a parameter sample using exp(mu) for each LogNormal prior
# (exp(mu) is the median of a LogNormal, not its mode)
theta = distributions.DotOperatorSamples()
for k, v in priors.items():
    if k != "conditioning":
        if 'mu' in v:
            sample_value = np.exp(v['mu'])
        else:
            sample_value = shared[v['distribution']]
        theta.add(k, np.tile(sample_value, [1, 1]).astype(np.float32))

# Add the constants separately
for k, v in para_settings['constant'].items():
    theta.add(k, np.tile(v, [1,1]).astype(np.float32))

# Set up model runner
trainer = utils.Trainer(args, add_timestamp=True)
self = Runner(args, 0, trainer)
self.params_dict = para_settings
self._prepare_data(data_settings)
self.n_batch = min(self.params_dict['n_batch'], self.dataset_pair.n_train)

# Set various attributes of the model
model = self.params_dict["model"]
model.init_with_params(self.params_dict, self.procdata)

# Define simulation variables and run simulator
times = np.linspace(0.0, 20.0, 101).astype(np.float32)
conditions = np.array([[1.0, 1.0]]).astype(np.float32)
dev_1hot = np.expand_dims(np.zeros(7).astype(np.float32), 0)
sol_rk4 = model.simulate(theta, times, conditions, dev_1hot, 'rk4')[0]
sol_mod = model.simulate(theta, times, conditions, dev_1hot, 'modeulerwhile')[0]
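
# A quick sanity check (sketch) that the two integrators agree, assuming the
# solutions come back as NumPy-convertible arrays:
print("max |rk4 - modeuler| =", np.abs(np.asarray(sol_rk4) - np.asarray(sol_mod)).max())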
Example #7
importlib.reload(utils)


# In[2]:


train, valid, test, classes = utils.load(scale=32)


# # LenNet

# In[9]:


lennet = Life_Universe_and_Everything.LenNet()
coach_len = utils.Trainer(lennet)
len_tr_loss1, len_tr_ac1, len_te_loss1, len_te_ac1 = coach_len.fit(train=train, valid=test, early_stopping=3, lr=0.1, max_iter=100)


# In[11]:


len_tr_loss2, len_tr_ac2, len_te_loss2, len_te_ac2 = coach_len.fit(train=train, valid=test, early_stopping=3, lr=0.01)

len_tr_loss3, len_tr_ac3, len_te_loss3, len_te_ac3 = coach_len.fit(train=train, valid=test, early_stopping=3, lr=0.001)
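
# The three fit() calls implement a manual learning-rate schedule
# (0.1 -> 0.01 -> 0.001), each stage with early stopping (presumably after
# 3 epochs without improvement) before the final weights are saved below.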

torch.save(lennet.state_dict(), './final/LenNet-ep.pkl')


# In[13]: