Example #1
        train_loader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers)
        val_loader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                num_workers=args.num_workers)

        dset_loaders = {'train': train_loader, 'val': val_loader}
        dset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}

        loss = getattr(nn, args.loss)()
        optimizer = getattr(optim, args.optimizer)(net.parameters(), args.lr)
        scheduler = ReduceLROnPlateau(optimizer,
                                      'min',
                                      patience=2,
                                      verbose=True)

        model.compile(loss, optimizer, scheduler=scheduler)

        folder_checkpoint = "model_checkpoints/" + args.env_name.replace(
            ' ', '_') + '/'

        os.makedirs(folder_checkpoint, exist_ok=True)

        model_checkpoint = folder_checkpoint + "weights.{epoch:02d}-train_loss:{train_loss:.2f}" \
                           + "-train_acc:{train_acc:.2f}-val_loss:{val_loss:.2f}-val_acc:{val_acc:.2f}.pkl"

        monitor_outputs = [0, 1, 2]
        display_acc = 1

        loss_weights = None
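
The `ReduceLROnPlateau` scheduler built above only acts when it is stepped with the monitored metric once per epoch; the `model.compile` wrapper presumably does that internally. A minimal, runnable sketch of the underlying PyTorch mechanism, with a made-up validation loss standing in for a real evaluation pass:

    import torch.nn as nn
    import torch.optim as optim
    from torch.optim.lr_scheduler import ReduceLROnPlateau

    net = nn.Linear(8, 2)  # placeholder model
    optimizer = optim.Adam(net.parameters(), lr=1e-3)
    scheduler = ReduceLROnPlateau(optimizer, 'min', patience=2)

    for epoch in range(10):
        val_loss = 0.5  # stand-in: a real loop would compute this on val_loader
        # 'min' mode: once val_loss has failed to improve for more than
        # `patience` epochs, the LR is multiplied by the default factor 0.1
        scheduler.step(val_loss)
        print(epoch, optimizer.param_groups[0]['lr'])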
Example #2
        dset_loaders = {'train_two_class': train_loader, 'val': val_loader}
        dset_sizes = {
            'train_two_class': len(train_dataset),
            'val': len(val_dataset)
        }

        # dict_param = [{'params': x.parameters()} for x in net.phi_r]

        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(
            net.parameters(),
            lr=1e-3,
            # momentum=0.9
        )

        model_checkpoint = "/scratch/datasets/vrd/weights.{epoch:02d}-train_loss:{train_loss:.2f}-train_acc:{train_acc:.2f}" \
                           + "-val_loss:{val_loss:.2f}-val_acc:{val_acc:.2f}.pkl"
        model.compile(loss=criterion,
                      opt=optimizer,
                      weight_decay=0.1,
                      decay_step=10)
        model.fit_loader(dset_loaders,
                         dset_sizes,
                         batch_size=batch_size,
                         num_epochs=num_epochs,
                         model_checkpoint=model_checkpoint,
                         env=env_name)

    else:
        run_evaluate(model, 'data/annotations_test.json', 'test')
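
The `model_checkpoint` strings in these examples are plain `str.format` templates; the training wrapper presumably fills in the epoch number and metrics at save time. A small sketch of how such a template resolves, with made-up metric values:

    template = ("weights.{epoch:02d}-train_loss:{train_loss:.2f}"
                "-train_acc:{train_acc:.2f}-val_loss:{val_loss:.2f}"
                "-val_acc:{val_acc:.2f}.pkl")

    path = template.format(epoch=7, train_loss=0.4321, train_acc=0.851,
                           val_loss=0.5208, val_acc=0.812)
    # -> weights.07-train_loss:0.43-train_acc:0.85-val_loss:0.52-val_acc:0.81.pkl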
Example #3
        val_dataset = ImprovedVRDDataset('test', args)
        val_loader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                num_workers=args.num_workers)

        dset_loaders = {'train': train_loader, 'val': val_loader}
        dset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}

        criterion = getattr(nn, args.loss)()
        optimizer = getattr(optim, args.optimizer)(net.parameters(), args.lr)
        scheduler = ReduceLROnPlateau(optimizer,
                                      'min',
                                      patience=2,
                                      verbose=True)

        model.compile(criterion, optimizer, scheduler=scheduler)

        folder_checkpoint = "model_checkpoints/" + args.env_name.replace(
            ' ', '_') + '/'

        os.makedirs(folder_checkpoint, exist_ok=True)

        model_checkpoint = folder_checkpoint + "weights.{epoch:02d}-train_loss:{train_loss:.2f}" \
            + "-train_acc:{train_acc:.2f}-val_loss:{val_loss:.2f}-val_acc:{val_acc:.2f}.pkl"

        multi_output_exps = [6, 11, 12, 14, 16, 17, 18, 19, 20, 22]
        monitor_outputs = [0, 1, 2] if args.exp_num in multi_output_exps else None

        # remaining arguments assumed to mirror the fit_loader call in Example #2
        model.fit_loader(dset_loaders,
                         dset_sizes,
                         batch_size=args.batch_size,
                         num_epochs=args.num_epochs,
                         model_checkpoint=model_checkpoint,
                         env=args.env_name)
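
Examples #1 and #3 select the loss and optimizer by name via `getattr`, which lets both be chosen from command-line arguments. The same pattern in isolation, with illustrative literals standing in for `args`:

    import torch.nn as nn
    import torch.optim as optim

    loss_name, opt_name, lr = 'CrossEntropyLoss', 'Adam', 1e-3  # illustrative values
    net = nn.Linear(10, 5)                                      # placeholder model

    criterion = getattr(nn, loss_name)()                        # -> nn.CrossEntropyLoss()
    optimizer = getattr(optim, opt_name)(net.parameters(), lr)  # -> optim.Adam(params, lr)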
Example #4
    def _compile(self, inn, outt):
        # wrap the given input/output tensors in a Model and compile it
        # with the project's custom loss (tnorm_loss) and metric (bin_acc)
        model = Model(inputs=inn, outputs=outt)
        model.compile(optimizer='RMSprop', loss=tnorm_loss, metrics=[bin_acc])
        return model
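
`_compile` here is a thin wrapper over the standard Keras API: build a `Model` from input/output tensors, then `compile` it with a custom loss and metric. `tnorm_loss` and `bin_acc` are project-specific and not shown in the snippet; a self-contained sketch with stand-in definitions that follow the usual Keras `(y_true, y_pred)` signature:

    from keras.layers import Input, Dense
    from keras.models import Model
    import keras.backend as K

    def tnorm_loss(y_true, y_pred):
        # stand-in only: plain binary cross-entropy, not the real t-norm loss
        return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)

    def bin_acc(y_true, y_pred):
        # stand-in: element-wise binary accuracy
        return K.mean(K.cast(K.equal(y_true, K.round(y_pred)), 'float32'), axis=-1)

    inn = Input(shape=(16,))
    outt = Dense(1, activation='sigmoid')(inn)
    model = Model(inputs=inn, outputs=outt)
    model.compile(optimizer='RMSprop', loss=tnorm_loss, metrics=[bin_acc])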