Example 1
def run(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # dataset
    train_set = NoisyBSDSDataset(args.root_dir,
                                 image_size=args.image_size,
                                 sigma=args.sigma)
    test_set = NoisyBSDSDataset(args.root_dir,
                                mode='test',
                                image_size=args.test_image_size,
                                sigma=args.sigma)

    # model
    if args.model == 'dncnn':
        net = DnCNN(args.D, C=args.C).to(device)
    elif args.model == 'udncnn':
        net = UDnCNN(args.D, C=args.C).to(device)
    elif args.model == 'dudncnn':
        net = DUDnCNN(args.D, C=args.C).to(device)
    else:
        raise ValueError('Please enter: dncnn, udncnn, or dudncnn')

    # optimizer
    adam = torch.optim.Adam(net.parameters(), lr=args.lr)

    # stats manager
    stats_manager = DenoisingStatsManager()

    # experiment
    exp = nt.Experiment(net,
                        train_set,
                        test_set,
                        adam,
                        stats_manager,
                        batch_size=args.batch_size,
                        output_dir=args.output_dir,
                        perform_validation_during_training=True)

    # run
    if args.plot:
        fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(9, 7))
        exp.run(num_epochs=args.num_epochs,
                plot=lambda exp: plot(
                    exp, fig=fig, axes=axes, noisy=test_set[73][0]))
    else:
        exp.run(num_epochs=args.num_epochs)
Example 2
def run(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # dataset
    train_set = BirdsDataset(args.root_dir, image_size=args.image_size)
    val_set = BirdsDataset(args.root_dir,
                           mode='val',
                           image_size=args.image_size)
    num_classes = train_set.number_of_classes()

    # model
    if args.model == 'vgg':
        net = VGG16Transfer(num_classes).to(device)
    else:
        net = Resnet18Transfer(num_classes).to(device)

    # optimizer
    adam = torch.optim.Adam(net.parameters(), lr=args.lr)

    # stats manager
    stats_manager = ClassificationStatsManager()

    # experiment
    exp = nt.Experiment(net,
                        train_set,
                        val_set,
                        adam,
                        stats_manager,
                        batch_size=args.batch_size,
                        output_dir=args.output_dir,
                        perform_validation_during_training=True)

    # run
    if args.plot:
        fig, axes = plt.subplots(ncols=2, figsize=(7, 3))
        exp.run(num_epochs=args.num_epochs,
                plot=lambda exp: plot(exp, fig=fig, axes=axes))
    else:
        exp.run(num_epochs=args.num_epochs)
Example 3
discriminator_.apply(weights_init)

criterion = args['loss_criterion']

params_gen = list(generator_.parameters())
params_dis = list(discriminator_.parameters())

optimizer_gen = torch.optim.Adam(params_gen, lr=args['learning_rate_gen'], betas=(args['beta'], 0.999))
optimizer_dis = torch.optim.Adam(params_dis, lr=args['learning_rate_dis'], betas=(args['beta'], 0.999))


d_stats_manager, g_stats_manager = nt.StatsManager(), nt.StatsManager()


exp1 = nt.Experiment(generator_, discriminator_, device, criterion, optimizer_gen, optimizer_dis,
                     d_stats_manager,g_stats_manager,
                     output_dir=args['model_path'])


#exp1.run(num_epochs=args['epochs'])


def generateZtoY(sampleSize, path):
    from models import classifier

    classifierCheckpoint = torch.load('./results/classifier')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    extractor = classifier.Extractor().to(device)
    emotion_classifier = classifier.Classifier(7).to(device)
Example 4
                formats[metric] % yolo.metrics.get(metric, 0)
                for yolo in model.yolo_layers
            ]
            metric_table += [[metric, *row_metrics]]

        log_str += AsciiTable(metric_table).table
        log_str += f"\nTotal loss {loss.item()}"

        epoch_batches_left = batches - (batch + 1)
        time_left = datetime.timedelta(seconds=epoch_batches_left *
                                       (time.time() - start_time) /
                                       (batch + 1))
        log_str += f"\n---- ETA {time_left}"

        print(log_str)

    train_set = ListDataset(train_path,
                            augment=True,
                            multiscale=opt.multiscale_training)
    optimizer = torch.optim.Adam(model.parameters())
    stats_manager = nt.StatsManager()
    exp_training = nt.Experiment(model,
                                 train_set,
                                 train_set,
                                 optimizer,
                                 stats_manager,
                                 opt.checkpoint_interval,
                                 output_dir="training_experiment_1",
                                 batch_size=2,
                                 perform_validation_during_training=False)
    exp_training.run(num_epochs=2, log=log)
Example 5
  If not specified, a new directory with an arbitrary name is created.
  Take time to read and carefully interpret the code of `Experiment`,
  then run the following
"""

B = 4
D = 6
lr = 1e-3
dncnn = DnCNN(D, apply_init=True)
dncnn = dncnn.to(device)
adam = torch.optim.Adam(dncnn.parameters(), lr=lr)
stats_manager = DenoisingStatsManager()
exp1 = nt.Experiment(dncnn,
                     train_set,
                     val_set,
                     adam,
                     stats_manager,
                     batch_size=B,
                     output_dir="denoising1",
                     perform_validation_during_training=True)
"""### Question 5
Check that a directory `denoising1` has been created and inspect its content.
  Explain the file _config.txt_.
  What does the file _checkpoint.pth.tar_ correspond to? Base your answer on the PyTorch documentation, `torch.save()`, and `nntools.py`.

Note: Do not try to open the _checkpoint.pth.tar_ file in a text editor.
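
For reference, the checkpoint can be loaded and inspected from Python instead (a minimal sketch, assuming it was written with `torch.save()` as in `nntools.py`; the path matches the directory above):

    import torch
    checkpoint = torch.load('denoising1/checkpoint.pth.tar', map_location='cpu')
    print(checkpoint.keys())  # expect entries such as the network/optimizer state dicts and the history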

### Question 6

Run the experiment for 20 epochs by executing the following code. Every $visu\_rate$ epochs, your function should display
something similar to the results given hereafter.
If it does not, interrupt it,
Example 6
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


model = architectures.CovidDNN().to(device)

model.apply(weights_init)

criterion = args['loss_criterion']

params = list(model.parameters())

optimizer = torch.optim.SGD(params, lr=args['learning_rate'])

stats_manager = nt.StatsManager()

exp1 = nt.Experiment(model,
                     device,
                     criterion,
                     optimizer,
                     stats_manager,
                     output_dir=args['model_path'])

if __name__ == "__main__":
    exp1.run(num_epochs=args['epochs'])
Example 7
########## MultiBoxLoss Hyper Parameters #####################
overlap_thresh = 0.5
prior_for_matching = True
bkg_label = 0
neg_mining = True
neg_pos = 3
neg_overlap = 0.5
encode_target = False
alpha = 1
device = 'cuda' if torch.cuda.is_available() else 'cpu'
batch_size = 32
##############################################################

train_loader = td.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True,
                             collate_fn=detection_collate, pin_memory=True)
val_loader = td.DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False,
                           collate_fn=detection_collate, pin_memory=True)

lr = 1e-3
SSD300_model = build_ssd300(voc['num_classes'], overlap_thresh, prior_for_matching, bkg_label, 
                            neg_mining, neg_pos, neg_overlap, encode_target, alpha, device)
SSD300_model = SSD300_model.to(device)
adam = torch.optim.Adam(SSD300_model.parameters(), lr=lr)
stats_manager = SSD300StatsManager()
exp = nt.Experiment(SSD300_model, train_loader, val_loader, adam, stats_manager,
                    output_dir="../weight/SSD300_exp", batch_size=batch_size,
                    perform_validation_during_training=False)
print('Training on {} ...'.format(device))
exp.run(num_epochs=1)
fig, axes = plt.subplots(figsize=(7, 6))
plot(exp, fig, axes)
Example 8
    def summarize(self):
        loss = super(statsmanager, self).summarize()
        return {'loss': loss}

def plot(exp, fig, ax1, ax2, im):
    ax1.set_title('Image')
    myimshow(im, ax=ax1)
    ax2.set_title('Loss')
    ax2.plot([exp.history[k]['loss'] for k in range(exp.epoch)])
    plt.tight_layout()
    fig.canvas.draw()

lr = 1e-3
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
net = model.Resnet34Transfer(num_classes=20, n_batch=4)
net.to(device)
adam = torch.optim.Adam(net.parameters(), lr=lr)
stats_manager = statsmanager()
# train_set = VOCDataset('../VOCdevkit/VOC2012/')
# valid_set = VOCDataset('../VOCdevkit/VOC2012/', mode="val")
train_set = VOCDataset('/datasets/ee285f-public/PascalVOC2012')
valid_set = VOCDataset('/datasets/ee285f-public/PascalVOC2012', mode="val")
x, y = train_set[0]
exp1 = nt.Experiment(net, train_set, valid_set, adam, stats_manager,
                     batch_size=16, output_dir="runres34",
                     perform_validation_during_training=False)
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1)
# exp1.run(num_epochs=30, plot=lambda exp: plot(exp, fig=fig, ax1=ax1, ax2=ax2, im=x))
exp1.run(num_epochs=30)
Example 9
    ax1.set_title('Image')
    x, y = train_set[0]
    myimshow(x, ax=ax1)
    ax2.set_title('Loss')
    ax2.plot([exp1.history[k]['loss'] for k in range(exp1.epoch)])
    plt.tight_layout()
    fig.canvas.draw()


lr = 1e-3
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
vgg = model.VGGTransfer(num_classes=20, n_batch=4)
vgg.to(device)
adam = torch.optim.Adam(vgg.parameters(), lr=lr)
stats_manager = statsmanager()
train_set = VOCDataset('../VOCdevkit/VOC2012/')
valid_set = VOCDataset('../VOCdevkit/VOC2012/', mode="val")
x, y = train_set[0]
exp1 = nt.Experiment(vgg,
                     train_set,
                     valid_set,
                     adam,
                     stats_manager,
                     batch_size=4,
                     output_dir="run1",
                     perform_validation_during_training=True)
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1)
exp1.run(num_epochs=5,
         plot=lambda exp: plot(exp, fig=fig, ax1=ax1, ax2=ax2, im=x))
# through the network and returns the statistics computed by the stats manager <br>
# This method allows us to evaluate the current model with the current parameters of the experiment <br>
# This evaluation lets us assess the parameters and the model's performance on a dataset different from the training set
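
# A minimal usage sketch of the method described above (assuming, as in the course's
# nntools.py, that the Experiment object exposes an evaluate() method):
val_stats = exp1.evaluate()  # run the validation set through the network
print(val_stats)             # the statistics computed by the stats manager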

# In[19]:

# Part 13
lr = 1e-3
net = VGG16Transfer(num_classes)
net = net.to(device)
adam = torch.optim.Adam(net.parameters(), lr=lr)
stats_manager = ClassificationStatsManager()
exp1 = nt.Experiment(net,
                     train_set,
                     val_set,
                     adam,
                     stats_manager,
                     output_dir="birdclass1",
                     perform_validation_during_training=True)

# A new "birdclass1" directory has been created <br>
# Two files have been created in that directory: config.txt and checkpoint.pth.tar <br>
# I have viewed config.txt as requested; it describes the settings of the experiment and stores the variables' values <br>
# checkpoint.pth.tar is a binary file containing the state of the experiment, serving as a record of the experiment <br>
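
# For instance, config.txt can be printed directly (a sketch; the path assumes the
# directory created above):
with open('birdclass1/config.txt') as f:
    print(f.read())  # human-readable summary of the experiment's settings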

# In[20]:

# Part 14
lr = 1e-3

adam = torch.optim.Adam(net.parameters(), lr=lr)
Example 11
        avg_psnr = self.running_psnr / self.number_update
        return {'loss': loss, 'avg_psnr': avg_psnr}


# Part 10
D = 6
lr = 1e-3
net = DnCNN(D)
net = net.to(device)
adam = torch.optim.Adam(net.parameters(), lr=lr)
stats_manager = DenoisingStatsManager()
val_set = test_set
exp1 = nt.Experiment(net,
                     train_set,
                     val_set,
                     adam,
                     stats_manager,
                     output_dir="denoising21",
                     batch_size=4,
                     perform_validation_during_training=False)
exp2 = nt.Experiment(net,
                     train_set,
                     val_set,
                     adam,
                     stats_manager,
                     output_dir="denoising22",
                     batch_size=4,
                     perform_validation_during_training=False)


# Part 11
def plot(exp, fig, axes, noisy, visu_rate=2):
Example 12
    def accumulate(self, loss, x, y, d):
        super(ClassificationStatsManager, self).accumulate(loss, x, y, d)
        _, pred = torch.max(y, 1)
        self.running_accuracy += torch.mean((pred == d).float())

    def summarize(self):
        loss = super(ClassificationStatsManager, self).summarize()
        accuracy = (100 * self.running_accuracy) / self.number_update
        return {'loss': loss, 'accuracy': accuracy}


lr = 1e-3
adam = torch.optim.Adam(decoder.parameters(), lr=lr)
stats_manager = ClassificationStatsManager()
exp1 = nt.Experiment(decoder,
                     coco_train,
                     coco_val,
                     adam,
                     stats_manager,
                     output_dir="captioning2",
                     batch_size=64,
                     perform_validation_during_training=True)

fig, axes = plt.subplots(ncols=2, figsize=(7, 3))
exp1.run(encoder_net=encoder,
         num_epochs=20,
         save_step=50,
         log_step=10,
         plot=lambda exp: plot(exp, fig=fig, axes=axes))