Example no. 1
0
        # Fragment starts mid-branch: the code that computed `weights`
        # (presumably per-class frequencies — TODO confirm) is above this excerpt.
        weights = 1 - weights  # invert so under-represented classes get larger loss weights
        print(weights)
    # Resume from a checkpoint when one exists at args.model; otherwise
    # build and initialize a fresh UNet.
    if os.path.isfile(args.model):
        model = torch.load(args.model).to(args.device)
        print("Model loaded!")
        # NOTE(review): `loaded` is only set on this branch — if later code
        # reads it after the else-branch ran, that's a NameError; verify.
        loaded = True
    else:
        model = UNet(4, len(classes), args.depth, args.filters).to(args.device)
        model.initialize()
        print("Model initialized!")
    # Cross-entropy criterion, optionally class-weighted.
    if weights is None:
        criterion = torch.nn.CrossEntropyLoss().to(args.device)
    else:
        weights = torch.FloatTensor(weights).to(args.device)
        criterion = torch.nn.CrossEntropyLoss(weight=weights).to(args.device)
    optimizer = torch.optim.Adam(model.parameters(), args.lr)

    # Running confusion matrix accumulated over all training batches.
    train_matrix = np.zeros((len(classes), len(classes)), dtype=np.int32)
    def onTrainBatch(batch_id, features, labels, output, loss):
        """Per-training-batch hook: fold this batch's confusion matrix
        into the running ``train_matrix``.

        Args:
            batch_id: index of the batch (unused here).
            features: input batch (unused here).
            labels: ground-truth class labels for the batch.
            output: model logits; argmax over dim=1 gives predictions.
            loss: batch loss (unused here).
        """
        # BUG FIX: the original declared ``global train_matrix``, but
        # ``train_matrix`` is a local of the *enclosing* function, so the
        # rebinding targeted a (likely non-existent) module-level name.
        # Mutating the closed-over array in place via ``out=`` updates the
        # correct object without needing any global/nonlocal declaration.
        preds = torch.argmax(output, dim=1)
        mat = confusion_matrix(labels, preds, len(classes))
        # assumes confusion_matrix returns integer counts castable to the
        # int32 dtype of train_matrix — TODO confirm
        np.add(train_matrix, mat, out=train_matrix)

    # Running confusion matrix for evaluation batches. CONSISTENCY FIX:
    # use int32 counts to match train_matrix above — the original omitted
    # the dtype, silently producing a float64 matrix of counts.
    test_matrix = np.zeros((len(classes), len(classes)), dtype=np.int32)

    def onTestBatch(batch_id, features, labels, output, loss):
        # Per-evaluation-batch hook: compute this batch's confusion matrix.
        # NOTE(review): ``global test_matrix`` is suspect — ``test_matrix``
        # is a local of the enclosing function, so this declaration points
        # at a module-level name that may not exist; likely needs the same
        # in-place accumulation fix as the training hook. Confirm scope.
        global test_matrix
        output = torch.argmax(output, dim=1)
        mat = confusion_matrix(labels, output, len(classes))
        # NOTE(review): excerpt is truncated here — the statement that adds
        # ``mat`` into ``test_matrix`` is not visible in this fragment.
Example no. 2
0
    'loss': [],      # training-loss history (dict literal opens above this excerpt)
    'val_loss': [],  # validation-loss history
}
# Fix both RNG seeds so shuffling/augmentation are reproducible.
random.seed(42)
np.random.seed(42)
# Load the pre-built dataloader state from disk.
dataloader = Dataloader()
dataloader.load(dataloader_path)
num_batches = len(dataloader.train_objs)      # presumably one obj per training batch — TODO confirm
num_batches_val = len(dataloader.val_objs)
# Total number of LR steps in the exponential LR-range sweep below.
num_lr_increases = num_batches * num_epochs
# Current sweep step; presumably incremented each batch later in the loop
# (the increment is not visible in this excerpt).
num_lr_increase = 0

if torch.cuda.is_available():
    model.cuda(params['device'])

# The initial lr=1e-3 is immediately overridden by set_lr() in the loop.
optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=momentum)

start = time.time()
for num_epoch in range(num_epochs):
    dataloader.on_epoch_start(params)

    for num_batch in range(num_batches):
        model.train()
        X, Y = dataloader.get_mini_batch(num_batch,
                                         params,
                                         mode='train',
                                         weight=False,
                                         data_augmentation=True)
        # LR-range test: sweep the learning rate exponentially from
        # min_lr to max_lr. Since 3**(p * log3(max_lr/min_lr)) equals
        # (max_lr/min_lr)**p, lr is min_lr at p=0 and max_lr at p=1,
        # where p = num_lr_increase / num_lr_increases.
        lr = min_lr * 3.**((float(num_lr_increase) / num_lr_increases) *
                           (np.log(max_lr / min_lr) / np.log(3.)))
        set_lr(optimizer, lr)