Code Example #1
def train(data_size='all'):

    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    # Image input
    model = SegNet(opt, 3)
    model = model.to(device)
    model.train()
    criterion = torch.nn.CrossEntropyLoss()
    criterion_d = DiscriminativeLoss()
    optimizer = SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum)

    if data_size == 'all':
        dataloader = get_dataloader(opt.paths, opt, device)
        model = batch_step(opt, optimizer, model, dataloader, criterion,
                           criterion_d, device)
        torch.save(model.state_dict(), 'model_all.pth')
    else:
        im = Image.open(opt.img_path)
        im = np.array(im, dtype=np.float32) / 255
        image = np.transpose(im, (2, 0, 1))
        data = torch.from_numpy(image).unsqueeze(0)
        data = data.to(device)  # torch.autograd.Variable is deprecated; plain tensors carry autograd

        labels = segmentation.slic(im,
                                   compactness=opt.compactness,
                                   n_segments=opt.num_superpixels)
        labels = labels.reshape(-1)
        label_nums = np.unique(labels)
        label_indices = [
            np.where(labels == label_nums[i])[0]
            for i in range(len(label_nums))
        ]

        model = one_step(opt, optimizer, model, data, label_indices, criterion,
                         criterion_d, device)
        torch.save(model.state_dict(), 'model_single.pth')
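For reference, a minimal sketch of what a superpixel refinement step like one_step typically does in this unsupervised setup, inferred from the SLIC label_indices built above (an assumption, not the project's actual implementation):

def one_step_sketch(optimizer, model, data, label_indices, criterion, device):
    # Hypothetical Kanezaki-style refinement: snap each superpixel to its
    # majority predicted class, then train toward that pseudo-target.
    out = model(data)[0]                                 # (C, H, W) scores
    num_classes = out.shape[0]
    out = out.permute(1, 2, 0).reshape(-1, num_classes)  # (H*W, C)
    target = torch.argmax(out, dim=1).cpu().numpy()
    for indices in label_indices:
        vals, counts = np.unique(target[indices], return_counts=True)
        target[indices] = vals[np.argmax(counts)]        # majority label
    loss = criterion(out, torch.from_numpy(target).to(device))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return model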
Code Example #2
    plt.show()  # closes the preceding figure in this excerpt

    plt.title("Learning Curve")
    plt.plot(epoch_lst, train_acc_lst, label="Train")
    plt.plot(epoch_lst, val_acc_lst, label="Validation")
    plt.xlabel("Epoch")
    plt.ylabel("Pixelwise Accuracy")
    plt.legend(loc='best')
    plt.show()


## train from scratch
torch.cuda.empty_cache()
TRANSFER_LEARNING = False
model = SegNet(3, 2, transfer_learning=TRANSFER_LEARNING).to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
loss = nn.CrossEntropyLoss(weight=LABEL_WEIGHTS).to(DEVICE)
train(model, optimizer, loss, X_train, y_train, X_valid, y_valid)

## train from vgg weights
torch.cuda.empty_cache()
TRANSFER_LEARNING = True
model_vgginit = SegNet(3, 2, transfer_learning=TRANSFER_LEARNING).to(DEVICE)
optimizer = torch.optim.Adam(model_vgginit.parameters(), lr=LEARNING_RATE)
loss = nn.CrossEntropyLoss(weight=LABEL_WEIGHTS).to(DEVICE)
train(model_vgginit, optimizer, loss, X_train, y_train, X_valid, y_valid)

## load a model
model_name = r'../Epoch49_loss2.1120_trainacc97.727_valacc97.859.pth'  # scratch
model_load = load_checkpoints(model_name)
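load_checkpoints is not shown in this excerpt; a minimal sketch assuming it rebuilds a SegNet and restores the saved state dict (hypothetical helper, matching how the checkpoint is used above):

def load_checkpoints(path):
    # Assumed loader: reconstruct the model, then restore weights from .pth
    model = SegNet(3, 2, transfer_learning=False).to(DEVICE)
    model.load_state_dict(torch.load(path, map_location=DEVICE))
    model.eval()  # switch to inference mode after loading
    return model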
Code Example #3
File: train.py  Project: nimuh/deep-learning
train_data = DataLoader(tr_data, batch_size=BATCH_SIZE, shuffle=True)
val_data = DataLoader(v_data, batch_size=BATCH_SIZE, shuffle=False)

# define model
model = SegNet(IN_CHANNELS, CLASSES)

if gpu:
    model.to(torch.device("cuda:0"))

if monitor:
    wandb.watch(model)

# optimizer and loss definition
criterion = torch.nn.CrossEntropyLoss()
if gpu:
    criterion = criterion.cuda()  # keep the loss on the same device as the model
optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=0.9)

# training
for epoch in range(EPOCHS):

    tr_epoch_loss = 0
    tr_epoch_iou = 0
    tr_batch_count = 0
    val_batch_count = 0
    val_epoch_loss = 0
    val_epoch_iou = 0

    print('-------- EPOCH {} -------'.format(epoch))

    # TRAINING
    for i, (inputs, targets) in enumerate(train_data, 0):
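The excerpt ends at the batch loop; a minimal sketch of how the body typically continues, using the counters defined above (assumed, not taken from nimuh/deep-learning):

        # Assumed continuation of the batch loop:
        if gpu:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = model(inputs)              # (N, CLASSES, H, W) logits
        loss = criterion(outputs, targets)   # targets: (N, H, W) class indices
        loss.backward()
        optimizer.step()
        tr_epoch_loss += loss.item()
        tr_batch_count += 1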
Code Example #4
def train_autoencoder(epoch_plus):
    writer = SummaryWriter(log_dir='./runs_autoencoder_2')
    num_epochs = 400 - epoch_plus
    lr = 0.001
    bta1 = 0.9
    bta2 = 0.999
    weight_decay = 0.001

    # model = autoencoder(nchannels=3, width=172, height=600)
    model = SegNet(3)
    if ngpu > 1:
        model = nn.DataParallel(model)
    if use_gpu:
        model = model.to(device, non_blocking=True)
    if epoch_plus > 0:
        model.load_state_dict(
            torch.load('./autoencoder_models_2/autoencoder_{}.pth'.format(
                epoch_plus)))
    criterion = nn.MSELoss(reduction='sum')
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 betas=(bta1, bta2),
                                 weight_decay=weight_decay)

    for epoch in range(num_epochs):
        degree = randint(-180, 180)

        transforms = torchvision.transforms.Compose([
            torchvision.transforms.CenterCrop((172, 200)),
            torchvision.transforms.Resize((172, 200)),
            torchvision.transforms.RandomRotation((degree, degree)),
            torchvision.transforms.ToTensor()
        ])

        dataloader = get_dataloader(data_dir,
                                    train=True,
                                    transform=transforms,
                                    batch_size=batch_size)

        model.train()
        epoch_losses = AverageMeter()

        with tqdm(total=(1000 - 1000 % batch_size)) as _tqdm:
            _tqdm.set_description('epoch: {}/{}'.format(
                epoch + 1 + epoch_plus, num_epochs + epoch_plus))
            for data in dataloader:
                gt, text = data
                if use_gpu:
                    gt, text = gt.to(device, non_blocking=True), text.to(
                        device, non_blocking=True)

                predicted = model(text)

                # loss = criterion_bce(predicted, gt) + criterion_dice(predicted, gt)
                loss = criterion(
                    predicted, gt - text
                )  # predicts extracted text in white, all others in black
                epoch_losses.update(loss.item(), len(gt))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                _tqdm.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
                _tqdm.update(len(gt))

        save_path = './autoencoder_models_2'
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        gt_text = gt - text
        predicted_mask = text + predicted

        torch.save(
            model.state_dict(),
            os.path.join(save_path,
                         'autoencoder_{}.pth'.format(epoch + 1 + epoch_plus)))
        writer.add_scalar('Loss', epoch_losses.avg, epoch + 1 + epoch_plus)
        writer.add_image('text/text_image_{}'.format(epoch + 1 + epoch_plus),
                         text[0].squeeze(), epoch + 1 + epoch_plus)
        writer.add_image('gt/gt_image_{}'.format(epoch + 1 + epoch_plus),
                         gt[0].squeeze(), epoch + 1 + epoch_plus)
        writer.add_image('gt_text/gt_image_{}'.format(epoch + 1 + epoch_plus),
                         gt_text[0].squeeze(), epoch + 1 + epoch_plus)
        writer.add_image(
            'predicted/predicted_image_{}'.format(epoch + 1 + epoch_plus),
            predicted_mask[0].squeeze(), epoch + 1 + epoch_plus)
        writer.add_image(
            'predicted_text/predicted_image_{}'.format(epoch + 1 + epoch_plus),
            predicted[0].squeeze(), epoch + 1 + epoch_plus)

    writer.close()
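The AverageMeter used above is not defined in the excerpt; a common minimal implementation (an assumption; many training repos carry a variant of this class):

class AverageMeter:
    """Running sum and average, as used for epoch_losses above."""
    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count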
Code Example #5
def train(epoch, dataloader, model, criterion, optimizer, image_set = 'train'):
    loss_meter = 0
    acc_meter = 0

    for i, (input, target) in enumerate(dataloader):
        if image_set == 'train':
            input = input.requires_grad_(True).float().cuda()
        else:
            input = input.float().cuda()
        target = target.float().cuda()

        # Get the model output
        output = model(input)

        # Compute the loss value
        loss = criterion(output, target)
        loss_meter += loss.item()

        # Compute acc here
        acc = compute_acc(output, target)
        acc_meter += acc.item()

        if image_set == 'train':
            # Zero the existing gradients, do a backward pass, and update the weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if i % 3 == 0:
            print(image_set, ' loss at epoch ', str(epoch), ' iteration ', str(i), ' is: ', loss_meter / (i+1),
                       ' and acc is: ', acc_meter / (i+1))


if __name__ == "__main__":
    train_dataset = VOC('./VOCdevkit/', 'train')
    val_dataset = VOC('./VOCdevkit/', 'val')
    train_dataloader = data.DataLoader(
                        train_dataset,
                        batch_size = 6,
                        shuffle = True,
                        num_workers = 4)

    val_dataloader = data.DataLoader(
                        val_dataset,
                        batch_size = 1,
                        shuffle = False,
                        num_workers = 1)

    model = SegNet()
    # criterion = nn.MSELoss()
    # criterion = nn.BCELoss()
    criterion = nn.BCEWithLogitsLoss()

    # Comment if not using a GPU
    model = model.cuda()
    criterion = criterion.cuda()

    # Initialize the optimizer.
    lr = 0.1
    optimizer = torch.optim.Adam(model.parameters(), lr)
    n_epochs = 10
    for i in range(n_epochs):
        train(i, train_dataloader, model, criterion, optimizer, 'train')
        if i % 2 == 0:
            train(i, val_dataloader, model, criterion, optimizer, 'val')
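compute_acc is left as an exercise in this snippet; a minimal pixelwise accuracy consistent with BCEWithLogitsLoss (an assumption about the intended metric, since output holds raw logits):

def compute_acc(output, target):
    # Threshold logits at 0 (equivalent to sigmoid(output) > 0.5)
    pred = (output > 0).float()
    return (pred == target).float().mean()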
Code Example #6
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=4)

    model = SegNet().to(device)
    class_weights = 1.0 / train_dataset.get_class_probability()
    print(class_weights)
    criterion = torch.nn.CrossEntropyLoss(weight=class_weights).to(device)

    # start from checkpoint
    if args.checkpoint:
        model.load_state_dict(torch.load(args.checkpoint))

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=LEARNING_RATE,
                                momentum=MOMENTUM)

    # training
    is_better = True
    prev_loss = float('inf')
    epoch_loss = AverageMeter()
    logger.info(args)

    model.train()

    for epoch in range(args.epochs):
        t_start = time.time()

        for index, (image, mask) in enumerate(train_dataloader):
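This excerpt also stops at the start of the batch loop; a sketch of a body that would feed the is_better / prev_loss bookkeeping set up above (assumed, not the project's code):

            # Assumed continuation of the batch loop:
            image, mask = image.to(device), mask.to(device)
            optimizer.zero_grad()
            output = model(image)
            loss = criterion(output, mask)
            loss.backward()
            optimizer.step()
            epoch_loss.update(loss.item())

        # Typical epoch-end bookkeeping with the variables above:
        # is_better = epoch_loss.avg < prev_loss
        # if is_better:
        #     prev_loss = epoch_loss.avg
        #     torch.save(model.state_dict(), 'best.pth')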
Code Example #7
            out = model(x)
            for (out_, y_) in zip(out, y):
                out_, y_ = out_.cpu().detach().numpy(), y_.cpu().detach().numpy()
                # print(out_.shape, y_.shape)
                # print(eval_all(out_, y_))
                scores.append(eval_all(out_, y_))
    scores = np.array(scores)
    scores = np.mean(scores, axis=0)
    return scores


if __name__ == "__main__":

    model = SegNet().to(device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.01,
                                 betas=(0.9, 0.999))

    train_dataset, test_dataset = load_dataset('dataset/TrainingData')

    scores_avg = evaluate(test_dataset)
    print(scores_avg)

    for epoch in range(30):
        epoch_loss = 0
        for i, (x, y) in enumerate(train_dataset.batch(16)):
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            out = model(x)
            loss = loss_f(out, y)
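eval_all and loss_f are not shown in this excerpt; a minimal sketch of per-sample metrics that would produce the averaged score vector computed in evaluate (hypothetical metric choices):

def eval_all(pred, target):
    # Assumed metrics: pixel accuracy and foreground IoU for a prediction
    # thresholded at 0.5; the project's real eval_all may report more.
    pred_bin = (pred > 0.5).astype(np.float32)
    acc = (pred_bin == target).mean()
    inter = np.logical_and(pred_bin, target).sum()
    union = np.logical_or(pred_bin, target).sum()
    iou = inter / union if union > 0 else 1.0
    return np.array([acc, iou])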