Example 1
def main():
	model_dir = './checkpoints/seg2/segnet_gen1/model_at_epoch_013.dat'
	save_dir = './test/0610/segnet_gen1/test'
	test_txt_path = './data/seg/valid.txt'

	# model = unet(in_channel=1, n_classes=1)
	model = SegNet(input_nbr = 1, label_nbr = 1)
	model = load_model(model, model_dir)
	model = model.cuda()
	model.eval()

	test_dataset = GuideWireDataset(test_txt_path)
	test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS)
	prefetcher = data_prefetcher(test_loader)
	input, target, distance = prefetcher.next()

	dice = []
	IoU = []
	precision = []
	recall = []

	i = -1
	while input is not None:
		i += 1
		with torch.no_grad():
			output = model(input)
			dice.append(dice_coeff(output, target).item())
			IoU.append(iou_coeff(output, target).item())
			precision.append(Precision(output, target).item())
			recall.append(Recall(output, target).item())

			output = torch.sigmoid(output).squeeze().data.cpu().numpy()
			output[output < 0.5] = 0
			output[output >= 0.5] = 1
			# output = torch.argmax(output, dim=1).squeeze().data.cpu().numpy()
			# output = output.squeeze().data.cpu().numpy()
			# output = np.argmax(output, axis=0)
			cv2.imwrite(os.path.join(save_dir, str(i) + '_output.jpg'), output * 255)
			print(str(i) + ' finish!')

		input, target, distance = prefetcher.next()
	print('dice: ', np.mean(dice), np.max(dice), np.min(dice), np.std(dice))
	print('iou: ', np.mean(IoU), np.max(IoU), np.min(IoU), np.std(IoU))
	print('precision: ', np.mean(precision), np.max(precision), np.min(precision), np.std(precision))
	print('recall: ', np.mean(recall), np.max(recall), np.min(recall), np.std(recall))
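
Example 1 calls several helper functions (dice_coeff, iou_coeff, Precision, Recall) whose definitions are not part of the listing. The sketch below shows one common way such binary-segmentation metrics are implemented for single-channel logits; the 0.5 threshold and the smoothing constant eps are assumptions, not taken from the original code.

import torch

def dice_coeff(output, target, eps=1e-6):
	# Threshold sigmoid probabilities into a binary mask (assumed convention).
	pred = (torch.sigmoid(output) > 0.5).float()
	target = (target > 0.5).float()
	inter = (pred * target).sum()
	return (2 * inter + eps) / (pred.sum() + target.sum() + eps)

def iou_coeff(output, target, eps=1e-6):
	# Intersection over union of the thresholded prediction and the mask.
	pred = (torch.sigmoid(output) > 0.5).float()
	target = (target > 0.5).float()
	inter = (pred * target).sum()
	union = pred.sum() + target.sum() - inter
	return (inter + eps) / (union + eps)

def Precision(output, target, eps=1e-6):
	# True positives over all predicted positives.
	pred = (torch.sigmoid(output) > 0.5).float()
	tp = (pred * target).sum()
	return (tp + eps) / (pred.sum() + eps)

def Recall(output, target, eps=1e-6):
	# True positives over all actual positives.
	pred = (torch.sigmoid(output) > 0.5).float()
	tp = (pred * target).sum()
	return (tp + eps) / (target.sum() + eps)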
Example 2
def train(epoch, dataloader, model, criterion, optimizer, image_set = 'train'):
    loss_meter = 0
    acc_meter = 0

    for i, (input, target) in enumerate(dataloader):
        if image_set == 'train':
            input = input.requires_grad_(True).float().cuda()
        else:
            input = input.float().cuda()
        target = target.float().cuda()

        # Get the model output (forward pass)
        output = model(input)

        # Compute the loss value
        loss = criterion(output, target)
        loss_meter += loss.item()

        # Compute acc here
        acc = compute_acc(output, target)
        acc_meter += acc.item()

        if image_set == 'train':
            # Zero the existing gradients, do a backward pass, then update the weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if i % 3 == 0:
            print(image_set, ' loss at epoch ', str(epoch), ' iteration ', str(i), ' is: ', loss_meter / (i+1),
                       ' and acc is: ', acc_meter / (i+1))


if __name__ == "__main__":
    train_dataset = VOC('./VOCdevkit/', 'train')
    val_dataset = VOC('./VOCdevkit/', 'val')
    train_dataloader = data.DataLoader(
                        train_dataset,
                        batch_size = 6,
                        shuffle = True,
                        num_workers = 4)

    val_dataloader = data.DataLoader(
                        val_dataset,
                        batch_size = 1,
                        shuffle = False,
                        num_workers = 1)

    model = SegNet()
    # criterion = nn.MSELoss()
    # criterion = nn.BCELoss()
    criterion = nn.BCEWithLogitsLoss()

    # Comment if not using a GPU
    model = model.cuda()
    criterion = criterion.cuda()

    # Initialize the optimizer.
    lr = 0.1
    optimizer = torch.optim.Adam(model.parameters(), lr)
    n_epochs = 10
    for i in range(n_epochs):
        train(i, train_dataloader, model, criterion, optimizer, 'train')
        if i % 2 == 0:
            train(i, val_dataloader, model, criterion, optimizer, 'val')
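
Example 2 also relies on a compute_acc helper that is not shown. The version below is a sketch under the assumption that the model emits single-channel logits (consistent with the BCEWithLogitsLoss criterion) and the target is a binary mask; only the name compute_acc appears in the original.

import torch

def compute_acc(output, target):
    # Pixel accuracy: fraction of pixels whose thresholded prediction matches the mask.
    pred = (torch.sigmoid(output) > 0.5).float()
    return (pred == target).float().mean()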
Example 3
        # iou = compute_iou(output, target)
        # iou_meter += iou

        if image_set == 'train':
            optimizer.zero_grad()  # Why did we do this?
            loss.backward()
            optimizer.step()

        print('loss at epoch ', str(epoch), ' iteration ', str(i), ' is: ',
              loss.data.cpu().numpy())


if __name__ == "__main__":
    train_dataset = VOC('./', 'train')
    test_dataset = VOC('./', 'val')
    train_dataloader = data.DataLoader(train_dataset,
                                       batch_size=8,
                                       shuffle=False,
                                       num_workers=4)

    model = SegNet()
    criterion = nn.MSELoss()

    # Comment if not using a GPU
    model = model.cuda()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(), 0.01, momentum=0.9)
    n_epochs = 10
    for i in range(n_epochs):
        train(i, train_dataloader, model, criterion, optimizer, 'train')
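
The commented-out lines at the top of Example 3 refer to a compute_iou helper that is not included in the excerpt. A minimal sketch, under the same sigmoid-logits and binary-mask assumptions as the earlier metric sketch, could be:

import torch

def compute_iou(output, target, eps=1e-6):
    # Intersection over union of the thresholded prediction and the mask,
    # returned as a plain float so it can be accumulated directly.
    pred = (torch.sigmoid(output) > 0.5).float()
    inter = (pred * target).sum()
    union = pred.sum() + target.sum() - inter
    return ((inter + eps) / (union + eps)).item()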