Example #1
def data_loader(dataset_path, batch_size):
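    # Build training and validation DataLoaders for the saliency dataset:
    # joint resize / random-flip transforms on image and mask, then ToTensor
    # and mean/std normalization on the image.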
    normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
    train_joint_transformer = transforms.Compose([
        joint_transforms.JointResize(224),
        joint_transforms.JointRandomHorizontalFlip()
    ])

    train_dset = saliency.Saliency(dataset_path,
                                   'train',
                                   joint_transform=train_joint_transformer,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=batch_size,
                                               shuffle=True)

    test_joint_transforms = transforms.Compose(
        [joint_transforms.JointResize(224)])
    val_dset = saliency.Saliency(dataset_path,
                                 'val',
                                 joint_transform=test_joint_transforms,
                                 transform=transforms.Compose(
                                     [transforms.ToTensor(), normalize]))
    val_loader = torch.utils.data.DataLoader(val_dset,
                                             batch_size=batch_size,
                                             shuffle=False)

    return train_loader, val_loader
Example #2
def main():
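    # Inference script: restore an FCDenseNet57 segmentation checkpoint through
    # experiment.Experiment.resume and save a predicted mask for every image in
    # the MSRA10K 'val' split.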
    parser = argparse.ArgumentParser()
    parser.add_argument('--EXP_NAME', type=str, default='segment')
    parser.add_argument('--EXP_DIR',
                        type=str,
                        default='/home/yangle/result/TrainNet/')
    parser.add_argument('--DATASET_PATH',
                        type=str,
                        default='/home/yangle/BasicDataset/dataset/MSRA10K/')
    parser.add_argument('--SAVE_DIR',
                        type=str,
                        default='/home/yangle/result/mask/MSRA10K/')
    parser.add_argument('--LEARNING_RATE', type=float, default=1e-4)
    parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
    args = parser.parse_args()

    if not os.path.exists(args.SAVE_DIR):
        os.makedirs(args.SAVE_DIR)

    normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
    test_joint_transformer = transforms.Compose(
        [joint_transforms.JointResize(224)])
    test_dset = saliency.TestImage(args.DATASET_PATH,
                                   'val',
                                   joint_transform=None,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    test_loader = torch.utils.data.DataLoader(test_dset,
                                              batch_size=1,
                                              shuffle=False)

    model = tiramisu.FCDenseNet57(in_channels=3, n_classes=2)
    # model = model.cuda()
    model = torch.nn.DataParallel(model).cuda()
    optimizer = optim.RMSprop(model.parameters(),
                              lr=args.LEARNING_RATE,
                              weight_decay=args.WEIGHT_DECAY)

    exper = experiment.Experiment(args.EXP_NAME, args.EXP_DIR)
    # exper.resume(model, optimizer)
    base_path = args.EXP_DIR + args.EXP_NAME + '/weights/'
    weights_fpath = base_path + 'segment-weights-132-0.109-4.278-0.120-4.493.pth'
    optim_path = base_path + 'segment-optim-132.pth'
    exper.resume(model, optimizer, weights_fpath, optim_path)
    model.eval()  # put the network in evaluation mode before inference

    # count = 1
    for count, (img, name) in enumerate(test_loader):
        # for img, name in test_loader:
        data = Variable(img.cuda(), volatile=True)
        output = model(data)
        pred = utils.get_predictions(output)
        pred = pred[0]
        img_name = name[0]
        # img_name = str(name)
        # img_name = img_name.replace('tif', 'png')
        save_path = args.SAVE_DIR + img_name
        torchvision.utils.save_image(pred, save_path)
        print(count)
Example #3
def main():
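    # Inference script for a ResNet-34 based SegNet: load best_weights.pth,
    # run every image of the 'VAl' split through the model and save the
    # predicted masks as PNG files.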
    parser = argparse.ArgumentParser()
    parser.add_argument('--DATASET_PATH',
                        type=str,
                        default='/disk5/yangle/PAMI/dataset/fc-resnet/')
    parser.add_argument('--SAVE_DIR',
                        type=str,
                        default='/disk5/yangle/PAMI/result/masks/resnet34/')
    # parser.add_argument('--DATASET_PATH', type=str, default='/disk1/hpl/segmentation/dataset/')
    # parser.add_argument('--SAVE_DIR', type=str, default='/disk1/hpl/segmentation/result/masks/fc_resnet50/')
    args = parser.parse_args()

    if not os.path.exists(args.SAVE_DIR):
        os.makedirs(args.SAVE_DIR)

    normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
    joint_trans = transforms.Compose([joint_transforms.JointResize(224)])
    test_dset = saliency.TestImage(args.DATASET_PATH,
                                   'VAl',
                                   joint_transform=joint_trans,
                                   transform=transforms.Compose(
                                       [transforms.ToTensor(), normalize]))

    model = SegNet.resnet34()
    model = model.cuda()
    # model = torch.nn.DataParallel(model).cuda()
    # model = model.module
    weight_path = '/disk5/yangle/PAMI/result/LearnModel/fc-resnet34/weights/best_weights.pth'
    state = torch.load(weight_path)
    model.load_state_dict(state['state_dict'])
    model.eval()

    test_loader = torch.utils.data.DataLoader(test_dset,
                                              batch_size=1,
                                              shuffle=False)
    count = 1
    for data, name in test_loader:
        data = Variable(data.cuda(), volatile=True)
        output = model(data)
        # data = Variable(data.cuda())
        # with torch.no_grad():
        # 	output = model(data)
        pred = utils.get_predictions(output[4])
        pred = pred[0]
        # transforms_size = torchvision.transforms.Resize((hei, wid))
        # mask = transforms_size([pred])
        name = name[0]
        img_name = str(name)
        save_path = args.SAVE_DIR + img_name[:-4] + '.png'
        torchvision.utils.save_image(pred, save_path)
        print(count)
        count += 1
Example #4
def main():
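	# Fine-tune FCDenseNet57 for 2-class saliency segmentation: initialise from a
	# pretrained checkpoint, freeze the first child modules and train the rest.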
	parser = argparse.ArgumentParser()
	parser.add_argument('--DATASET_PATH', type=str, default='/home/yangle/dataset/DUTS/')
	parser.add_argument('--RESULTS_PATH', type=str, default='/home/yangle/result/TrainNet/results/')
	parser.add_argument('--WEIGHTS_PATH', type=str, default='/home/yangle/result/TrainNet/models/')
	parser.add_argument('--EXPERIMENT', type=str, default='/home/yangle/result/TrainNet/')
	parser.add_argument('--N_EPOCHS', type=int, default=300)
	parser.add_argument('--MAX_PATIENCE', type=int, default=30)
	parser.add_argument('--batch_size', type=int, default=32)
	parser.add_argument('--seed', type=int, default=0)
	parser.add_argument('--N_CLASSES', type=int, default=2)
	parser.add_argument('--LEARNING_RATE', type=float, default=1e-4)
	parser.add_argument('--LR_DECAY', type=float, default=0.995)
	parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
	parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
	parser.add_argument('--CUDNN', type=bool, default=True)
	args = parser.parse_args()

	torch.cuda.manual_seed(args.seed)
	cudnn.benchmark = args.CUDNN

	normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
	train_joint_transformer = transforms.Compose([
		joint_transforms.JointResize(224),
		joint_transforms.JointRandomHorizontalFlip()
	])

	train_dset = saliency.Saliency(
		args.DATASET_PATH, 'train', joint_transform=train_joint_transformer,
		transform=transforms.Compose([transforms.ToTensor(), normalize, ]))
	train_loader = torch.utils.data.DataLoader(
		train_dset, batch_size=args.batch_size, shuffle=False)

	val_joint_transformer = transforms.Compose([joint_transforms.JointResize(224)])
	val_dset = saliency.Saliency(
		args.DATASET_PATH, 'val', joint_transform=val_joint_transformer,
		transform=transforms.Compose([transforms.ToTensor(), normalize, ]))
	val_loader = torch.utils.data.DataLoader(
		val_dset, batch_size=8, shuffle=False)

	print("TrainImages: %d" % len(train_loader.dataset.imgs))
	print("ValImages: %d" % len(val_loader.dataset.imgs))
	# print("TestImages: %d" % len(test_loader.dataset.imgs))

	example_inputs, example_targets = next(iter(train_loader))
	print("InputsBatchSize: ", example_inputs.size())
	print("TargetsBatchSize: ", example_targets.size())
	print("\nInput (size, max, min) ---")
	#input
	i = example_inputs[0]
	print(i.size())
	print(i.max())
	print(i.min())
	print("Target (size, max, min) ---")
	#target
	t = example_targets[0]
	print(t.size())
	print(t.max())
	print(t.min())

	######################################
	# load weights from pretrained model #
	######################################

	model_pre = tiramisu_pre.FCDenseNet57(in_channels=3, n_classes=2)
	model_pre = torch.nn.DataParallel(model_pre).cuda()
	fpath = '/home/yangle/result/TrainNet/segment/weights/segment-weights-132-0.109-4.278-0.120-4.493.pth'
	state = torch.load(fpath)
	pretrained_dict = state['state_dict']

	model = tiramisu.FCDenseNet57(in_channels=3, n_classes=2)
	model = torch.nn.DataParallel(model).cuda()
	model_dict = model.state_dict()

	# 1. filter out unnecessary keys
	pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
	# 2. overwrite entries in the existing state dict
	model_dict.update(pretrained_dict)
	# 3. load the new state dict
	model.load_state_dict(model_dict)
	# unwrap DataParallel: convert the multi-GPU model back to a single-GPU model
	model = model.module

	# freeze the existing layers; only the newly added layers will be trained
	# for k in pretrained_dict:
	count = 0
	para_optim = []
	for k in model.children():
	# for k in model.module.children():
		count += 1
		if count > 6:
			for param in k.parameters():
				para_optim.append(param)
		else:
			for param in k.parameters():
				param.requires_grad = False
		# print(k)
	print('para_optim')
	print(len(para_optim))

	optimizer = optim.RMSprop(para_optim, lr=args.LEARNING_RATE,
							  weight_decay=args.WEIGHT_DECAY, eps=1e-12)
	criterion = nn.NLLLoss2d().cuda()
	exp_dir = args.EXPERIMENT + 'GRU_test'
	if os.path.exists(exp_dir):
		shutil.rmtree(exp_dir)

	exp = experiment.Experiment('GRU_test', args.EXPERIMENT)
	exp.init()

	START_EPOCH = exp.epoch
	END_EPOCH = START_EPOCH + args.N_EPOCHS

	for epoch in range(START_EPOCH, END_EPOCH):

		since = time.time()

		### Train ###
		trn_loss, trn_err = utils.train(model, train_loader, optimizer, criterion, epoch)
		print('Epoch {:d}: Train - Loss: {:.4f}\tErr: {:.4f}'.format(epoch, trn_loss, trn_err))
		time_elapsed = time.time() - since
		print('Train Time {:.0f}m {:.0f}s'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Test ###
		val_loss, val_err = utils.test(model, val_loader, criterion, epoch)
		print('Val - Loss: {:.4f}, Error: {:.4f}'.format(val_loss, val_err))
		time_elapsed = time.time() - since
		print('Total Time {:.0f}m {:.0f}s\n'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Save Metrics ###
		exp.save_history('train', trn_loss, trn_err)
		exp.save_history('val', val_loss, val_err)

		### Checkpoint ###
		exp.save_weights(model, trn_loss, val_loss, trn_err, val_err)
		exp.save_optimizer(optimizer, val_loss)

		## Early Stopping ##
		if (epoch - exp.best_val_loss_epoch) > args.MAX_PATIENCE:
			print(("Early stopping at epoch %d since no "
				   +"better loss found since epoch %.3").format(epoch, exp.best_val_loss))
			break

		# Adjust Lr ###--old method
		utils.adjust_learning_rate(args.LEARNING_RATE, args.LR_DECAY, optimizer,
							 epoch, args.DECAY_LR_EVERY_N_EPOCHS)

		exp.epoch += 1
Example #5
def main():
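	# Train a ResNet-50 based SegNet on DUTS with multi-scale target masks,
	# initialising the encoder from the ImageNet-pretrained torchvision ResNet-50.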
	parser = argparse.ArgumentParser()
	parser.add_argument('--DATASET_PATH', type=str, default='/home/zhangdong/database/DUTS/')
	parser.add_argument('--WEIGHTS_PATH', type=str, default='/home/yangle/DAVIS/result/models/')
	parser.add_argument('--EXPERIMENT', type=str, default='/home/yangle/DAVIS/result/TrainNet/')
	parser.add_argument('--N_EPOCHS', type=int, default=200)
	parser.add_argument('--MAX_PATIENCE', type=int, default=30)
	parser.add_argument('--batch_size', type=int, default=32)
	parser.add_argument('--seed', type=int, default=0)
	parser.add_argument('--N_CLASSES', type=int, default=2)
	parser.add_argument('--LEARNING_RATE', type=float, default=1e-4)
	parser.add_argument('--LR_DECAY', type=float, default=0.995)
	parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
	parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
	parser.add_argument('--CUDNN', type=bool, default=True)
	args = parser.parse_args()

	torch.cuda.manual_seed(args.seed)
	cudnn.benchmark = args.CUDNN

	normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
	train_joint_transformer_img = transforms.Compose([joint_transforms.JointResize(224)])
	mask_size_list = [14, 28, 56, 112, 224]

	train_dset = saliency.Saliency(
		args.DATASET_PATH, 'train', train_joint_transformer_img, mask_size_list,
		transform=transforms.Compose([transforms.ToTensor(), normalize, ]))
	train_loader = torch.utils.data.DataLoader(
		train_dset, batch_size=args.batch_size, shuffle=True)

	test_joint_transforms_img = transforms.Compose([joint_transforms.JointResize(224)])
	val_dset = saliency.Saliency(
		args.DATASET_PATH, 'val', test_joint_transforms_img, mask_size_list,
		transform=transforms.Compose([transforms.ToTensor(), normalize]))
	val_loader = torch.utils.data.DataLoader(
		val_dset, batch_size=args.batch_size, shuffle=False)

	print("TrainImages: %d" % len(train_loader.dataset.imgs))
	print("ValImages: %d" % len(val_loader.dataset.imgs))

	# example_inputs, example_targets = next(iter(train_loader))
	# print("InputsBatchSize: ", example_inputs.size())
	# print("TargetsBatchSize: ", len(example_targets))
	# print("\nInput (size, max, min) ---")
	# # input
	# i = example_inputs[0]
	# print(i.size())
	# print(i.max())
	# print(i.min())
	# print("Target (size, max, min) ---")
	# # target
	# for mask in example_targets:
	# 	print(mask.size())
	# 	print(mask.max())
	# 	print(mask.min())


	# initialize the ResNet-50 encoder from the pre-trained classification model
	resnet = torchvision.models.resnet50(pretrained=True)
	pre_trained_dict = resnet.state_dict()
	model = SegNet.resnet50()
	model_dict = model.state_dict()

	# 1. filter out unnecessary keys
	pre_trained_dict = {k: v for k, v in pre_trained_dict.items() if k in model_dict}
	# 2. overwrite entries in the existing state dict
	model_dict.update(pre_trained_dict)
	# 3. load the new state dict
	model.load_state_dict(model_dict)
	model = model.cuda()
	#model = torch.nn.DataParallel(model).cuda()

	print('  + Number of params: {}'.format(
		sum([p.data.nelement() for p in model.parameters()])))
	# model.apply(utils.weights_init)
	optimizer = optim.RMSprop(model.parameters(), lr=args.LEARNING_RATE,
							  weight_decay=args.WEIGHT_DECAY, eps=1e-12)
	criterion = nn.NLLLoss2d().cuda()

	exp_dir = args.EXPERIMENT + 'test'
	if os.path.exists(exp_dir):
		shutil.rmtree(exp_dir)

	exp = experiment.Experiment('test', args.EXPERIMENT)
	exp.init()

	START_EPOCH = exp.epoch
	END_EPOCH = START_EPOCH + args.N_EPOCHS

	for epoch in range(START_EPOCH, END_EPOCH):

		since = time.time()

		# ### Train ###
		trn_loss, trn_err = utils.train(model, train_loader, optimizer, criterion, epoch)
		print('Epoch {:d}: Train - Loss: {:.4f}\tErr: {:.4f}'.format(epoch, trn_loss, trn_err))
		time_elapsed = time.time() - since
		print('Train Time {:.0f}m {:.0f}s'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Test ###
		val_loss, val_err = utils.test(model, val_loader, criterion, epoch)
		print('Val - Loss: {:.4f}, Error: {:.4f}'.format(val_loss, val_err))
		time_elapsed = time.time() - since
		print('Total Time {:.0f}m {:.0f}s\n'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Save Metrics ###
		exp.save_history('train', trn_loss, trn_err)
		exp.save_history('val', val_loss, val_err)

		### Checkpoint ###
		exp.save_weights(model, trn_loss, val_loss, trn_err, val_err)
		exp.save_optimizer(optimizer, val_loss)

		## Early Stopping ##
		if (epoch - exp.best_val_loss_epoch) > args.MAX_PATIENCE:
			print(("Early stopping at epoch %d since no "
				   +"better loss found since epoch %.3").format(epoch, exp.best_val_loss))
			break

		# Adjust Lr ###--old method
		utils.adjust_learning_rate(args.LEARNING_RATE, args.LR_DECAY, optimizer,
							 epoch, args.DECAY_LR_EVERY_N_EPOCHS)

		exp.epoch += 1
Example #6
def main():
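    # Train FCDenseNet57 from scratch with SGD on the Objectness dataset
    # (10 classes), with periodic learning-rate decay and early stopping.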
    parser = argparse.ArgumentParser()
    parser.add_argument('--DATASET_PATH',
                        type=str,
                        default='/disk2/zhangni/davis/dataset/Objectness/')
    parser.add_argument('--EXPERIMENT',
                        type=str,
                        default='/disk2/zhangni/davis/result/TrainNet/')
    parser.add_argument('--N_EPOCHS', type=int, default=200)
    parser.add_argument('--MAX_PATIENCE', type=int, default=20)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--N_CLASSES', type=int, default=10)
    parser.add_argument('--LEARNING_RATE', type=float, default=1e-2)
    parser.add_argument('--LR_DECAY', type=float, default=0.995)
    parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
    parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
    parser.add_argument('--CUDNN', type=bool, default=True)
    args = parser.parse_args()

    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = args.CUDNN

    normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
    train_joint_transformer = transforms.Compose([
        joint_transforms.JointResize(224),
        joint_transforms.JointRandomHorizontalFlip()
    ])

    train_dset = saliency.Saliency(args.DATASET_PATH,
                                   'train',
                                   joint_transform=train_joint_transformer,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=args.batch_size,
                                               shuffle=True)

    test_joint_transforms = transforms.Compose(
        [joint_transforms.JointResize(224)])
    val_dset = saliency.Saliency(args.DATASET_PATH,
                                 'val',
                                 joint_transform=test_joint_transforms,
                                 transform=transforms.Compose(
                                     [transforms.ToTensor(), normalize]))
    val_loader = torch.utils.data.DataLoader(val_dset,
                                             batch_size=args.batch_size,
                                             shuffle=False)

    model = tiramisu.FCDenseNet57(n_classes=args.N_CLASSES)
    #model = model.cuda()
    model = torch.nn.DataParallel(model).cuda()
    print('  + Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    model.apply(utils.weights_init)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.LEARNING_RATE,
                          momentum=0.9,
                          weight_decay=0.0005)
    criterion = nn.NLLLoss2d().cuda()

    exp_dir = args.EXPERIMENT + 'Objectness'
    if os.path.exists(exp_dir):
        shutil.rmtree(exp_dir)

    exp = experiment.Experiment('Objectness', args.EXPERIMENT)
    exp.init()

    START_EPOCH = exp.epoch
    END_EPOCH = START_EPOCH + args.N_EPOCHS

    for epoch in range(1, END_EPOCH):

        since = time.time()

        ### Train ###
        trn_loss, trn_err = utils.train(model, train_loader, optimizer,
                                        criterion, epoch)
        print('Epoch {:d}: Train - Loss: {:.4f}\tErr: {:.4f}'.format(
            epoch, trn_loss, trn_err))
        time_elapsed = time.time() - since
        print('Train Time {:.0f}m {:.0f}s'.format(time_elapsed // 60,
                                                  time_elapsed % 60))

        ### Test ###
        val_loss, val_err = utils.test(model, val_loader, criterion, epoch)
        print('Val - Loss: {:.4f}, Error: {:.4f}'.format(val_loss, val_err))
        time_elapsed = time.time() - since
        print('Total Time {:.0f}m {:.0f}s\n'.format(time_elapsed // 60,
                                                    time_elapsed % 60))

        ### Save Metrics ###
        exp.save_history('train', trn_loss, trn_err)
        exp.save_history('val', val_loss, val_err)

        ### Checkpoint ###
        exp.save_weights(model, trn_loss, val_loss, trn_err, val_err)
        exp.save_optimizer(optimizer, val_loss)

        ## Early Stopping ##
        if (epoch - exp.best_val_loss_epoch) > args.MAX_PATIENCE:
            print(("Early stopping at epoch %d since no " +
                   "better loss found since epoch %.3").format(
                       epoch, exp.best_val_loss))
            break

        # Adjust Lr ###--old method
        if epoch % 4 == 0:
            utils.adjust_learning_rate(args.LEARNING_RATE, args.LR_DECAY,
                                       optimizer, epoch,
                                       args.DECAY_LR_EVERY_N_EPOCHS)

        exp.epoch += 1
Example #7
def main():
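    # Train FCDenseNet57 from scratch with RMSprop for 2-class saliency
    # segmentation; logging, checkpointing and training history are handled by
    # experiment.Experiment.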
    parser = argparse.ArgumentParser()
    parser.add_argument('--DATASET_PATH',
                        type=str,
                        default='/home/yangle/dataset/TrainNet/')
    parser.add_argument('--RESULTS_PATH',
                        type=str,
                        default='/home/yangle/result/TrainNet/results/')
    parser.add_argument('--WEIGHTS_PATH',
                        type=str,
                        default='/home/yangle/result/TrainNet/models/')
    parser.add_argument('--EXPERIMENT',
                        type=str,
                        default='/home/yangle/result/TrainNet/')
    parser.add_argument('--N_EPOCHS', type=int, default=150)
    parser.add_argument('--MAX_PATIENCE', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=10)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--N_CLASSES', type=int, default=2)
    parser.add_argument('--LEARNING_RATE', type=float, default=1e-4)
    parser.add_argument('--LR_DECAY', type=float, default=0.995)
    parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
    parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
    parser.add_argument('--CUDNN', type=bool, default=True)
    args = parser.parse_args()

    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = args.CUDNN

    normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
    train_joint_transformer = transforms.Compose([
        #joint_transforms.JointRandomCrop(224),
        joint_transforms.JointResize(224),
        #joint_transforms.JointRandomSizedCrop(224),
        joint_transforms.JointRandomHorizontalFlip()
    ])

    train_dset = saliency.Saliency(args.DATASET_PATH,
                                   'train',
                                   joint_transform=train_joint_transformer,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=args.batch_size,
                                               shuffle=True)

    test_joint_transforms = transforms.Compose(
        [joint_transforms.JointResize(224)])
    val_dset = saliency.Saliency(args.DATASET_PATH,
                                 'val',
                                 joint_transform=test_joint_transforms,
                                 transform=transforms.Compose(
                                     [transforms.ToTensor(), normalize]))
    val_loader = torch.utils.data.DataLoader(val_dset,
                                             batch_size=args.batch_size,
                                             shuffle=False)

    # test_dset = saliency.Saliency(
    # 	args.DATASET_PATH, 'test', joint_transform=test_joint_transforms,
    # 	transform=transforms.Compose([
    # 		transforms.ToTensor(),
    # 		normalize
    # 	]))
    # test_loader = torch.utils.data.DataLoader(
    # 	test_dset, batch_size=args.batch_size, shuffle=False)

    print("TrainImages: %d" % len(train_loader.dataset.imgs))
    print("ValImages: %d" % len(val_loader.dataset.imgs))
    # print("TestImages: %d" % len(test_loader.dataset.imgs))

    example_inputs, example_targets = next(iter(train_loader))
    print("InputsBatchSize: ", example_inputs.size())
    print("TargetsBatchSize: ", example_targets.size())
    print("\nInput (size, max, min) ---")
    #input
    i = example_inputs[0]
    print(i.size())
    print(i.max())
    print(i.min())
    print("Target (size, max, min) ---")
    #target
    t = example_targets[0]
    print(t.size())
    print(t.max())
    print(t.min())

    model = tiramisu.FCDenseNet57(n_classes=args.N_CLASSES)
    model = model.cuda()
    #model = torch.nn.DataParallel(model).cuda()
    print('  + Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    model.apply(utils.weights_init)
    #optimizer = optim.RMSprop(model.parameters(), lr=args.LEARNING_RATE, weight_decay=args.WEIGHT_DECAY)
    optimizer = optim.RMSprop(model.parameters(),
                              lr=args.LEARNING_RATE,
                              weight_decay=args.WEIGHT_DECAY,
                              eps=1e-12)
    criterion = nn.NLLLoss2d().cuda()

    exp_dir = args.EXPERIMENT + 'tiramisu_rms_prelu'
    if os.path.exists(exp_dir):
        shutil.rmtree(exp_dir)

    exp = experiment.Experiment('tiramisu_rms_prelu', args.EXPERIMENT)
    exp.init()

    START_EPOCH = exp.epoch
    END_EPOCH = START_EPOCH + args.N_EPOCHS

    for epoch in range(START_EPOCH, END_EPOCH):

        since = time.time()

        ### Train ###
        trn_loss, trn_err = utils.train(model, train_loader, optimizer,
                                        criterion, epoch)
        print('Epoch {:d}: Train - Loss: {:.4f}\tErr: {:.4f}'.format(
            epoch, trn_loss, trn_err))
        time_elapsed = time.time() - since
        print('Train Time {:.0f}m {:.0f}s'.format(time_elapsed // 60,
                                                  time_elapsed % 60))

        ### Test ###
        val_loss, val_err = utils.test(model, val_loader, criterion, epoch)
        print('Val - Loss: {:.4f}, Error: {:.4f}'.format(val_loss, val_err))
        time_elapsed = time.time() - since
        print('Total Time {:.0f}m {:.0f}s\n'.format(time_elapsed // 60,
                                                    time_elapsed % 60))

        ### Save Metrics ###
        exp.save_history('train', trn_loss, trn_err)
        exp.save_history('val', val_loss, val_err)

        ### Checkpoint ###
        exp.save_weights(model, trn_loss, val_loss, trn_err, val_err)
        exp.save_optimizer(optimizer, val_loss)

        # ## Early Stopping ##
        # if (epoch - exp.best_val_loss_epoch) > args.MAX_PATIENCE:
        # 	print(("Early stopping at epoch %d since no "
        # 		   +"better loss found since epoch %.3").format(epoch, exp.best_val_loss))
        # 	break

        # lr_sche.step(val_loss)
        #### print learning rate ####
        # for param_group in optimizer.param_groups:
        # 	print(param_group['lr'])

        # Adjust Lr ###--old method
        utils.adjust_learning_rate(args.LEARNING_RATE, args.LR_DECAY,
                                   optimizer, epoch,
                                   args.DECAY_LR_EVERY_N_EPOCHS)

        exp.epoch += 1
Example #8
def main():
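	# Train a ResNet-34 based SegNet with multi-scale masks and random erasing,
	# using a reduced learning rate for the pretrained encoder layers and the
	# full learning rate for the newly added layers.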
	parser = argparse.ArgumentParser()
	parser.add_argument('--DATASET_PATH', type=str, default='/disk5/yangle/PAMI/dataset/fc-resnet/')
	parser.add_argument('--EXPERIMENT', type=str, default='/disk5/yangle/PAMI/result/LearnModel/')
	# parser.add_argument('--DATASET_PATH', type=str, default='/disk1/hpl/segmentation/dataset/')
	# parser.add_argument('--EXPERIMENT', type=str, default='/disk1/hpl/segmentation/model/model_baselinexin/')
	parser.add_argument('--N_EPOCHS', type=int, default=200)
	parser.add_argument('--MAX_PATIENCE', type=int, default=30)
	parser.add_argument('--batch_size', type=int, default=32)
	parser.add_argument('--seed', type=int, default=0)
	parser.add_argument('--N_CLASSES', type=int, default=2)
	parser.add_argument('--LEARNING_RATE', type=float, default=1e-4)
	parser.add_argument('--LR_DECAY', type=float, default=0.995)
	parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
	parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
	parser.add_argument('--CUDNN', type=bool, default=True)
	args = parser.parse_args()

	torch.cuda.manual_seed(args.seed)
	cudnn.benchmark = args.CUDNN

	normalize = transforms.Normalize(mean=dataset.mean, std=dataset.std)
	train_joint_transformer = transforms.Compose([
		joint_transforms.JointResize(256),
		joint_transforms.JointRandomCrop(224),
		joint_transforms.JointRandomHorizontalFlip(),
        ])
	mask_size_list = [28, 28, 28, 56, 112]

	train_dset = dataset.Saliency(
		args.DATASET_PATH, 'TRain', train_joint_transformer, mask_size_list,
		transform=transforms.Compose([joint_transforms.RandomErasing_random(probability=0.5, sh=0.4, r1=0.3, ),
									  transforms.ToTensor(), normalize, ]))
	train_loader = torch.utils.data.DataLoader(
		train_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.batch_size)

	test_joint_transforms_img = transforms.Compose([joint_transforms.JointResize(224)])
	val_dset = dataset.TestData(args.DATASET_PATH, 'VAl', test_joint_transforms_img,
								transform=transforms.Compose([transforms.ToTensor(), normalize]),
								target_transform=transforms.Compose([transforms.ToTensor()]))
	val_loader = torch.utils.data.DataLoader(
		val_dset, batch_size=args.batch_size, shuffle=False)

	print("TrainImages: %d" % len(train_loader.dataset.imgs))
	print("ValImages: %d" % len(val_loader.dataset.imgs))

	example_inputs, example_targets = next(iter(train_loader))
	print("InputsBatchSize: ", example_inputs.size())
	print("TargetsBatchSize: ", len(example_targets))
	print("\nInput (size, max, min) ---")
	# input
	i = example_inputs[0]
	print(i.size())
	print(i.max())
	print(i.min())
	print("Target (size, max, min) ---")
	# target
	for mask in example_targets:
		print(mask.size())
		print(mask.max())
		print(mask.min())

	resnet34 = torchvision.models.resnet34(pretrained=True)
	dict_resnet34 = resnet34.state_dict()
	model = SegNet.resnet34()
	# # initialize
	model.apply(utils.weights_init)
	SegNet_dict = model.state_dict()

	pretrained_dict = {k: v for k, v in dict_resnet34.items() if k in SegNet_dict}
	# for k in pretrained_dict:
	# 	print(k)
	SegNet_dict.update(pretrained_dict)
	model.load_state_dict(SegNet_dict)

	# separate layers to set different learning rates
	param_exist = []
	param_add = []
	for k, (name, module) in enumerate(model.named_children()):
		# existing layers including: conv1 bn1 relu maxpool
		# layer1 layer2 layer3 layer4
		if k < 8:
			for param in module.parameters():
				param_exist.append(param)
		# adding layers including: bottleneck skip3 skip2 skip1 skip0
		# conv_end_1 bn_end_1 salmap Sigmoid mask0 mask4 mask3 mask2 mask1
		else:
			for param in module.parameters():
				param_add.append(param)

	model = model.cuda()
	# model = torch.nn.DataParallel(model).cuda()

	print('  + Number of params: {}'.format(
		sum([p.data.nelement() for p in model.parameters()])))
	optimizer = optim.RMSprop([{'params': param_exist, 'lr': args.LEARNING_RATE*0.1},
						   {'params': param_add}], lr=args.LEARNING_RATE,
							  weight_decay=args.WEIGHT_DECAY, eps=1e-12)
	criterion = nn.NLLLoss().cuda()

	exp_dir = args.EXPERIMENT + 'test'
	if os.path.exists(exp_dir):
		shutil.rmtree(exp_dir)

	exp = experiment.Experiment('test', args.EXPERIMENT)
	exp.init()

	START_EPOCH = exp.epoch
	END_EPOCH = START_EPOCH + args.N_EPOCHS

	for epoch in range(START_EPOCH, END_EPOCH):

		since = time.time()

		# ### Train ###
		trn_loss, trn_err = utils.train(model, train_loader, optimizer, criterion, epoch)
		print('Epoch {:d}: Train - Loss: {:.4f}\tErr: {:.4f}'.format(epoch, trn_loss, trn_err))
		time_elapsed = time.time() - since
		print('Train Time {:.0f}m {:.0f}s'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Test ###
		val_loss, val_err = utils.test_score(model, val_loader)
		print('Val - Loss: {:.4f}, Error: {:.4f}'.format(val_loss, val_err))
		time_elapsed = time.time() - since
		print('Total Time {:.0f}m {:.0f}s\n'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Save Metrics ###
		exp.save_history('train', trn_loss, trn_err)
		exp.save_history('val', val_loss, val_err)

		### Checkpoint ###
		exp.save_weights(model, trn_loss, val_loss, trn_err, val_err)
		exp.save_optimizer(optimizer, val_loss)

		## Early Stopping ##
		if (epoch - exp.best_val_loss_epoch) > args.MAX_PATIENCE:
			print(("Early stopping at epoch %d since no "
				   +"better loss found since epoch %.3").format(epoch, exp.best_val_loss))
			break

		# Adjust Lr ###--old method
		utils.adjust_learning_rate(args.LEARNING_RATE, args.LR_DECAY, optimizer,
							 epoch, args.DECAY_LR_EVERY_N_EPOCHS)

		exp.epoch += 1