Example #1
def data_loader(dataset_path):
    # dataset
    normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
    train_joint_transformer = transforms.Compose(
        [joint_transforms.JointRandomHorizontalFlip()])

    train_dset = saliency.Saliency(dataset_path,
                                   'train',
                                   joint_transform=train_joint_transformer,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=batch_size,
                                               shuffle=True)

    val_dset = saliency.Saliency(dataset_path,
                                 'val',
                                 transform=transforms.Compose([
                                     transforms.ToTensor(),
                                     normalize,
                                 ]))
    # the validation batch size could be decreased here if memory is limited
    val_loader = torch.utils.data.DataLoader(val_dset,
                                             batch_size=batch_size,
                                             shuffle=False)

    return train_loader, val_loader
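
Note that data_loader reads batch_size from the enclosing module rather than taking it as an argument. A minimal usage sketch, assuming the saliency and joint_transforms helper modules are importable and that batch_size is set at module level (the dataset path below is purely illustrative):

# a minimal sketch, assuming batch_size is defined at module scope as data_loader expects
batch_size = 16
train_loader, val_loader = data_loader('/path/to/saliency_dataset')  # illustrative path
images, targets = next(iter(train_loader))
print(images.size(), targets.size())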
Example #2
def data_loader_sig(dataset_path):
	# dataset
	normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
	train_joint_transformer = transforms.Compose([joint_transforms.JointRandomHorizontalFlip()])
	img_transform = transforms.Compose([transforms.ToTensor(), normalize])
	target_transform = transforms.Compose([transforms.ToTensor()])
	mask_size_list = [14, 28, 56, 112, 224]

	train_dset = saliency.Saliency(dataset_path, 'train', joint_transform=train_joint_transformer,
								   transform=img_transform, target_transform=target_transform,
								   mask_size_list=mask_size_list)
	train_loader = torch.utils.data.DataLoader(
		train_dset, batch_size=batch_size, shuffle=True)

	val_dset = saliency.Saliency(dataset_path, 'val',
								 transform=img_transform, target_transform=target_transform,
								 mask_size_list=mask_size_list)
	# the validation batch size could be decreased here if memory is limited
	val_loader = torch.utils.data.DataLoader(
		val_dset, batch_size=batch_size, shuffle=False)

	return train_loader, val_loader
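
The mask_size_list argument suggests that saliency.Saliency returns the target mask at several resolutions for side-output supervision. The dataset class itself is not shown, so the following is only a sketch of how one full-resolution mask could be resized to each scale with torch.nn.functional.interpolate:

import torch
import torch.nn.functional as F

mask = torch.rand(1, 1, 224, 224)            # one full-resolution target mask
mask_size_list = [14, 28, 56, 112, 224]
# produce one downsampled copy of the mask per supervision scale
multi_scale_masks = [F.interpolate(mask, size=(s, s), mode='nearest') for s in mask_size_list]
print([tuple(m.shape) for m in multi_scale_masks])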
Example #3
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('--DATASET_PATH', type=str, default='/home/yangle/dataset/DUTS/')
	parser.add_argument('--RESULTS_PATH', type=str, default='/home/yangle/result/TrainNet/results/')
	parser.add_argument('--WEIGHTS_PATH', type=str, default='/home/yangle/result/TrainNet/models/')
	parser.add_argument('--EXPERIMENT', type=str, default='/home/yangle/result/TrainNet/')
	parser.add_argument('--N_EPOCHS', type=int, default=300)
	parser.add_argument('--MAX_PATIENCE', type=int, default=30)
	parser.add_argument('--batch_size', type=int, default=32)
	parser.add_argument('--seed', type=int, default=0)
	parser.add_argument('--N_CLASSES', type=int, default=2)
	parser.add_argument('--LEARNING_RATE', type=float, default=1e-4)
	parser.add_argument('--LR_DECAY', type=float, default=0.995)
	parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
	parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
	parser.add_argument('--CUDNN', type=bool, default=True)
	args = parser.parse_args()

	torch.cuda.manual_seed(args.seed)
	cudnn.benchmark = args.CUDNN

	normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
	train_joint_transformer = transforms.Compose([
		joint_transforms.JointResize(224),
		joint_transforms.JointRandomHorizontalFlip()
	])

	train_dset = saliency.Saliency(
		args.DATASET_PATH, 'train', joint_transform=train_joint_transformer,
		transform=transforms.Compose([transforms.ToTensor(), normalize, ]))
	train_loader = torch.utils.data.DataLoader(
		train_dset, batch_size=args.batch_size, shuffle=False)

	val_joint_transformer = transforms.Compose([joint_transforms.JointResize(224)])
	val_dset = saliency.Saliency(
		args.DATASET_PATH, 'val', joint_transform=val_joint_transformer,
		transform=transforms.Compose([transforms.ToTensor(), normalize, ]))
	val_loader = torch.utils.data.DataLoader(
		val_dset, batch_size=8, shuffle=False)

	print("TrainImages: %d" % len(train_loader.dataset.imgs))
	print("ValImages: %d" % len(val_loader.dataset.imgs))
	# print("TestImages: %d" % len(test_loader.dataset.imgs))

	example_inputs, example_targets = next(iter(train_loader))
	print("InputsBatchSize: ", example_inputs.size())
	print("TargetsBatchSize: ", example_targets.size())
	print("\nInput (size, max, min) ---")
	#input
	i = example_inputs[0]
	print(i.size())
	print(i.max())
	print(i.min())
	print("Target (size, max, min) ---")
	#target
	t = example_targets[0]
	print(t.size())
	print(t.max())
	print(t.min())

	######################################
	# load weights from pretrained model #
	######################################

	model_pre = tiramisu_pre.FCDenseNet57(in_channels=3, n_classes=2)
	model_pre = torch.nn.DataParallel(model_pre).cuda()
	fpath = '/home/yangle/result/TrainNet/segment/weights/segment-weights-132-0.109-4.278-0.120-4.493.pth'
	state = torch.load(fpath)
	pretrained_dict = state['state_dict']

	model = tiramisu.FCDenseNet57(in_channels=3, n_classes=2)
	model = torch.nn.DataParallel(model).cuda()
	model_dict = model.state_dict()

	# 1. filter out unnecessary keys
	pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
	# 2. overwrite entries in the existing state dict
	model_dict.update(pretrained_dict)
	# 3. load the new state dict
	model.load_state_dict(model_dict)
	# convert model trained with multiple GPUs into model within single GPU
	model = model.module

	# freeze the existing (pretrained) layers; only the later, newly added layers are trained
	# for k in pretrained_dict:
	count = 0
	para_optim = []
	for k in model.children():
	# for k in model.module.children():
		count += 1
		if count > 6:
			for param in k.parameters():
				para_optim.append(param)
		else:
			for param in k.parameters():
				param.requires_grad = False
		# print(k)
	print('para_optim')
	print(len(para_optim))

	optimizer = optim.RMSprop(para_optim, lr=args.LEARNING_RATE,
							  weight_decay=args.WEIGHT_DECAY, eps=1e-12)
	criterion = nn.NLLLoss2d().cuda()
	exp_dir = args.EXPERIMENT + 'GRU_test'
	if os.path.exists(exp_dir):
		shutil.rmtree(exp_dir)

	exp = experiment.Experiment('GRU_test', args.EXPERIMENT)
	exp.init()

	START_EPOCH = exp.epoch
	END_EPOCH = START_EPOCH + args.N_EPOCHS

	for epoch in range(START_EPOCH, END_EPOCH):

		since = time.time()

		### Train ###
		trn_loss, trn_err = utils.train(model, train_loader, optimizer, criterion, epoch)
		print('Epoch {:d}: Train - Loss: {:.4f}\tErr: {:.4f}'.format(epoch, trn_loss, trn_err))
		time_elapsed = time.time() - since
		print('Train Time {:.0f}m {:.0f}s'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Test ###
		val_loss, val_err = utils.test(model, val_loader, criterion, epoch)
		print('Val - Loss: {:.4f}, Error: {:.4f}'.format(val_loss, val_err))
		time_elapsed = time.time() - since
		print('Total Time {:.0f}m {:.0f}s\n'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Save Metrics ###
		exp.save_history('train', trn_loss, trn_err)
		exp.save_history('val', val_loss, val_err)

		### Checkpoint ###
		exp.save_weights(model, trn_loss, val_loss, trn_err, val_err)
		exp.save_optimizer(optimizer, val_loss)

		## Early Stopping ##
		if (epoch - exp.best_val_loss_epoch) > args.MAX_PATIENCE:
			print(("Early stopping at epoch %d since no "
				   +"better loss found since epoch %.3").format(epoch, exp.best_val_loss))
			break

		### Adjust Lr (old method) ###
		utils.adjust_learning_rate(args.LEARNING_RATE, args.LR_DECAY, optimizer,
							 epoch, args.DECAY_LR_EVERY_N_EPOCHS)

		exp.epoch += 1
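
The block above transfers weights by filtering the matching keys into the new state dict and then freezes the first six children so that only the later layers are optimized. A self-contained sketch of the same pattern on a torchvision ResNet (the backbone and layer names here are illustrative, not the author's model):

import torch
import torchvision

pretrained = torchvision.models.resnet18(pretrained=True)   # source of weights
model = torchvision.models.resnet18(num_classes=2)          # freshly built target

# 1. keep only keys that exist in the target and have matching shapes
src, dst = pretrained.state_dict(), model.state_dict()
matched = {k: v for k, v in src.items() if k in dst and v.shape == dst[k].shape}
# 2. overwrite the matching entries and 3. load the merged dict
dst.update(matched)
model.load_state_dict(dst)

# freeze everything except the final classifier; optimize only the trainable parameters
for name, param in model.named_parameters():
    param.requires_grad = name.startswith('fc')
para_optim = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.RMSprop(para_optim, lr=1e-4, weight_decay=1e-4, eps=1e-12)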
Example #4
CHECKPOINT_PATH = '/home/shenxk/Documents/nodule_seg3d/results/checkpoint'
experiment_name = 'DenseU_2d_assignCenter_noWeight_fromScratch'
experiment = train_utils.Experiment(model, criterion=criterion, optimizer=optimizer,
                                    checkpoint_path=CHECKPOINT_PATH, experiment_name=experiment_name)


# In[6]:

joint_transformer, dataset, dataloader = {}, {}, {}

# ==== Train ====
# Fine-tune with cropping and random horizontal flipping on the training set to reduce overfitting
joint_transformer['train'] = transforms.Compose([    
    joint_transforms.JointAnnoCenterCrop(crop_size),
    joint_transforms.JointRandomHorizontalFlip()
    ])

joint_transformer['val'] = transforms.Compose([    
     joint_transforms.JointAnnoCenterCrop(crop_size)
    ])

dataset['train'] = io_utils_lidc3d.lidc2d_assignCenter(
                      DATA_PATH, 'train', [num_z, crop_size, crop_size], split_files = 'train.npy',
                      slice_split_files = 'train_slices.npy',
                      slice_nodule_split_files = 'train_slices_nodule.npy',
                      transform = ['ToTensor'], joint_transform=joint_transformer['train'])
dataloader['train'] = torch.utils.data.DataLoader(dataset['train'], batch_size=batch_size, 
                                                  shuffle=True, num_workers=1)
# ==== Val ====
dataset['val'] = io_utils_lidc3d.lidc2d_assignCenter(
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--DATASET_PATH',
                        type=str,
                        default='/disk2/zhangni/davis/dataset/Objectness/')
    parser.add_argument('--EXPERIMENT',
                        type=str,
                        default='/disk2/zhangni/davis/result/TrainNet/')
    parser.add_argument('--N_EPOCHS', type=int, default=200)
    parser.add_argument('--MAX_PATIENCE', type=int, default=20)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--N_CLASSES', type=int, default=10)
    parser.add_argument('--LEARNING_RATE', type=float, default=1e-2)
    parser.add_argument('--LR_DECAY', type=float, default=0.995)
    parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
    parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
    parser.add_argument('--CUDNN', type=bool, default=True)
    args = parser.parse_args()

    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = args.CUDNN

    normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
    train_joint_transformer = transforms.Compose([
        joint_transforms.JointResize((224)),
        joint_transforms.JointRandomHorizontalFlip()
    ])

    train_dset = saliency.Saliency(args.DATASET_PATH,
                                   'train',
                                   joint_transform=train_joint_transformer,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=args.batch_size,
                                               shuffle=True)

    test_joint_transforms = transforms.Compose(
        [joint_transforms.JointResize(224)])
    val_dset = saliency.Saliency(args.DATASET_PATH,
                                 'val',
                                 joint_transform=test_joint_transforms,
                                 transform=transforms.Compose(
                                     [transforms.ToTensor(), normalize]))
    val_loader = torch.utils.data.DataLoader(val_dset,
                                             batch_size=args.batch_size,
                                             shuffle=False)

    model = tiramisu.FCDenseNet57(n_classes=args.N_CLASSES)
    #model = model.cuda()
    model = torch.nn.DataParallel(model).cuda()
    print('  + Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    model.apply(utils.weights_init)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.LEARNING_RATE,
                          momentum=0.9,
                          weight_decay=0.0005)
    criterion = nn.NLLLoss2d().cuda()

    exp_dir = args.EXPERIMENT + 'Objectness'
    if os.path.exists(exp_dir):
        shutil.rmtree(exp_dir)

    exp = experiment.Experiment('Objectness', args.EXPERIMENT)
    exp.init()

    START_EPOCH = exp.epoch
    END_EPOCH = START_EPOCH + args.N_EPOCHS

    for epoch in range(1, END_EPOCH):

        since = time.time()

        ### Train ###
        trn_loss, trn_err = utils.train(model, train_loader, optimizer,
                                        criterion, epoch)
        print('Epoch {:d}: Train - Loss: {:.4f}\tErr: {:.4f}'.format(
            epoch, trn_loss, trn_err))
        time_elapsed = time.time() - since
        print('Train Time {:.0f}m {:.0f}s'.format(time_elapsed // 60,
                                                  time_elapsed % 60))

        ### Test ###
        val_loss, val_err = utils.test(model, val_loader, criterion, epoch)
        print('Val - Loss: {:.4f}, Error: {:.4f}'.format(val_loss, val_err))
        time_elapsed = time.time() - since
        print('Total Time {:.0f}m {:.0f}s\n'.format(time_elapsed // 60,
                                                    time_elapsed % 60))

        ### Save Metrics ###
        exp.save_history('train', trn_loss, trn_err)
        exp.save_history('val', val_loss, val_err)

        ### Checkpoint ###
        exp.save_weights(model, trn_loss, val_loss, trn_err, val_err)
        exp.save_optimizer(optimizer, val_loss)

        ## Early Stopping ##
        if (epoch - exp.best_val_loss_epoch) > args.MAX_PATIENCE:
            print(("Early stopping at epoch %d since no " +
                   "better loss found since epoch %.3").format(
                       epoch, exp.best_val_loss))
            break

        ### Adjust Lr (old method) ###
        if epoch % 4 == 0:
            utils.adjust_learning_rate(args.LEARNING_RATE, args.LR_DECAY,
                                       optimizer, epoch,
                                       args.DECAY_LR_EVERY_N_EPOCHS)

        exp.epoch += 1
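
utils.adjust_learning_rate is not part of this listing; a plausible step-decay implementation consistent with the call above (an assumption, not the original helper) could look like this:

def adjust_learning_rate(base_lr, decay, optimizer, epoch, decay_every_n_epochs):
    # exponential step decay: lr = base_lr * decay ** (epoch // decay_every_n_epochs)
    new_lr = base_lr * decay ** (epoch // decay_every_n_epochs)
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr
    return new_lr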
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--DATASET_PATH',
        type=str,
        default='/home/yangle/TCyb/dataset/tiramisu_7ch_corre/')
    parser.add_argument('--RESULTS_PATH',
                        type=str,
                        default='/home/yangle/TCyb/result/TrainNet/results/')
    parser.add_argument('--WEIGHTS_PATH',
                        type=str,
                        default='/home/yangle/TCyb/result/TrainNet/models/')
    parser.add_argument('--EXPERIMENT',
                        type=str,
                        default='/home/yangle/TCyb/result/TrainNet/')
    parser.add_argument('--EXPNAME', type=str, default='EncDec_7ch_corre')
    parser.add_argument('--N_EPOCHS', type=int, default=1000)
    parser.add_argument('--MAX_PATIENCE', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--N_CLASSES', type=int, default=2)
    parser.add_argument('--LEARNING_RATE', type=float, default=1e-3)
    parser.add_argument('--LR_DECAY', type=float, default=0.9)
    parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
    parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
    parser.add_argument('--CUDNN', type=bool, default=True)
    args = parser.parse_args()

    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = args.CUDNN

    normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
    train_joint_transformer = transforms.Compose(
        [joint_transforms.JointRandomHorizontalFlip()])

    train_dset = saliency.Saliency(args.DATASET_PATH,
                                   'train',
                                   joint_transform=train_joint_transformer,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=args.batch_size,
                                               shuffle=True)

    val_dset = saliency.Saliency(args.DATASET_PATH,
                                 'val',
                                 transform=transforms.Compose([
                                     transforms.ToTensor(),
                                     normalize,
                                 ]))
    val_loader = torch.utils.data.DataLoader(val_dset,
                                             batch_size=args.batch_size,
                                             shuffle=False)

    print("TrainImages: %d" % len(train_loader.dataset.imgs))
    print("ValImages: %d" % len(val_loader.dataset.imgs))

    # example_inputs, example_targets, _, _ = next(iter(train_loader))
    img, _, cont, _ = next(iter(train_loader))
    cont = Variable(cont)
    img = Variable(img)
    print('cont.size()')
    print(cont.size())
    print('img.size()')
    print(img.size())
    # correlation filter
    score_map = F.conv2d(cont, img)

    # linear normalize the score map
    score_map_norm = (score_map - score_map.min()) / (score_map.max() -
                                                      score_map.min())
    score_map = score_map_norm.data
    # score_map_e = score_map.expand(-1, 2, 64, 64)
    print(score_map.size())
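
The correlation step works because F.conv2d treats its second argument as a bank of kernels: with cont of shape [B, C, H, W] and img of shape [K, C, h, w], the score map has shape [B, K, H - h + 1, W - w + 1]. A small shape check with random tensors, followed by the same min-max normalization (the sizes are illustrative):

import torch
import torch.nn.functional as F

cont = torch.randn(1, 3, 64, 64)   # search / context region
img = torch.randn(1, 3, 17, 17)    # exemplar used as the correlation kernel

score_map = F.conv2d(cont, img)    # -> shape [1, 1, 48, 48]
score_map = (score_map - score_map.min()) / (score_map.max() - score_map.min() + 1e-8)
print(score_map.size(), float(score_map.min()), float(score_map.max()))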
Example #7
def main():

    # parameters
    # TODO: should the learning rate be adjusted during training?
    learn_rate = 1e-4
    num_epochs = 600  # maximum number of training epochs
    max_patience = 60  # patience (in epochs) for early stopping
    result_rp = '../result/model/'
    exp_name = 'P3D_saliency'

    batch_size = 3
    n_threads = 16
    # temporal duration of the inputs
    sample_duration = 16
    # value by which pixel data is divided for normalization
    norm_value = 255
    # Height and width of inputs
    sample_size = 224
    # Number of validation samples for each activity
    n_val_samples = 3
    video_path = '/data1/guoxi/p3d_floder/resized_dataset/dataset/'
    reference_path = '/data1/guoxi/p3d_floder/resized_dataset/reference_dataset/'
    # video_path = '/data1/guoxi/p3d_floder/resized_dataset_for_test/dataset/'
    # reference_path = '/data1/guoxi/p3d_floder/resized_dataset_for_test/reference_dataset/'
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]

    norm_method = Normalize(mean, std)

    # Compose chains several preprocessing steps together
    RandomCrop_transform = Compose([
        # crop at a random position ('JpegImageFile' object does not support indexing)
        joint_transforms.JointRandomCrop(224),
    ])

    RandomHorizontalFlip_transform = Compose([
        # horizontally flip the given PIL image with probability 0.5
        joint_transforms.JointRandomHorizontalFlip(),
    ])

    RandomErase_transform = Compose(
        [joint_transforms.RandomErase(probability=0.5, sh=0.4, r1=0.3)])

    # spatial transform: convert to tensor and normalize
    spatial_transform = Compose([ToTensor(norm_value=norm_value),
                                 norm_method])

    target_transform = Compose([ToTensor(norm_value=norm_value)])

    opt = [video_path, sample_duration]

    train_data = dataset.get_training_set(opt, reference_path,
                                          spatial_transform, target_transform,
                                          RandomCrop_transform,
                                          RandomHorizontalFlip_transform,
                                          RandomErase_transform)
    # num_workers: number of processes used for multi-process data loading;
    # pin_memory keeps batches in pinned (page-locked) memory so transfers to the GPU are faster
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=batch_size,
        shuffle=True,
        num_workers=n_threads,
        pin_memory=True
    )
    validation_data = dataset.get_validation_set(opt, reference_path,
                                                 spatial_transform,
                                                 target_transform, None, None,
                                                 None)
    val_loader = torch.utils.data.DataLoader(validation_data,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=n_threads,
                                             pin_memory=True)

    inputs, targets = next(iter(train_loader))
    print('inputs.size(), inputs.min(), inputs.max()', inputs.size(),
          inputs.min(), inputs.max())
    print('targets.size(), targets.min(), targets.max():', targets.size(),
          targets.min(), targets.max())

    # the pretrained classification weights are loaded from disk on every run, which may be slow
    model_cla = network_cla.P3D199(pretrained=True, num_classes=400)
    cla_dict = model_cla.state_dict()

    model = network_seg.P3D199()
    # model.apply(utils.weights_init)
    seg_dict = model.state_dict()

    pretrained_dict = {k: v for k, v in cla_dict.items() if k in seg_dict}
    seg_dict.update(pretrained_dict)
    model.load_state_dict(seg_dict)
    model.cuda()

    model = nn.DataParallel(model)

    commen_layers = [
        'conv1_custom', 'bn1', 'relu', 'maxpool', 'maxpool_2', 'layer1',
        'layer2', 'layer3'
    ]
    # separate the layers so that different learning rates can be assigned
    param_exist = []
    param_add = []
    for k, (name, module) in enumerate(model.named_children()):
        # existing layers
        if name in commen_layers:
            # print('existing layer: ', name)
            for param in module.parameters():
                param_exist.append(param)
        # adding layers
        else:
            # print('adding layer: ', name)
            for param in module.parameters():
                param_add.append(param)
    print('  + Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    optimizer = optim.Adam([{
        'params': param_exist,
        'lr': learn_rate * 0.1
    }, {
        'params': param_add
    }])
    criterion = nn.BCELoss().cuda()

    exp_dir = result_rp + exp_name
    # NOTE: an existing experiment directory will be removed
    if os.path.exists(exp_dir):
        shutil.rmtree(exp_dir)

    exp = experiment.Experiment(exp_name, result_rp)
    exp.init()

    for epoch in range(num_epochs):

        since = time.time()

        ### Train ###
        trn_loss = utils.train(model, train_loader, optimizer, criterion)
        print('Epoch {:d}: Train - Loss: {:.4f}'.format(epoch, trn_loss))
        time_elapsed = time.time() - since
        print('Train Time {:.0f}m {:.0f}s'.format(time_elapsed // 60,
                                                  time_elapsed % 60))

        ### Test ###
        val_loss = utils.test(model, val_loader, criterion)
        print('Val - Loss: {:.4f}'.format(val_loss))
        time_elapsed = time.time() - since
        print('Total Time {:.0f}m {:.0f}s\n'.format(time_elapsed // 60,
                                                    time_elapsed % 60))

        ### Save Metrics ###
        exp.save_history('train', trn_loss)
        exp.save_history('val', val_loss)

        ### Checkpoint ###
        exp.save_weights(model, trn_loss, val_loss)
        exp.save_optimizer(optimizer, val_loss)

        ## Early Stopping ##
        if (epoch - exp.best_val_loss_epoch) > max_patience:
            print(("Early stopping at epoch %d since no " +
                   "better loss found since epoch %.3").format(
                       epoch, exp.best_val_loss))
            break

        exp.epoch += 1
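
Because the criterion here is nn.BCELoss, the segmentation network is expected to output probabilities in [0, 1] (typically through a final sigmoid); nn.BCEWithLogitsLoss would be the choice for raw logits. A minimal usage check with illustrative tensor sizes:

import torch
import torch.nn as nn

pred = torch.sigmoid(torch.randn(2, 1, 224, 224))        # probabilities in [0, 1]
target = torch.randint(0, 2, (2, 1, 224, 224)).float()   # binary saliency mask
loss = nn.BCELoss()(pred, target)
print(loss.item())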
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--DATASET_PATH',
                        type=str,
                        default='/home/yangle/dataset/TrainNet/')
    parser.add_argument('--RESULTS_PATH',
                        type=str,
                        default='/home/yangle/result/TrainNet/results/')
    parser.add_argument('--WEIGHTS_PATH',
                        type=str,
                        default='/home/yangle/result/TrainNet/models/')
    parser.add_argument('--EXPERIMENT',
                        type=str,
                        default='/home/yangle/result/TrainNet/')
    parser.add_argument('--N_EPOCHS', type=int, default=150)
    parser.add_argument('--MAX_PATIENCE', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=10)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--N_CLASSES', type=int, default=2)
    parser.add_argument('--LEARNING_RATE', type=float, default=1e-4)
    parser.add_argument('--LR_DECAY', type=float, default=0.995)
    parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
    parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
    parser.add_argument('--CUDNN', type=bool, default=True)
    args = parser.parse_args()

    torch.cuda.manual_seed(args.seed)
    cudnn.benchmark = args.CUDNN

    normalize = transforms.Normalize(mean=saliency.mean, std=saliency.std)
    train_joint_transformer = transforms.Compose([
        #joint_transforms.JointRandomCrop(224),
        joint_transforms.JointResize(224),
        #joint_transforms.JointRandomSizedCrop(224),
        joint_transforms.JointRandomHorizontalFlip()
    ])

    train_dset = saliency.Saliency(args.DATASET_PATH,
                                   'train',
                                   joint_transform=train_joint_transformer,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       normalize,
                                   ]))
    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=args.batch_size,
                                               shuffle=True)

    test_joint_transforms = transforms.Compose(
        [joint_transforms.JointResize(224)])
    val_dset = saliency.Saliency(args.DATASET_PATH,
                                 'val',
                                 joint_transform=test_joint_transforms,
                                 transform=transforms.Compose(
                                     [transforms.ToTensor(), normalize]))
    val_loader = torch.utils.data.DataLoader(val_dset,
                                             batch_size=args.batch_size,
                                             shuffle=False)

    # test_dset = saliency.Saliency(
    # 	args.DATASET_PATH, 'test', joint_transform=test_joint_transforms,
    # 	transform=transforms.Compose([
    # 		transforms.ToTensor(),
    # 		normalize
    # 	]))
    # test_loader = torch.utils.data.DataLoader(
    # 	test_dset, batch_size=args.batch_size, shuffle=False)

    print("TrainImages: %d" % len(train_loader.dataset.imgs))
    print("ValImages: %d" % len(val_loader.dataset.imgs))
    # print("TestImages: %d" % len(test_loader.dataset.imgs))

    example_inputs, example_targets = next(iter(train_loader))
    print("InputsBatchSize: ", example_inputs.size())
    print("TargetsBatchSize: ", example_targets.size())
    print("\nInput (size, max, min) ---")
    #input
    i = example_inputs[0]
    print(i.size())
    print(i.max())
    print(i.min())
    print("Target (size, max, min) ---")
    #target
    t = example_targets[0]
    print(t.size())
    print(t.max())
    print(t.min())

    model = tiramisu.FCDenseNet57(n_classes=args.N_CLASSES)
    model = model.cuda()
    #model = torch.nn.DataParallel(model).cuda()
    print('  + Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    model.apply(utils.weights_init)
    #optimizer = optim.RMSprop(model.parameters(), lr=args.LEARNING_RATE, weight_decay=args.WEIGHT_DECAY)
    optimizer = optim.RMSprop(model.parameters(),
                              lr=args.LEARNING_RATE,
                              weight_decay=args.WEIGHT_DECAY,
                              eps=1e-12)
    criterion = nn.NLLLoss2d().cuda()

    exp_dir = args.EXPERIMENT + 'tiramisu_rms_prelu'
    if os.path.exists(exp_dir):
        shutil.rmtree(exp_dir)

    exp = experiment.Experiment('tiramisu_rms_prelu', args.EXPERIMENT)
    exp.init()

    START_EPOCH = exp.epoch
    END_EPOCH = START_EPOCH + args.N_EPOCHS

    for epoch in range(START_EPOCH, END_EPOCH):

        since = time.time()

        ### Train ###
        trn_loss, trn_err = utils.train(model, train_loader, optimizer,
                                        criterion, epoch)
        print('Epoch {:d}: Train - Loss: {:.4f}\tErr: {:.4f}'.format(
            epoch, trn_loss, trn_err))
        time_elapsed = time.time() - since
        print('Train Time {:.0f}m {:.0f}s'.format(time_elapsed // 60,
                                                  time_elapsed % 60))

        ### Test ###
        val_loss, val_err = utils.test(model, val_loader, criterion, epoch)
        print('Val - Loss: {:.4f}, Error: {:.4f}'.format(val_loss, val_err))
        time_elapsed = time.time() - since
        print('Total Time {:.0f}m {:.0f}s\n'.format(time_elapsed // 60,
                                                    time_elapsed % 60))

        ### Save Metrics ###
        exp.save_history('train', trn_loss, trn_err)
        exp.save_history('val', val_loss, val_err)

        ### Checkpoint ###
        exp.save_weights(model, trn_loss, val_loss, trn_err, val_err)
        exp.save_optimizer(optimizer, val_loss)

        # ## Early Stopping ##
        # if (epoch - exp.best_val_loss_epoch) > args.MAX_PATIENCE:
        # 	print(("Early stopping at epoch %d since no "
        # 		   +"better loss found since epoch %.3").format(epoch, exp.best_val_loss))
        # 	break

        # lr_sche.step(val_loss)
        #### print learning rate ####
        # for param_group in optimizer.param_groups:
        # 	print(param_group['lr'])

        ### Adjust Lr (old method) ###
        utils.adjust_learning_rate(args.LEARNING_RATE, args.LR_DECAY,
                                   optimizer, epoch,
                                   args.DECAY_LR_EVERY_N_EPOCHS)

        exp.epoch += 1
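
utils.weights_init, applied above via model.apply, is not shown in the listing. A typical initializer of this kind (an assumption about its contents, not the original code) sets Kaiming weights for convolutions and ones/zeros for batch norm:

import torch.nn as nn

def weights_init(m):
    # model.apply(weights_init) calls this once for every submodule
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.ones_(m.weight)
        nn.init.zeros_(m.bias)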
Example #9
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('--DATASET_PATH', type=str, default='/disk5/yangle/PAMI/dataset/fc-resnet/')
	parser.add_argument('--EXPERIMENT', type=str, default='/disk5/yangle/PAMI/result/LearnModel/')
	# parser.add_argument('--DATASET_PATH', type=str, default='/disk1/hpl/segmentation/dataset/')
	# parser.add_argument('--EXPERIMENT', type=str, default='/disk1/hpl/segmentation/model/model_baselinexin/')
	parser.add_argument('--N_EPOCHS', type=int, default=200)
	parser.add_argument('--MAX_PATIENCE', type=int, default=30)
	parser.add_argument('--batch_size', type=int, default=32)
	parser.add_argument('--seed', type=int, default=0)
	parser.add_argument('--N_CLASSES', type=int, default=2)
	parser.add_argument('--LEARNING_RATE', type=float, default=1e-4)
	parser.add_argument('--LR_DECAY', type=float, default=0.995)
	parser.add_argument('--DECAY_LR_EVERY_N_EPOCHS', type=int, default=1)
	parser.add_argument('--WEIGHT_DECAY', type=float, default=0.0001)
	parser.add_argument('--CUDNN', type=bool, default=True)
	args = parser.parse_args()

	torch.cuda.manual_seed(args.seed)
	cudnn.benchmark = args.CUDNN

	normalize = transforms.Normalize(mean=dataset.mean, std=dataset.std)
	train_joint_transformer = transforms.Compose([
		joint_transforms.JointResize(256),
		joint_transforms.JointRandomCrop(224),
		joint_transforms.JointRandomHorizontalFlip(),
        ])
	mask_size_list = [28, 28, 28, 56, 112]

	train_dset = dataset.Saliency(
		args.DATASET_PATH, 'TRain', train_joint_transformer, mask_size_list,
		transform=transforms.Compose([joint_transforms.RandomErasing_random(probability=0.5, sh=0.4, r1=0.3, ),
									  transforms.ToTensor(), normalize, ]))
	train_loader = torch.utils.data.DataLoader(
		train_dset, batch_size=args.batch_size, shuffle=True, num_workers=args.batch_size)

	test_joint_transforms_img = transforms.Compose([joint_transforms.JointResize(224)])
	val_dset = dataset.TestData(args.DATASET_PATH, 'VAl', test_joint_transforms_img,
								transform=transforms.Compose([transforms.ToTensor(), normalize]),
								target_transform=transforms.Compose([transforms.ToTensor()]))
	val_loader = torch.utils.data.DataLoader(
		val_dset, batch_size=args.batch_size, shuffle=False)

	print("TrainImages: %d" % len(train_loader.dataset.imgs))
	print("ValImages: %d" % len(val_loader.dataset.imgs))

	example_inputs, example_targets = next(iter(train_loader))
	print("InputsBatchSize: ", example_inputs.size())
	print("TargetsBatchSize: ", len(example_targets))
	print("\nInput (size, max, min) ---")
	# input
	i = example_inputs[0]
	print(i.size())
	print(i.max())
	print(i.min())
	print("Target (size, max, min) ---")
	# target
	for mask in example_targets:
		print(mask.size())
		print(mask.max())
		print(mask.min())

	resnet34 = torchvision.models.resnet34(pretrained=True)
	dict_resnet34 = resnet34.state_dict()
	model = SegNet.resnet34()
	# initialize the model weights
	model.apply(utils.weights_init)
	SegNet_dict = model.state_dict()

	pretrained_dict = {k: v for k, v in dict_resnet34.items() if k in SegNet_dict}
	# for k in pretrained_dict:
	# 	print(k)
	SegNet_dict.update(pretrained_dict)
	model.load_state_dict(SegNet_dict)

	# separate the layers so that different learning rates can be assigned
	param_exist = []
	param_add = []
	for k, (name, module) in enumerate(model.named_children()):
		# existing layers including: conv1 bn1 relu maxpool
		# layer1 layer2 layer3 layer4
		if k < 8:
			for param in module.parameters():
				param_exist.append(param)
		# adding layers including: bottleneck skip3 skip2 skip1 skip0
		# conv_end_1 bn_end_1 salmap Sigmoid mask0 mask4 mask3 mask2 mask1
		else:
			for param in module.parameters():
				param_add.append(param)

	model = model.cuda()
	# model = torch.nn.DataParallel(model).cuda()

	print('  + Number of params: {}'.format(
		sum([p.data.nelement() for p in model.parameters()])))
	optimizer = optim.RMSprop([{'params': param_exist, 'lr': args.LEARNING_RATE*0.1},
						   {'params': param_add}], lr=args.LEARNING_RATE,
							  weight_decay=args.WEIGHT_DECAY, eps=1e-12)
	criterion = nn.NLLLoss().cuda()

	exp_dir = args.EXPERIMENT + 'test'
	if os.path.exists(exp_dir):
		shutil.rmtree(exp_dir)

	exp = experiment.Experiment('test', args.EXPERIMENT)
	exp.init()

	START_EPOCH = exp.epoch
	END_EPOCH = START_EPOCH + args.N_EPOCHS

	for epoch in range(START_EPOCH, END_EPOCH):

		since = time.time()

		# ### Train ###
		trn_loss, trn_err = utils.train(model, train_loader, optimizer, criterion, epoch)
		print('Epoch {:d}: Train - Loss: {:.4f}\tErr: {:.4f}'.format(epoch, trn_loss, trn_err))
		time_elapsed = time.time() - since
		print('Train Time {:.0f}m {:.0f}s'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Test ###
		val_loss, val_err = utils.test_score(model, val_loader)
		print('Val - Loss: {:.4f}, Error: {:.4f}'.format(val_loss, val_err))
		time_elapsed = time.time() - since
		print('Total Time {:.0f}m {:.0f}s\n'.format(
			time_elapsed // 60, time_elapsed % 60))

		### Save Metrics ###
		exp.save_history('train', trn_loss, trn_err)
		exp.save_history('val', val_loss, val_err)

		### Checkpoint ###
		exp.save_weights(model, trn_loss, val_loss, trn_err, val_err)
		exp.save_optimizer(optimizer, val_loss)

		## Early Stopping ##
		if (epoch - exp.best_val_loss_epoch) > args.MAX_PATIENCE:
			print(("Early stopping at epoch %d since no "
				   +"better loss found since epoch %.3").format(epoch, exp.best_val_loss))
			break

		### Adjust Lr (old method) ###
		utils.adjust_learning_rate(args.LEARNING_RATE, args.LR_DECAY, optimizer,
							 epoch, args.DECAY_LR_EVERY_N_EPOCHS)

		exp.epoch += 1
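
As in the earlier examples, the criterion is nn.NLLLoss, which expects log-probabilities, so the model presumably ends with a log_softmax over its two classes. A minimal illustration of the expected input and target shapes for dense, per-pixel prediction:

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(4, 2, 224, 224)          # [batch, n_classes, H, W]
log_probs = F.log_softmax(logits, dim=1)
target = torch.randint(0, 2, (4, 224, 224))   # one class index per pixel
loss = nn.NLLLoss()(log_probs, target)
print(loss.item())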