Example #1
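# Training entry point: parses CLI args, loads CIFAR-10/100 data, builds the model,
# and runs a manual train/validate loop (by epochs or by iterations) with separately
# scheduled outer and inner learning rates and optional scheduled label noise.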
def main():
	args = parser.parse_args()
	for arg in vars(args):
		print(arg, " : ", getattr(args, arg))
	
	augment = not args.no_augment
	train_loader, val_loader = train_util.load_data(
		args.dataset, 
		args.batch_size, 
		dataset_path=args.data_dir,
		augment=augment)
	
	print("=> creating model '{}'".format(args.arch))
	model_args = {
		"num_classes": 10 if args.dataset == "cifar10" else 100	
	}
	model = models.__dict__[args.arch](**model_args)
	print("Device count", torch.cuda.device_count())
	if args.parallel:
		model = nn.DataParallel(model)

	print('Number of model parameters: {}'.format(
		sum([p.data.nelement() for p in model.parameters()])))

	model = model.cuda()

	cudnn.benchmark = True

	criterion = nn.CrossEntropyLoss().cuda()
	optim_hparams = {
		'base_lr' : args.lr, 
		'momentum' : args.momentum,
		'weight_decay' : args.weight_decay,
		'optim_type' : args.optim_type
	}

	lr_hparams = {
		'lr_sched' : args.lr_sched, 
		'use_iter': args.train_by_iters}

	lr_hparams['iters_per_epoch'] = args.iters_per_epoch if args.iters_per_epoch else 391
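	# 391 here and below is presumably ceil(50000 / 128): CIFAR iterations per epoch at batch size 128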

	inner_lr_hparams = {
		'lr_sched' : args.inner_anneal,
		'use_iter' : args.train_by_iters}

	inner_lr_hparams['iters_per_epoch'] = args.iters_per_epoch if args.iters_per_epoch else 391

	optimizer = optim_util.create_optimizer(
		model,
		optim_hparams)

	curr_iter = args.start_iter
	epoch = args.start_epoch

	best_val = 0

	inner_opt = optim_util.one_step_optim(
		model, args.inner_lr)
	while True:
		model.train()
		train_acc = train_util.AverageMeter()
		train_loss = train_util.AverageMeter()
		timer = train_util.AverageMeter()
		for i, (input_data, target) in enumerate(train_loader):
					
			lr = lr_util.adjust_lr(
				optimizer,
				epoch,
				curr_iter,
				args.lr,
				lr_hparams)

			inner_lr = lr_util.adjust_lr(
				inner_opt,
				epoch,
				curr_iter,
				args.inner_lr,
				inner_lr_hparams)

			target = target.cuda(non_blocking=True)
			input_data = input_data.cuda()

			update_hparams = {
				'update_type' : args.update_type.split('zero_switch_')[-1],
				'inner_lr' : inner_lr[0],
				'use_bn' : not args.no_bn,
				'label_noise' : 0,
				'use_norm_one' : args.use_norm_one
			}

			if args.label_noise > 0:
				label_noise = train_util.label_noise_sched(
					args.label_noise, 
					epoch, 
					curr_iter, 
					args.train_by_iters, 
					args.ln_sched, 
					iters_per_epoch=args.iters_per_epoch,
					ln_decay=args.ln_decay)
				if args.update_type != 'mean_zero_label_noise' or args.also_flip_labels:
					# skip flipping when update_type == 'mean_zero_label_noise' unless also_flip_labels is set
					target = train_util.apply_label_noise(
						target,
						label_noise,
						num_classes=10 if args.dataset == 'cifar10' else 100)

				update_hparams['label_noise'] = label_noise
				
			loss, output, time_taken = update_loss_util.update_step(
				criterion,
				optimizer,
				model,
				input_data,
				target,
				update_hparams)

			prec1 = accuracy(output.data, target, topk=(1,))[0]
			train_loss.update(loss, target.size(0))
			train_acc.update(prec1, target.size(0))
			timer.update(time_taken, 1)
			avg_loss = train_loss.avg
			avg_acc = train_acc.avg

			loss_str = 'Loss '
			loss_str += '{:.4f} (standard)\t'.format(avg_loss)


			if i % args.print_freq == 0:
				log_str = ('Epoch: [{0}][{1}/{2}]\t'
				  'Time {3:.3f}\t {4}'
				  'Prec@1 {5:.3f}').format(
					  epoch, i, len(train_loader), timer.avg, loss_str, avg_acc)
				print(log_str)

			curr_iter += 1

		print("Validating accuracy.")
		val_acc, val_loss = train_util.validate(
			val_loader,
			model,
			criterion,
			epoch,
			print_freq=args.print_freq)

		is_best = val_acc > best_val
		best_val = val_acc if is_best else best_val

		print('Best accuracy: ', best_val)

		epoch += 1
		if args.train_by_iters:
			if curr_iter > args.iters:
				break
		else:
			if epoch > args.epochs:
				break
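
The training loop above relies on train_util.AverageMeter and an accuracy helper that are not shown. A minimal sketch, assuming they follow the standard PyTorch ImageNet-example implementations:

class AverageMeter(object):
    """Tracks a running sum and average of a scalar (loss, accuracy, time)."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Computes top-k precision (in percent) for the given outputs and labels."""
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
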
Example #2
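# Training entry point with checkpoint resuming: loads (optionally label-corrupted)
# data, enables the chosen regularizer only after switch_time epochs, and writes
# per-epoch scalars and checkpoints to a per-run save directory.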
def main():
    args = parser.parse_args()
    for arg in vars(args):
        print(arg, " : ", getattr(args, arg))
    save_str = "arch_%s_reg_%s" % (args.arch, args.reg_type)
    save_dir = os.path.join(args.save_dir, save_str)

    train_loader, val_loader = data_util.load_data(
        args.batch_size,
        args.dataset,
        data_path=args.data_dir,
        corrupt_prob=args.corrupt_prob,
        augment=args.augment)

    print("=> creating model '{}'".format(args.arch))
    model_args = {"num_classes": 10 if args.dataset == "cifar10" else 100}

    model = models.__dict__[args.arch](**model_args)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = model.cuda()

    best_val = 0
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_val = checkpoint['best_val']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    criterion = nn.CrossEntropyLoss().cuda()
    optim_hparams = {
        'base_lr': args.lr,
        'momentum': args.momentum,
        'weight_decay': args.weight_decay
    }
    lr_hparams = {'lr_sched': args.lr_sched}

    optimizer = train_util.create_optimizer(model, optim_hparams)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    save_util.write_args(args, save_dir)
    scalar_summary_file = os.path.join(save_dir, "scalars.txt")
    data_dict_file = os.path.join(save_dir, "data_dict.pkl")
    scalar_dict = {}
    all_dict = {}

    for epoch in range(args.start_epoch, args.epochs):
        lr = train_util.adjust_lr(optimizer, epoch + 1, args.lr, lr_hparams)

        reg_type = 'none' if epoch + 1 < args.switch_time else args.reg_type

        train_hparams = {
            "inner_lr": args.inner_lr,
            "inner_step": args.inner_steps,
            "lr": lr,
            "reg_type": reg_type,
            "inner_wd": args.inner_wd
        }

        train_acc, train_loss, perturb_acc = train_util.train_loop(
            train_loader,
            model,
            criterion,
            optimizer,
            epoch,
            train_hparams,
            print_freq=args.print_freq)

        print("Validating clean accuracy.")
        val_acc, val_loss = train_util.validate(val_loader,
                                                model,
                                                criterion,
                                                epoch,
                                                print_freq=args.print_freq)

        scalar_epoch = {
            "lr": lr,
            "inner_lr": args.inner_lr,
            "inner_steps": args.inner_steps,
            "train_loss": train_loss,
            "train_acc": train_acc,
            "val_loss": val_loss,
            "val_acc": val_acc,
            "perturb_acc": perturb_acc,
            "reg_type": reg_type
        }

        scalar_dict[epoch + 1] = scalar_epoch

        save_util.log_scalar_file(scalar_epoch, epoch + 1, scalar_summary_file)

        is_best = val_acc > best_val
        best_val = max(val_acc, best_val)

        save_util.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_val': best_val,
            }, scalar_dict, is_best, save_dir)

        print('Best accuracy: ', best_val)
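
train_util.adjust_lr, used in Examples #2-#5, is not shown. A minimal sketch, assuming lr_sched is a comma-separated list of milestone epochs at which the learning rate is divided by 10 (the real schedule format may differ):

def adjust_lr(optimizer, epoch, base_lr, lr_hparams):
    """Step decay: divide the base learning rate by 10 at each milestone epoch."""
    milestones = [int(e) for e in str(lr_hparams['lr_sched']).split(',') if e]
    lr = base_lr * (0.1 ** sum(epoch >= m for m in milestones))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
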
Example #3
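# Variant with a timestamped save directory: optionally restores weights from a
# previous checkpoint, trains with the reg_type/data_reg/j_thresh regularization
# hyperparameters, and tracks the epoch with the best validation accuracy.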
def main():
    args = parser.parse_args()
    for arg in vars(args):
        print(arg, " : ", getattr(args, arg))
    timestamp = datetime.utcnow().strftime("%H_%M_%S_%f-%d_%m_%y")
    save_str = "arch_%s_reg_%s_%s" % (args.arch, args.reg_type, timestamp)
    save_dir = os.path.join(args.save_dir, save_str)

    augment = not args.no_augment
    train_loader, val_loader = train_util.load_data(args.dataset,
                                                    args.batch_size,
                                                    dataset_path=args.data_dir,
                                                    augment=augment)

    print("=> creating model '{}'".format(args.arch))
    model_args = {"num_classes": 10 if args.dataset == "cifar10" else 100}
    model = models.__dict__[args.arch](**model_args)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    if args.resume:
        model_file = os.path.join(args.resume, "checkpoint.pth.tar")
        model = save_util.load_model_sdict(model, model_file)

    model = model.cuda()

    cudnn.benchmark = True

    criterion = nn.CrossEntropyLoss().cuda()
    optim_hparams = {
        'base_lr': args.lr,
        'momentum': args.momentum,
        'weight_decay': args.weight_decay
    }
    lr_hparams = {'lr_sched': args.lr_sched}
    optimizer = train_util.create_optimizer(model, optim_hparams)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    save_util.write_args(args, save_dir)
    scalar_summary_file = os.path.join(save_dir, "scalars.txt")
    scalar_dict = {}
    best_val = 0
    best_epoch = -1
    for epoch in range(args.start_epoch, args.epochs):
        lr = train_util.adjust_lr(optimizer, epoch + 1, args.lr, lr_hparams)

        train_hparams = {
            "reg_type": args.reg_type,
            "data_reg": args.data_reg,
            "j_thresh": args.j_thresh
        }

        train_acc, train_loss = train_util.train_loop(
            train_loader,
            model,
            criterion,
            optimizer,
            epoch,
            train_hparams,
            print_freq=args.print_freq)

        val_acc, val_loss = train_util.validate(val_loader,
                                                model,
                                                criterion,
                                                epoch,
                                                print_freq=args.print_freq)

        is_best = val_acc > best_val
        best_val = max(val_acc, best_val)
        if is_best:
            best_epoch = epoch + 1

        scalar_epoch = {
            "lr": lr,
            "train_loss": train_loss,
            "train_acc": train_acc,
            "val_loss": val_loss,
            "val_acc": val_acc,
            "best_val": best_val,
            "best_epoch": best_epoch
        }
        scalar_dict[epoch + 1] = scalar_epoch

        save_util.log_scalar_file(scalar_epoch, epoch + 1, scalar_summary_file)

        save_util.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_val': best_val,
            }, scalar_dict, is_best, save_dir)

        print('Best accuracy: ', best_val)
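
save_util.save_checkpoint is also not shown. A plausible sketch, assuming the usual pattern of saving the latest state plus the scalar history and copying it when it is the best so far; the pickle file name is illustrative, while checkpoint.pth.tar matches the file read when resuming in this example:

import os
import pickle
import shutil

import torch


def save_checkpoint(state, scalar_dict, is_best, save_dir,
                    filename='checkpoint.pth.tar'):
    """Save the latest checkpoint and scalar history; keep a copy of the best model."""
    path = os.path.join(save_dir, filename)
    torch.save(state, path)
    with open(os.path.join(save_dir, 'scalars.pkl'), 'wb') as f:
        pickle.dump(scalar_dict, f)
    if is_best:
        shutil.copyfile(path, os.path.join(save_dir, 'model_best.pth.tar'))
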
Example #4
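# Trains a single wideresnet16 on CIFAR-10 from a model_specs dict with decaying
# activation noise, validates on clean, augmented, and transform-only loaders, and
# checkpoints on the best augmented-validation accuracy.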
def train_model(model_specs, save_dir, train_loader, val_clean_loader,
                val_aug_loader, transform_only_loader):
    print(
        "***************************************************************************"
    )
    print("Training model:", model_specs["name"])
    model_args = {"num_classes": 10}
    model = models.__dict__['wideresnet16'](**model_args)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = model.cuda()

    cudnn.benchmark = True

    criterion = nn.CrossEntropyLoss().cuda()
    optim_hparams = {
        'base_lr': model_specs['lr'],
        'momentum': 0.9,
        'weight_decay': 5e-4,
        'lr_type': 'default'
    }
    lr_hparams = {'lr_sched': model_specs['lr_sched']}
    optimizer = train_util.create_optimizer(model, optim_hparams)

    save_dir = os.path.join(save_dir, model_specs["name"])
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    scalar_summary_file = os.path.join(save_dir, "scalars.txt")
    scalar_dict = {}
    best_val = 0

    for epoch in range(model_specs['epochs']):
        lr = train_util.adjust_lr(optimizer, epoch + 1, model_specs['lr'],
                                  lr_hparams)

        train_hparams = {
            "reg_type": model_specs['reg_type'],
            "noise_level": train_util.adjust_act_noise(
                model_specs['act_noise_decay'],
                model_specs['act_noise_decay_rate'],
                model_specs['act_noise'], epoch + 1)
        }

        train_acc, train_loss = train_util.train_loop(train_loader,
                                                      model,
                                                      criterion,
                                                      optimizer,
                                                      epoch,
                                                      train_hparams,
                                                      print_freq=10)

        print("Validating clean accuracy.")
        val_clean_acc, val_clean_loss = train_util.validate(val_clean_loader,
                                                            model,
                                                            criterion,
                                                            epoch,
                                                            print_freq=10)

        print("Validating mixed accuracy")
        val_aug_acc, val_aug_loss = train_util.validate(val_aug_loader,
                                                        model,
                                                        criterion,
                                                        epoch,
                                                        print_freq=10)

        print("Validating additional transforms only.")
        t_acc, t_loss = train_util.validate(transform_only_loader,
                                            model,
                                            criterion,
                                            epoch,
                                            print_freq=2)

        scalar_epoch = {
            "lr": lr,
            "train_loss": train_loss,
            "train_acc": train_acc,
            "val_clean_loss": val_clean_loss,
            "val_clean_acc": val_clean_acc,
            "val_aug_loss": val_aug_loss,
            "val_aug_acc": val_aug_acc,
            "patch_acc": t_acc
        }

        scalar_dict[epoch + 1] = scalar_epoch

        save_util.log_scalar_file(scalar_epoch, epoch + 1, scalar_summary_file)

        save_util.make_scalar_plots(scalar_dict, save_dir)

        is_best = val_aug_acc > best_val
        best_val = max(val_aug_acc, best_val)

        save_util.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_val': best_val,
            }, scalar_dict, is_best, save_dir)

        print('Best accuracy: ', best_val)
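
train_util.adjust_act_noise is not shown either; a sketch under the assumption that it exponentially decays the activation-noise level once decay is enabled (the actual decay rule may differ):

def adjust_act_noise(noise_decay, noise_decay_rate, base_noise, epoch):
    """Return the activation-noise level for this epoch, decayed if enabled."""
    if not noise_decay:
        return base_noise
    return base_noise * (noise_decay_rate ** (epoch - 1))
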
Example #5
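# Argument-driven variant of the previous examples: optional dropout (via dropRate),
# decaying activation noise, checkpoint resuming, and per-epoch scalar logging.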
def main():
    args = parser.parse_args()
    for arg in vars(args):
        print(arg, " : ", getattr(args, arg))
    timestamp = datetime.utcnow().strftime("%H_%M_%S_%f-%d_%m_%y")
    save_str = "arch_%s_reg_%s_%s" % (args.arch, args.reg_type, timestamp)
    save_dir = os.path.join(args.save_dir, save_str)

    augment = not args.no_augment
    train_loader, val_loader = train_util.load_data(args.dataset,
                                                    args.batch_size,
                                                    dataset_path=args.data_dir,
                                                    augment=augment)

    print("=> creating model '{}'".format(args.arch))
    model_args = {"num_classes": 10 if args.dataset == "cifar10" else 100}
    if args.reg_type == 'dropout':
        print("Using dropout.")
        model_args['dropRate'] = args.dropout
    model = models.__dict__[args.arch](**model_args)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = model.cuda()

    best_val = 0
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_val = checkpoint['best_val']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    criterion = nn.CrossEntropyLoss().cuda()
    optim_hparams = {
        'base_lr': args.lr,
        'momentum': args.momentum,
        'weight_decay': args.weight_decay
    }
    lr_hparams = {'lr_sched': args.lr_sched}
    optimizer = train_util.create_optimizer(model, optim_hparams)

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    save_util.write_args(args, save_dir)
    scalar_summary_file = os.path.join(save_dir, "scalars.txt")
    scalar_dict = {}
    all_dict = {}

    for epoch in range(args.start_epoch, args.epochs):
        lr = train_util.adjust_lr(optimizer, epoch + 1, args.lr, lr_hparams)

        train_hparams = {
            "reg_type": args.reg_type,
            "noise_level": train_util.adjust_act_noise(
                args.act_noise_decay, args.act_noise_decay_rate,
                args.act_noise, epoch + 1)
        }

        train_acc, train_loss = train_util.train_loop(
            train_loader,
            model,
            criterion,
            optimizer,
            epoch,
            train_hparams,
            print_freq=args.print_freq)

        print("Validating accuracy.")
        val_acc, val_loss = train_util.validate(val_loader,
                                                model,
                                                criterion,
                                                epoch,
                                                print_freq=args.print_freq)

        scalar_epoch = {
            "lr": lr,
            "train_loss": train_loss,
            "train_acc": train_acc,
            "val_loss": val_loss,
            "val_acc": val_acc
        }

        scalar_dict[epoch + 1] = scalar_epoch

        save_util.log_scalar_file(scalar_epoch, epoch + 1, scalar_summary_file)

        is_best = val_acc > best_val
        best_val = max(val_acc, best_val)

        save_util.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_val': best_val,
            }, scalar_dict, is_best, save_dir)

        print('Best accuracy: ', best_val)
Example #6
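# TensorFlow 1.x driver: seeds the RNGs, builds either the 'ours' network or a
# semantic-segmentation network, optionally restores pretrained weights, trains,
# reports train/test losses, and runs the requested evaluations.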
def run(args, train_data, val_data, test_data):
    tf.set_random_seed(1234)
    np.random.seed(1234)
    random.seed(1234)

    print('\n==== PARAMS ====')
    for arg in vars(args):
        print('{}={}'.format(arg, getattr(args, arg)))
    print('========\n')

    if args.exp_type == 'ours':
        net = Network(train_data.n_points, train_data.n_dim,
                      test_data.n_seg_ids, args.K, args.batch_size,
                      args.init_learning_rate, args.decay_step,
                      args.decay_rate, args.bn_decay_step,
                      args.l21_norm_weight, args.net_options)
    elif args.exp_type == 'sem_seg':
        print("## Sementic Segmentation ##")
        net = NetworkSemSeg(train_data.n_points, train_data.n_dim,
                            train_data.n_labels, args.batch_size,
                            args.init_learning_rate, args.decay_step,
                            args.decay_rate, args.bn_decay_step,
                            args.net_options)
    else:
        raise ValueError('Unknown exp_type: {}'.format(args.exp_type))

    config = tf.ConfigProto()
    config.allow_soft_placement = True
    config.gpu_options.allow_growth = True

    with tf.Session(config=config, graph=net.graph) as sess:
        sess.run(tf.global_variables_initializer(), {net.is_training: True})

        if args.in_model_dirs:
            include = ''
            for in_model_dir in args.in_model_dirs.split(','):
                assert (load_model(sess, in_model_dir, include))

        if args.train:
            train(sess,
                  net,
                  args.exp_type,
                  train_data,
                  val_data,
                  n_epochs=args.n_epochs,
                  snapshot_epoch=args.snapshot_epoch,
                  validation_epoch=args.validation_epoch,
                  model_dir=args.out_model_dir,
                  log_dir=args.log_dir,
                  data_name=train_data.name,
                  output_generator=None)

        train_loss, _ = validate(sess, net, args.exp_type, train_data)
        test_loss, _ = validate(sess, net, args.exp_type, test_data)

        msg = "|| Train Loss: {:6f}".format(train_loss)
        msg += " | Test Loss: {:6f}".format(test_loss)
        msg += " ||"
        print(msg)

        if args.train:
            # Save training result.
            if not os.path.exists(args.out_dir): os.makedirs(args.out_dir)
            out_file = os.path.join(
                args.out_dir,
                '{}.txt'.format(datetime.now().strftime("%Y-%m-%d_%H-%M-%S")))
            with open(out_file, 'w') as f:
                f.write(msg + '\n')
            print("Saved '{}'.".format(out_file))

        if args.exp_type == 'ours':
            if 'eval' in args.eval_type:
                evaluate.evaluate(sess, net, test_data, args.out_dir)
            if 'eval_keypoints' in args.eval_type:
                evaluate_keypoints.evaluate(sess, net, test_data, args.out_dir)
            if 'eval_obj_det' in args.eval_type:
                evaluate_obj_det.evaluate(sess, net, test_data, args.out_dir)
            if 'save_dict' in args.eval_type:
                P = test_data.point_clouds
                A = predict_A(P, sess, net)
                out_file = os.path.join(args.out_dir, 'dictionary.npy')
                np.save(out_file, A)
                print("Saved '{}'".format(out_file))
        elif args.exp_type == 'sem_seg':
            evaluate_sem_seg.evaluate(sess, net, test_data, args.out_dir)
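
The load_model helper used above is not shown. A minimal sketch, assuming it restores every global variable whose name contains include from the newest checkpoint in model_dir, using the TensorFlow 1.x Saver API:

def load_model(sess, model_dir, include=''):
    """Restore matching variables from the newest checkpoint; return success."""
    ckpt = tf.train.get_checkpoint_state(model_dir)
    if ckpt is None or not ckpt.model_checkpoint_path:
        return False
    var_list = [v for v in tf.global_variables() if include in v.name]
    saver = tf.train.Saver(var_list)
    saver.restore(sess, ckpt.model_checkpoint_path)
    print("Restored '{}'.".format(ckpt.model_checkpoint_path))
    return True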