Code Example #1
    assert (0 <= FLAGS.eps and FLAGS.eps < 0.5)
    print("== Noise that will be used for ALINK: %s ==" % (FLAGS.noise))

    # Set X_dig_post for finetuning second version of model
    if FLAGS.split_ratio == 1:
        X_dig_pre = X_dig_raw
    elif FLAGS.split_ratio > 0:
        (X_dig_pre,
         X_dig_post) = readDFW.splitDisguiseData(X_dig_raw,
                                                 pre_ratio=FLAGS.split_ratio)
    else:
        X_dig_post = X_dig_raw

    # Construct ensemble of models
    ensemble = [
        siamese.SiameseNetwork(GlobalConstants.feature_res,
                               FLAGS.ensemble_basepath + str(i), 1e-1)
        for i in range(1, FLAGS.num_ensemble_models + 1)
    ]

    # Ready low-resolution model
    lowResModel = siamese.SmallRes(GlobalConstants.low_res + (3, ),
                                   GlobalConstants.feature_res,
                                   FLAGS.lowres_basemodel + str(FLAGS.lowRes),
                                   1e-1)

    # Prepare required noises
    desired_noises = FLAGS.noise.split(',')
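    # Instantiate one noise model per requested noise type, backed by the low-resolution model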
    ensembleNoise = [
        noise.get_relevant_noise(x)(model=lowResModel,
                                    sess=GlobalConstants.sess,
                                    feature_model=None) for x in desired_noises
    ]
Code Example #2
File: train.py Project: klekkala/deep_stuff
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)
    # create model
    print("=> creating model '{}'".format(args.arch))
    print("=> creating model '{}'".format(args.arch))
    if args.arch.startswith('siamese'):
        model = siamese.SiameseNetwork()
        print(model)
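    # Parallelize the model across available GPUs and move it onto CUDA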
    model = torch.nn.DataParallel(model).cuda()
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
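    # Per-channel mean/std used to normalize input images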
    normalize = transforms.Normalize(mean=[0.451, 0.390, 0.348],
                                     std=[0.357, 0.350, 0.347])

    siamese_train = dataset.SiameseNetworkDataset(
        imageFolderDataset=args.data,
        csvfile=args.traincsv,
        transform=transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(siamese_train,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    siamese_val = dataset.SiameseNetworkDataset(imageFolderDataset=args.data,
                                                csvfile=args.valcsv,
                                                transform=transforms.Compose([
                                                    transforms.Scale(256),
                                                    transforms.CenterCrop(224),
                                                    transforms.ToTensor(),
                                                    normalize,
                                                ]))
    val_loader = torch.utils.data.DataLoader(siamese_val,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = loss.PLoss()

    #optimizer = torch.optim.Adam(model.parameters(), args.lr,
    #momentum=args.momentum,
    #weight_decay=args.weight_decay)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best, args.arch.lower())
Code Example #3
	assert(0 <= FLAGS.split_ratio and FLAGS.split_ratio <= 1)
	assert(0 <= FLAGS.disparity_ratio and FLAGS.disparity_ratio <= 1)
	assert(0 <= FLAGS.eps and FLAGS.eps < 0.5)
	print("Noise that will be used for ALINK: %s" % (FLAGS.noise))

	# Set X_dig_post for finetuning second version of model
	if FLAGS.split_ratio == 1:
		X_dig_pre = X_dig_raw
	elif FLAGS.split_ratio > 0:
		(X_dig_pre, _)  = readDFW.splitDisguiseData(X_dig, pre_ratio=FLAGS.split_ratio)
		(_, X_dig_post) = readDFW.splitDisguiseData(X_dig_raw, pre_ratio=FLAGS.split_ratio)
	else:
		X_dig_post = X_dig_raw

	# Ready disguised face model
	disguisedFacesModel = siamese.SiameseNetwork(FEATURERES, FLAGS.disguised_basemodel, 0.1)

	# Prepare required noises
	desired_noises = FLAGS.noise.split(',')
	ensembleNoise = [noise.get_relevant_noise(x)(model=disguisedFacesModel, sess=sess, feature_model=conversionModel) for x in desired_noises]

	# Construct ensemble of models
	ensemble = [siamese.SiameseNetwork(FEATURERES, FLAGS.ensemble_basepath + str(i), 0.1) for i in range(1, FLAGS.num_ensemble_models+1)]
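	# Combine the model ensemble and the selected noise models into a bagging committee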
	bag = committee.Bagging(ensemble, ensembleNoise)

	if FLAGS.train_disguised_model:
		# Create generators for disguised model
		normGen = readDFW.getNormalGenerator(X_dig_pre, FLAGS.batch_size)
		normImpGen = readDFW.getNormalGenerator(X_imp, FLAGS.batch_size)
		impGenNorm  = readDFW.getImposterGenerator(X_dig_pre, X_imp, FLAGS.batch_size)
Code Example #4
File: main.py Project: rifkiaputri/Relation-Aligner
def main():
    # Load parameters
    args = params.args()
    
    # Load train, valid, and test data
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Loading dataset')
    train_dataset = dt.MyDataset(args.train_filename, args.mode)
    valid_dataset = dt.MyDataset(args.valid_filename, args.mode)
    test_dataset = dt.MyDataset(args.test_filename, args.mode)
    gold_dataset = dt.MyDataset(args.gold_filename, args.mode)
    print('train, valid, test num:', len(train_dataset), len(valid_dataset), len(test_dataset))
    
    # Load dataset to DataLoader
    train_loader = DataLoader(dataset=train_dataset, batch_size=args.BATCH_SIZE, shuffle=True)
    valid_loader = DataLoader(dataset=valid_dataset, batch_size=args.BATCH_SIZE, shuffle=False)
    test_loader = DataLoader(dataset=test_dataset, batch_size=args.BATCH_SIZE, shuffle=False)
    gold_loader = DataLoader(dataset=gold_dataset, batch_size=args.BATCH_SIZE, shuffle=False)
    
    # Initialize model
    model = siamese.SiameseNetwork(args)
    model.to(device)
    
    # Train model
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Start training')
    try:
        train(model, train_loader, valid_loader, args)
    except KeyboardInterrupt:
        print('\n' + '-' * 89)
        print('Exit from training early')
        
    # Save final model
    save(model, args.save_dir, args.model_filename, -1)
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Training finished')
    
    
    # Test model
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Start prediction')
    predict = test(test_loader, model, args)
        
    pred_filename = args.predict_dir + '/predict_result.tsv'
    with open(pred_filename, 'w') as f:
        for item in predict:
            f.write(item[0] + '\t' + item[1] + '\t' + str(item[2]) + '\t' + str(item[3]) + '\n')
    f.closed
    print('Successfully save prediction result to', pred_filename)
    
    with open(args.predict_dir + '/rel_embed_vector.tsv', 'w') as f:
        for item in predict:
            out1 = item[5].cpu().numpy().tolist()
            f.write('\t'.join(str(x) for x in out1))
            f.write('\n')
    f.closed
    
    with open(args.predict_dir + '/rel_embed_label.tsv', 'w') as f:
        for item in predict:
            f.write(item[1])
            f.write('\n')
    f.closed
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Prediction finished')
    
    
    # Gold Prediction
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Start gold prediction')
    predict = test(gold_loader, model, args)
        
    pred_filename = args.gold_dir + '/predict_result.tsv'
    with open(pred_filename, 'w') as f:
        for item in predict:
            f.write(item[0] + '\t' + item[1] + '\t' + str(item[2]) + '\t' + str(item[3]) + '\n')
    f.closed
    print('Successfully save prediction result to', pred_filename)
    
    with open(args.gold_dir + '/rel_embed_vector.tsv', 'w') as f:
        for item in predict:
            out1 = item[5].cpu().numpy().tolist()
            f.write('\t'.join(str(x) for x in out1))
            f.write('\n')
    f.closed
    
    with open(args.gold_dir + '/rel_embed_label.tsv', 'w') as f:
        for item in predict:
            f.write(item[1])
            f.write('\n')
    f.closed
    print('[' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '] Gold prediction finished')
Code Example #5
import numpy as np
import tensorflow as tf
import keras
from tqdm import tqdm

import siamese

# Don't hog GPU
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
keras.backend.set_session(sess)


if __name__ == "__main__":
	import sys
	if len(sys.argv) < 3:
		print("python " + sys.argv[0] + " modelName outputFilePath")
		exit()
	disguisedFacesModel = siamese.SiameseNetwork((2048,), sys.argv[1], 0.1)
	if disguisedFacesModel.maybeLoadFromMemory():
		print("Loaded model successfully!")
	else:
		print("Oops! Model not found")
		exit()
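	# Load precomputed per-image feature vectors (7,771 images expected)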
	features = np.load("processedData.npy")
	scores = []
	assert(features.shape[0] == 7771)
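	# Score each image's features against all images with the siamese model, one row of the similarity matrix at a time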
	for i in tqdm(range(len(features))):
		X_left, X_right = [], []
		for x in features:
			X_left.append(features[i])
			X_right.append(x)
		numbers = [ out[0] for out in disguisedFacesModel.predict([np.stack(X_left), np.stack(X_right)])]
		scores.append(numbers)