def compute_features(left_image, right_image, patch_height, patch_width, checkpoint):

    height, width = left_image.shape[:2]

    # pad images so the final feature map size is (height, width)
    auged_left_image = np.zeros([1, height+patch_height-1, width+patch_width-1, 1], dtype=np.float32)
    auged_right_image = np.zeros([1, height+patch_height-1, width+patch_width-1, 1], dtype=np.float32)
    row_start = (patch_height - 1) // 2
    col_start = (patch_width - 1) // 2
    auged_left_image[0, row_start: row_start+height, col_start: col_start+width] = left_image
    auged_right_image[0, row_start: row_start+height, col_start: col_start+width] = right_image

    # TF placeholder for graph input
    x = tf.placeholder(tf.float32, shape=[1, height+patch_height-1, width+patch_width-1, 1])  

    # Initialize model
    model = NET(x, input_patch_size = patch_height, batch_size=1)
    saver = tf.train.Saver(max_to_keep=10)

    features = model.features

    # compute features on both images
    with tf.Session(config=tf.ConfigProto(
                        log_device_placement=False, \
                        allow_soft_placement=True, \
                        gpu_options=tf.GPUOptions(allow_growth=True))) as sess:

        print "{}: restoring from {}...".format(datetime.now(), checkpoint)
        saver.restore(sess, checkpoint)

        print "{}: features computing...".format(datetime.now())
        '''
        # this is used when a whole image is too big to fit in GPU memory
        featureslul = sess.run(features, feed_dict = {x: auged_left_image[:, 0: height/2+patch_height-1, 0: width/2+patch_width-1]}) 
        featureslur = sess.run(features, feed_dict = {x: auged_left_image[:, 0: height/2+patch_height-1, width/2: width+patch_width-1]}) 
        featureslbl = sess.run(features, feed_dict = {x: auged_left_image[:, height/2: height+patch_height-1, 0: width/2+patch_width-1]}) 
        featureslbr = sess.run(features, feed_dict = {x: auged_left_image[:, height/2: height+patch_height-1, width/2: width+patch_width-1]}) 

        featuresrul = sess.run(features, feed_dict = {x: auged_right_image[:, 0: height/2+patch_height-1, 0: width/2+patch_width-1]}) 
        featuresrur = sess.run(features, feed_dict = {x: auged_right_image[:, 0: height/2+patch_height-1, width/2: width+patch_width-1]}) 
        featuresrbl = sess.run(features, feed_dict = {x: auged_right_image[:, height/2: height+patch_height-1, 0: width/2+patch_width-1]}) 
        featuresrbr = sess.run(features, feed_dict = {x: auged_right_image[:, height/2: height+patch_height-1, width/2: width+patch_width-1]}) 

        featuresl = np.concatenate((np.concatenate((featureslul, featureslur), axis=2), np.concatenate((featureslbl, featureslbr), axis=2)), axis=1)
        featuresr = np.concatenate((np.concatenate((featuresrul, featuresrur), axis=2), np.concatenate((featuresrbl, featuresrbr), axis=2)), axis=1)
        '''

        featuresl = sess.run(features, feed_dict = {x: auged_left_image}) 
        featuresr = sess.run(features, feed_dict = {x: auged_right_image}) 
        print featuresl.shape

        featuresl = np.squeeze(featuresl, axis=0)
        featuresr = np.squeeze(featuresr, axis=0) # (height, width, 64)
        print "{}: features computed done...".format(datetime.now())

    # reset the default graph so repeated calls do not accumulate ops
    tf.reset_default_graph()

    return featuresl, featuresr
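The padding above makes a "valid"-convolution feature extractor return exactly one feature vector per original pixel. A quick arithmetic check of that claim (the sizes below are illustrative, not from the source):

# a network whose effective receptive field is patch_height x patch_width
# shrinks a "valid" input by (patch_height - 1, patch_width - 1), so padding
# by exactly that amount restores an output of (height, width)
height, width = 375, 1242            # hypothetical image size
patch_height = patch_width = 37      # hypothetical patch size
out_h = (height + patch_height - 1) - (patch_height - 1)
out_w = (width + patch_width - 1) - (patch_width - 1)
assert (out_h, out_w) == (height, width)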
Example #2
def compute_features(left_image, right_image):
    # pad images so the final feature map size is (height, width)
    auged_left_image = np.zeros([1, height+patch_height-1, width+patch_width-1, 3], dtype=np.float32)
    auged_right_image = np.zeros([1, height+patch_height-1, width+patch_width-1, 3], dtype=np.float32)
    row_start = (patch_height - 1) // 2
    col_start = (patch_width - 1) // 2
    auged_left_image[0, row_start: row_start+height, col_start: col_start+width] = left_image
    auged_right_image[0, row_start: row_start+height, col_start: col_start+width] = right_image
    # quarter size
    # TF placeholder for graph input
    #x = tf.placeholder(tf.float32, shape=[1, height/2+patch_height-1, width/2+patch_width-1, 3])  
    x = tf.placeholder(tf.float32, shape=[1, height+patch_height-1, width+patch_width-1, 3])  

    # Initialize model
    model = NET(x, batch_size=1)
    saver = tf.train.Saver()

    # Link variable to model output
    features = model.features
    #assert features.shape == (1, height/2, width/2, 112)
    assert features.shape == (1, height, width, 112)

    # compute features on both images
    with tf.Session(config=tf.ConfigProto(log_device_placement=False, \
            allow_soft_placement=True)) as sess:
            #gpu_options=tf.GPUOptions(allow_growth=True))) as sess:

        restore_path = os.path.join(old_checkpoint_path, 'model_epoch%d.ckpt'%(restore_epoch))
        print "{}: restoring from {}...".format(datetime.now(), restore_path)
        saver.restore(sess, restore_path)

        print "{}: features computing...".format(datetime.now())
        '''
        featureslul = sess.run(features, feed_dict = {x: auged_left_image[:, 0: height/2+patch_height-1, 0: width/2+patch_width-1]}) 
        featureslur = sess.run(features, feed_dict = {x: auged_left_image[:, 0: height/2+patch_height-1, width/2: width+patch_width-1]}) 
        featureslbl = sess.run(features, feed_dict = {x: auged_left_image[:, height/2: height+patch_height-1, 0: width/2+patch_width-1]}) 
        featureslbr = sess.run(features, feed_dict = {x: auged_left_image[:, height/2: height+patch_height-1, width/2: width+patch_width-1]}) 

        featuresrul = sess.run(features, feed_dict = {x: auged_right_image[:, 0: height/2+patch_height-1, 0: width/2+patch_width-1]}) 
        featuresrur = sess.run(features, feed_dict = {x: auged_right_image[:, 0: height/2+patch_height-1, width/2: width+patch_width-1]}) 
        featuresrbl = sess.run(features, feed_dict = {x: auged_right_image[:, height/2: height+patch_height-1, 0: width/2+patch_width-1]}) 
        featuresrbr = sess.run(features, feed_dict = {x: auged_right_image[:, height/2: height+patch_height-1, width/2: width+patch_width-1]}) 

        featuresl = np.concatenate((np.concatenate((featureslul, featureslur), axis=2), np.concatenate((featureslbl, featureslbr), axis=2)), axis=1)
        featuresr = np.concatenate((np.concatenate((featuresrul, featuresrur), axis=2), np.concatenate((featuresrbl, featuresrbr), axis=2)), axis=1)
        '''

        featuresl = sess.run(features, feed_dict = {x: auged_left_image}) 
        featuresr = sess.run(features, feed_dict = {x: auged_right_image}) 

        featuresl = np.squeeze(featuresl, axis=0)
        featuresr = np.squeeze(featuresr, axis=0) # (height, width, 112)
        assert featuresl.shape == (height, width, 112)
        assert featuresr.shape == (height, width, 112)
        print "{}: features computed done...".format(datetime.now())
    tf.reset_default_graph()
    return featuresl, featuresr
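The commented-out block in both versions of compute_features tiles the padded image into four overlapping quadrants when a full image does not fit in GPU memory. A minimal NumPy sketch of the same split-and-stitch idea, independent of TensorFlow (the helper name and run_net callable are illustrative, not from the source):

import numpy as np

def features_by_quadrant(run_net, padded, height, width, ph, pw):
    # run_net: callable mapping a padded sub-image to its feature map.
    # Each quadrant slice keeps an extra (ph - 1, pw - 1) border so its
    # features align exactly with the corresponding image quadrant.
    h2, w2 = height // 2, width // 2
    ul = run_net(padded[:, 0: h2 + ph - 1,      0: w2 + pw - 1])
    ur = run_net(padded[:, 0: h2 + ph - 1,      w2: width + pw - 1])
    bl = run_net(padded[:, h2: height + ph - 1, 0: w2 + pw - 1])
    br = run_net(padded[:, h2: height + ph - 1, w2: width + pw - 1])
    top = np.concatenate((ul, ur), axis=2)      # stitch left/right halves
    bottom = np.concatenate((bl, br), axis=2)
    return np.concatenate((top, bottom), axis=1)  # stitch top/bottom halves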
def load_weights(model_path, manipulate_weights, i, device, noise_state_dict):

    model = NET()
    #print(model_path)
    model.to(device)

    state_dict = torch.load(model_path)
    if manipulate_weights:
        for layer in state_dict.keys():
            if layer.find("lin") != -1:
                state_dict[layer] += noise_state_dict[layer] * i

            if layer.find("conv") != -1:
                state_dict[layer] += noise_state_dict[layer] * i
                #manipulate_conv(state_dict[layer], i)
    model.load_state_dict(state_dict)
    return model.to(device=device, dtype=torch.double)
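load_weights expects noise_state_dict to mirror the model's own state dict so the scaled noise can be added layer by layer. A minimal sketch of how such a dict might be built (make_noise_state_dict is a hypothetical helper, not from the source):

import torch

def make_noise_state_dict(model, scale=1.0):
    # one noise tensor per entry, matching shape and dtype, so that
    # state_dict[layer] += noise_state_dict[layer] * i type-checks;
    # integer buffers (e.g. BatchNorm counters) get zeros instead
    return {name: torch.randn_like(t) * scale if t.is_floating_point()
            else torch.zeros_like(t)
            for name, t in model.state_dict().items()}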
Example #4
def test(args, shared_model, dataset, targets, log):
    start_time = time.time()
    log.info('Test time ' +
             time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() -
                                                      start_time)) + ', ' +
             'Start testing.')
    local_model = NET()
    local_model.load_state_dict(shared_model.state_dict())
    if args.gpu:
        local_model = local_model.cuda()

    correct_cnt = 0
    predictions = np.zeros([targets.shape[0]], dtype=np.int64)

    for idx in range(targets.shape[0]):
        data = dataset[idx]
        data = Variable(torch.from_numpy(data))
        if args.gpu:
            data = data.cuda()

        target = targets[idx]
        output = local_model(data)
        if args.gpu:
            output = output.cpu()
        predict_class = output.max(0)[1].data.numpy()[0]
        predictions[idx] = predict_class
        if target == predict_class:
            correct_cnt += 1
        # else:
        #     print(predict_class)

        # if (idx + 1) % 100 == 0:
        #     log.info('Test time ' + time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - start_time)) + ', ' + 'Accuracy: %d / %d\t%0.4f' % (correct_cnt, idx + 1, correct_cnt / (idx + 1)))

    log.info('Overall f1 score = %0.4f' %
             (f1_score(list(targets), list(predictions), average='weighted')))
    log.info('Overall accuracy = %0.2f%%' %
             (100. * correct_cnt / targets.shape[0]))
    return float(correct_cnt) / targets.shape[0]
Example #5
def main():
    train_transform = A.Compose([
        A.Resize(height=image_height, width=image_width),
        A.Rotate(limit=35, p=1.0),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.1),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ])

    val_transform = A.Compose([
        A.Resize(height=image_height, width=image_width),
        A.Normalize(
            mean=[0.0, 0.0, 0.0],
            std=[1.0, 1.0, 1.0],
            max_pixel_value=255.0,
        ),
        ToTensorV2(),
    ])

    model = NET(in_channels=3, out_channels=1).to(device)
    loss_fn = nn.BCEWithLogitsLoss()
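main() is truncated here. A minimal sketch of the training step such a setup usually continues with (the Adam optimizer, train_loader, and LEARNING_RATE below are assumptions, not from the source):

    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)  # assumed optimizer

    for data, target in train_loader:  # assumed to yield image/mask pairs
        data = data.to(device)
        target = target.float().unsqueeze(1).to(device)  # BCEWithLogitsLoss needs float targets

        optimizer.zero_grad()
        loss = loss_fn(model(data), target)  # raw logits in; no sigmoid needed
        loss.backward()
        optimizer.step()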
Example #6
def main():
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    ######################
    # directory preparation
    filewriter_path = args.tensorboard_dir
    checkpoint_path = args.checkpoint_dir

    test_mkdir(filewriter_path)
    test_mkdir(checkpoint_path)

    ######################
    # data preparation
    train_file = os.path.join(args.list_dir, "train.txt")
    val_file = os.path.join(args.list_dir, "val.txt")

    train_generator = ImageDataGenerator(train_file, shuffle=True)
    val_generator = ImageDataGenerator(val_file, shuffle=False)

    batch_size = args.batch_size
    train_batches_per_epoch = train_generator.data_size
    val_batches_per_epoch = val_generator.data_size

    ######################
    # model graph preparation
    patch_height = args.patch_size
    patch_width = args.patch_size
    batch_size = args.batch_size

    # TF placeholder for graph input
    leftx = tf.placeholder(tf.float32,
                           shape=[batch_size, patch_height, patch_width, 1])
    rightx_pos = tf.placeholder(
        tf.float32, shape=[batch_size, patch_height, patch_width, 1])
    rightx_neg = tf.placeholder(
        tf.float32, shape=[batch_size, patch_height, patch_width, 1])

    # Initialize model
    left_model = NET(leftx,
                     input_patch_size=patch_height,
                     batch_size=batch_size)
    right_model_pos = NET(rightx_pos,
                          input_patch_size=patch_height,
                          batch_size=batch_size)
    right_model_neg = NET(rightx_neg,
                          input_patch_size=patch_height,
                          batch_size=batch_size)

    featuresl = tf.squeeze(left_model.features, [1, 2])
    featuresr_pos = tf.squeeze(right_model_pos.features, [1, 2])
    featuresr_neg = tf.squeeze(right_model_neg.features, [1, 2])

    # Op for calculating cosine distance/dot product
    with tf.name_scope("correlation"):
        cosine_pos = tf.reduce_sum(tf.multiply(featuresl, featuresr_pos),
                                   axis=-1)
        cosine_neg = tf.reduce_sum(tf.multiply(featuresl, featuresr_neg),
                                   axis=-1)

    # Op for calculating the loss
    with tf.name_scope("hinge_loss"):
        margin = tf.ones(shape=[batch_size], dtype=tf.float32) * args.margin
        loss = tf.maximum(0.0, margin - cosine_pos + cosine_neg)
        loss = tf.reduce_mean(loss)

    # Train op
    with tf.name_scope("train"):
        var_list = tf.trainable_variables()
        for var in var_list:
            print "{}: {}".format(var.name, var.shape)
        # Get gradients of all trainable variables
        gradients = tf.gradients(loss, var_list)
        gradients = list(zip(gradients, var_list))

        # Create optimizer and apply gradient descent with momentum to the trainable variables
        optimizer = tf.train.MomentumOptimizer(args.learning_rate, args.beta)
        train_op = optimizer.apply_gradients(grads_and_vars=gradients)

    # summary Ops for tensorboard visualization
    with tf.name_scope("training_metric"):
        training_summary = []
        # Add loss to summary
        training_summary.append(tf.summary.scalar('hinge_loss', loss))

        # Merge all summaries together
        training_merged_summary = tf.summary.merge(training_summary)

    # validation loss
    with tf.name_scope("val_metric"):
        val_summary = []
        val_loss = tf.placeholder(tf.float32, [])

        # Add val loss to summary
        val_summary.append(tf.summary.scalar('val_hinge_loss', val_loss))
        val_merged_summary = tf.summary.merge(val_summary)

    # Initialize the FileWriter
    writer = tf.summary.FileWriter(filewriter_path)
    # Initialize a saver to store model checkpoints
    saver = tf.train.Saver(max_to_keep=10)

    ######################
    # DO training
    # Start Tensorflow session
    with tf.Session(config=tf.ConfigProto(
                        log_device_placement=False, \
                        allow_soft_placement=True, \
                        gpu_options=tf.GPUOptions(allow_growth=True))) as sess:

        # Initialize all variables
        sess.run(tf.global_variables_initializer())

        # resume from checkpoint or not
        if args.resume is None:
            # Add the model graph to TensorBoard before initial training
            writer.add_graph(sess.graph)
        else:
            saver.restore(sess, args.resume)

        print "training_batches_per_epoch: {}, val_batches_per_epoch: {}.".format(\
                train_batches_per_epoch, val_batches_per_epoch)
        print("{} Start training...".format(datetime.now()))
        print("{} Open Tensorboard at --logdir {}".format(
            datetime.now(), filewriter_path))

        # Loop training
        for epoch in range(args.start_epoch, args.end_epoch):
            print("{} Epoch number: {}".format(datetime.now(), epoch + 1))

            for batch in tqdm(range(train_batches_per_epoch)):
                # Get a batch of data
                batch_left, batch_right_pos, batch_right_neg = train_generator.next_batch(
                    batch_size)

                # And run the training op
                sess.run(train_op,
                         feed_dict={
                             leftx: batch_left,
                             rightx_pos: batch_right_pos,
                             rightx_neg: batch_right_neg
                         })

                # Generate summary with the current batch of data and write to file
                if (batch + 1) % args.print_freq == 0:
                    s = sess.run(training_merged_summary,
                                 feed_dict={
                                     leftx: batch_left,
                                     rightx_pos: batch_right_pos,
                                     rightx_neg: batch_right_neg
                                 })
                    writer.add_summary(s,
                                       epoch * train_batches_per_epoch + batch)

            if (epoch + 1) % args.save_freq == 0:
                print("{} Saving checkpoint of model...".format(
                    datetime.now()))
                # save checkpoint of the model
                checkpoint_name = os.path.join(
                    checkpoint_path, 'model_epoch' + str(epoch + 1) + '.ckpt')
                save_path = saver.save(sess, checkpoint_name)

            if (epoch + 1) % args.val_freq == 0:
                # Validate the model on the entire validation set
                print("{} Start validation".format(datetime.now()))
                val_ls = 0.
                for _ in tqdm(range(val_batches_per_epoch)):
                    batch_left, batch_right_pos, batch_right_neg = val_generator.next_batch(
                        batch_size)
                    result = sess.run(loss,
                                      feed_dict={
                                          leftx: batch_left,
                                          rightx_pos: batch_right_pos,
                                          rightx_neg: batch_right_neg
                                      })
                    val_ls += result

                val_ls = val_ls / (1. * val_batches_per_epoch)

                print 'validation loss: {}'.format(val_ls)
                s = sess.run(val_merged_summary,
                             feed_dict={val_loss: np.float32(val_ls)})
                writer.add_summary(s, train_batches_per_epoch * (epoch + 1))

            # Reset the file pointer of the image data generator
            val_generator.reset_pointer()
            train_generator.reset_pointer()
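The hinge loss above pushes the left feature to correlate more with the matching right patch than with the non-matching one by at least args.margin. A small NumPy illustration of the same expression:

import numpy as np

def hinge_loss(cos_pos, cos_neg, margin=0.2):
    # mean over the batch of max(0, margin - s_pos + s_neg)
    return np.maximum(0.0, margin - cos_pos + cos_neg).mean()

# a pair already separated by more than the margin contributes nothing
print(hinge_loss(np.array([0.9]), np.array([0.1])))   # 0.0
# a violating pair contributes margin - s_pos + s_neg
print(hinge_loss(np.array([0.3]), np.array([0.4])))   # 0.3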
Example #7
if not os.path.isdir(filewriter_path): 
    os.mkdir(filewriter_path)
if not os.path.isdir(checkpoint_path): 
    os.mkdir(checkpoint_path)
if not os.path.isdir(old_checkpoint_path): 
    os.mkdir(old_checkpoint_path)


# TF placeholder for graph input and output
# TODO: change this according to different settings
x = tf.placeholder(tf.float32, shape=[batch_size, height, width, 3])
y = tf.placeholder(tf.float32, shape=[batch_size, height, width, num_classes])
keep_prob = tf.placeholder(tf.float32)

# Initialize model
model = NET(x, height, width, keep_prob, train_layers, out_channels=num_classes, do_vbp=False, batch_size=batch_size)

# Link variable to model output
# NOTE: the model applies no softmax itself; an explicit softmax is taken below
pred_maps = model.conv10_1
# tf.Print is an identity op: its return value must be consumed for the log to fire
pred_maps = tf.Print(pred_maps, [tf.constant("pred_maps"), pred_maps])
softmax_maps = tf.nn.softmax(pred_maps, dim=-1)
softmax_maps = tf.Print(softmax_maps, [tf.constant("softmax_maps"), softmax_maps])

assert pred_maps.shape == y.shape
assert softmax_maps.shape == y.shape

# List of trainable variables of the layers we want to train
var_list = [v for v in tf.trainable_variables() if v.name.split('/')[0] in train_layers]
print "train_var num:{} list: ".format(len(var_list))
for v in var_list:
Example #8
dataset_path = "../output/data/dataset_test.npy"
target_path = "../output/data/target_test.npy"

if __name__ == '__main__':
    args = parser.parse_args()
    torch.set_default_tensor_type('torch.DoubleTensor')
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    if not os.path.exists(args.model_dir):
        os.mkdir(args.model_dir)
    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)

    if args.train:
        model = NET()
        if args.model_load:
            try:
                saved_state = torch.load(
                    os.path.join(args.model_dir, 'best_model.dat'))
                model.load_state_dict(saved_state)
            except Exception:
                print('Cannot load existing model from file!')
        if args.gpu:
            model = model.cuda()

        loss_func = nn.CrossEntropyLoss()
        dataset = torch.from_numpy(np.load("../output/data/dataset_train.npy"))
        targets = torch.from_numpy(
            np.int64(np.load("../output/data/target_train.npy")))
        dataset_test = np.load(dataset_path)
Example #9
#print(model)

loss_func = nn.CrossEntropyLoss()
torch.autograd.set_detect_anomaly(True)
#SW = SW(logdir="{}{}{}".format(project_path,"/runs/", exp_name))  # log_dir="./logdir/" + exp_name)
# print("save data? {}".format(save_data))

# training loop
if train:
    #sw = SW(logdir="{}{}_perturbed_{}".format(project_path, "/runs/", exp_name)) # log_dir="./logdir/" + exp_name)

    for repetition in tqdm(range(args.num_experiment_reps)):
        model = NET(  # conv_block_in_out,#last element must match first element.
            # lin_layer_in_out,
            # max_pool_size=pool_size,#first is first layer out
            # conv_filter_size = 3,
            # num_class = num_out_class
        ).to(device)
        #        for lr_ in lr:
        #           for batch_size_ in [args.batch_size]:

        # train_loader = DataLoader(
        #     dataset=train_set,
        #     batch_size=batch_size_,
        #     shuffle=shuffle_or_not,
        #     pin_memory=True,#important
        #     num_workers=0)
        #
        # test_loader = DataLoader(
        #     dataset=test_set,
        #     batch_size=batch_size_,
Example #10
def main():
    global opt, best_mae_error

    #dataset = CIFData(*opt.dataroot)
    dataset = h5(*opt.dataroot) 

    collate_fn = collate_pool

    train_loader, val_loader, test_loader = get_train_val_test_loader(
        dataset=dataset, collate_fn=collate_fn, batch_size=opt.batch_size,
        train_size=opt.train_size, num_workers=opt.workers,
        val_size=opt.val_size, test_size=opt.test_size, pin_memory=opt.cuda,
        return_test=True)

    # obtain target value normalizer from a random sample of the dataset
    sample_data_list = [dataset[i] for i in
                        sample(range(len(dataset)), 1000)]
    input, sample_target, _ = collate_pool(sample_data_list)
    input_1 = input[0]
    normalizer = Normalizer(sample_target)
    s = Normalizer(input_1)

    model = NET()

    if torch.cuda.is_available():
        print('cuda is ok')
        model = model.cuda()

    criterion = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), opt.lr,
                            momentum=opt.momentum,
                            weight_decay=opt.weight_decay)
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint['epoch']
            best_mae_error = checkpoint['best_mae_error']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            normalizer.load_state_dict(checkpoint['normalizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch']))   

        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))
    scheduler = MultiStepLR(optimizer, milestones=opt.lr_milestones,
                            gamma=0.1)
    for epoch in range(opt.start_epoch, opt.epochs):
        train(train_loader, model, criterion, optimizer, epoch, normalizer, s)

        mae_error = validate(val_loader, model, criterion, normalizer, s)

        if mae_error != mae_error:  # NaN check: NaN never equals itself
            print('Exit due to NaN')
            sys.exit(1)
        is_best = mae_error < best_mae_error
        best_mae_error = min(mae_error, best_mae_error)
        
        save_checkpoint({ 
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_mae_error': best_mae_error,
            'optimizer': optimizer.state_dict(),
            'normalizer': normalizer.state_dict(),
            'opt': vars(opt)
        }, is_best)
    # test the best model
    print('---------Evaluate Model on Test Set---------------')
    best_checkpoint = torch.load('model_best.pth.tar')
    model.load_state_dict(best_checkpoint['state_dict'])
    validate(test_loader, model, criterion, normalizer, s, test=True)
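Normalizer is used above as a mean/std scaler with a checkpointable state. A minimal sketch consistent with that interface (assumed here, in the style of the well-known CGCNN Normalizer):

class Normalizer(object):
    """Normalize a target tensor and restore it, with (de)serializable state."""
    def __init__(self, tensor):
        self.mean = torch.mean(tensor)
        self.std = torch.std(tensor)

    def norm(self, tensor):
        return (tensor - self.mean) / self.std

    def denorm(self, normed_tensor):
        return normed_tensor * self.std + self.mean

    def state_dict(self):
        return {'mean': self.mean, 'std': self.std}

    def load_state_dict(self, state_dict):
        self.mean = state_dict['mean']
        self.std = state_dict['std']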
Example #11
    def __getitem__(self, idx):  # fetch a single sample from the dataset
        image = Image.open(self.file_path[idx]).convert('RGB')
        image = self.transform(image)
        return image, torch.tensor(self.labels[idx])


test_loader = torch.utils.data.DataLoader(
    MyDataset(test_inputs, test_labels, transform_test),  # wrap into a Dataset torch can consume
    batch_size=BATCH_SIZE,  # then batch it
    shuffle=False,
    num_workers=2,
    pin_memory=True)

# instantiate the network
net = NET()

# define the loss function and the optimizer
loss_func = nn.CrossEntropyLoss()  # cross-entropy loss (applies softmax internally)
optimizer = optim.SGD(
    net.parameters(),  # iterable of the parameters to optimize
    lr=LR,
    momentum=0.9,
    weight_decay=5e-4
)  # mini-batch momentum SGD; weight_decay adds L2 regularization (weight decay)

# start training
if __name__ == '__main__':
    with open('model_params.txt', 'w') as f4:  # dump model parameters to model_params.txt
        for parameters in net.parameters():  # iterator over the module's parameters
            f4.write(str(parameters))
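The snippet above starts inside the dataset class. A minimal sketch of the full class consistent with the __getitem__ shown (the __init__ and __len__ bodies are assumptions):

from torch.utils.data import Dataset

class MyDataset(Dataset):
    def __init__(self, file_path, labels, transform):
        self.file_path = file_path  # list of image paths
        self.labels = labels        # matching list of class labels
        self.transform = transform

    def __len__(self):
        return len(self.file_path)

    def __getitem__(self, idx):  # fetch a single sample from the dataset
        image = Image.open(self.file_path[idx]).convert('RGB')
        image = self.transform(image)
        return image, torch.tensor(self.labels[idx])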
Example #12
if not os.path.isdir(checkpoint_path):
    os.mkdir(checkpoint_path)
if not os.path.isdir(old_checkpoint_path):
    os.mkdir(old_checkpoint_path)

# TF placeholder for graph input and output
leftx = tf.placeholder(tf.float32,
                       shape=[batch_size * 10, patch_height, patch_width,
                              3])  # [batch_size*10, sh, sw, 3]
rightx = tf.placeholder(tf.float32,
                        shape=[batch_size * 10, patch_height, patch_width,
                               3])  # [batch_size*10, sh, sw, 3]
y = tf.placeholder(tf.float32, shape=[batch_size * 10])  # [batch_size*10, 1]

# Initialize model
# note padding differs
left_model = NET(leftx, batch_size=batch_size)
right_model = NET(rightx, batch_size=batch_size)

# Link variable to model output
featuresl = left_model.features
featuresr = right_model.features
assert featuresl.shape == (batch_size * 10, 1, 1, 112)
assert featuresr.shape == (batch_size * 10, 1, 1, 112)

# List of trainable variables of the layers we want to train
print "variables"
var_list = [
    v for v in tf.trainable_variables() if v.name.split('/')[0] in train_layers
]
for var in var_list:
    print "{} shape: {}".format(var.name, var.shape)