def __init__(self, model_path):
        """Load all face-recognition model artifacts found under *model_path*.

        Raises:
            FileNotFoundError: for the first expected artifact that is missing.
        """
        self._model_path = model_path

        # Short key -> filename of each on-disk artifact.
        artifact_names = {
            'shape_predictor': 'shape_predictor_68_face_landmarks.dat',
            'face_template': 'face_template.npy',
            'mean': 'mean.npy',
            'stddev': 'stddev.npy',
            'cnn_weights': 'weights_cnn.h5',
            'tpe_weights': 'weights_tpe.h5',
        }
        self._model_files = {
            key: os.path.join(model_path, fname)
            for key, fname in artifact_names.items()
        }

        # Fail fast before loading anything if an artifact is absent.
        for artifact_path in self._model_files.values():
            if not os.path.exists(artifact_path):
                raise FileNotFoundError(artifact_path)

        self._mean = np.load(self._model_files['mean'])
        self._stddev = np.load(self._model_files['stddev'])
        self._fd = FaceDetector()
        self._fa = FaceAligner(self._model_files['shape_predictor'],
                               self._model_files['face_template'])

        # ~1 == -2: Bottleneck exposes the penultimate CNN layer as features.
        cnn_model = build_cnn(227, 266)
        cnn_model.load_weights(self._model_files['cnn_weights'])
        self._cnn = Bottleneck(cnn_model, ~1)

        _, tpe_model = build_tpe(256, 256)
        tpe_model.load_weights(self._model_files['tpe_weights'])
        self._tpe = tpe_model
Exemple #2
0
 def initialize_model(self):
     """(Re)load the normalization stats, detector, aligner, CNN and TPE
     components from the paths recorded in ``self._model_files``."""
     files = self._model_files
     self._mean = np.load(files['mean'])
     self._stddev = np.load(files['stddev'])
     self._fd = FaceDetector()
     self._fa = FaceAligner(files['shape_predictor'], files['face_template'])
     # ~1 == -2: expose the penultimate CNN layer as the embedding output.
     cnn_model = build_cnn(227, 266)
     cnn_model.load_weights(files['cnn_weights'])
     self._cnn = Bottleneck(cnn_model, ~1)
     _, tpe_model = build_tpe(256, 256)
     tpe_model.load_weights(files['tpe_weights'])
     self._tpe = tpe_model
def ff_labels_softmax():
    """Feed the MNIST training set through a pretrained CNN and return the
    predicted class (argmax of the softmax output) for every training sample.

    Returns:
        1-D float array of length ``len(X_train)`` holding class indices.
    """
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(
    )  # loads the mnist dataset

    input_var = T.tensor4('inputs')
    print("Building model and compiling functions...")
    network = build_cnn(input_var)
    # Deterministic pass (dropout disabled) compiled as a pure feed-forward fn.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    ff_fn = theano.function([input_var], test_prediction)

    # Restore the pretrained parameters saved by the training script.
    with np.load('model_dropout_test.npz') as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    lasagne.layers.set_all_param_values(network, param_values)

    train_input = X_train
    test_prediction = ff_fn(train_input)

    # Vectorized argmax over the class axis.  This replaces a per-row Python
    # loop hard-coded to 50000 samples, so any dataset size now works; cast to
    # float64 to keep the original np.empty([...]) output dtype.
    ff_output = np.argmax(test_prediction, axis=1).astype(np.float64)
    return ff_output
Exemple #4
0
def test():
    """Evaluate the trained attention-OCR model on ``flags.test_txt``.

    Prints plain exact-match string accuracy and, when ``flags.lex_txt`` is
    given, lexicon-constrained accuracy (each prediction is snapped to the
    lexicon entry with minimal edit distance before comparison).
    """
    tf.reset_default_graph()
    infer_graph = tf.Graph()

    with infer_graph.as_default():
        encoder_outputs_t, inputs_t = build_cnn(False, flags.batch_size,
                                                flags.height, flags.width,
                                                flags.channels)
        _, _, pred_ids, logits_t, decoder_inputs_t, \
        _, _, keep_prob_t = build_network(encoder_outputs_t,
                                          True,
                                          flags.batch_size,
                                          flags.decoder_length,
                                          flags.tgt_vocab_size,
                                          flags.attn_num_hidden,
                                          flags.encoder_length,
                                          flags.max_gradient_norm
                                          )
        infer_saver = tf.train.Saver()
    infer_sess = tf.Session(graph=infer_graph)
    model_file = tf.train.latest_checkpoint(flags.load_dir)
    infer_saver.restore(infer_sess, model_file)

    with open(flags.test_txt) as f:
        test = [line.rstrip() for line in f]
    test_len = len(test)
    test = np.array(test)
    data_test = Dataset(test)
    if flags.lex_txt is not None:
        with open(flags.lex_txt) as f:
            lex = [line.rstrip().lower() for line in f]

    ti = test_len // flags.batch_size   # number of full batches
    rest = test_len % flags.batch_size  # samples left in the final partial batch

    # All-zero (GO) decoder inputs used for every inference batch.  Bug fix:
    # the original created this only inside the full-batch loop, so the
    # leftover-batch section crashed with NameError whenever ti == 0.
    testing_decoder_inputs = np.zeros(
        (flags.decoder_length, flags.batch_size), dtype=float)

    gt = []
    predict = []

    def _decode(q, index):
        """Convert column *index* of the predicted-id matrix *q* into text,
        dropping the -1 padding and everything at/after '<EOS>'."""
        chars = []
        for c in np.array(q).T[index]:
            if c != -1:
                character = tools.idx_to_word[c]
                if character != '<EOS>':
                    chars.append(character)
        return ''.join(chars)

    def _run_batch(batch):
        """Load the images of one batch, run inference, return (texts, ids)."""
        path = []
        texts = []
        for line in batch:
            path.append(line.split(' ', 1)[0])
            texts.append(line.split(' ', 1)[1])
        images = load_img(path, flags.height, flags.width)
        feed_dict_t = {
            inputs_t: images[:, :, :, np.newaxis],
            decoder_inputs_t: testing_decoder_inputs,
            keep_prob_t: 1
        }
        return texts, infer_sess.run(pred_ids, feed_dict=feed_dict_t)

    for _ in range(ti):
        texts, q = _run_batch(data_test.next_batch(flags.batch_size))
        for j in range(flags.batch_size):
            gt.append(texts[j])
            predict.append(_decode(q, j))

    # Leftover samples that do not fill a whole batch.  Guarded: the original
    # ran this block unconditionally, wasting a session run when rest == 0.
    if rest:
        texts, q = _run_batch(data_test.next_batch(flags.batch_size))
        for k in range(rest):
            gt.append(texts[k])
            predict.append(_decode(q, k))

    # Plain exact-match accuracy.
    correct = float(0)
    cnt = 0
    for l in range(len(gt)):
        cnt = cnt + 1
        if gt[l] == predict[l]:
            correct = correct + 1
    acc_s = correct / cnt

    if flags.lex_txt is not None:
        correct_l = float(0)
        cnt = 0
        for l in range(len(gt)):
            cnt = cnt + 1
            lexicon = lex[l].split(',')
            # Snap the prediction to the closest lexicon entry.
            dt = editdistance.eval(predict[l], lexicon[0])
            pl = lexicon[0]
            for ll in lexicon[1:]:
                dt_temp = editdistance.eval(predict[l], ll)
                if dt_temp < dt:
                    dt = dt_temp
                    pl = ll
            if pl == gt[l]:
                correct_l = correct_l + 1
        acc_l = correct_l / cnt

    print('accuracy: ', acc_s)
    if flags.lex_txt is not None:
        print('accuracy with lexicon: ', acc_l)
Exemple #5
0
def build_network(is_training, batch_size, height, width, channels,
                  decoder_length, tgt_vocab_size, num_units, beam_width,
                  encoder_length, max_gradient_norm, embedding_size,
                  initial_learning_rate):
    """Build the CNN -> RNN encoder -> attention decoder graph.

    Returns:
        (train_op, loss, sample_ids, logits, inputs, decoder_inputs,
        decoder_lengths, target_labels, keep_prob).  When ``is_training`` is
        falsy, ``train_op`` and ``loss`` are no-ops.
    """
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    encoder_inputs, inputs = build_cnn(is_training, batch_size, height, width,
                                       channels)
    encoder_outputs, encoder_state = build_encoder(encoder_inputs,
                                                   encoder_length, num_units,
                                                   keep_prob)

    decoder_inputs = tf.placeholder(tf.int32,
                                    shape=(decoder_length, batch_size),
                                    name="decoder_inputs")
    # NOTE(review): "decoer_length" is a typo but is this tensor's graph name;
    # kept as-is so name-based graph/checkpoint lookups keep working.
    decoder_lengths = tf.placeholder(tf.int32,
                                     shape=(batch_size),
                                     name="decoer_length")
    target_labels = tf.placeholder(tf.int32,
                                   shape=(batch_size, decoder_length),
                                   name="target_label")

    logits, sample_ids = build_decoder(tgt_vocab_size, decoder_inputs,
                                       num_units, encoder_outputs, is_training,
                                       beam_width, decoder_lengths,
                                       encoder_state, batch_size,
                                       encoder_length)

    if is_training:
        # Per sparse_softmax_cross_entropy_with_logits, labels hold class
        # indices with shape [batch_size, decoder_length] (not one-hot).
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=target_labels, logits=logits)
        loss = tf.reduce_sum(loss)

        # Globally clip gradients before applying them.
        params = tf.trainable_variables()
        gradients = tf.gradients(loss, params)
        clipped_gradients, _ = tf.clip_by_global_norm(gradients,
                                                      max_gradient_norm)

        optimizer = tf.train.GradientDescentOptimizer(initial_learning_rate)
        # Run batch-norm (and similar) update ops alongside each train step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            # Bug fix: the original statement ended with a stray trailing
            # comma, which silently made train_op a 1-tuple containing the op
            # instead of the apply_gradients op itself.
            train_op = optimizer.apply_gradients(zip(clipped_gradients,
                                                     params))
    else:
        loss = tf.no_op()
        train_op = tf.no_op()

    return train_op, loss, sample_ids, logits, inputs, decoder_inputs, decoder_lengths, target_labels, keep_prob
import numpy as np
import lasagne
from cnn import build_cnn
from cnn import iterate_minibatches
from cnn import load_dataset
import theano
import theano.tensor as T

# Evaluation-only script: builds the CNN graph and compiles a Theano function
# reporting cross-entropy loss and accuracy for labelled batches.

input_var = T.tensor4('inputs')
target_var = T.ivector('targets')

print("Building model and compiling functions...")
network = build_cnn(input_var)

# Deterministic forward pass: dropout/noise layers are disabled for eval.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
test_loss = test_loss.mean()

# Fraction of samples whose argmax prediction matches the integer target.
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                  dtype=theano.config.floatX)



val_fn = theano.function([input_var, target_var], [test_loss, test_acc], allow_input_downcast=True)

# Create neural network model (depending on first command line parameter)

with np.load('model.npz') as f:
Exemple #7
0
def train():
    """Train the attention-OCR model configured by the module-level `flags`.

    Maintains two separate graphs/sessions: one for training and one for
    inference.  Every `flags.eval_step` steps a checkpoint from the training
    session is restored into the inference session and exact-match string
    accuracy is measured on the current batch; the learning rate is divided
    by 5 when accuracy has not improved for a full epoch's worth of evals.
    """
    # One sample line per training example: "<image_path> <label_text>".
    with open(flags.train_txt) as f:
        sample = [line.rstrip() for line in f]
    sample = np.array(sample)
    iteration = len(sample) // flags.batch_size
    data = Dataset(sample)

    tf.reset_default_graph()
    train_graph = tf.Graph()
    infer_graph = tf.Graph()

    # Training graph (dropout active, loss + optimizer built).
    with train_graph.as_default():
        encoder_outputs, inputs = build_cnn(True, flags.batch_size,
                                            flags.height, flags.width,
                                            flags.channels)
        train_op, loss, sample_ids, logits, decoder_inputs, \
        target_labels, learning_rate,keep_prob = build_network(encoder_outputs,
                                                     False,
                                                     flags.batch_size,
                                                     flags.decoder_length,
                                                     flags.tgt_vocab_size,
                                                     flags.attn_num_hidden,
                                                     flags.encoder_length,
                                                     flags.max_gradient_norm
                                                     )
        initializer = tf.global_variables_initializer()
        train_saver = tf.train.Saver()

    train_sess = tf.Session(graph=train_graph)
    train_sess.run(initializer)

    # Separate inference graph; weights flow in via checkpoint restore only.
    with infer_graph.as_default():
        encoder_outputs_t, inputs_t = build_cnn(False, flags.batch_size,
                                                flags.height, flags.width,
                                                flags.channels)
        _, _, pred_ids, logits_t, decoder_inputs_t, \
        _, _ ,keep_prob_t= build_network(encoder_outputs_t,
                             True,
                             flags.batch_size,
                             flags.decoder_length,
                             flags.tgt_vocab_size,
                             flags.attn_num_hidden,
                             flags.encoder_length,
                             flags.max_gradient_norm
                             )
        infer_saver = tf.train.Saver()
    infer_sess = tf.Session(graph=infer_graph)

    # Training

    start = time.time()
    acc_log = 0   # best eval accuracy seen so far
    count = 0     # evals since the last improvement (drives LR decay)
    lr = flags.learning_rate
    for h in range(flags.epoch):
        for i in range(iteration):
            batch_train = data.next_batch(flags.batch_size)
            path = []
            texts = []
            for line in batch_train:
                path.append(line.split(' ')[0])
                texts.append(line.split(' ')[1])

            images = load_img(path, flags.height, flags.width)

            # Teacher forcing: decoder inputs are the targets shifted right
            # by one with an all-zero GO symbol prepended, then transposed to
            # (decoder_length, batch_size) as the placeholder expects.
            training_target_labels = get_label(texts, flags.decoder_length)
            training_decoder_inputs = np.delete(training_target_labels,
                                                -1,
                                                axis=1)
            training_decoder_inputs = np.c_[
                np.zeros(training_decoder_inputs.shape[0]),
                training_decoder_inputs].T

            feed_dict = {
                inputs: images[:, :, :, np.newaxis],
                decoder_inputs: training_decoder_inputs,
                target_labels: training_target_labels,
                learning_rate: lr,
                keep_prob: 0.5
            }
            _, loss_value = train_sess.run([train_op, loss],
                                           feed_dict=feed_dict)

            step = float(i + 1)
            if step % flags.display_step == 0:

                now = time.time()
                print(step, now - start, loss_value)
                start = now

            # Periodic eval: checkpoint the train session and restore the
            # latest checkpoint into the inference session.
            if step % flags.eval_step == 0:
                train_saver.save(train_sess, flags.save_dir)
                model_file = tf.train.latest_checkpoint(
                    flags.save_dir.rsplit('/', 1)[0])
                infer_saver.restore(infer_sess, model_file)

                gt = []
                predict = []

                # Re-uses the current training batch as the eval batch.
                images = load_img(path, flags.height, flags.width)

                # All-zero GO inputs: inference feeds back its own outputs.
                testing_decoder_inputs = np.zeros(
                    (flags.decoder_length, flags.batch_size), dtype=float)
                feed_dict_t = {
                    inputs_t: images[:, :, :, np.newaxis],
                    decoder_inputs_t: testing_decoder_inputs,
                    keep_prob_t: 1
                }
                q = infer_sess.run(pred_ids, feed_dict=feed_dict_t)

                # Decode ids to text, dropping -1 padding and '<EOS>'.
                for j in range(flags.batch_size):
                    gt.append(texts[j])
                    ans = np.array(q).T[j]
                    pd = []
                    for c in ans:
                        if c != -1:
                            character = tools.idx_to_word[c]
                            if character != '<EOS>':
                                pd.append(character)
                    predict.append(''.join(pd))

                correct = float(0)
                cnt = 0
                acc_s = 0

                for l in range(len(gt)):
                    cnt = cnt + 1
                    if gt[l] == predict[l]:
                        correct = correct + 1
                count = count + 1
                acc_s = correct / cnt
                if acc_s > acc_log:
                    acc_log = acc_s
                    count = 0
                # No improvement for ~one epoch of evals: decay the LR.
                if count == (iteration // flags.eval_step):
                    lr = lr / 5
Exemple #8
0
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]


X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(
)  #loads the mnist dataset
#X_train=np.concatenate((X_train,X_val,X_test),axis=0)[:65000,:]
#y_train=np.concatenate((y_train,y_val,y_test),axis=0)[:65000]
input_var = T.tensor4('inputs')
# fmatrix targets: labels are float matrices (binary cross-entropy below).
target_var = T.fmatrix('targets')
print("Building model and compiling functions...")

network = build_cnn(input_var)
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.binary_crossentropy(prediction, target_var)

# Regularizer on the batch-averaged prediction distribution: pred1.mean() is
# (negative) entropy of the normalized mean prediction, so adding it with
# weight lambd discourages the model from collapsing onto few outputs.
lambd = 10
pred = prediction.sum(axis=0)
pred = pred / pred.sum()
pred1 = pred * T.log(pred)
loss = loss.mean() + lambd * pred1.mean()
#............................Dropout++ code
lambda_1 = 1e-6
lambda_3 = 0.
def penalty(x):
    return lambda_1 * (T.sum(T.log(T.mul(x, 1. - x))) +
def main():
    """Train a Triplet Probabilistic Embedding (TPE) on top of frozen CNN
    bottleneck features, checkpointing weights whenever dev-set EER improves.
    """
    with open('data/meta.json', 'r') as f:
        meta = json.load(f)

    # ~1 == -2: Bottleneck exposes the penultimate CNN layer as features.
    cnn = build_cnn(227, meta['n_subjects'])
    cnn.load_weights('data/weights/weights.best.h5')
    bottleneck = Bottleneck(cnn, ~1)

    # Train and test splits are merged; the dev split is held out for EER.
    train_x, train_y = np.load('data/train_x.npy'), np.load('data/train_y.npy')
    test_x, test_y = np.load('data/test_x.npy'), np.load('data/test_y.npy')

    train_x = np.vstack([train_x, test_x])
    train_y = np.hstack([train_y, test_y])

    dev_x = np.load('data/dev_x.npy')
    dev_protocol = np.load('data/dev_protocol.npy')

    train_emb = bottleneck.predict(train_x, batch_size=256)
    dev_emb = bottleneck.predict(dev_x, batch_size=256)

    # Raw images no longer needed; free the memory before TPE training.
    del train_x

    # PCA projection initializes the TPE transform.
    pca = PCA(N_OUT)
    pca.fit(train_emb)
    W_pca = pca.components_

    np.save('data/w_pca', W_pca)

    tpe, tpe_pred = build_tpe(N_IN, N_OUT, W_pca.T)

    train_y = np.array(train_y)
    subjects = list(set(train_y))

    # Every ordered same-subject pair becomes an (anchor, positive) example.
    anchors_inds = []
    positives_inds = []
    labels = []

    for subj in subjects:
        mask = train_y == subj
        inds = np.where(mask)[0]
        for a, p in itertools.permutations(inds, 2):
            anchors_inds.append(a)
            positives_inds.append(p)
            labels.append(subj)

    anchors = train_emb[anchors_inds]
    positives = train_emb[positives_inds]
    n_anchors = len(anchors_inds)

    inds = np.arange(n_anchors)

    def get_batch(hard=False):
        """Sample a triplet batch; with hard=True pick the most-similar
        different-subject embedding as the negative (hard mining)."""
        batch_inds = np.random.choice(inds, size=BIG_BATCH_SIZE, replace=False)

        # Cosine-like similarity matrix of current TPE embeddings.
        train_emb2 = tpe_pred.predict(train_emb, batch_size=1024)
        scores = train_emb2 @ train_emb2.T
        negative_inds = []

        for i in batch_inds:
            label = labels[i]
            mask = train_y == label
            if hard:
                # Mask hides same-subject columns, so argmax picks the
                # closest impostor.  NOTE(review): indexing the score matrix
                # row by `label` (a subject id) rather than by the anchor's
                # sample index looks suspicious — verify against the TPE
                # reference implementation.
                negative_inds.append(
                    np.ma.array(scores[label], mask=mask).argmax())
            else:
                negative_inds.append(
                    np.random.choice(np.where(np.logical_not(mask))[0],
                                     size=1)[0])

        return anchors[batch_inds], positives[batch_inds], train_emb[
            negative_inds]

    def test():
        """Return equal-error-rate of the current TPE on the dev protocol."""
        dev_emb2 = tpe_pred.predict(dev_emb)
        tsc, isc = get_scores(dev_emb2, dev_protocol)
        eer, _, _, _ = calc_metrics(tsc, isc)
        return eer

    # Dummy targets: the triplet loss ignores y, Keras just requires one.
    z = np.zeros((BIG_BATCH_SIZE, ))

    min_eer = float('inf')

    for e in range(NB_EPOCH):
        print('epoch: {}'.format(e))
        # Switch from random to hard negatives after the cold-start epochs.
        a, p, n = get_batch(e > COLD_START)
        tpe.fit([a, p, n], z, batch_size=BATCH_SIZE, nb_epoch=1)
        eer = test()
        print('EER: {:.2f}'.format(eer * 100))
        if eer < min_eer:
            min_eer = eer
            tpe.save_weights('data/weights/weights.tpe.mineer.h5')
Exemple #10
0
import numpy as np
import matplotlib.pyplot as plt

from cnn import build_cnn
from bottleneck import Bottleneck

from identification import get_scores, calc_metrics

WEIGHTS_DIR = './data/weights/'

BATCH_SIZE = 32

dev_x = np.load('data/dev_x.npy')

model = build_cnn(227, 266)

weights_to_load = WEIGHTS_DIR + 'weights.best.h5'
model.load_weights(weights_to_load)

# ~1 == -2: take the penultimate CNN layer as the embedding.
bottleneck = Bottleneck(model, ~1)
dev_y = bottleneck.predict(dev_x, batch_size=BATCH_SIZE)

# Score all protocol pairs and report the equal error rate.
protocol = np.load('data/dev_protocol.npy')
tsc, isc = get_scores(dev_y, protocol)
eer, fars, frrs, dists = calc_metrics(tsc, isc)

print('EER: {}'.format(eer * 100))

# Overlay true-match (green) and impostor (red) score histograms.
# NOTE(review): hist(normed=...) is removed in modern matplotlib (use
# density=) — confirm the pinned matplotlib version before upgrading.
plt.figure()
plt.hist(tsc, 20, color='g', normed=True, alpha=0.3)
plt.hist(isc, 20, color='r', normed=True, alpha=0.3)
Exemple #11
0
import itertools

import numpy as np

from cnn import build_cnn
from tpe import build_tpe
from bottleneck import Bottleneck
from identification import get_scores, calc_metrics

from sklearn.decomposition import PCA

# Bottleneck embedding dimensions for the TPE transform.
n_in = 256
n_out = 256

# ~1 == -2: Bottleneck exposes the penultimate CNN layer as features.
cnn = build_cnn(227, 266)
cnn.load_weights('data/weights/weights.best.h5')
bottleneck = Bottleneck(cnn, ~1)

# Merge train and test splits; dev is kept separate for evaluation.
train_x, train_y = np.load('./data/train_x.npy'), np.load('./data/train_y.npy')
test_x, test_y = np.load('./data/test_x.npy'), np.load('./data/test_y.npy')

train_x = np.vstack([train_x, test_x])
train_y = np.hstack([train_y, test_y])

dev_x = np.load('./data/dev_x.npy')
dev_protocol = np.load('./data/dev_protocol.npy')

train_emb = bottleneck.predict(train_x, batch_size=256)
dev_emb = bottleneck.predict(dev_x, batch_size=256)

# Raw images no longer needed once embeddings exist; free the memory.
del train_x
Exemple #12
0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 10 16:02:37 2019

@author: tyler
"""

from cnn import build_cnn
# Fixed small configuration used to exercise graph construction once.
is_training = True
batch_size = 7
height = 32
width = 256
channels = 1

# Smoke test: build the CNN and capture its (outputs, inputs) tensors.
o, i = build_cnn(is_training, batch_size, height, width, channels)
def get_heatmap(image, stride=8, func=None):
    """Slide a 64x64 window over *image* with the given stride, classify each
    crop with a pretrained Lasagne CNN, and return the 3-class score map
    resized back up by *stride* (nearest-neighbour).

    NOTE(review): Python 2 code (print statements, integer division on the
    jumpi/jumpj lines); *func* is accepted but unused — the network is
    rebuilt and reloaded on every call.
    """
    print "vafan?????"
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')

    network = build_cnn(input_var)

    prediction = lasagne.layers.get_output(network)

    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.

    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.01, momentum=0.9)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates, allow_input_downcast=True)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc], allow_input_downcast=True)

    # get predictions
    get_preds = theano.function([input_var], test_prediction, allow_input_downcast=True)

    # Restore pretrained parameters from the saved training checkpoint.
    with np.load('../cnn/model.npz') as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        lasagne.layers.set_all_param_values(network, param_values)

    print "vafan??", image.shape, np.max(image)


    # NOTE(review): axes 1 and 2 are named width and height respectively —
    # confirm against the caller's image layout.
    width = image.shape[1]
    height = image.shape[2]
    part_width = part_height = 64
    #heat_map = np.zeros((3, width, height))

    # Number of window positions along each axis (integer division in Py2).
    jumpi = (width-part_width)/stride
    jumpj = (height-part_height)/stride

    heat_map = np.zeros((3, jumpi, jumpj))

    #heat_map = np.zeros((3, jumpi, jumpj))

    print width, height, heat_map.shape, jumpi, jumpj

    #image_parts = np.zeros((  jumpi*jumpj, 3, part_width, part_height))
    # Single-crop batch buffer reused for every window position.
    new_im = np.zeros((1, 3, part_width, part_height))

    ii = 0
    for i in range(jumpi):
        for j in range(jumpj):
            #image_parts[ii, :, :, :] = image[0:3, stride*i:stride*i + part_width, stride*j:stride*j + part_height]

            #testt = time.time()
            new_im[0, :, :, :] = image[0:3, stride*i:stride*i + part_width, stride*j:stride*j + part_height]

            res = get_preds(new_im)[0]
            #heat_map[0, stride * (ii % jumpi):stride * (ii % jumpi) + stride, stride * int(ii / jumpj):stride * int(ii / jumpj) + stride] = res[0]
            #heat_map[1, stride * (ii % jumpi):stride * (ii % jumpi) + stride, stride * int(ii / jumpj):stride * int(ii / jumpj) + stride] = res[1]
            #heat_map[2, stride * (ii % jumpi):stride * (ii % jumpi) + stride, stride * int(ii / jumpj):stride * int(ii / jumpj) + stride] = res[2]

            # One class score per channel at this window position.
            heat_map[0, i, j] = res[0]
            heat_map[1, i, j] = res[1]
            heat_map[2, i, j] = res[2]

            #print ("one loop took: ", time.time()-testt)

            ii += 1

    #res = func(image_parts[:, 0:3, :, :])
    #ii = 0
    #for r in res:
        #print r
    #    heat_map[0, stride * (ii % jumpi):stride * (ii % jumpi) + stride, stride * int(ii / jumpj):stride * int(ii / jumpj) + stride] = r[0]
    #    heat_map[1, stride * (ii % jumpi):stride * (ii % jumpi) + stride, stride * int(ii / jumpj):stride * int(ii / jumpj) + stride] = r[1]
    #    heat_map[2, stride * (ii % jumpi):stride * (ii % jumpi) + stride, stride * int(ii / jumpj):stride * int(ii / jumpj) + stride] = r[2]
        # print(ii)
    #    ii += 1

    # NOTE(review): scipy.misc.imresize was removed from modern SciPy — this
    # requires an old scipy (+PIL); confirm the pinned environment.
    return scipy.misc.imresize(np.transpose(heat_map, axes=(2, 1, 0)), float(stride), interp='nearest')
    #return heat_map
Exemple #14
0
# One-hot encode test labels with the already-fitted encoder `oh`.
test_y = oh.transform(test_y.reshape(-1, 1)).todense()

print('n_train: {}'.format(n_train))
print('n_test: {}'.format(n_test))
print('n_subjects: {}'.format(n_subjects))

# Persist the subject count so downstream scripts can rebuild the CNN head.
with open('data/meta.json', 'w') as f:
    json.dump({'n_subjects': n_subjects}, f)

# Keep only the checkpoint with the best validation accuracy.
mc1 = ModelCheckpoint(WEIGHTS_DIR + 'weights.best.h5',
                      monitor='val_accuracy',
                      verbose=0,
                      save_best_only=True,
                      mode='max')

model = build_cnn(227, n_subjects)
model.summary()

# Resume from the best previous checkpoint when one exists.
weights_to_load = WEIGHTS_DIR + 'weights.best.h5'

if os.path.exists(weights_to_load):
    model.load_weights(weights_to_load)

try:
    if AUGMENTATION:
        data_gen = ImageDataGenerator(rotation_range=20,
                                      width_shift_range=0.2,
                                      height_shift_range=0.2,
                                      zoom_range=0.1,
                                      horizontal_flip=True)
import matplotlib.pyplot as plt
import numpy as np

from bottleneck import Bottleneck
from cnn import build_cnn
from identification import get_scores, calc_metrics

WEIGHTS_DIR = 'data/weights/'
BATCH_SIZE = 32

dev_x = np.load('data/dev_x.npy')

# NOTE(review): `json` is used here but not among the visible imports of this
# snippet — verify the full file imports it.
with open('data/meta.json', 'r') as f:
    meta = json.load(f)

model = build_cnn(227, meta['n_subjects'])

weights_to_load = WEIGHTS_DIR + 'weights.best.h5'
model.load_weights(weights_to_load)

# ~1 == -2: take the penultimate CNN layer as the embedding.
bottleneck = Bottleneck(model, ~1)
dev_y = bottleneck.predict(dev_x, batch_size=BATCH_SIZE)

# Score all protocol pairs and report the equal error rate.
protocol = np.load('data/dev_protocol.npy')
tsc, isc = get_scores(dev_y, protocol)
eer, fars, frrs, dists = calc_metrics(tsc, isc)

print('EER: {}'.format(eer * 100))

# True-match score histogram (impostor histogram may follow this chunk).
# NOTE(review): hist(normed=...) is removed in modern matplotlib (density=).
plt.figure()
plt.hist(tsc, 20, color='g', normed=True, alpha=0.3)