Example #1
            print('epoch %d  time: %f loss %0.5f r_loss %0.5f tr_loss %0.5f' %
                  (epoch, duration, avg_loss /
                   (total_batch), avg_r / total_batch, tr_val))

            train_embedding = model1.eval(feed_dict={
                anchor: X_train,
                dropout_f: 1.0
            })
            test_embedding = model1.eval(feed_dict={
                anchor: X_test,
                dropout_f: 1.0
            })
            val_embedding = model1.eval(feed_dict={
                anchor: X_val,
                dropout_f: 1.0
            })
            accuracy = evaluate_test_embedding(train_embedding, y_train,
                                               test_embedding, y_test)
            val_accuracy = evaluate_test_embedding(train_embedding, y_train,
                                                   val_embedding, y_val)

            print('Accuracy given NN approach %0.2f' % (100 * accuracy))
            print('Val Accuracy given NN approach %0.2f' %
                  (100 * val_accuracy))

            last_vals[(epoch // 100) % patience_window] = val_accuracy
            if last_vals.count(last_vals[0]) == len(last_vals):
                early_stopping = True
        """
        if early_stopping:
            print('Stopping early!')
            break
        """
Example #2

        #print('epoch %d loss %0.2f' %(epoch,avg_loss/total_batch))
        duration = time.time() - start_time
        print('epoch %d  time: %f loss %0.5f r_loss %0.5f c_loss %0.5f acc %0.2f' %
              (epoch, duration, avg_loss / (total_batch), avg_r / total_batch,
               avg_c / total_batch, avg_acc / total_batch))
    y = np.reshape(tr_y,(tr_y.shape[0],1))
    predict=distance.eval(feed_dict={images_L:tr_pairs[:,0],images_R:tr_pairs[:,1],labels:y,dropout_f:1.0})
    tr_acc = compute_accuracy(predict,y)
    print('Accuracy training set %0.2f' % (100 * tr_acc))

    # Test model: reshape the test labels before the eval so the test pairs are
    # fed with test labels rather than the leftover training labels from above.
    y = np.reshape(te_y,(te_y.shape[0],1))
    predict=distance.eval(feed_dict={images_L:te_pairs[:,0],images_R:te_pairs[:,1],labels:y,dropout_f:1.0})
    te_acc = compute_accuracy(predict,y)
    print('Accuracy test set %0.2f' % (100 * te_acc))

    train_embedding=model1.eval(feed_dict={images_L:X_train,dropout_f:1.0})

    for coord, label in zip(train_embedding, y_train):
        f1.write(' '.join([str(a) for a in coord]) + "\n")
        f2.write(str(label) + "\n")

    test_embedding = model1.eval(feed_dict={images_L:X_test,dropout_f:1.0})

    for coord, label in zip(test_embedding, y_test):
        f1_t.write(' '.join([str(a) for a in coord]) + "\n")
        f2_t.write(str(label) + "\n")

    accuracy = evaluate_test_embedding(train_embedding, y_train, test_embedding, y_test)
    print('Accuracy given NN approach %0.2f' %(100*accuracy))
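
evaluate_test_embedding is called throughout these examples but is not shown on this page. A minimal sketch, assuming it scores the test embeddings by 1-nearest-neighbour classification against the training embeddings (the name and call sites suggest this; the actual implementation may differ):

import numpy as np

def evaluate_test_embedding(train_embedding, y_train, test_embedding, y_test):
    # Assumed helper: 1-NN accuracy of the test embeddings against the
    # training embeddings under Euclidean distance.
    correct = 0
    for emb, label in zip(test_embedding, y_test):
        nearest = np.argmin(np.linalg.norm(train_embedding - emb, axis=1))
        if y_train[nearest] == label:
            correct += 1
    return correct / float(len(y_test))
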
Example #3
def test_model(dataset):
    tf.reset_default_graph()
    ucr_dataset = UCRDataset("../../../ucr_data/" + dataset) 

    X_train = ucr_dataset.Xtrain
    y_train = ucr_dataset.Ytrain 
    X_val = ucr_dataset.Xtest[:2]
    y_val = np.expand_dims(ucr_dataset.Ytest[:2], 1)
    X_test = ucr_dataset.Xtest[2:]
    y_test = np.expand_dims(ucr_dataset.Ytest[2:], 1)

    labels = np.unique(y_train)
    r=4
    digit_indices = [np.where(y_train == i)[0] for i in labels]
    tr_trips = create_triplets(X_train, digit_indices, labels)
    tr_trip_idxs = create_triplet_idxs(X_train, digit_indices, labels)

    digit_indices = [np.where(y_val == i)[0] for i in labels]
    val_trips = create_triplets(X_val, digit_indices, labels)
    val_trip_idxs = create_triplet_idxs(X_val, digit_indices, labels)

    digit_indices = [np.where(y_test == i)[0] for i in labels]
    te_trips = create_triplets(X_test, digit_indices, labels)

    # Epoch interval to evaluate early stopping
    es_epochs = 50

    #p = np.random.permutation(len(tr_trips))
    #tr_trips = tr_trips[p]
    #tr_trip_idxs = tr_trip_idxs[p]

    #X_train = normalize_rows(X_train)
    #X_test = normalize_rows(X_test)

    ts_length = len(X_train[0])
    batch_size = 24
    global_step = tf.Variable(0,trainable=False)
    starter_learning_rate = 0.001
    learning_rate = tf.train.exponential_decay(starter_learning_rate,global_step,10,0.1,staircase=True)
    # create training+test positive and negative pairs
    anchor = tf.placeholder(tf.float32,shape=([None,ts_length]),name='L')
    same = tf.placeholder(tf.float32,shape=([None,ts_length]),name='R')
    different = tf.placeholder(tf.float32,shape=([None,ts_length]),name='R')
    labels = tf.placeholder(tf.float32,shape=([None,1]),name='gt')
    dropout_f = tf.placeholder("float")
    with tf.variable_scope("siamese") as scope:
        model1= build_model_mlp(anchor,dropout_f, ts_length)
        scope.reuse_variables()
        model2 = build_model_mlp(same,dropout_f, ts_length)
        scope.reuse_variables()
        model3 = build_model_mlp(different,dropout_f, ts_length)
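    # All three towers share weights: scope.reuse_variables() makes model2 and
    # model3 reuse model1's parameters, so a single embedding network is
    # applied to the anchor, positive and negative inputs.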


    distance  = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(model1,model2),2),1,keep_dims=True))
    loss = triplet_loss(model1, model2, model3) + regularizer(model1, model2, model3)
    regularization = regularizer(model1, model2, model3)


    t_vars = tf.trainable_variables()
    d_vars  = [var for var in t_vars if 'l' in var.name]
    batch = tf.Variable(0)
    optimizer = tf.train.AdamOptimizer(learning_rate = 0.00010).minimize(loss)

    f1 = open('X_output.txt', 'w')
    f2 = open('X_labels.txt', 'w')
    f1_t = open('X_output_test.txt', 'w')
    f2_t = open('X_labels_test.txt', 'w')
    best_val_acc = 0
    early_stopping = False 
    best_epoch = 0
    with tf.Session() as sess:
        #sess.run(init)
        tf.global_variables_initializer().run()
        # Training cycle
        for epoch in range(10000):
            #if early_stopping:
            #    break 
            avg_loss = 0.
            avg_r = 0.
            total_batch = int(np.ceil(tr_trips.shape[0]/float(batch_size)))
            anchor_embedding = model1.eval(feed_dict={anchor:tr_trips[:,0],dropout_f:1.0})
            same_embedding = model1.eval(feed_dict={anchor:tr_trips[:,1],dropout_f:1.0})
            different_embedding = model1.eval(feed_dict={anchor:tr_trips[:,2],dropout_f:1.0})
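            # These per-epoch embeddings appear intended for the hard-triplet
            # mining call below, which is commented out, so hard_trips simply
            # aliases tr_trips.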
            #hard_trips = get_hardest_triplets(tr_trips, anchor_embedding, same_embedding, different_embedding, tr_trips.shape[0]/2)
            hard_trips = tr_trips
            start_time = time.time()
            # Loop over all batches
            for i in range(total_batch):
                s  = i * batch_size
                e = (i+1) *batch_size
                # Fit training using batch data
                ainput1, ainput2, ainput3 = next_batch(s, e, tr_trips)
                input1,input2,input3 =next_batch_from_idx(s,e,tr_trip_idxs,X_train)
                # pdb.set_trace()  # debugging breakpoint left in; disabled so the loop can run

                _, loss_value, predict, r_loss = sess.run(
                    [optimizer, loss, distance, regularization],
                    feed_dict={anchor: input1, same: input2,
                               different: input3, dropout_f: 1.0})
                if math.isnan(loss_value):
                    pdb.set_trace()
                avg_loss += loss_value
                avg_r += r_loss
            duration = time.time() - start_time


            if epoch % 500 == 0:
                train_embedding=model1.eval(feed_dict={anchor:X_train,dropout_f:1.0})
                test_embedding = model1.eval(feed_dict={anchor:X_test,dropout_f:1.0})
                val_embedding = model1.eval(feed_dict={anchor:X_val,dropout_f:1.0})

                anchor_embedding=model1.eval(feed_dict={anchor:tr_trips[:,0],dropout_f:1.0})
                same_embedding=model1.eval(feed_dict={anchor:tr_trips[:,1],dropout_f:1.0})
                diff_embedding=model1.eval(feed_dict={anchor:tr_trips[:,2],dropout_f:1.0})
                accuracy = evaluate_test_embedding(train_embedding, y_train, test_embedding, y_test)
                other_acc = compute_accuracy(hard_trips)
                te_other_acc = compute_accuracy(te_trips)
                print('epoch %d loss %0.2f' %(epoch,avg_loss/total_batch))
                print('Accuracy given NN approach %0.2f' %(100*accuracy))
                #accuracy = compute_accuracy(val_trips)
                #print 'Validation Accuracy: ', accuracy

            # Early stopping
            if epoch % es_epochs == 0:
                train_embedding=model1.eval(feed_dict={anchor:X_train,dropout_f:1.0})
                val_embedding = model1.eval(feed_dict={anchor:X_val,dropout_f:1.0})
                accuracy = evaluate_test_embedding(train_embedding, y_train, val_embedding, y_val)
                #accuracy = compute_accuracy(val_trips)
                if accuracy > best_val_acc:
                    best_val_acc = accuracy
                    best_epoch = epoch 
                    if best_val_acc == 1.0:
                        early_stopping = True
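                # The corresponding break near the top of the loop is commented
                # out, so this flag only records that validation accuracy
                # reached 1.0; training still runs all 10000 epochs.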




        predict_same=distance.eval(feed_dict={anchor:tr_trips[:,0],same:tr_trips[:,1],dropout_f:1.0})
        predict_diff=distance.eval(feed_dict={anchor:tr_trips[:,0],same:tr_trips[:,2],dropout_f:1.0})
        tr_acc = float(len(np.where(predict_same-predict_diff < 0)[0]))/len(predict_same)
        print('Training accuracy: ', tr_acc)

        

        train_embedding=model1.eval(feed_dict={anchor:X_train,dropout_f:1.0})
        test_embedding = model1.eval(feed_dict={anchor:X_test,dropout_f:1.0})
        accuracy = evaluate_test_embedding(train_embedding, y_train, test_embedding, y_test)
        print('Accuracy given NN approach %0.2f' %(100*accuracy))
        print('Best Epoch: ', best_epoch)

        for coord, label in zip(train_embedding, y_train):
            f1.write(' '.join([str(a) for a in coord]) + "\n")
            f2.write(str(label) + "\n")

        for coord, label in zip(test_embedding, y_test):
            f1_t.write(' '.join([str(a) for a in coord]) + "\n")
            f2_t.write(str(label) + "\n")
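
triplet_loss and regularizer are also not defined on this page. A plausible sketch of the triplet term, assuming the standard margin formulation over the three shared-weight towers (the margin value and the exact regulariser used in the source are unknown):

import tensorflow as tf

def triplet_loss(anchor_out, same_out, diff_out, margin=1.0):
    # Assumed standard triplet margin loss: the anchor should be closer to the
    # positive embedding than to the negative one by at least `margin`.
    pos_dist = tf.reduce_sum(tf.square(anchor_out - same_out), 1)
    neg_dist = tf.reduce_sum(tf.square(anchor_out - diff_out), 1)
    return tf.reduce_mean(tf.maximum(pos_dist - neg_dist + margin, 0.0))
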
  y_train = np.array(y_train)
  y_test = np.array(y_test)

  X_train = np.expand_dims(X_train, 3)
  X_test = np.expand_dims(X_test, 3)

  N = X_train.shape[0]
  Ntest = X_test.shape[0]
  D = X_train.shape[1]
  D_ts = X_train.shape[2]
  img_shape = X_train.shape[1:]
  learning_rate = 1e-6
  labels = np.unique(y_train)
  digit_indices = [np.where(y_train == i)[0] for i in labels]
  tr_pair_idxs, tr_y = create_pair_idxs(X_train, digit_indices, labels)

  adam = Adam(lr=0.000001)

  print(img_shape)
  t_model = get_tower_cnn_model(img_shape)
  s_model = siamese_model(img_shape, t_model)
  s_model.compile( optimizer=adam, loss=contrastive_loss)
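  # t_model is the shared embedding tower; siamese_model presumably wraps two
  # copies of it into a pair network trained with the contrastive loss, while
  # t_model.predict() below embeds individual series directly.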
  #s_model.fit([tr_pairs[:,0,:,:], tr_pairs[:,1,:,:]], tr_y, epochs=100, batch_size=50, validation_split=.05)
  n_batches_per_epoch = tr_pair_idxs.shape[0] // batch_size // 200
  s_model.fit_generator(gen_batch(X_train, tr_pair_idxs, tr_y, batch_size),
                        steps_per_epoch=n_batches_per_epoch, epochs=1)

  train_embedding = t_model.predict(X_train)
  test_embedding = t_model.predict(X_test)
  print(evaluate_test_embedding(train_embedding, y_train, test_embedding, y_test))
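
contrastive_loss is used both as the Keras loss above and on the raw distance tensor in the last example. A minimal sketch, assuming the usual Hadsell-style formulation in which a label of 1 marks a similar pair (the actual label convention, margin and implementation in the source may differ):

import tensorflow as tf

def contrastive_loss(y_true, distance, margin=1.0):
    # Assumed form: similar pairs (y_true == 1) are pulled together, dissimilar
    # pairs are pushed at least `margin` apart.
    similar_term = y_true * tf.square(distance)
    dissimilar_term = (1.0 - y_true) * tf.square(tf.maximum(margin - distance, 0.0))
    return tf.reduce_mean(similar_term + dissimilar_term) / 2.0
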

Example #5
def run_dae_stackedconv_model(dataset, embedding_size_pctg, kernel_size_pctg,
                              n_filters, pool_size_pctg):
    if dataset in UCR_DATASETS:
        UCR_DATA_DIR = os.path.expanduser(
            '~/Documents/MEng/time_series/ucr_data/')
        ucr_dataset = UCRDataset(UCR_DATA_DIR + dataset)
        X_train = ucr_dataset.Xtrain
        y_train = ucr_dataset.Ytrain

        X_val = ucr_dataset.Xtest[:2]
        y_val = ucr_dataset.Ytest[:2]
        X_test = ucr_dataset.Xtest[2:]
        y_test = ucr_dataset.Ytest[2:]
        N = X_train.shape[0]
        Ntest = X_test.shape[0]
        D = 1  # Number of variables represented in the time series
        D_ts = X_train.shape[1]
        X_train = np.expand_dims(X_train, 2)
        X_test = np.expand_dims(X_test, 2)

        n = max([
            np.max([v.shape[0] for v in X_train]),
            np.max([v.shape[0] for v in X_test])
        ])
        stride_size = STRIDE_SIZE
        if n % stride_size != 0:
            n += (stride_size - n % stride_size)

        X_train = standardize_ts_lengths_1(X_train, n)
        X_test = standardize_ts_lengths_1(X_test, n)

    else:
        dataset_list = cv_splits_for_dataset(dataset)
        X_train = dataset_list[0].X_train
        y_train = dataset_list[0].y_train
        X_test = dataset_list[0].X_test
        y_test = dataset_list[0].y_test

        n = max([
            np.max([v.shape[0] for v in X_train]),
            np.max([v.shape[0] for v in X_test])
        ])
        if n % STRIDE_SIZE != 0:
            n += (STRIDE_SIZE - n % STRIDE_SIZE)

        X_train = standardize_ts_lengths_1(X_train, n)
        X_test = standardize_ts_lengths_1(X_test, n)
        N = X_train.shape[0]
        Ntest = X_test.shape[0]
        D = X_train.shape[1]
        D_ts = X_train.shape[2]

    #X_train = normalize_rows(X_train) - np.mean(normalize_rows(X_train), axis=0)
    #X_test = normalize_rows(X_test) - np.mean(normalize_rows(X_test), axis=0)

    all_X = np.concatenate((X_train, X_test))
    all_y = np.concatenate((y_train, y_test))

    n_classes = len(np.unique(y_train))
    #X_train = np.expand_dims(X_train, 3)
    #X_test = np.expand_dims(X_test, 3)

    N = X_train.shape[0]
    Ntest = X_test.shape[0]
    D = X_train.shape[1]
    img_shape = X_train.shape[1:]
    b_size = 10
    kernel_size = int(kernel_size_pctg * D)
    pool_size = max(int(pool_size_pctg * D), 2)
    #embedding_size = len(np.unique(all_y))
    embedding_size = max(embedding_size_pctg, 2)
    #embedding_size = int(embedding_size_pctg*D)
    #embedding_size = 4
    n_filters = len(np.unique(all_y))
    #n_filters = 20
    print('Pool Size: ', pool_size)
    print('Kernel Size: ', kernel_size)
    print('Embedding Size: ', embedding_size)

    model, encoder, filters = dae_stackedconv_model(img_shape, kernel_size,
                                                    embedding_size, n_filters,
                                                    pool_size)
    model.summary()
    adam = Adam(lr=0.0001)
    filepath = "weights-improvement-{epoch:02d}.hdf5"
    checkpointer = ModelCheckpoint(filepath,
                                   verbose=1,
                                   save_weights_only=True,
                                   period=700)
    model.compile(optimizer=adam, loss='mean_absolute_error')
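    # The autoencoder is trained to reconstruct its input on the concatenation
    # of train and test series (all_X as both input and target); time.clock()
    # brackets the fit so the training time can be returned at the end.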
    train_start = time.clock()
    model.fit(all_X,
              all_X,
              epochs=N_EPOCHS,
              batch_size=b_size,
              verbose=VERBOSE_VAL,
              callbacks=[checkpointer])
    train_finish = time.clock()

    weights = filters.layers[-1].get_weights()[0]
    #plot_filters(n_filters, weights)
    test_embedding = encoder.predict(X_test)
    train_embedding = encoder.predict(X_train)

    inf_start = time.clock()
    all_embedding = encoder.predict(all_X)
    inf_finish = time.clock()

    test_reconstruct = model.predict(X_test)
    train_reconstruct = model.predict(X_train)

    test_kmeans = KMeans(n_clusters=n_classes,
                         random_state=0).fit(test_embedding).labels_
    train_kmeans = KMeans(n_clusters=n_classes,
                          random_state=0).fit(train_embedding).labels_
    all_kmeans = KMeans(n_clusters=n_classes,
                        random_state=0).fit(all_embedding).labels_
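    # The learned embeddings (and, further down, the raw series) are clustered
    # with KMeans using k = number of classes and compared against the true
    # labels; judging by the variable names, eval_clustering presumably reports
    # a Rand index and NMI.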

    #debug_plots(train_embedding, y_train, test_embedding, y_test, img_name='train_vs_test.png')
    #debug_plot(all_embedding, all_y)
    all_X = np.reshape(all_X,
                       (all_X.shape[0], all_X.shape[1] * all_X.shape[2]))
    orig_X_kmeans = KMeans(n_clusters=n_classes,
                           random_state=0).fit(np.squeeze(all_X)).labels_

    print(evaluate_test_embedding(train_embedding, y_train, test_embedding,
                                  y_test))
    print('All X KMeans: ')
    all_rand_ind, all_nmi = eval_clustering(all_kmeans, all_y)
    print('Test KMeans: ')
    test_rand_ind = eval_clustering(test_kmeans, y_test)
    print('Train KMeans: ')
    train_rand_ind, train_nmi = eval_clustering(train_kmeans, y_train)

    print('\n\nOriginal X KMeans: ')
    orig_X_ri, orig_X_nmi = eval_clustering(orig_X_kmeans, all_y)

    D = pairwise_distances(all_embedding, metric='euclidean')
    #M, C = kmedoids.kMedoids(D, embedding_size)
    #new_labels = np.zeros(all_y.shape)
    #for i in range(len(C)):
    #  elems = C[i]
    #  for elem in elems:
    #    new_labels[elem] = i

    # print '\nK Medoids: '
    #eval_clustering(new_labels, all_y)
    return all_rand_ind, train_finish - train_start, inf_finish - inf_start
Example #6
def test_model(pool_pctg, layer_size_1, layer_size_2):
    tf.reset_default_graph()

    X_train, y_train, X_test, y_test = loadEEG()
    X_val = X_test[:2]
    y_val = y_test[:2]
    X_test = X_test[2:]
    y_test = y_test[2:]

    labels = np.array([0, 1, 2, 3, 4, 5])
    digit_indices = [np.where(y_train == i)[0] for i in labels]
    tr_trip_idxs = create_triplet_idxs(X_train, digit_indices, labels)
    digit_indices = [np.where(y_test == i)[0] for i in labels]
    te_trip_idxs = create_triplet_idxs(X_test, digit_indices, labels)
    print('There are ', len(tr_trip_idxs), ' training examples!')
    #p = np.random.permutation(len(tr_trip_idxs))
    #tr_trip_idxs = tr_trip_idxs[p]

    # Initializing the variables
    # the data, shuffled and split between train and test sets
    #"""
    X_train = normalize_rows(X_train)
    X_test = normalize_rows(X_test)
    #"""
    D = X_train.shape[1]
    ts_length = X_train.shape[2]
    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = 0.001
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step,
                                               10,
                                               0.1,
                                               staircase=True)
    pool_width = pool_pctg * ts_length

    # create training+test positive and negative pairs
    anchor = tf.placeholder(tf.float32, shape=([None, D, ts_length]), name='L')
    same = tf.placeholder(tf.float32, shape=([None, D, ts_length]), name='R')
    different = tf.placeholder(tf.float32,
                               shape=([None, D, ts_length]),
                               name='R')
    labels = tf.placeholder(tf.float32, shape=([None]), name='gt')

    dropout_f = tf.placeholder("float")
    bn_train = tf.placeholder(tf.bool)

    with tf.variable_scope("siamese") as scope:
        model1, filters = build_conv_net(anchor, bn_train, dropout_f,
                                         ts_length, embedding_size, pool_width,
                                         layer_size_1, layer_size_2)
        scope.reuse_variables()
        model2, _ = build_conv_net(same, bn_train, dropout_f, ts_length,
                                   embedding_size, pool_width, layer_size_1,
                                   layer_size_2)
        scope.reuse_variables()
        model3, _ = build_conv_net(different, bn_train, dropout_f, ts_length,
                                   embedding_size, pool_width, layer_size_1,
                                   layer_size_2)

    distance = tf.sqrt(
        tf.reduce_sum(tf.pow(tf.subtract(model1, model2), 2),
                      1,
                      keep_dims=True))
    loss = triplet_loss(model1, model2,
                        model3)  #+ regularizer(model1, model2, model3)
    #loss = new_new_loss(model1, model2, model3) + regularizer(model1, model2, model3)

    debug_val = debug_loss(model1, model2, model3)
    regularization = regularizer(model1, model2, model3)
    tr_loss = triplet_loss(model1, model2, model3)

    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'l' in var.name]
    batch = tf.Variable(0)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)

    f1 = open('X_output.txt', 'w')
    f2 = open('X_labels.txt', 'w')
    f1_t = open('X_output_test.txt', 'w')
    f2_t = open('X_labels_test.txt', 'w')
    patience_window = 10
    early_stopping = False
    last_vals = [0 for i in range(patience_window)]
    skippable_batches = []
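    # Batches whose triplet loss drops below 0.001 are recorded in
    # skippable_batches and skipped in later epochs, so time is only spent on
    # triplets that still violate the margin.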

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # Training cycle
        for epoch in range(400):
            avg_loss = 0.
            avg_r = 0.
            total_batch = int(
                np.ceil(tr_trip_idxs.shape[0] / float(batch_size)))
            start_time = time.time()
            # Loop over all batches
            loss_values = []
            avg_tr = 0.
            for i in range(total_batch):
                if i in skippable_batches:
                    continue
                s = i * batch_size
                e = (i + 1) * batch_size
                # Fit training using batch data
                input1, input2, input3 = next_batch_from_idx(
                    s, e, tr_trip_idxs, X_train)
                #anchor_embedding=model1.eval(feed_dict={anchor:input1,dropout_f:1.0})
                #same_embedding=model1.eval(feed_dict={anchor:input2,dropout_f:1.0})
                #diff_embedding=model1.eval(feed_dict={anchor:input3,dropout_f:1.0})
                _, loss_value, predict, r_loss, tr_val, d = sess.run(
                    [
                        optimizer, loss, distance, regularization, tr_loss,
                        debug_val
                    ],
                    feed_dict={
                        anchor: input1,
                        same: input2,
                        different: input3,
                        dropout_f: 1.0
                    })
                print(loss_value)
                if loss_value < .001:
                    skippable_batches.append(i)
                    if i % 30 == 0:
                        train_embedding = model1.eval(feed_dict={
                            anchor: X_train,
                            dropout_f: 1.0
                        })
                        test_embedding = model1.eval(feed_dict={
                            anchor: X_test,
                            dropout_f: 1.0
                        })
                        #val_embedding = model1.eval(feed_dict={anchor:X_val,dropout_f:1.0})
                        accuracy = evaluate_test_embedding(
                            train_embedding, y_train, test_embedding, y_test)
                        print('ACCURACY: ', accuracy, ' EPOCH: ', epoch)

                #pdb.set_trace()
                if math.isnan(loss_value):
                    pdb.set_trace()
                avg_loss += loss_value
                loss_values.append(loss_value)
                avg_r += r_loss
                avg_tr += tr_val

            duration = time.time() - start_time
            #print('epoch %d loss %0.2f' %(epoch,avg_loss/total_batch))
            print('epoch %d  time: %f loss %0.5f r_loss %0.5f tr_loss %0.5f' %
                  (epoch, duration, avg_loss /
                   (total_batch), avg_r / total_batch, tr_val))

            if epoch % 10 == 0:
                tr_acc = compute_accuracy(tr_trip_idxs)
                print('Training accuracy: ', tr_acc)

                print(
                    'epoch %d  time: %f loss %0.5f r_loss %0.5f tr_loss %0.5f'
                    % (epoch, duration, avg_loss /
                       (total_batch), avg_r / total_batch, tr_val))
                train_embedding = model1.eval(feed_dict={
                    anchor: X_train,
                    dropout_f: 1.0
                })
                test_embedding = model1.eval(feed_dict={
                    anchor: X_test,
                    dropout_f: 1.0
                })
                # Validation embedding and accuracy drive the patience-based
                # early-stopping check below.
                val_embedding = model1.eval(feed_dict={
                    anchor: X_val,
                    dropout_f: 1.0
                })
                accuracy = evaluate_test_embedding(train_embedding, y_train,
                                                   test_embedding, y_test)
                val_accuracy = evaluate_test_embedding(train_embedding, y_train,
                                                       val_embedding, y_val)

                print('Accuracy given NN approach %0.2f' % (100 * accuracy))
                print('Val Accuracy given NN approach %0.2f' %
                      (100 * val_accuracy))

                last_vals[(epoch // 100) % patience_window] = val_accuracy
                if last_vals.count(last_vals[0]) == len(last_vals):
                    early_stopping = True
            """
            if early_stopping:
                print('Stopping early!')
                break
            """

        train_embedding = model1.eval(feed_dict={
            anchor: X_train,
            dropout_f: 1.0
        })
        test_embedding = model1.eval(feed_dict={
            anchor: X_test,
            dropout_f: 1.0
        })
        accuracy = evaluate_test_embedding(train_embedding, y_train,
                                           test_embedding, y_test)
        print('Accuracy given NN approach %0.2f' % (100 * accuracy))

        filter1_weights = sess.run(filters[0])
        for coord, label in zip(train_embedding, y_train):
            f1.write(' '.join([str(a) for a in coord]) + "\n")
            f2.write(str(label) + "\n")

        for coord, label in zip(test_embedding, y_test):
            f1_t.write(' '.join([str(a) for a in coord]) + "\n")
            f2_t.write(str(label) + "\n")

    return accuracy
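
next_batch_from_idx is another helper these examples rely on without defining. A small sketch under the assumption that it slices the triplet index array and looks the indices up in X (the real helper may also shuffle or pad):

def next_batch_from_idx(s, e, trip_idxs, X):
    # Assumed helper: turn a slice of (anchor, positive, negative) index
    # triplets into three batches of actual series.
    batch = np.asarray(trip_idxs[s:e])
    return X[batch[:, 0]], X[batch[:, 1]], X[batch[:, 2]]
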
def test_model(dataset, pool_pctg=.1, layer_size=40, stride_pct=-1):
    tf.reset_default_graph()
    """
    X_train, y_train, X_test, y_test = loadEEG()
    X_val = X_test[:2]
    y_val = y_test[:2]
    X_test = X_test[2:]
    y_test = y_test[2:]
    """
    def compute_accuracy(X, triplet_idxs):
        n_correct = 0.0
        for triplet in triplet_idxs:
            a = np.expand_dims(X[triplet[0]], 0)
            s = np.expand_dims(X[triplet[1]], 0)
            d = np.expand_dims(X[triplet[2]], 0)
            predict_same = distance.eval(feed_dict={
                anchor: a,
                same: s,
                dropout_f: 1.0
            })
            predict_diff = distance.eval(feed_dict={
                anchor: a,
                same: d,
                dropout_f: 1.0
            })
            if predict_same[0][0] < predict_diff[0][0]:
                n_correct += 1.0
        return n_correct / len(triplet_idxs)

    dataset_list = cv_splits_for_dataset(dataset)
    if len(dataset_list) <= n_fold:
        n_fold = 0
    X_train = dataset_list[n_fold].X_train
    y_train = dataset_list[n_fold].y_train
    X_test = dataset_list[n_fold].X_test
    y_test = dataset_list[n_fold].y_test

    if dataset == 'trajectories':
        X_train = [g.T for g in X_train]
        X_test = [g.T for g in X_test]

    n = max([
        np.max([v.shape[0] for v in X_train]),
        np.max([v.shape[0] for v in X_test])
    ])
    X_train = standardize_ts_lengths(X_train, n)
    X_test = standardize_ts_lengths(X_test, n)
    y_train = np.array(y_train)
    y_test = np.array(y_test)

    X_train = normalize_rows(X_train)
    X_test = normalize_rows(X_test)

    labels = np.unique(y_train)
    digit_indices = [np.where(y_train == i)[0] for i in labels]
    tr_trip_idxs = create_triplet_idxs(X_train, digit_indices, labels)
    digit_indices = [np.where(y_test == i)[0] for i in labels]
    te_trip_idxs = create_triplet_idxs(X_test, digit_indices, labels)

    N = X_train.shape[0]
    Ntest = X_test.shape[0]
    D = X_train.shape[1]

    ts_length = X_train.shape[2]
    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = 0.001
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step,
                                               10,
                                               0.1,
                                               staircase=True)
    pool_width = pool_pctg * ts_length

    labels = np.unique(y_train)
    digit_indices = [np.where(y_train == i)[0] for i in labels]
    tr_pairs, tr_y = create_pairs(X_train, digit_indices, labels)
    pos_ind = np.where(tr_y == SAME_LABEL)[0]
    neg_ind = np.where(tr_y == NEG_LABEL)[0]

    # create training+test positive and negative pairs
    anchor = tf.placeholder(tf.float32, shape=([None, D, ts_length]), name='L')
    same = tf.placeholder(tf.float32, shape=([None, D, ts_length]), name='R')
    different = tf.placeholder(tf.float32,
                               shape=([None, D, ts_length]),
                               name='R')
    labels = tf.placeholder(tf.float32, shape=([None]), name='gt')

    #digit_indices = [np.where(y_val == i)[0] for i in labels]
    #val_pairs, val_y = create_pairs(X_val, digit_indices, labels)
    digit_indices = [np.where(y_test == i)[0] for i in labels]
    te_pairs, te_y = create_pairs(X_test, digit_indices, labels)

    # Initializing the variables
    r = 1
    N = tr_pairs.shape[0]
    # the data, shuffled and split between train and test sets

    batch_size = 24
    num_pos = 30
    num_neg = batch_size - num_pos
    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = 0.01
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step,
                                               10,
                                               0.1,
                                               staircase=True)
    # create training+test positive and negative pairs
    images_L = tf.placeholder(tf.float32, shape=([None, ts_length]), name='L')
    images_R = tf.placeholder(tf.float32, shape=([None, ts_length]), name='R')
    labels = tf.placeholder(tf.float32, shape=([None, 1]), name='gt')
    embedding_size = 10
    dropout_f = tf.placeholder("float")
    bn_train = tf.placeholder(tf.bool)

    pool_width = pool_pctg * ts_length
    with tf.variable_scope("siamese") as scope:
        model1, filters = build_conv_net(images_L, bn_train, dropout_f,
                                         ts_length, embedding_size, pool_width)
        scope.reuse_variables()
        model2, _ = build_conv_net(images_R, bn_train, dropout_f, ts_length,
                                   embedding_size, pool_width)

    normalize_model1 = tf.nn.l2_normalize(model1, 0)
    normalize_model2 = tf.nn.l2_normalize(model2, 0)
    # Cosine similarity between the two (L2-normalised) towers; computed but
    # not used further below.
    cos_similarity = tf.reduce_sum(tf.multiply(normalize_model1,
                                               normalize_model2),
                                   1,
                                   keep_dims=True)

    distance = tf.sqrt(
        tf.reduce_sum(tf.pow(tf.subtract(model1, model2), 2),
                      1,
                      keep_dims=True))
    #distance = 1-scipy.spatial.distance.cosine(model1, model2)
    #loss = contrastive_loss(labels,distance) + regularizer(model1, model2, r)
    #loss = c_loss(labels, model1, model2) + regularizer(model1, model2, r)
    loss = contrastive_loss(labels, distance) + regularizer(model1, model2, r)

    #ugh = c_loss(labels, model1, model2)
    ugh = contrastive_loss(labels, distance)
    #loss = contrastive_loss(labels,distance) + regularizer(model1, model2, r)
    regularization = regularizer(model1, model2, r)
    # contrastive loss
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'l' in var.name]
    batch = tf.Variable(0)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss)
    #optimizer = tf.train.RMSPropOptimizer(0.00001,momentum=0.9,epsilon=1e-6).minimize(loss)
    # Launch the graph

    f1 = open('X_output.txt', 'w')
    f2 = open('X_labels.txt', 'w')
    f1_t = open('X_output_test.txt', 'w')
    f2_t = open('X_labels_test.txt', 'w')

    #filter2_summary = tf.summary.image("Filter_2", filters[1])
    patience_window = 5
    last_vals = [0 for i in range(patience_window)]
    early_stopping = False
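    # Rolling window of the last few validation accuracies; if all entries in
    # the window are identical the run is flagged for early stopping (the
    # actual break further down is commented out).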
    with tf.Session() as sess:

        tf.global_variables_initializer().run()
        summary_writer = tf.summary.FileWriter('/tmp/logs', sess.graph_def)
        #merged = tf.summary.merge_all()

        # Training cycle
        step = 0
        perf_collect = [[], []]
        for epoch in range(700):

            total_c_loss = 0
            for i in range(max(int(np.ceil(N / float(batch_size))), 1)):

                batch_ind = np.arange(i * batch_size,
                                      min((i + 1) * batch_size, N - 1))
                #pos_ind = np.arange(i*num_pos, min((i+1)*num_pos,N-1))
                #neg_ind = np.arange(i*num_neg, min((i+1)*num_neg,N-1))

                #pos_ind = np.random.choice( np.arange(N), num_pos)
                #neg_ind = np.random.choice( np.arange(N), num_neg)
                #batch_ind = np.concatenate((pos_ind, neg_ind))
                #print('VAL ACCURACY %0.2f' % perf_collect[1][-1])

                input1, input2, y = tr_pairs[batch_ind, [0]], tr_pairs[
                    batch_ind, 1], tr_y[batch_ind, np.newaxis]
                _, loss_value, predict, r_loss, c_loss = sess.run(
                    [optimizer, loss, distance, regularization, ugh],
                    feed_dict={
                        images_L: input1,
                        images_R: input2,
                        labels: y,
                        dropout_f: 1.0,
                        bn_train: True
                    })
                total_c_loss += c_loss
                if math.isnan(c_loss):
                    pdb.set_trace()

            if epoch % 400 == 0:

                #tr_acc = compute_accuracy(predict,y)

                print('epoch %d loss %0.5f r_loss %0.5f c_loss %0.5f ' %
                      (epoch, loss_value, r_loss, total_c_loss))

                train_embedding = model1.eval(feed_dict={
                    images_L: X_train,
                    dropout_f: 1.0,
                    bn_train: False
                })
                test_embedding = model1.eval(feed_dict={
                    images_L: X_test,
                    dropout_f: 1.0,
                    bn_train: False
                })
                val_embedding = model1.eval(feed_dict={
                    images_L: X_val,
                    dropout_f: 1.0,
                    bn_train: False
                })
                accuracy = evaluate_test_embedding(train_embedding, y_train,
                                                   test_embedding, y_test)
                val_accuracy = evaluate_test_embedding(train_embedding,
                                                       y_train, val_embedding,
                                                       y_val)
                last_vals[(epoch // 100) % patience_window] = val_accuracy
                if last_vals.count(last_vals[0]) == len(last_vals) and i > 900:
                    early_stopping = True
                print('Accuracy given NN approach %0.2f' % (100 * accuracy))
                print('Val Accuracy given NN approach %0.2f' %
                      (100 * val_accuracy))
            """
            if early_stopping:
                print('Stopping early')
                break
            """

            #print('epoch %d loss %0.2f' %(epoch,avg_loss/total_batch))

        # Test model
        """
        y = np.reshape(te_y,(te_y.shape[0],1))
        feature1=model1.eval(feed_dict={images_L:te_pairs[:,0],dropout_f:1.0, bn_train:False})
        feature2=model2.eval(feed_dict={images_R:te_pairs[:,1],dropout_f:1.0, bn_train:False})
        te_acc = compute_accuracy_features(feature1, feature2,te_y)
        print('Accuracy test set %0.2f' % (100 * te_acc))
        """
        train_embedding = model1.eval(feed_dict={
            images_L: X_train,
            dropout_f: 1.0,
            bn_train: False
        })
        test_embedding = model1.eval(feed_dict={
            images_L: X_test,
            dropout_f: 1.0,
            bn_train: False
        })

        accuracy = evaluate_test_embedding(train_embedding, y_train,
                                           test_embedding, y_test)
        print('Accuracy given NN approach %0.2f' % (100 * accuracy))
        return accuracy