Example #1
import numpy as np
import tensorflow as tf

import model  # project module that provides NetworkModel

print('Loading data...')
# DATA_DIR is assumed to be defined elsewhere in the original script.
testset = np.loadtxt(DATA_DIR + '/ORIGINAL_SOUNDS_THREE_LABEL_DATA')
print('Load successful')

x_data = testset[50000:, 2:-4]  # use the 100 Hz to 8000 Hz frequency bins
print(x_data.shape)

y_data = testset[50000:, 0]
y_data = np.reshape(y_data, [-1, 1])
print(y_data.shape)

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

with tf.Session() as sess:
    modelmaker = model.NetworkModel()
    _, Hypothesis_prob_load, _, saver = modelmaker.get_model(
        sess, './P2_LOADED', X, Y)
    prob_load = sess.run([Hypothesis_prob_load], feed_dict={X: x_data})

# Build the second model in a fresh graph so its variables and placeholders
# do not collide with the first model's.
g = tf.Graph()
with g.as_default():
    X = tf.placeholder(tf.float32)
    Y = tf.placeholder(tf.float32)
    modelmaker = model.NetworkModel()

    with tf.Session(graph=g) as sess:
        _, Hypothesis_prob_unload, _, _ = modelmaker.get_model(
            sess, './P2_UNLOADED', X, Y)
        prob_unload = sess.run([Hypothesis_prob_unload],
                               feed_dict={X: x_data})
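
The same checkpoint-per-graph pattern can be factored into a helper. A minimal sketch, reusing the NetworkModel.get_model call from this example (the helper name and return handling are illustrative, not part of the original script):

def run_model(checkpoint_dir, x_data):
    """Load one checkpoint into its own graph and run its probability output."""
    g = tf.Graph()
    with g.as_default():
        X = tf.placeholder(tf.float32)
        Y = tf.placeholder(tf.float32)
        modelmaker = model.NetworkModel()
        with tf.Session(graph=g) as sess:
            _, hypothesis_prob, _, _ = modelmaker.get_model(
                sess, checkpoint_dir, X, Y)
            return sess.run(hypothesis_prob, feed_dict={X: x_data})

prob_load = run_model('./P2_LOADED', x_data)
prob_unload = run_model('./P2_UNLOADED', x_data)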
Example #2
import datetime

import tensorflow as tf

import data_process
import model
import utils
# DataShuffle is assumed to be importable from a project-local module.


def main(argv=None):

    margin = 0.75
    BATCH_SIZE = 40
    ITERATION = 200000
    data_num = 16813
    train_anchor_data = tf.placeholder(tf.float32,
                                       shape=[BATCH_SIZE, 224, 224, 3],
                                       name='anchor')
    train_positive_data = tf.placeholder(tf.float32,
                                         shape=[BATCH_SIZE, 224, 224, 3],
                                         name='positive')
    train_negative_data = tf.placeholder(tf.float32,
                                         shape=[BATCH_SIZE, 224, 224, 3],
                                         name='negative')
    labels_anchor = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
    labels_positive = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
    labels_negative = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
    keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_probability')
    train_data = tf.concat(
        [train_anchor_data, train_positive_data, train_negative_data], axis=0)

    # vgg_train_anchor = model.NetworkModel(train_anchor_data, keep_prob=keep_prob)
    # vgg_train_positive = model.NetworkModel(train_positive_data, keep_prob=keep_prob, reuse=True)
    # vgg_train_negative = model.NetworkModel(train_negative_data, keep_prob=keep_prob, reuse=True)

    pre_logits = model.NetworkModel(train_data, keep_prob=keep_prob)
    logits = tf.nn.l2_normalize(pre_logits, 1, 1e-10, name='embeddings')
    # print(logits.get_shape().as_list())

    # train_data stacked anchors, positives and negatives along axis 0, so
    # split the normalized embeddings back into those three groups.
    vgg_train_anchor, vgg_train_positive, vgg_train_negative = tf.split(
        logits, 3, axis=0)
    # print(vgg_train_anchor.get_shape().as_list())
    # print(vgg_train_positive.get_shape().as_list())
    # print(vgg_train_negative.get_shape().as_list())

    loss, positives, negatives = utils.compute_triplet_loss(
        vgg_train_anchor, vgg_train_positive, vgg_train_negative, margin)

    batch = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(0.001, batch * BATCH_SIZE,
                                               data_num, 0.95)

    # Pass the step counter to minimize() so it is incremented each step and
    # the learning rate actually decays.
    optimizer_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=batch)
    Saver = tf.train.Saver()
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('positives', positives)
    tf.summary.scalar('negatives', negatives)
    merged = tf.summary.merge_all()

    data, labels = data_process.input_data()
    print(data.shape)
    print(labels.shape)
    dataShufflu = DataShuffle(data, labels)

    with tf.Session() as sess:
        train_write = tf.summary.FileWriter('./logs_tensorboard_3/',
                                            sess.graph)
        # test_write = tf.summary.FileWriter('./logs_tensorboard/triple/test/', sess.graph)

        sess.run(tf.global_variables_initializer())

        for step in range(ITERATION):
            (batch_anchor, batch_positive, batch_negative,
             batch_labels_anchor, batch_labels_positive,
             batch_labels_negative) = dataShufflu.get_triplet(
                 n_labels=30, n_triplet=BATCH_SIZE)
            # print(batch_anchor, batch_positive, batch_negative, batch_labels_anchor, batch_labels_positive, batch_labels_negative)
            feed_dict = {
                train_anchor_data: batch_anchor,
                train_positive_data: batch_positive,
                train_negative_data: batch_negative,
                labels_anchor: batch_labels_anchor,
                labels_positive: batch_labels_positive,
                labels_negative: batch_labels_negative,
                keep_prob: 0.80
            }

            _, l, ls, summary = sess.run(
                [optimizer_op, loss, learning_rate, merged],
                feed_dict=feed_dict)

            print("%d the %s train reslut" % (step, datetime.datetime.now()))
            print('the triplet loss %g' % l)

            train_write.add_summary(summary, step)

            if step % 100 == 0:
                Saver.save(sess, './logs_tensorboard_3/')

        train_write.close()
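
The helper utils.compute_triplet_loss is not shown in this example. A minimal sketch of the standard triplet loss it presumably implements (squared Euclidean distances and a hinge at the margin; the exact project implementation may differ):

def compute_triplet_loss_sketch(anchor, positive, negative, margin):
    """Illustrative stand-in returning (loss, mean positive distance, mean negative distance)."""
    # Squared Euclidean distance between anchor/positive and anchor/negative embeddings.
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=1)
    # Hinge loss: penalize triplets where the negative is not at least `margin` farther away.
    loss = tf.reduce_mean(tf.maximum(pos_dist - neg_dist + margin, 0.0))
    return loss, tf.reduce_mean(pos_dist), tf.reduce_mean(neg_dist)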
Example #3
import pickle

import numpy as np
import tensorflow as tf
from sklearn.neighbors import KNeighborsClassifier

import model  # project module that provides NetworkModel


def main(data, iter_num, labels, train=True):
    BATCH_SIZE = 2
    ITERATION = iter_num
    embedding_size = 1024
    CLASS_NUM = 30

    train_anchor_data = tf.placeholder(tf.float32,
                                       shape=[BATCH_SIZE, 224, 224, 3],
                                       name='anchor')

    labels_anchor = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
    keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_probability')

    pre_logits = model.NetworkModel(train_anchor_data, keep_prob=keep_prob)
    logits = tf.nn.l2_normalize(pre_logits, 1, 1e-12, name='embeddings')
    # print(logits.get_shape().as_list())
    data_train, train_labels = data, labels
    # data_train = data

    print(data.shape)
    print(labels.shape)
    Saver = tf.train.Saver()
    with tf.Session() as sess:
        # test_write = tf.summary.FileWriter('./logs_tensorboard/triple/test/', sess.graph)

        sess.run(tf.global_variables_initializer())
        Saver.restore(sess, './logs_tensorboard2/')

        emb_array = np.zeros(
            (ITERATION // BATCH_SIZE * BATCH_SIZE, embedding_size))
        print(np.shape(emb_array))

        for step in range(ITERATION // BATCH_SIZE):
            batch_anchor = data_train[step * BATCH_SIZE:(step + 1) * BATCH_SIZE]
            batch_labels = train_labels[step * BATCH_SIZE:(step + 1) * BATCH_SIZE]

            feed_dict = {
                train_anchor_data: batch_anchor,
                labels_anchor: batch_labels,
                keep_prob: 1.0
            }

            Logits = sess.run(logits, feed_dict=feed_dict)
            emb_array[step * BATCH_SIZE:(step + 1) * BATCH_SIZE, :] = Logits
            print('extracted embeddings for batch %d' % step)
    np.savetxt('valid_feature.txt', emb_array)
    if train:
        print('Training classifier')
        class_name = ['%2d' % (i) for i in range(CLASS_NUM)]
        print(class_name)
        # SVMmodel = SVC(kernel='rbf', probability=True)
        knn = KNeighborsClassifier(n_neighbors=30)
        print("fit !!!")
        # SVMmodel.fit(emb_array, labels[:ITERATION // BATCH_SIZE * BATCH_SIZE])
        knn.fit(emb_array, labels[:ITERATION // BATCH_SIZE * BATCH_SIZE])

        with open('train_model_pickle30', 'wb') as outfile:
            pickle.dump((knn, class_name), outfile)
            # pickle.dump((SVMmodel, class_name), outfile)
        print('Saved classifier model to file train_model_pickle30')
    else:
        print('Testing classifier')
        with open('train_model_pickle30', 'rb') as infile:
            (knn, class_name) = pickle.load(infile)
        print('Loaded classifier model from file train_model_pickle30')

        predictions = knn.predict_proba(emb_array)
        np.savetxt('test_k_30_image_result.csv', predictions, fmt='%g')
        best_class_indices = np.argmax(predictions, axis=1)
        best_class_probabilities = predictions[
            np.arange(len(best_class_indices)), best_class_indices]

        for i in range(len(best_class_indices)):
            print('%4d %s %0.3f' % (i, class_name[best_class_indices[i]],
                                    best_class_probabilities[i]))
        accuracy = np.mean(
            np.equal(best_class_indices, labels[:len(best_class_indices)]))
        print("Accuracy %.3f" % accuracy)
Example #4
import datetime

import tensorflow as tf

import data_process
import model
import utils
# DataShuffle is assumed to be importable from a project-local module.


def main(argv=None):
    with tf.Graph().as_default() as graph:

        Margin = 0.25
        BATCH_SIZE = 40
        ITERATION = 2000000
        data_num = 16640
        train_anchor_data = tf.placeholder(tf.float32,
                                           shape=[BATCH_SIZE, 224, 224, 3],
                                           name='anchor')
        train_positive_data = tf.placeholder(tf.float32,
                                             shape=[BATCH_SIZE, 224, 224, 3],
                                             name='positive')
        train_negative_data = tf.placeholder(tf.float32,
                                             shape=[BATCH_SIZE, 224, 224, 3],
                                             name='negative')
        # labels_anchor = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        # labels_positive = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        # labels_negative = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        tf.summary.image('input_image', train_anchor_data, 10)

        train_data = tf.concat(
            [train_anchor_data, train_positive_data, train_negative_data],
            axis=0)

        pre_logits1 = model.NetworkModel(train_data, keep_prob=0.80)
        # pre_logits2 = model.NetworkModel(train_positive_data, keep_prob=0.80, reuse=True)
        # pre_logits3 = model.NetworkModel(train_negative_data, keep_prob=0.80, reuse=True)

        pre_logits1 = tf.nn.l2_normalize(pre_logits1,
                                         1,
                                         1e-10,
                                         name='embeddings')
        # pre_logits2 = tf.nn.l2_normalize(pre_logits2, 1, 1e-10, name='embeddings')
        # pre_logits3 = tf.nn.l2_normalize(pre_logits3, 1, 1e-10, name='embeddings')
        # print(logits.get_shape().as_list())

        # train_data stacked anchors, positives and negatives along axis 0, so
        # split the normalized embeddings back into those three groups.
        vgg_train_anchor, vgg_train_positive, vgg_train_negative = tf.split(
            pre_logits1, 3, axis=0)

        loss, positives, negatives = utils.compute_triplet_loss(
            vgg_train_anchor, vgg_train_positive, vgg_train_negative, Margin)
        regularizer_losses = tf.add_n(tf.get_collection("losses"))
        total_loss = loss + regularizer_losses
        # print(total_loss)

        # num_batches_per_epoch = int(data_num / BATCH_SIZE * 3)
        # num_steps_per_epoch = num_batches_per_epoch  # Because one step is one batch processed
        # decay_steps = int(2 * num_steps_per_epoch)
        global_step = tf.Variable(tf.constant(0, dtype=tf.int64),
                                  trainable=False)
        lr = tf.train.exponential_decay(learning_rate=0.001,
                                        global_step=global_step,
                                        decay_steps=data_num // BATCH_SIZE,
                                        decay_rate=0.96,
                                        staircase=True)

        train_op = tf.train.GradientDescentOptimizer(
            learning_rate=lr).minimize(total_loss, global_step=global_step)

        Saver = tf.train.Saver()

        tf.summary.scalar('total_loss', total_loss)
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('learning_rate', lr)
        tf.summary.scalar('positives', positives)
        tf.summary.scalar('negatives', negatives)
        # tf.summary.scalar('pos', pos)
        # tf.summary.scalar('neg', neg)
        merged = tf.summary.merge_all()

        data, labels = data_process.input_data()
        print(data.shape)
        print(labels.shape)
        dataShufflu = DataShuffle(data, labels)

        with tf.Session() as sess:
            train_write = tf.summary.FileWriter(
                '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard2/',
                sess.graph)

            sess.run(tf.global_variables_initializer())
            # Saver.restore(sess, '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard2/')
            for step in range(ITERATION):
                (batch_anchor, batch_positive, batch_negative,
                 batch_labels_anchor, batch_labels_positive,
                 batch_labels_negative) = dataShufflu.get_triplet(
                     n_labels=30, n_triplet=BATCH_SIZE)

                feed_dict = {
                    train_anchor_data: batch_anchor,
                    train_positive_data: batch_positive,
                    train_negative_data: batch_negative
                    # labels_anchor: batch_labels_anchor,
                    # labels_positive: batch_labels_positive,
                    # labels_negative: batch_labels_negative
                }

                _, l, summary, Loss = sess.run(
                    [train_op, total_loss, merged, loss], feed_dict=feed_dict)

                print("%d the %s train reslut" %
                      (step, datetime.datetime.now()))
                print('the triplet loss %g' % Loss)
                # print('the triplet total loss %g' % l)

                train_write.add_summary(summary, step)

                if step % 200 == 0:
                    Saver.save(
                        sess,
                        '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard2/'
                    )

            train_write.close()
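
With staircase=True, tf.train.exponential_decay lowers the rate in discrete steps, once every decay_steps batches. A quick sanity check of the schedule configured above (plain Python, values computed from the documented decay formula):

def decayed_lr(step, base_lr=0.001, decay_steps=16640 // 40, decay_rate=0.96):
    # staircase=True floors the exponent to a whole number of decay periods.
    return base_lr * decay_rate ** (step // decay_steps)

print(decayed_lr(0))      # 0.001
print(decayed_lr(416))    # 0.00096 after one pass over data_num samples
print(decayed_lr(4160))   # ~0.000665 after ten passes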