Example No. 1
File: vgg19.py  Project: yesyu/Pig2
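These listings are code excerpts, so module-level imports and constants are not shown. A minimal sketch of what this first snippet appears to assume (hypothetical setup; the actual definitions live elsewhere in vgg19.py and the Pig2 project):

# Assumed module-level setup for this excerpt (not shown in the original file):
import datetime

import numpy as np
import tensorflow as tf  # written against the TensorFlow 1.x API

import data_process  # project-local loader that provides input_data()

# subnet1() and the constants IMAGE_SIZE, IMAGE_CHANNLES, CLASS_COUNTS,
# RAW_NUM, TRAIN_NUM, EPOCH and BATCH_SIZE are defined elsewhere in the project.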
def main(argv=None):
	train_samples = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, IMAGE_CHANNLES], name='train_sample')
	train_labels = tf.placeholder(tf.float32, [None, CLASS_COUNTS], name='train_label')
	keep_prob = tf.placeholder(tf.float32, [], name='keep_probability')

	logits1 = subnet1(train_samples, keep_prob)

	clas1_loss = tf.reduce_mean(
		tf.nn.softmax_cross_entropy_with_logits(labels=train_labels, logits=logits1))

	clas1_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(train_labels, 1), tf.argmax(logits1, 1)), tf.float32), name='clas1_acc')
	classifier1_loss = clas1_loss

	# classifier1_op = tf.train.AdamOptimizer(0.0005).minimize(classifier1_loss)
	classifier1_op = tf.train.GradientDescentOptimizer(0.0005).minimize(clas1_loss)

	train_data, train_label = data_process.input_data()

	Saver = tf.train.Saver()
	with tf.Session() as sess:
		train_writer = tf.summary.FileWriter('./checkpoint/', sess.graph)
		sess.run(tf.global_variables_initializer())
		ckpt = tf.train.latest_checkpoint('./checkpoint/')  # restore() expects a checkpoint prefix, not a directory
		if ckpt:
			Saver.restore(sess, ckpt)
		print('Initialized!')
		for step in range(int(RAW_NUM * EPOCH) // BATCH_SIZE):

			train_offset = (step * BATCH_SIZE) % (TRAIN_NUM - BATCH_SIZE)
			batch_data = train_data[train_offset:(train_offset + BATCH_SIZE)]
			batch_label = train_label[train_offset:(train_offset + BATCH_SIZE), :]
			np.random.seed(step)
			np.random.shuffle(batch_data)
			np.random.seed(step)
			np.random.shuffle(batch_label)

			feed_dict = {train_samples: batch_data, train_labels: batch_label, keep_prob: 0.50}

			_, Clas1_loss = sess.run([classifier1_op, classifier1_loss], feed_dict=feed_dict)

			Clas1_acc = sess.run(clas1_acc, feed_dict=feed_dict)

			print("%d the %s train reslut" % (step, datetime.datetime.now()))
			print('the classifier one loss %g' % Clas1_loss)
			print('the classifier1 accuracy is %g' % (Clas1_acc))

			if step % 500 == 0:
				Saver.save(sess, './checkpoint/')
Example No. 2
                   label_positive, label_positive, label_negative

        target_data = self.train_data
        target_labels = self.train_labels
        # print(target_labels)

        # c = target_data.shape[3]
        # w = target_data.shape[1]
        # h = target_data.shape[2]

        data_a = np.zeros(shape=(n_triplet, 299, 299, 3), dtype=np.float32)
        data_p = np.zeros(shape=(n_triplet, 299, 299, 3), dtype=np.float32)
        data_n = np.zeros(shape=(n_triplet, 299, 299, 3), dtype=np.float32)
        labels_a = np.zeros(shape=n_triplet, dtype=np.float32)
        labels_p = np.zeros(shape=n_triplet, dtype=np.float32)
        labels_n = np.zeros(shape=n_triplet, dtype=np.float32)
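        # _get_one_triplet (defined elsewhere in this file, not shown in the excerpt) presumably
        # samples an anchor/positive pair from one class and a negative sample from a different
        # class, returning the three images together with their labels; the loop below fills the
        # preallocated arrays one triplet at a time.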

        for i in range(n_triplet):
            data_a[i, :, :, :], data_p[i, :, :, :], data_n[i, :, :, :], \
            labels_a[i], labels_p[i], labels_n[i] = _get_one_triplet(target_data, target_labels)

        return data_a, data_p, data_n, labels_a, labels_p, labels_n


if __name__ == '__main__':
    BATCH_SIZE = 32
    data, labels = data_process.input_data()
    dataShuffle = DataShuffle(data, labels)
    batch_anchor, batch_positive, batch_negative, \
    batch_labels_anchor, batch_labels_positive, \
    batch_labels_negative = dataShuffle.get_triplet(n_labels=30, n_triplet=BATCH_SIZE)
Example No. 3
def main(argv=None):
    with tf.Graph().as_default() as graph:

        margin = 0.25
        BATCH_SIZE = 30
        ITERATION = 200000
        data_num = 16813
        train_anchor_data = tf.placeholder(tf.float32,
                                           shape=[BATCH_SIZE, 299, 299, 3],
                                           name='anchor')
        train_positive_data = tf.placeholder(tf.float32,
                                             shape=[BATCH_SIZE, 299, 299, 3],
                                             name='positive')
        train_negative_data = tf.placeholder(tf.float32,
                                             shape=[BATCH_SIZE, 299, 299, 3],
                                             name='negative')
        labels_anchor = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        labels_positive = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        labels_negative = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        # keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_probability')
        train_data = tf.concat(
            [train_anchor_data, train_positive_data, train_negative_data],
            axis=0)

        # vgg_train_anchor = model.NetworkModel(train_anchor_data, keep_prob=keep_prob)
        # vgg_train_positive = model.NetworkModel(train_positive_data, keep_prob=keep_prob, reuse=True)
        # vgg_train_negative = model.NetworkModel(train_negative_data, keep_prob=keep_prob, reuse=True)
        with slim.arg_scope(inception_resnet_v2_slim_version.
                            inception_resnet_v2_arg_scope()):
            pre_logits, logits = inception_resnet_v2_slim_version.inception_resnet_v2(
                train_data, num_classes=512, is_training=True)

        pre_logits = tf.nn.l2_normalize(pre_logits,
                                        1,
                                        1e-10,
                                        name='embeddings')
        # print(logits.get_shape().as_list())

        exclude = ['InceptionResnetV2/Logits', 'InceptionResnetV2/AuxLogits']
        variables_to_restore = slim.get_variables_to_restore(exclude=exclude)
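        # train_data stacked the anchor, positive and negative batches as three consecutive
        # blocks along the batch axis, so the backbone above runs once with shared weights;
        # splitting the embeddings back into three equal blocks below recovers the per-branch
        # outputs.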

        vgg_train_anchor, vgg_train_positive, vgg_train_negative = tf.split(
            pre_logits, 3, axis=0)
        # print(vgg_train_anchor.get_shape().as_list())
        # print(vgg_train_positive.get_shape().as_list())
        # print(vgg_train_negative.get_shape().as_list())

        loss, positives, negatives = utils.compute_triplet_loss(
            vgg_train_anchor, vgg_train_positive, vgg_train_negative, margin)

        total_loss = tf.losses.get_total_loss() + loss

        num_batches_per_epoch = int(data_num / BATCH_SIZE * 3)
        num_steps_per_epoch = num_batches_per_epoch  # Because one step is one batch processed
        decay_steps = int(2 * num_steps_per_epoch)
        global_step = tf.Variable(0, trainable=False)
        lr = tf.train.exponential_decay(learning_rate=0.001,
                                        global_step=global_step,
                                        decay_steps=decay_steps,
                                        decay_rate=0.7,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate=lr)

        # optimizer_op = tf.train.GradientDescentOptimizer(learning_rate)
        train_op = slim.learning.create_train_op(total_loss, optimizer, global_step=global_step)

        Saver = tf.train.Saver(variables_to_restore)

        tf.summary.scalar('total_loss', total_loss)
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('learning_rate', lr)
        tf.summary.scalar('positives', positives)
        tf.summary.scalar('negatives', negatives)
        merged = tf.summary.merge_all()

        data, labels = data_process.input_data()
        print(data.shape)
        print(labels.shape)
        dataShufflu = DataShuffle(data, labels)

        with tf.Session() as sess:
            train_write = tf.summary.FileWriter('./logs_tensorboard_3/',
                                                sess.graph)
            # test_write = tf.summary.FileWriter('./logs_tensorboard/triple/test/', sess.graph)

            sess.run(tf.global_variables_initializer())
            Saver.restore(sess, 'inception_resnet_v2_2016_08_30.ckpt')
            for step in range(ITERATION):
                batch_anchor, batch_positive, batch_negative, batch_labels_anchor, batch_labels_positive,\
                batch_labels_negative = dataShufflu.get_triplet(n_labels=30, n_triplet=BATCH_SIZE)
                # print(batch_anchor, batch_positive, batch_negative, batch_labels_anchor, batch_labels_positive, batch_labels_negative)
                feed_dict = {
                    train_anchor_data: batch_anchor,
                    train_positive_data: batch_positive,
                    train_negative_data: batch_negative,
                    labels_anchor: batch_labels_anchor,
                    labels_positive: batch_labels_positive,
                    labels_negative: batch_labels_negative
                }

                _, l, ls, summary, Loss = sess.run(
                    [train_op, total_loss, lr, merged, loss],
                    feed_dict=feed_dict)

                print("%d the %s train reslut" %
                      (step, datetime.datetime.now()))
                print('the triplet loss %g' % Loss)
                print('the triplet total loss %g' % l)

                train_write.add_summary(summary, step)

                if step % 100 == 0:
                    Saver.save(sess, './logs_tensorboard_3/')

            train_write.close()
Example No. 4
def main(argv=None):

    margin = 0.75
    BATCH_SIZE = 40
    ITERATION = 200000
    data_num = 16813
    train_anchor_data = tf.placeholder(tf.float32,
                                       shape=[BATCH_SIZE, 224, 224, 3],
                                       name='anchor')
    train_positive_data = tf.placeholder(tf.float32,
                                         shape=[BATCH_SIZE, 224, 224, 3],
                                         name='positive')
    train_negative_data = tf.placeholder(tf.float32,
                                         shape=[BATCH_SIZE, 224, 224, 3],
                                         name='negative')
    labels_anchor = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
    labels_positive = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
    labels_negative = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
    keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_probability')
    train_data = tf.concat(
        [train_anchor_data, train_positive_data, train_negative_data], axis=0)

    # vgg_train_anchor = model.NetworkModel(train_anchor_data, keep_prob=keep_prob)
    # vgg_train_positive = model.NetworkModel(train_positive_data, keep_prob=keep_prob, reuse=True)
    # vgg_train_negative = model.NetworkModel(train_negative_data, keep_prob=keep_prob, reuse=True)

    pre_logits = model.NetworkModel(train_data, keep_prob=keep_prob)
    logits = tf.nn.l2_normalize(pre_logits, 1, 1e-10, name='embeddings')
    # print(logits.get_shape().as_list())

    vgg_train_anchor, vgg_train_positive, vgg_train_negative = tf.split(
        logits, 3, axis=0)
    # print(vgg_train_anchor.get_shape().as_list())
    # print(vgg_train_positive.get_shape().as_list())
    # print(vgg_train_negative.get_shape().as_list())

    loss, positives, negatives = utils.compute_triplet_loss(
        vgg_train_anchor, vgg_train_positive, vgg_train_negative, margin)

    batch = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(0.001, batch * BATCH_SIZE,
                                               data_num, 0.95)

    optimizer_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=batch)
    Saver = tf.train.Saver()
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('positives', positives)
    tf.summary.scalar('negatives', negatives)
    merged = tf.summary.merge_all()

    data, labels = data_process.input_data()
    print(data.shape)
    print(labels.shape)
    dataShufflu = DataShuffle(data, labels)

    with tf.Session() as sess:
        train_write = tf.summary.FileWriter('./logs_tensorboard_3/',
                                            sess.graph)
        # test_write = tf.summary.FileWriter('./logs_tensorboard/triple/test/', sess.graph)

        sess.run(tf.global_variables_initializer())

        for step in range(ITERATION):
            batch_anchor, batch_positive, batch_negative, batch_labels_anchor, batch_labels_positive,\
            batch_labels_negative = dataShufflu.get_triplet(n_labels=30, n_triplet=BATCH_SIZE)
            # print(batch_anchor, batch_positive, batch_negative, batch_labels_anchor, batch_labels_positive, batch_labels_negative)
            feed_dict = {
                train_anchor_data: batch_anchor,
                train_positive_data: batch_positive,
                train_negative_data: batch_negative,
                labels_anchor: batch_labels_anchor,
                labels_positive: batch_labels_positive,
                labels_negative: batch_labels_negative,
                keep_prob: 0.80
            }

            _, l, ls, summary = sess.run(
                [optimizer_op, loss, learning_rate, merged],
                feed_dict=feed_dict)

            print("%d the %s train reslut" % (step, datetime.datetime.now()))
            print('the triplet loss %g' % l)

            train_write.add_summary(summary, step)

            if step % 100 == 0:
                Saver.save(sess, './logs_tensorboard_3/')

        train_write.close()
Example No. 5
def main(argv=None):
    with tf.Graph().as_default() as graph:

        margin = 0.20
        BATCH_SIZE = 30
        ITERATION = 2000000
        data_num = 16640
        train_anchor_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 224, 224, 3], name='anchor')
        train_positive_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 224, 224, 3], name='positive')
        train_negative_data = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 224, 224, 3], name='negative')
        labels_anchor = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        labels_positive = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        labels_negative = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        tf.summary.image('input_image', train_anchor_data, 10)
        train_data = tf.concat([train_anchor_data, train_positive_data, train_negative_data], axis=0)

        # checkpoint_file = tf.train.latest_checkpoint('/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard/')
        # print(checkpoint_file)

        with slim.arg_scope(inception_resnet_v2_slim_version.inception_resnet_v2_arg_scope()):
            pre_logits, logits = inception_resnet_v2_slim_version.inception_resnet_v2(train_data,
                                                                                      num_classes=128, is_training=False)

        pre_logits = tf.nn.l2_normalize(pre_logits, 1, 1e-12, name='embeddings')
        # print(logits.get_shape().as_list())

        # exclude = ['InceptionResnetV2/Logits', 'InceptionResnetV2/AuxLogits']
        variables_to_restore = slim.get_variables_to_restore()

        vgg_train_anchor, vgg_train_positive, vgg_train_negative = tf.split(pre_logits, 3, axis=0)

        loss, positives, negatives = utils.compute_triplet_loss(vgg_train_anchor,
                                                               vgg_train_positive, vgg_train_negative, margin)
        slim.losses.add_loss(loss)
        total_loss = slim.losses.get_total_loss()

        num_batches_per_epoch = int(data_num / BATCH_SIZE * 3)
        num_steps_per_epoch = num_batches_per_epoch  # Because one step is one batch processed
        decay_steps = int(2 * num_steps_per_epoch)
        global_step = tf.Variable(0, trainable=False)
        lr = tf.train.exponential_decay(
            0.01,
            global_step * BATCH_SIZE,
            data_num,
            0.95
            )

        train_op = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(total_loss, global_step=global_step)

        Saver = tf.train.Saver(variables_to_restore)

        tf.summary.scalar('total_loss', total_loss)
        tf.summary.scalar('loss', loss)
        # tf.summary.scalar('learning_rate', lr)
        tf.summary.scalar('positives', positives)
        tf.summary.scalar('negatives', negatives)
        merged = tf.summary.merge_all()

        data, labels = data_process.input_data()
        print(data.shape)
        print(labels.shape)
        dataShufflu = DataShuffle(data, labels)

        with tf.Session() as sess:
            train_write = tf.summary.FileWriter('/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard/logs_tensorboard', sess.graph)

            sess.run(tf.global_variables_initializer())
            Saver.restore(sess, '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard/logs_tensorboard/model-ckpt-1')
            for step in range(ITERATION):
                batch_anchor, batch_positive, batch_negative, batch_labels_anchor, batch_labels_positive,\
                batch_labels_negative = dataShufflu.get_triplet(n_labels=30, n_triplet=BATCH_SIZE)

                feed_dict = {
                    train_anchor_data: batch_anchor,
                    train_positive_data: batch_positive,
                    train_negative_data: batch_negative,
                    labels_anchor: batch_labels_anchor,
                    labels_positive: batch_labels_positive,
                    labels_negative: batch_labels_negative
                }

                l, summary, Loss = sess.run([total_loss, merged, loss], feed_dict=feed_dict)

                print("%d the %s train reslut" % (step, datetime.datetime.now()))
                print('the triplet loss %g' % Loss)
                print('the triplet total loss %g' % l)

                train_write.add_summary(summary, step)

                # if step % 100 == 0:
                #     Saver.save(sess, '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard/logs_tensorboard/'
                #                + 'model.ckpt', global_step=step+1)

            train_write.close()
Example No. 6
def main(argv=None):

    with tf.Graph().as_default():

        global_step = tf.Variable(0, trainable=False)
        CENTER_LOSS_ALPHA = 1.25
        BATCH_SIZE = 256
        ITERATION = 2000000
        data_num = 16640
        NUM_CLASSES = 30
        train_anchor_data = tf.placeholder(tf.float32,
                                           shape=[BATCH_SIZE, 128, 128, 3],
                                           name='anchor')

        labels_anchor = tf.placeholder(tf.int32, shape=[BATCH_SIZE])
        tf.summary.image('input_image', train_anchor_data, 10)

        features, prelogits, logits = inception_resnet_v1.inference(
            train_anchor_data,
            keep_probability=0.8,
            phase_train=True,
            bottleneck_layer_size=30,
            weight_decay=0.0005)
        print(features, prelogits, logits)
        with tf.name_scope('loss'):
            with tf.name_scope('center_loss'):
                center_loss, centers, centers_update_op = utils.get_center_loss(
                    features, labels_anchor, CENTER_LOSS_ALPHA, NUM_CLASSES)
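                # get_center_loss (in utils, not shown) presumably maintains one center per class
                # and returns an update op for those centers; that op is attached to the train
                # step via tf.control_dependencies further down.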
            with tf.name_scope('softmax_loss'):
                softmax_loss = tf.reduce_mean(
                    tf.nn.sparse_softmax_cross_entropy_with_logits(
                        labels=labels_anchor, logits=prelogits))
            with tf.name_scope('total_loss'):
                total_loss = softmax_loss + center_loss

        # global_step = tf.Variable(tf.constant(431, dtype=tf.int64))
        lr = tf.train.exponential_decay(learning_rate=0.001,
                                        global_step=global_step,
                                        decay_steps=data_num // BATCH_SIZE,
                                        decay_rate=0.99,
                                        staircase=True)
        with tf.control_dependencies([centers_update_op]):
            train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(
                total_loss, global_step=global_step)
        Saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)

        tf.summary.scalar('total_loss', total_loss)
        tf.summary.scalar('learning_rate', lr)
        tf.summary.scalar('softmax_loss', softmax_loss)
        tf.summary.scalar('center_loss', center_loss)
        merged = tf.summary.merge_all()

        data, labels = data_process.input_data()
        print(data.shape)
        print(labels.shape)
        dataShufflu = DataShuffle(data, labels)
        with tf.Session() as sess:
            train_write = tf.summary.FileWriter(
                '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard4/',
                sess.graph)

            sess.run(tf.global_variables_initializer())
            # sess.run(tf.local_variables_initializer())
            # Saver.restore(sess, '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard2/')

            for step in range(ITERATION):
                batch_anchor, batch_labels_anchor = dataShufflu.get_triplet(
                    n_labels=30, n_triplet=BATCH_SIZE)

                feed_dict = {
                    train_anchor_data: batch_anchor,
                    labels_anchor: batch_labels_anchor
                }

                _, l, summary, Loss = sess.run(
                    [train_op, total_loss, merged, softmax_loss],
                    feed_dict=feed_dict)

                print("%d the %s train reslut" %
                      (step, datetime.datetime.now()))
                print('the softmax loss %g' % Loss)
                print('the total loss %g' % l)

                train_write.add_summary(summary, step)

                if step % 500 == 0:
                    Saver.save(
                        sess,
                        '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard4/'
                    )

            train_write.close()
Example No. 7
def main(argv=None):
    with tf.Graph().as_default() as graph:

        Margin = 0.25
        BATCH_SIZE = 40
        ITERATION = 2000000
        data_num = 16640
        train_anchor_data = tf.placeholder(tf.float32,
                                           shape=[BATCH_SIZE, 224, 224, 3],
                                           name='anchor')
        train_positive_data = tf.placeholder(tf.float32,
                                             shape=[BATCH_SIZE, 224, 224, 3],
                                             name='positive')
        train_negative_data = tf.placeholder(tf.float32,
                                             shape=[BATCH_SIZE, 224, 224, 3],
                                             name='negative')
        # labels_anchor = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        # labels_positive = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        # labels_negative = tf.placeholder(tf.float32, shape=[BATCH_SIZE])
        tf.summary.image('input_image', train_anchor_data, 10)

        train_data = tf.concat(
            [train_anchor_data, train_positive_data, train_negative_data],
            axis=0)

        pre_logits1 = model.NetworkModel(train_data, keep_prob=0.80)
        # pre_logits2 = model.NetworkModel(train_positive_data, keep_prob=0.80, reuse=True)
        # pre_logits3 = model.NetworkModel(train_negative_data, keep_prob=0.80, reuse=True)

        pre_logits1 = tf.nn.l2_normalize(pre_logits1,
                                         1,
                                         1e-10,
                                         name='embeddings')
        # pre_logits2 = tf.nn.l2_normalize(pre_logits2, 1, 1e-10, name='embeddings')
        # pre_logits3 = tf.nn.l2_normalize(pre_logits3, 1, 1e-10, name='embeddings')
        # print(logits.get_shape().as_list())

        vgg_train_anchor, vgg_train_positive, vgg_train_negative = tf.split(
            pre_logits1, 3, axis=0)

        loss, positives, negatives = utils.compute_triplet_loss(
            vgg_train_anchor, vgg_train_positive, vgg_train_negative, Margin)
        regularizer_losses = tf.add_n(tf.get_collection("losses"))
        total_loss = loss + regularizer_losses
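        # The "losses" collection is presumably populated with weight-decay terms inside
        # model.NetworkModel; adding them here folds regularization into the training objective.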
        # print(total_loss)

        # num_batches_per_epoch = int(data_num / BATCH_SIZE * 3)
        # num_steps_per_epoch = num_batches_per_epoch  # Because one step is one batch processed
        # decay_steps = int(2 * num_steps_per_epoch)
        global_step = tf.Variable(tf.constant(0, dtype=tf.int64))
        lr = tf.train.exponential_decay(learning_rate=0.001,
                                        global_step=global_step,
                                        decay_steps=data_num // BATCH_SIZE,
                                        decay_rate=0.96,
                                        staircase=True)

        train_op = tf.train.GradientDescentOptimizer(
            learning_rate=lr).minimize(total_loss, global_step=global_step)

        Saver = tf.train.Saver()

        tf.summary.scalar('total_loss', total_loss)
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('learning_rate', lr)
        tf.summary.scalar('positives', positives)
        tf.summary.scalar('negatives', negatives)
        # tf.summary.scalar('pos', pos)
        # tf.summary.scalar('neg', neg)
        merged = tf.summary.merge_all()

        data, labels = data_process.input_data()
        print(data.shape)
        print(labels.shape)
        dataShufflu = DataShuffle(data, labels)

        with tf.Session() as sess:
            train_write = tf.summary.FileWriter(
                '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard2/',
                sess.graph)

            sess.run(tf.global_variables_initializer())
            # Saver.restore(sess, '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard2/')
            for step in range(ITERATION):
                batch_anchor, batch_positive, batch_negative, batch_labels_anchor, batch_labels_positive,\
                batch_labels_negative = dataShufflu.get_triplet(n_labels=30, n_triplet=BATCH_SIZE)

                feed_dict = {
                    train_anchor_data: batch_anchor,
                    train_positive_data: batch_positive,
                    train_negative_data: batch_negative
                    # labels_anchor: batch_labels_anchor,
                    # labels_positive: batch_labels_positive,
                    # labels_negative: batch_labels_negative
                }

                _, l, summary, Loss = sess.run(
                    [train_op, total_loss, merged, loss], feed_dict=feed_dict)

                print("%d the %s train reslut" %
                      (step, datetime.datetime.now()))
                print('the triplet loss %g' % Loss)
                # print('the triplet total loss %g' % l)

                train_write.add_summary(summary, step)

                if step % 200 == 0:
                    Saver.save(
                        sess,
                        '/home/lenovo/yql/pig_data/triplet_model/logs_tensorboard2/'
                    )

            train_write.close()
Example No. 8
def main(argv=None):

    train_samples = tf.placeholder(tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE * IMAGE_CHANNLES], name='train_sample')
    raw_samples = tf.placeholder(tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE * IMAGE_CHANNLES], name='raw_sample')
    train_labels = tf.placeholder(tf.float32, [None, CLASS_COUNTS], name='train_label')
    raw_labels = tf.placeholder(tf.float32, [None, 2], name='raw_label')
    grad_scale = tf.placeholder(tf.float32, name='grad_scale')
    keep_prob = tf.placeholder(tf.float32, [], name='keep_probability')

    train_feature1 = subnet1(train_samples)

    clas1_train_feature1_logits = classifier1(train_feature1, keep_prob)

    raw_feature1 = subnet1(raw_samples, reuse=True)

    dom1_raw_feature1_logits = Domain1(raw_feature1, grad_scale, keep_prob)
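    # grad_scale is fed as -1.0 in the training loop below, which suggests Domain1 applies a
    # gradient-reversal (DANN-style) domain classifier on the shared features; the exact
    # behavior depends on Domain1's implementation, which is not shown in this excerpt.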

    ## WGAN loss terms for the discriminator model (commented out below)
    # d_var = [var for var in tf.trainable_variables() if var.name.startswith('Discriminator1')]
    # disc_extr1_loss = -1.0 * tf.reduce_mean(disc1_train_feature1_logits)
    # disc_extr2_loss = -1.0 * tf.reduce_mean(disc1_train_feature2_logits)
    # disc_loss = -1.0 * (tf.reduce_mean(disc1_train_feature1_logits) - tf.reduce_mean(disc1_train_feature2_logits))
    # d_clip = [v.assign(tf.clip_by_value(v, -0.01, 0.01)) for v in d_var]

    clas1_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=train_labels, logits=clas1_train_feature1_logits))

    dom1_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=raw_labels, logits=dom1_raw_feature1_logits))

    clas1_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(train_labels, 1),
                                                tf.argmax(clas1_train_feature1_logits, 1)), tf.float32), name='clas1_acc')
    classifier1_loss = clas1_loss + dom1_loss


    classifier1_op = tf.train.AdamOptimizer(0.0002).minimize(classifier1_loss)


    train_data, train_label, t_test, t_label, raw_data, raw_label = data_process.input_data()
    print(np.shape(t_test))
    print(np.shape(t_label))

    Saver = tf.train.Saver()
    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter('./checkpoint/', sess.graph)
        sess.run(tf.global_variables_initializer())
        # Saver.restore(sess, './checkpoint/')
        print('Initialized!')
        for step in range(int(RAW_NUM * EPOCH) // BATCH_SIZE):
            print('begin path classifier train')
            train_offset = (step * BATCH_SIZE) % (TRAIN_NUM - BATCH_SIZE)
            batch_data = train_data[train_offset:(train_offset + BATCH_SIZE), :]
            batch_label = train_label[train_offset:(train_offset + BATCH_SIZE), :]

            raw_offset = (step * BATCH_SIZE) % (RAW_NUM - BATCH_SIZE)
            raw_batch_data = raw_data[raw_offset:(raw_offset + BATCH_SIZE), :]
            raw_batch_label = raw_label[raw_offset:(raw_offset + BATCH_SIZE), :]

            feed_dict = {train_samples: batch_data, train_labels: batch_label,
                         raw_samples: raw_batch_data, raw_labels: raw_batch_label, grad_scale: -1.0, keep_prob: 0.85}

            # sess.run(d_clip)
            _, Clas1_loss = sess.run([classifier1_op, classifier1_loss], feed_dict=feed_dict)

            Clas1_acc = sess.run(clas1_acc, feed_dict=feed_dict)

            print("%d the %s train reslut" % (step, datetime.datetime.now()))
            print('the classifier one loss %g' % Clas1_loss)

            print('the classifier1 accuracy is %g' % Clas1_acc)

            if step % 500 == 0:
                Saver.save(sess, './checkpoint/')
Example No. 9
def train_neural_network(continue_train=False):
    if continue_train is False:  # train from scratch
        global_step = tf.Variable(0)
        global f
        learning_rate = tf.train.exponential_decay(0.0001,
                                                   global_step,
                                                   decay_steps=500,
                                                   decay_rate=0.98,
                                                   staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
        epochs = 10001

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            for epoch in range(epochs):
                global_step = epoch
                epoch_loss = 0
                for _ in range(int(f.train_num / batch_size) + 1):
                    epoch_x, epoch_y = f.get_train_data()
                    _, c = sess.run([optimizer, cost],
                                    feed_dict={
                                        x: epoch_x,
                                        y: epoch_y
                                    })
                    epoch_loss += c
                # print epoch_loss
                print('Epoch', epoch, 'completed out of', epochs, 'loss:',
                      epoch_loss)
                if epoch % 2000 == 0:
                    saver.save(sess, 'model2/dnn_' + str(epoch) + '.ckpt',
                               global_step)
                if epoch % 500 == 0:
                    all_acc = 0
                    all_acc = accuracy.eval(
                        {
                            x: f.get_test_data()[0],
                            y: f.get_test_data()[1]
                        },
                        session=sess)
                    print "all accuracy=" + str(all_acc)
    else:
        with tf.Session() as sess:
            global_step = tf.Variable(2000)

            learning_rate = tf.train.exponential_decay(0.0001,
                                                       global_step,
                                                       decay_steps=1000,
                                                       decay_rate=0.9,
                                                       staircase=True)
            optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
            epochs = 10001

            saver = tf.train.Saver()
            saver.restore(sess, 'model2/dnn_2000.ckpt-2000')  # restore the saved model and continue training
            f = dt.input_data()
            for epoch in range(0, epochs):
                global_step = epoch
                epoch_loss = 0
                for _ in range(int(f.train_num / batch_size) + 1):
                    epoch_x, epoch_y = f.get_train_data()
                    _, c = sess.run([optimizer, cost],
                                    feed_dict={
                                        x: epoch_x,
                                        y: epoch_y
                                    })
                    epoch_loss += c
                    # print epoch_loss
                print('Epoch', epoch, 'completed out of', epochs, 'loss:',
                      epoch_loss)
                if epoch % 1000 == 0:
                    saver.save(sess, 'model2/dnn_' + str(epoch) + '.ckpt',
                               global_step)
                if epoch % 500 == 0:
                    all_acc = 0
                    all_acc = accuracy.eval(
                        {
                            x: f.get_test_data()[0],
                            y: f.get_test_data()[1]
                        },
                        session=sess)
                    print "all accuracy=" + str(all_acc)
Example No. 10
# -*- coding: UTF-8 -*-
import tensorflow as tf
import numpy as np
import data_process as dt
'''
    yxr 4/18/2018
    '''

batch_size = 128
# input data * weights + biases
f = dt.input_data()
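# input_data() appears to return a data-provider object: the training routine in the previous
# example reads f.train_num and calls f.get_train_data() / f.get_test_data() on it. Its exact
# interface lives in data_process and is not shown here.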


def neural_network_model(data, activation, hiddenLayerNodes):  # model body
    # data: the input tensor
    # activation: the activation function
    # hiddenLayerNodes: nodes per layer, e.g. [3, 10, 10]; note the first entry is the input layer, of width 3
    a = data
    z = 0
    for i in range(len(hiddenLayerNodes) - 1):  # feed-forward pass through each hidden layer
        z = tf.add(
            tf.matmul(
                a,
                tf.Variable(
                    tf.random_normal(
                        [hiddenLayerNodes[i], hiddenLayerNodes[i + 1]],
                        mean=0))),
            tf.Variable(tf.random_normal([hiddenLayerNodes[i + 1]])))
        a = activation(z)  # rectified

    softMax_layer = {