Example #1
def main(argv=None):

    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1

    if FLAGS.mode == 'cmc':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')

    images = tf.placeholder(
        tf.float32, [3, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images')

    images_total = tf.placeholder(
        tf.float32, [FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images_total')

    labels = tf.placeholder(tf.float32, [FLAGS.batch_size], name='labels')

    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset_label2.get_num_id(
            FLAGS.data_dir, 'train')
        print('train num id:', tarin_num_id)
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'val')
    #images1, images2,images3 = preprocess(images, is_train)

    # Create the model and an embedding head.
    model = import_module('nets.' + 'mobilenet_v1_1_224')
    head = import_module('heads.' + 'fc1024')

    # Feed the image through the model. The returned `body_prefix` will be used
    # further down to load the pre-trained weights for all variables with this
    # prefix.
    endpoints, body_prefix = model.endpoints(images_total, is_training=True)

    with tf.name_scope('head'):
        endpoints = head.head(endpoints, FLAGS.embedding_dim, is_training=True)

    print 'model_output : ', endpoints['model_output']  # (bt,2048)
    print 'global_pool : ', endpoints['global_pool']  # (bt,2048)
    # Last pointwise conv of MobileNet v1; the analogous ResNet-50 endpoint
    # would be 'resnet_v1_50/block4' (see nets/resnet_v1.py, line 258).
    print 'Conv2d_12_pointwise : ', endpoints['Conv2d_12_pointwise']  # (bt, H/32, W/32, 1024)

    train_mode = tf.placeholder(tf.bool)

    print('Build network')

    feat = endpoints['Conv2d_12_pointwise']  # (bt, H/32, W/32, 1024)

    #feat = tf.convert_to_tensor(feat, dtype=tf.float32)
    # global
    feature = global_pooling(feat, weight_decay)
    #loss_triplet,PP,NN = triplet_hard_loss(feature,FLAGS.ID_num,FLAGS.IMG_PER_ID)
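    # Batch-hard triplet loss (margin 0.3): for each anchor, take the hardest
    # positive and hardest negative within the batch; PP and NN presumably
    # expose those hard-positive/hard-negative distances for logging.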
    loss_triplet, PP, NN = batch_hard_triplet_loss(labels, feature, 0.3)

    loss = loss_triplet * FLAGS.global_rate

    # These are collected here before we add the optimizer, because depending
    # on the optimizer, it might add extra slots, which are also global
    # variables, with the exact same prefix.
    model_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        body_prefix)

    #optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)

    #optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    #train = optimizer.minimize(loss, global_step=global_step)

    # Update_ops are used to update batchnorm stats.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        #train_op = optimizer.minimize(loss_mean, global_step=global_step)

        optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
        train = optimizer.minimize(loss, global_step=global_step)

    lr = FLAGS.learning_rate

    #config=tf.ConfigProto(log_device_placement=True)
    #config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    # GPU
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
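    # Pre-allocate up to 90% of GPU memory instead of growing on demand.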

    with tf.Session(config=config) as sess:

        #print model_variables
        #sess.run(tf.global_variables_initializer())
        #saver = tf.train.Saver()

        #checkpoint_saver = tf.train.Saver(max_to_keep=0)
        checkpoint_saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            print ckpt.model_checkpoint_path
            #saver.restore(sess, ckpt.model_checkpoint_path)
            checkpoint_saver.restore(sess, ckpt.model_checkpoint_path)

        #for first , training load imagenet
        else:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(model_variables)
            print FLAGS.initial_checkpoint
            saver.restore(sess, FLAGS.initial_checkpoint)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):

                batch_images, batch_labels, batch_images_total = cuhk03_dataset_label2.read_data(
                    FLAGS.data_dir, 'train', tarin_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size, FLAGS.ID_num,
                    FLAGS.IMG_PER_ID)

                feed_dict = {
                    learning_rate: lr,
                    is_train: True,
                    train_mode: True,
                    images_total: batch_images_total,
                    labels: batch_labels
                }

                _, train_loss = sess.run([train, loss], feed_dict=feed_dict)

                print('Step: %d, Learning rate: %f, Train loss: %f ' %
                      (i, lr, train_loss))

                gtoloss, gp, gn = sess.run([loss_triplet, PP, NN],
                                           feed_dict=feed_dict)
                print 'global hard: ', gtoloss
                print 'global P: ', gp
                print 'global N: ', gn

                #lr = FLAGS.learning_rate / ((2) ** (i/160000)) * 0.1
                lr = FLAGS.learning_rate * ((0.0001 * i + 1)**-0.75)
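                # Inverse-power decay: (0.0001*i + 1)**-0.75 equals 1.0 at i=0
                # and 2**-0.75 ~= 0.59 at i=10000; a smooth decay, no step drops.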
                if i % 100 == 0:
                    #saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)

                    checkpoint_saver.save(sess, FLAGS.logs_dir + 'model.ckpt',
                                          i)

        elif FLAGS.mode == 'val':
            total = 0.
            for _ in xrange(10):
                batch_images, batch_labels = cuhk03_dataset_label2.read_data(
                    FLAGS.data_dir, 'val', val_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {
                    images: batch_images,
                    labels: batch_labels,
                    is_train: False
                }
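                # NOTE: `inference` (a softmax classifier output) is never
                # defined in this example; the val branch assumes such a head
                # exists in the graph.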
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in xrange(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))
            '''
            for i in xrange(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''

        elif FLAGS.mode == 'cmc':
            do_times = 1
            cmc_sum = np.zeros((100, 100), dtype='f')
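            # CMC evaluation: entry [j, i] scores probe identity j against
            # gallery identity i; cmc.cmc() presumably converts the 100x100
            # score matrix into rank-k match rates.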
            for times in xrange(do_times):
                path = 'data'
                set = 'train'

                cmc_array = np.ones((100, 100), dtype='f')

                batch_images = []
                batch_labels = []
                index_gallery_array = np.ones((1, 100), dtype='f')
                gallery_bool = True
                probe_bool = True
                for j in xrange(100):
                    id_probe = j
                    for i in xrange(100):
                        batch_images = []
                        batch_labels = []
                        filepath = ''

                        #filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)
                        #filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, id_probe, index_probe)

                        if gallery_bool == True:
                            while True:
                                index_gallery = int(random.random() * 10)
                                index_gallery_array[0, i] = index_gallery

                                filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (
                                    path, set, i, index_gallery)
                                if not os.path.exists(filepath_gallery):
                                    continue
                                break
                        if i == 99:
                            gallery_bool = False
                        if gallery_bool == False:
                            index_gallery = index_gallery_array[0, i]
                            filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (
                                path, set, i, index_gallery)

                        if probe_bool == True:
                            while True:
                                index_probe = int(random.random() * 10)
                                filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (
                                    path, set, id_probe, index_probe)
                                if not os.path.exists(filepath_probe):
                                    continue
                                if index_gallery_array[
                                        0, id_probe] == index_probe:
                                    continue
                                probe_bool = False
                                break
                        if i == 99:
                            probe_bool = True
                        '''
                              while True:
                                    index_probe = int(random.random() * 10)
                                    filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, id_probe, index_probe)
                                    if not os.path.exists(filepath_gallery):
                                        continue
                                    if index_gallery_array[1,id_probe] == index_probe:
                                        continue
                                    break
                              '''

                        #filepath_gallery = 'data/labeled/val/0000_01.jpg'
                        #filepath_probe   = 'data/labeled/val/0000_02.jpg'

                        image1 = cv2.imread(filepath_gallery)
                        image1 = cv2.resize(image1,
                                            (IMAGE_WIDTH, IMAGE_HEIGHT))
                        image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
                        image1 = np.reshape(
                            image1,
                            (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)

                        image2 = cv2.imread(filepath_probe)
                        image2 = cv2.resize(image2,
                                            (IMAGE_WIDTH, IMAGE_HEIGHT))
                        image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
                        image2 = np.reshape(
                            image2,
                            (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)

                        test_images = np.array([image1, image2, image2])

                        #print (filepath_gallery)
                        #print (filepath_probe)
                        #print ('1111111111111111111111')

                        if i == j:
                            batch_labels = [1., 0.]
                        if i != j:
                            batch_labels = [0., 1.]
                        batch_labels = np.array(batch_labels)
                        print('test  img :', test_images.shape)

                        feed_dict = {images: test_images, is_train: False}
                        prediction = sess.run(DD, feed_dict=feed_dict)
                        #print (prediction, prediction[0][1])

                        print(filepath_gallery, filepath_probe)

                        #print(bool(not np.argmax(prediction[0])))
                        print(prediction)

                        cmc_array[j, i] = prediction

                        #print(i,j)

                        #prediction = sess.run(inference, feed_dict=feed_dict)
                        #prediction = np.argmax(prediction, axis=1)
                        #label = np.argmax(batch_labels, axis=1)

                cmc_score = cmc.cmc(cmc_array)
                cmc_sum = cmc_score + cmc_sum
                print(cmc_score)
            cmc_sum = cmc_sum / do_times
            print(cmc_sum)
            print('final cmc')

        elif FLAGS.mode == 'test':
            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(
                image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            image2 = np.reshape(
                image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            test_images = np.array([image1, image2, image2])

            feed_dict = {images: test_images, is_train: False}
            #prediction, prediction2 = sess.run([DD,DD2], feed_dict=feed_dict)
            prediction = sess.run([inference], feed_dict=feed_dict)
            prediction = np.array(prediction)
            print prediction.shape
            print(np.argmax(prediction[0]) + 1)
Example #2
def main(argv=None):
    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1

    if FLAGS.mode == 'cmc':
        FLAGS.batch_size = 1

    if FLAGS.mode == 'top1':
        FLAGS.batch_size = 100

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')

    images = tf.placeholder(
        tf.float32, [3, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images')

    images_total = tf.placeholder(
        tf.float32, [FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images_total')

    images_one = tf.placeholder(tf.float32, [1, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
                                name='images_one')

    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset_label2.get_num_id(
            FLAGS.data_dir, 'train')
        print('train num id:', tarin_num_id)
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'val')

    images1, images2, images3 = preprocess(images, is_train)
    img_combine = tf.concat([images1, images2, images3], 0)

    train_mode = tf.placeholder(tf.bool)

    # Create the model and an embedding head.
    model = import_module('nets.' + 'resnet_v1_50')
    head = import_module('heads.' + 'fc1024')

    # Feed the image through the model. The returned `body_prefix` will be used
    # further down to load the pre-trained weights for all variables with this
    # prefix.
    endpoints, body_prefix = model.endpoints(images_total, is_training=False)

    feat = endpoints['resnet_v1_50/block4']  # (bt,7,7,2048)

    print('Build network')

    # global
    anchor_feature = global_pooling(feat, weight_decay)

    # The train branch below uses `labels`, `loss`, `PP`, `NN`, `train` and
    # `model_variables`; they are reconstructed here following Example #1.
    labels = tf.placeholder(tf.float32, [FLAGS.batch_size], name='labels')
    loss_triplet, PP, NN = batch_hard_triplet_loss(labels, anchor_feature, 0.3)
    loss = loss_triplet * FLAGS.global_rate

    model_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        body_prefix)

    # Update_ops are used to update batchnorm stats.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
        train = optimizer.minimize(loss, global_step=global_step)

    lr = FLAGS.learning_rate

    #config=tf.ConfigProto(log_device_placement=True)
    #config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    # GPU
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.55

    with tf.Session(config=config) as sess:
        checkpoint_saver = tf.train.Saver(max_to_keep=0)

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            print ckpt.model_checkpoint_path
            #saver.restore(sess, ckpt.model_checkpoint_path)
            checkpoint_saver.restore(sess, ckpt.model_checkpoint_path)

        #for first , training load imagenet
        else:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(model_variables)
            print FLAGS.initial_checkpoint
            saver.restore(sess, FLAGS.initial_checkpoint)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):

                batch_images, batch_labels, batch_images_total = cuhk03_dataset_label2.read_data(
                    FLAGS.data_dir, 'train', tarin_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size, FLAGS.ID_num,
                    FLAGS.IMG_PER_ID)

                feed_dict = {
                    learning_rate: lr,
                    is_train: True,
                    train_mode: True,
                    images_total: batch_images_total,
                    labels: batch_labels
                }

                _, train_loss = sess.run([train, loss], feed_dict=feed_dict)

                print('Step: %d, Learning rate: %f, Train loss: %f ' %
                      (i, lr, train_loss))

                h, p, l = sess.run([NN, PP, loss], feed_dict=feed_dict)
                print 'n:', h
                print 'p:', p
                print 'hard loss', l

                lr = FLAGS.learning_rate * ((0.0001 * i + 1)**-0.75)
                if i % 100 == 0:
                    checkpoint_saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)

        elif FLAGS.mode == 'top1':
            path = 'data_eye'
            set = 'val'
            cmc_sum = np.zeros((100, 100), dtype='f')

            cmc_total = []
            do_times = 20
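            # Average the CMC score over 20 random gallery/probe draws to
            # reduce sampling noise.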

            for times in xrange(do_times):
                query_feature = []
                test_feature = []

                for i in range(100):
                    while True:
                        index_gallery = int(random.random() * 10)
                        index_temp = index_gallery
                        filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (
                            path, set, i, index_gallery)
                        if not os.path.exists(filepath_gallery):
                            continue
                        break
                    image1 = cv2.imread(filepath_gallery)
                    image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
                    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
                    query_feature.append(image1)

                    while True:
                        index_gallery = int(random.random() * 10)
                        if index_temp == index_gallery:
                            continue

                        filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (
                            path, set, i, index_gallery)
                        if not os.path.exists(filepath_gallery):
                            continue
                        break
                    image1 = cv2.imread(filepath_gallery)
                    image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
                    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
                    test_feature.append(image1)
                    #print filepath_gallery,'\n'
                query_feature = np.array(query_feature)
                test_feature = np.array(test_feature)

                feed_dict = {images_total: query_feature, is_train: False}
                q_feat = sess.run(anchor_feature, feed_dict=feed_dict)

                feed_dict = {images_total: test_feature, is_train: False}
                test_feat = sess.run(anchor_feature, feed_dict=feed_dict)

                cmc_array = []
                tf_q_feat = tf.constant(q_feat)
                tf_test_feat = tf.constant(test_feat)

                h = tf.placeholder(tf.int32)
                pick = tf_q_feat[h]
                tf_q_feat = tf.reshape(pick, [1, 2048])
                feat1 = tf.tile(tf_q_feat, [100, 1])
                # L2 distance between the picked query feature and every one of
                # the 100 gallery features; axis 1 sums over the 2048 dims.
                f = tf.square(tf.subtract(feat1, tf_test_feat))
                d = tf.sqrt(tf.reduce_sum(f, 1))
                for t in range(100):

                    feed_dict = {h: t}
                    D = sess.run(d, feed_dict=feed_dict)
                    cmc_array.append(D)
                cmc_array = np.array(cmc_array)
                cmc_score = cmc.cmc(cmc_array)
                cmc_sum = cmc_score + cmc_sum
                cmc_total.append(cmc_score)
                #top1=single_query(q_feat,test_feat,labels,labels,test_num=10)
                print cmc_score
            cmc_sum = cmc_sum / do_times
            print(cmc_sum)
            print('final cmc')
            print('\n')
            print cmc_total
Example #3
def main(argv=None):
    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1
    
    if FLAGS.mode == 'cmc':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    
    images = tf.placeholder(tf.float32, [3, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')
    
    images_total = tf.placeholder(tf.float32, [FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images_total')
    
    labels = tf.placeholder(tf.float32, [FLAGS.batch_size], name='labels')

    label_var = tf.placeholder(tf.int64, (None,))

    is_train = tf.placeholder(tf.bool, name='is_train')
    #global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'train')
        print('train num id:', tarin_num_id)
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'val')
    #images1, images2,images3 = preprocess(images, is_train)

    # Data augmentation: random horizontal flip and brightness jitter, applied
    # per image in the batch.
    img_temp = []
    for t in range(FLAGS.batch_size):
        images_total_ex = tf.image.random_flip_left_right(images_total[t])
        images_total_ex = tf.image.random_brightness(images_total_ex, max_delta=32. / 255.)
        img_temp.append(images_total_ex)
    # Keep the augmented batch under its own name; rebinding `images_total`
    # would let feed_dict feed the augmented tensor directly and silently
    # bypass the augmentation ops.
    images_aug = tf.stack(img_temp)

    network_factory = net.create_network_factory(
        is_training=True, num_classes=1500 + 1,
        add_logits="cosine-softmax")

    feature_var, logit_var = network_factory(images_aug)
    _create_loss(feature_var, logit_var, label_var, mode="cosine-softmax")
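    # _create_loss registers its terms in the tf.losses collection, which is
    # why tf.losses.get_total_loss() below picks them up; the cosine-softmax
    # head presumably classifies L2-normalized features by scaled cosine
    # similarity.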

    '''
    trainable_scopes=None
    if trainable_scopes is None:
        variables_to_train = tf.trainable_variables()
    else:
        variables_to_train = []
        for scope in trainable_scopes:
            variables = tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES, scope)
            variables_to_train.extend(variables)
    '''

    global_step = tf.train.get_or_create_global_step()

    loss_var = tf.losses.get_total_loss()
    '''
    train_op = slim.learning.create_train_op(
        loss_var, tf.train.AdamOptimizer(learning_rate=learning_rate),
        global_step, summarize_gradients=False,
        variables_to_train=variables_to_train)
    '''

    tf.summary.scalar("total_loss", loss_var)
    tf.summary.scalar("learning_rate", learning_rate)

    regularization_var = tf.reduce_sum(tf.losses.get_regularization_loss())
    tf.summary.scalar("weight_loss", regularization_var)

    #optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    #train = optimizer.minimize(loss, global_step=global_step)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    train = optimizer.minimize(loss_var, global_step=global_step)
    lr = FLAGS.learning_rate

    #config=tf.ConfigProto(log_device_placement=True)
    #config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) 
    # GPU
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.4
    
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            print ckpt.model_checkpoint_path
            saver.restore(sess, ckpt.model_checkpoint_path)


        
        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):

                batch_images, batch_labels, batch_images_total = cuhk03_dataset_label2.read_data(FLAGS.data_dir, 'train', tarin_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size, FLAGS.ID_num, FLAGS.IMG_PER_ID)

                feed_dict = {learning_rate: lr, is_train: True, images_total: batch_images_total, labels: batch_labels, label_var: batch_labels}

                start_time = time.time()
                _, train_loss = sess.run([train, loss_var], feed_dict=feed_dict)
                duration = time.time() - start_time
                train_loss = np.array(train_loss)
                print 'duration:', duration
                print('Step: %d, Learning rate: %f, Train loss: %f ' % (i, lr, train_loss))

                #lr = FLAGS.learning_rate * ((0.1) ** (i/160000))
                lr = FLAGS.learning_rate * ((0.0001 * i + 1) ** -0.75)
                if i % 1000 == 0:
                    saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)

        elif FLAGS.mode == 'val':
            total = 0.
            for _ in xrange(10):
                batch_images, batch_labels = cuhk03_dataset_label2.read_data(FLAGS.data_dir, 'val', val_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {images: batch_images, labels: batch_labels, is_train: False}
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in xrange(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))

            '''
            for i in xrange(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''
   
        
        
        elif FLAGS.mode == 'test':
            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            test_images = np.array([image1, image2,image2])

            feed_dict = {images: test_images, is_train: False}
            #prediction, prediction2 = sess.run([DD,DD2], feed_dict=feed_dict)
            prediction = sess.run([inference], feed_dict=feed_dict)
            prediction = np.array(prediction)
            print prediction.shape
            print( np.argmax(prediction[0])+1)
Example #4
def main(argv=None):

    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1
    
    if FLAGS.mode == 'cmc':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    #images = tf.placeholder(tf.float32, [2, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')
    images = tf.placeholder(tf.float32, [3, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')
    
    images_total = tf.placeholder(tf.float32, [FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images_total')
    
    labels = tf.placeholder(tf.float32, [FLAGS.batch_size], name='labels')
    #labels_neg = tf.placeholder(tf.float32, [FLAGS.batch_size, 743], name='labels')
    
    #total
    #labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 847], name='labels')
    #labels_neg = tf.placeholder(tf.float32, [FLAGS.batch_size, 847], name='labels')
    
    #eye
    #labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 104], name='labels')
    #labels_neg = tf.placeholder(tf.float32, [FLAGS.batch_size, 104], name='labels')

    
    
    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'train')
        print('train num id:', tarin_num_id)
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'val')

    
    # Create the model and an embedding head.
    model = import_module('nets.' + 'resnet_v1_50')
    head = import_module('heads.' + 'fc1024')
    
    
    # Feed the image through the model. The returned `body_prefix` will be used
    # further down to load the pre-trained weights for all variables with this
    # prefix.
    endpoints, body_prefix = model.endpoints(images_total, is_training=True)

    with tf.name_scope('head'):
        endpoints = head.head(endpoints, FLAGS.embedding_dim, is_training=True)
    
    
    '''
    print endpoints['model_output'] # (bt,2048)
    print endpoints['global_pool'] # (bt,2048)
    print endpoints['resnet_v1_50/block4']# (bt,7,7,2048)
    '''

    # Create the model and an embedding head.
    model2 = import_module('nets.' + 'resnet_v1_101')
    endpoints2, body_prefix2 = model2.endpoints(images_total, is_training=True)
       
    train_mode = tf.placeholder(tf.bool)

    print('Build network')
    
    feat = endpoints['resnet_v1_50/block4']# (bt,7,7,2048)
    
    feat2 = endpoints2['resnet_v1_101/block4']# (bt,7,7,2048)

    #feat = tf.convert_to_tensor(feat, dtype=tf.float32)
    # global
    feature,feature2 = global_pooling(feat,feat2,weight_decay)
    loss_triplet ,PP,NN = batch_hard_triplet_loss(labels,feature,0.3)
    
    
    _,dis_matrix1 = triplet_hard_loss(feature,FLAGS.ID_num,FLAGS.IMG_PER_ID)
    _,dis_matrix2 = triplet_hard_loss(feature2,FLAGS.ID_num,FLAGS.IMG_PER_ID)
    mul_loss = multual_loss(dis_matrix1,dis_matrix2)
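    # Mutual learning between the two backbones: multual_loss presumably
    # penalizes disagreement between the ResNet-50 and ResNet-101 pairwise
    # distance matrices, so each network distills from the other.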

    
    
    local_anchor_feature, local_anchor_feature2 = local_pooling(feat,feat2,weight_decay)
    local_loss_triplet ,local_pos_loss, local_neg_loss = local_triplet_hard_loss(local_anchor_feature,FLAGS.ID_num,FLAGS.IMG_PER_ID)

    loss_triplet2, PP2, NN2 = batch_hard_triplet_loss(labels, feature2, 0.3)
    local_loss_triplet2, local_pos_loss2, local_neg_loss2 = local_triplet_hard_loss(
        local_anchor_feature2, FLAGS.ID_num, FLAGS.IMG_PER_ID)
    s1 = fully_connected_class(feature,feature_dim=2048,num_classes=1000)#tarin_num_id
    cross_entropy_var = slim.losses.sparse_softmax_cross_entropy(s1, tf.cast(labels, tf.int64))
    loss_softmax = cross_entropy_var
    #loss_softmax = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_softmax, logits=s1))
    inference = tf.nn.softmax(s1)
    
    s2 = fully_connected_class2(feature2,feature_dim=2048,num_classes=1000)
    cross_entropy_var2 = slim.losses.sparse_softmax_cross_entropy(s2, tf.cast(labels, tf.int64))
    loss_softmax2 = cross_entropy_var2
    
    #loss_softmax2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_softmax, logits=s2))
    inference2 = tf.nn.softmax(s2)
    
    multual_softmax1 = kl_loss_compute(s1, s2)
    multual_softmax2 = kl_loss_compute(s2, s1)
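    # KL divergence in both directions keeps each classifier's softmax close
    # to the other's, in the style of Deep Mutual Learning; each direction is
    # added to its own network's total loss below.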

    P1 = tf.reduce_mean(PP)
    P2 = tf.reduce_mean(PP2)
    N1 = tf.reduce_mean(NN)
    N2 = tf.reduce_mean(NN2)

    LP1 = tf.reduce_mean(local_pos_loss)
    LN1 = tf.reduce_mean(local_neg_loss)
    
    
    
    '''
    
    # global
    feature2 = global_pooling(feat2,weight_decay)
    #loss_triplet,PP,NN = triplet_hard_loss(feature,FLAGS.ID_num,FLAGS.IMG_PER_ID)
    loss_triplet2 ,PP2,NN2 = batch_hard_triplet_loss(labels,feature2,0.3)

    
    #local
    local_anchor_feature2 = local_pooling(feat2,weight_decay)
    local_loss_triplet2 ,local_pos_loss2, local_neg_loss2 = local_triplet_hard_loss(local_anchor_feature2,FLAGS.ID_num,FLAGS.IMG_PER_ID)
    '''
    
    

    loss = local_loss_triplet*FLAGS.local_rate + loss_triplet*FLAGS.global_rate + mul_loss + loss_softmax + multual_softmax1
   
    #DD = compute_euclidean_distance(anchor_feature,positive_feature)
    loss2 = local_loss_triplet2*FLAGS.local_rate + loss_triplet2*FLAGS.global_rate + mul_loss + loss_softmax2 + multual_softmax2
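    # Each network optimizes its own weighted sum: local triplet + global
    # triplet + shared distance-matrix mutual loss + softmax + cross-network KL.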
    

    
    
    if FLAGS.mode == 'val' or FLAGS.mode == 'cmc' or FLAGS.mode == 'test':
        # anchor_feature, positive_feature and negative_feature are never
        # defined in this script, so this branch cannot run as written:
        #loss, pos_loss, neg_loss = triplet_loss(anchor_feature, positive_feature, negative_feature, 0.3)
        print ' ERROR                 ERROR '

    
    
    
    
    # These are collected here before we add the optimizer, because depending
    # on the optimizer, it might add extra slots, which are also global
    # variables, with the exact same prefix.
    model_variables = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, body_prefix)

    model_variables2 = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, body_prefix2)
    
      
    
    # Update_ops are used to update batchnorm stats.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
        train = optimizer.minimize(loss, global_step=global_step)

        optimizer2 = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
        train2 = optimizer2.minimize(loss2, global_step=global_step)

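    # Note: both minimize() calls pass global_step, so it advances twice per
    # training iteration.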

    tf.summary.scalar("total_loss 1", loss)
    tf.summary.scalar("total_loss 2", loss2)
    tf.summary.scalar("learning_rate", learning_rate)

    regularization_var = tf.reduce_sum(tf.losses.get_regularization_loss())
    tf.summary.scalar("weight_loss", regularization_var)
    


    lr = FLAGS.learning_rate

    #config=tf.ConfigProto(log_device_placement=True)
    #config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) 
    # GPU
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.95

    with tf.Session(config=config) as sess:
        
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter("TensorBoard_1x1_a_1x7/", graph = sess.graph)

        #sess.run(tf.global_variables_initializer())
        #saver = tf.train.Saver()
        
        #checkpoint_saver = tf.train.Saver(max_to_keep=0)
        checkpoint_saver = tf.train.Saver()


        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            print ckpt.model_checkpoint_path
            #saver.restore(sess, ckpt.model_checkpoint_path)
            checkpoint_saver.restore(sess, ckpt.model_checkpoint_path)
                    
        #for first , training load imagenet
        else:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(model_variables)
            print FLAGS.initial_checkpoint
            saver.restore(sess, FLAGS.initial_checkpoint)
            
            
            saver2 = tf.train.Saver(model_variables2)
            print FLAGS.initial_checkpoint2
            saver2.restore(sess, FLAGS.initial_checkpoint2)
   
            
            
        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):

                batch_images, batch_labels, batch_images_total = cuhk03_dataset_label2.read_data(FLAGS.data_dir, 'train', tarin_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size,FLAGS.ID_num,FLAGS.IMG_PER_ID)
                
                #feed_dict = {learning_rate: lr,  is_train: True , labels: batch_labels, droup_is_training: False, train_mode: True, images_total: batch_images_total} #no label   images: batch_images,
              
                feed_dict = {learning_rate: lr,  is_train: True , train_mode: True, images_total: batch_images_total, labels: batch_labels}

                                              
                start = time.time()
                                
                _,_,train_loss,train_loss2 = sess.run([train,train2,loss,loss2 ], feed_dict=feed_dict) 
                    
                print('Step: %d, Learning rate: %f, Train loss: %f , Train loss2: %f' % (i, lr, train_loss,train_loss2))
                
                gtoloss,gp,gn = sess.run([loss_triplet,P1,N1], feed_dict=feed_dict)   
                print 'global hard: ',gtoloss
                print 'global P1: ',gp
                print 'global N1: ',gn
                             
                toloss,p,n = sess.run([local_loss_triplet,LP1,LN1], feed_dict=feed_dict)   
                print 'local hard: ',toloss
                print 'local P: ',p
                print 'local N: ',n
                                
                mul,p2,n2 = sess.run([mul_loss,loss_triplet2,local_loss_triplet2], feed_dict=feed_dict)   
                print 'mul loss: ',mul
                print 'loss_triplet2: ',p2
                print 'local_loss_triplet2: ',n2                               
                
                end = time.time()
                elapsed = end - start
                print "Time taken: ", elapsed, "seconds."
                                
               
                #lr = FLAGS.learning_rate / ((2) ** (i/160000)) * 0.1
                lr = FLAGS.learning_rate * ((0.0001 * i + 1) ** -0.75)
                if i % 100 == 0:
               
                    checkpoint_saver.save(sess,FLAGS.logs_dir + 'model.ckpt', i)
                
                if i % 20 == 0:
                    result = sess.run(merged, feed_dict=feed_dict)
                    writer.add_summary(result, i)
                
                
        

        elif FLAGS.mode == 'val':
            total = 0.
            for _ in xrange(10):
                batch_images, batch_labels = cuhk03_dataset_label2.read_data(FLAGS.data_dir, 'val', val_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {images: batch_images, labels: batch_labels, is_train: False}
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in xrange(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))

            '''
            for i in xrange(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''

        
        elif FLAGS.mode == 'cmc':    
          do_times = 1
          cmc_sum=np.zeros((100, 100), dtype='f')
          for times in xrange(do_times):  
              path = 'data' 
              set = 'val'
              
              cmc_array=np.ones((100, 100), dtype='f')
              
              batch_images = []
              batch_labels = []
              index_gallery_array=np.ones((1, 100), dtype='f')
              gallery_bool = True
              probe_bool = True
              for j in xrange(100):
                      id_probe = j
                      for i in xrange(100):
                              batch_images = []
                              batch_labels = []
                              filepath = ''
                              
                              #filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)
                              #filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, id_probe, index_probe)                          
                              
                              if gallery_bool == True:
                                    while True:
                                          index_gallery = int(random.random() * 10)
                                          index_gallery_array[0,i] = index_gallery
  
                                          filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)
                                          if not os.path.exists(filepath_gallery):
                                              continue
                                          break
                              if i ==99:
                                  gallery_bool = False
                              if gallery_bool == False:
                                          index_gallery = index_gallery_array[0,i]
                                          filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)
                              
                              
                              
                              if probe_bool == True:
                                    while True:
                                          index_probe = int(random.random() * 10)
                                          filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, id_probe, index_probe)
                                          if not os.path.exists(filepath_probe):
                                              continue
                                          if index_gallery_array[0,id_probe] == index_probe:
                                              continue
                                          probe_bool = False
                                          break
                              if i ==99:
                                  probe_bool = True
                              
                              
                              '''
                              while True:
                                    index_probe = int(random.random() * 10)
                                    filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, id_probe, index_probe)
                                    if not os.path.exists(filepath_gallery):
                                        continue
                                    if index_gallery_array[1,id_probe] == index_probe:
                                        continue
                                    break
                              '''
                              
                              #filepath_gallery = 'data/labeled/val/0000_01.jpg'
                              #filepath_probe   = 'data/labeled/val/0000_02.jpg'
                                                                          
                              image1 = cv2.imread(filepath_gallery)
                              image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
                              image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
                              image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
                              
                              image2 = cv2.imread(filepath_probe)
                              image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
                              image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
                              image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
                              
                              test_images = np.array([image1, image2, image2])
                              
                              #print (filepath_gallery)
                              #print (filepath_probe)
                              #print ('1111111111111111111111')
          
                              if i == j:
                                  batch_labels = [1., 0.]
                              if i != j:    
                                  batch_labels = [0., 1.]
                              batch_labels = np.array(batch_labels)
                              print('test  img :',test_images.shape)
                              
                              feed_dict = {images: test_images, is_train: False}
                              prediction = sess.run(DD, feed_dict=feed_dict)
                              #print (prediction, prediction[0][1])
                              
                              print (filepath_gallery,filepath_probe)
                              
                              #print(bool(not np.argmax(prediction[0])))
                              print (prediction)
                              
                              cmc_array[j,i] = prediction
                              
                              #print(i,j)
                             
                              
                              #prediction = sess.run(inference, feed_dict=feed_dict)
                              #prediction = np.argmax(prediction, axis=1)
                              #label = np.argmax(batch_labels, axis=1)
                              
  
              
              cmc_score = cmc.cmc(cmc_array)
              cmc_sum = cmc_score + cmc_sum
              print(cmc_score)
          cmc_sum = cmc_sum/do_times
          print(cmc_sum)
          print('final cmc') 
        
        
        
        elif FLAGS.mode == 'test':
            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            test_images = np.array([image1, image2,image2])

            feed_dict = {images: test_images, is_train: False}
            #prediction, prediction2 = sess.run([DD,DD2], feed_dict=feed_dict)
            prediction = sess.run([inference], feed_dict=feed_dict)
            prediction = np.array(prediction)
            print prediction.shape
            print( np.argmax(prediction[0])+1)
Example #5
def main(argv=None):
  
    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1
    
    if FLAGS.mode == 'cmc':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    images = tf.placeholder(tf.float32, [3, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')
    
    images_total = tf.placeholder(tf.float32, [FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images_total')
    
    labels = tf.placeholder(tf.float32, [FLAGS.batch_size], name='labels')

    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'train')
        print('train num id:', tarin_num_id)
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'val')

    # Create the model and an embedding head.
    model = import_module('nets.' + 'resnet_v1_50')
    head = import_module('heads.' + 'fc1024')
    
    
    # Feed the image through the model. The returned `body_prefix` will be used
    # further down to load the pre-trained weights for all variables with this
    # prefix.
    endpoints, body_prefix = model.endpoints(images_total, is_training=True)

    with tf.name_scope('head'):
        endpoints = head.head(endpoints, FLAGS.embedding_dim, is_training=True)

    '''
    print endpoints['model_output'] # (bt,2048)
    print endpoints['global_pool'] # (bt,2048)
    print endpoints['resnet_v1_50/block4']# (bt,7,7,2048)
    '''

    train_mode = tf.placeholder(tf.bool)


    print('Build network')
    
    feat = endpoints['resnet_v1_50/block4']  # (bt,7,7,2048)

    #feat = tf.convert_to_tensor(feat, dtype=tf.float32)

    feat_1x1 = tf.layers.conv2d(feat, 2048, [1, 1], padding='valid',
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            reuse=None, name='conv1x1')

    feature = part_attend(feat_1x1, weight_decay)

    #loss_triplet,PP,NN = triplet_hard_loss(feature,FLAGS.ID_num,FLAGS.IMG_PER_ID)
    loss_triplet, PP, NN = batch_hard_triplet_loss(labels, feature, 0.3)

    loss = loss_triplet * FLAGS.global_rate
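    # part_attend is assumed to pool the 1x1-re-embedded map into a single
    # part-attentive embedding; only the global-rate-weighted batch-hard
    # triplet loss is trained in this variant.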
    

    
    
    
    # These are collected here before we add the optimizer, because depending
    # on the optimizer, it might add extra slots, which are also global
    # variables, with the exact same prefix.
    model_variables = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, body_prefix)

    #optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)

    #optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    #train = optimizer.minimize(loss, global_step=global_step)
    # Update_ops are used to update batchnorm stats.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        #train_op = optimizer.minimize(loss_mean, global_step=global_step)
        optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
        train = optimizer.minimize(loss, global_step=global_step)
    lr = FLAGS.learning_rate

    #config=tf.ConfigProto(log_device_placement=True)
    #config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) 
    # GPU
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    
    with tf.Session(config=config) as sess:

        #print model_variables
        #sess.run(tf.global_variables_initializer())
        #saver = tf.train.Saver()

        #checkpoint_saver = tf.train.Saver(max_to_keep=0)
        checkpoint_saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            print ckpt.model_checkpoint_path
            #saver.restore(sess, ckpt.model_checkpoint_path)
            checkpoint_saver.restore(sess, ckpt.model_checkpoint_path)
                    
        #for first , training load imagenet
        else:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(model_variables)
            print FLAGS.initial_checkpoint
            saver.restore(sess, FLAGS.initial_checkpoint)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):

                batch_images, batch_labels, batch_images_total = cuhk03_dataset_label2.read_data(FLAGS.data_dir, 'train', tarin_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size, FLAGS.ID_num, FLAGS.IMG_PER_ID)

                feed_dict = {learning_rate: lr, is_train: True, train_mode: True, images_total: batch_images_total, labels: batch_labels}

                _, train_loss = sess.run([train, loss], feed_dict=feed_dict)
                    
                print('Step: %d, Learning rate: %f, Train loss: %f ' % (i, lr, train_loss))
                
                gtoloss,gp,gn = sess.run([loss_triplet,PP,NN], feed_dict=feed_dict)   
                print 'global hard: ',gtoloss
                print 'global P: ',gp
                print 'global N: ', gn

                #lr = FLAGS.learning_rate / ((2) ** (i/160000)) * 0.1
                lr = FLAGS.learning_rate * ((0.0001 * i + 1) ** -0.75)
                if i % 100 == 0:
                    #saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)
                    checkpoint_saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)

        elif FLAGS.mode == 'val':
            total = 0.
            for _ in xrange(10):
                batch_images, batch_labels = cuhk03_dataset_label2.read_data(FLAGS.data_dir, 'val', val_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {images: batch_images, labels: batch_labels, is_train: False}
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in xrange(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))

            '''
            for i in xrange(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''

        
        elif FLAGS.mode == 'test':
            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            test_images = np.array([image1, image2,image2])

            feed_dict = {images: test_images, is_train: False}
            #prediction, prediction2 = sess.run([DD,DD2], feed_dict=feed_dict)
            prediction = sess.run([inference], feed_dict=feed_dict)
            prediction = np.array(prediction)
            print prediction.shape
            print( np.argmax(prediction[0])+1)
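
A minimal NumPy sketch of the batch-hard mining rule behind
batch_hard_triplet_loss (for each anchor, take the hardest positive and the
hardest negative in the batch, as in Hermans et al.'s triplet-loss setup).
The function below is illustrative, not the helper used above:

import numpy as np

def batch_hard_triplet_loss_np(labels, embeddings, margin=0.3):
    # Pairwise Euclidean distances, shape (B, B).
    diff = embeddings[:, None, :] - embeddings[None, :, :]
    dist = np.sqrt((diff ** 2).sum(-1) + 1e-12)
    same = labels[:, None] == labels[None, :]
    # Hardest positive: farthest same-identity sample (self sits at distance ~0).
    hardest_pos = np.where(same, dist, 0.0).max(axis=1)
    # Hardest negative: nearest different-identity sample.
    hardest_neg = np.where(same, np.inf, dist).min(axis=1)
    return np.maximum(hardest_pos - hardest_neg + margin, 0.0).mean()
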
Example #6
def main(argv=None):

    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1

    if FLAGS.mode == 'cmc':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    #images = tf.placeholder(tf.float32, [2, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')
    images = tf.placeholder(
        tf.float32, [3, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images')

    images_total = tf.placeholder(
        tf.float32, [FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images_total')

    labels = tf.placeholder(tf.float32, [FLAGS.batch_size], name='labels')
    #labels_neg = tf.placeholder(tf.float32, [FLAGS.batch_size, 743], name='labels')

    #total
    #labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 847], name='labels')
    #labels_neg = tf.placeholder(tf.float32, [FLAGS.batch_size, 847], name='labels')

    #eye
    #labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 104], name='labels')
    #labels_neg = tf.placeholder(tf.float32, [FLAGS.batch_size, 104], name='labels')

    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset_label2.get_num_id(
            FLAGS.data_dir, 'train')
        print(tarin_num_id, 'train identities')
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'val')

    # Create the model and an embedding head.
    model = import_module('nets.' + 'resnet_v1_50')
    head = import_module('heads.' + 'fc1024')

    # Feed the image through the model. The returned `body_prefix` will be used
    # further down to load the pre-trained weights for all variables with this
    # prefix.
    endpoints, body_prefix = model.endpoints(images_total, is_training=True)

    with tf.name_scope('head'):
        endpoints = head.head(endpoints, FLAGS.embedding_dim, is_training=True)
    '''
    print endpoints['model_output'] # (bt,2048)
    print endpoints['global_pool'] # (bt,2048)
    print endpoints['resnet_v1_50/block4']# (bt,7,7,2048)
    '''

    # Create the second backbone (no separate embedding head).
    model2 = import_module('nets.' + 'resnet_v1_101')
    endpoints2, body_prefix2 = model2.endpoints(images_total, is_training=True)

    train_mode = tf.placeholder(tf.bool)

    print('Build network')

    feat = endpoints['resnet_v1_50/block4']  # (bt,7,7,2048)

    feat2 = endpoints2['resnet_v1_101/block4']  # (bt,7,7,2048)

    #feat = tf.convert_to_tensor(feat, dtype=tf.float32)
    # global
    feature, feature2 = global_pooling(feat, feat2, weight_decay)
    loss_triplet, PP, NN = batch_hard_triplet_loss(labels, feature, 0.3)

    _, dis_matrix1 = triplet_hard_loss(feature, FLAGS.ID_num, FLAGS.IMG_PER_ID)
    _, dis_matrix2 = triplet_hard_loss(feature2, FLAGS.ID_num,
                                       FLAGS.IMG_PER_ID)
    mul_loss = multual_loss(dis_matrix1, dis_matrix2)
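    # mul_loss presumably penalizes disagreement between the two networks'
    # pairwise distance matrices, so each backbone also learns from the
    # other's embedding geometry.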

    local_anchor_feature, local_anchor_feature2 = local_pooling(
        feat, feat2, weight_decay)
    local_loss_triplet, local_pos_loss, local_neg_loss = local_triplet_hard_loss(
        local_anchor_feature, FLAGS.ID_num, FLAGS.IMG_PER_ID)

    loss_triplet2, PP2, NN2 = batch_hard_triplet_loss(labels, feature2, 0.3)
    local_loss_triplet2, local_pos_loss2, local_neg_loss2 = local_triplet_hard_loss(
        local_anchor_feature2, FLAGS.ID_num, FLAGS.IMG_PER_ID)

    s1 = fully_connected_class(feature, feature_dim=2048,
                               num_classes=1000)  #tarin_num_id
    cross_entropy_var = slim.losses.sparse_softmax_cross_entropy(
        s1, tf.cast(labels, tf.int64))
    loss_softmax = cross_entropy_var
    #loss_softmax = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_softmax, logits=s1))
    inference = tf.nn.softmax(s1)

    s2 = fully_connected_class2(feature2, feature_dim=2048, num_classes=1000)
    cross_entropy_var2 = slim.losses.sparse_softmax_cross_entropy(
        s2, tf.cast(labels, tf.int64))
    loss_softmax2 = cross_entropy_var2

    #loss_softmax2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_softmax, logits=s2))
    inference2 = tf.nn.softmax(s2)

    multual_softmax1 = kl_loss_compute(s1, s2)
    multual_softmax2 = kl_loss_compute(s2, s1)
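    # Mutual softmax supervision: each classifier is pulled toward the
    # other's predicted distribution via a KL term (deep mutual learning).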

    P1 = tf.reduce_mean(PP)
    P2 = tf.reduce_mean(PP2)
    N1 = tf.reduce_mean(NN)
    N2 = tf.reduce_mean(NN2)

    LP1 = tf.reduce_mean(local_pos_loss)
    LN1 = tf.reduce_mean(local_neg_loss)
    '''
    
    # global
    feature2 = global_pooling(feat2,weight_decay)
    #loss_triplet,PP,NN = triplet_hard_loss(feature,FLAGS.ID_num,FLAGS.IMG_PER_ID)
    loss_triplet2 ,PP2,NN2 = batch_hard_triplet_loss(labels,feature2,0.3)

    
    #local
    local_anchor_feature2 = local_pooling(feat2,weight_decay)
    local_loss_triplet2 ,local_pos_loss2, local_neg_loss2 = local_triplet_hard_loss(local_anchor_feature2,FLAGS.ID_num,FLAGS.IMG_PER_ID)
    '''

    loss = local_loss_triplet * FLAGS.local_rate + loss_triplet * FLAGS.global_rate + mul_loss + loss_softmax + multual_softmax1

    #DD = compute_euclidean_distance(anchor_feature,positive_feature)
    loss2 = local_loss_triplet2 * FLAGS.local_rate + loss_triplet2 * FLAGS.global_rate + mul_loss + loss_softmax2 + multual_softmax2

    if FLAGS.mode == 'val' or FLAGS.mode == 'cmc' or FLAGS.mode == 'test':
        # NOTE: anchor_feature / positive_feature / negative_feature are never
        # built in this graph, so this branch would raise a NameError.
        loss, pos_loss, neg_loss = triplet_loss(anchor_feature,
                                                positive_feature,
                                                negative_feature, 0.3)
        print ' ERROR                 ERROR '

    # These are collected here before we add the optimizer, because depending
    # on the optimizer, it might add extra slots, which are also global
    # variables, with the exact same prefix.
    model_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        body_prefix)

    model_variables2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         body_prefix2)

    # Update_ops are used to update batchnorm stats.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):

        optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
        train = optimizer.minimize(loss, global_step=global_step)

        optimizer2 = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
        train2 = optimizer2.minimize(loss2, global_step=global_step)
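        # NOTE: both minimize() calls share global_step, so running train and
        # train2 in one sess.run() advances the counter by two per iteration.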

    tf.summary.scalar("total_loss 1", loss)
    tf.summary.scalar("total_loss 2", loss2)
    tf.summary.scalar("learning_rate", learning_rate)

    regularization_var = tf.reduce_sum(tf.losses.get_regularization_loss())
    tf.summary.scalar("weight_loss", regularization_var)

    lr = FLAGS.learning_rate

    #config=tf.ConfigProto(log_device_placement=True)
    #config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    # GPU
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.95

    with tf.Session(config=config) as sess:

        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter("TensorBoard_1x1_a_1x7/",
                                       graph=sess.graph)

        #sess.run(tf.global_variables_initializer())
        #saver = tf.train.Saver()

        #checkpoint_saver = tf.train.Saver(max_to_keep=0)
        checkpoint_saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            print ckpt.model_checkpoint_path
            #saver.restore(sess, ckpt.model_checkpoint_path)
            checkpoint_saver.restore(sess, ckpt.model_checkpoint_path)

        # First run: initialize variables and load ImageNet-pretrained weights.
        else:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver(model_variables)
            print FLAGS.initial_checkpoint
            saver.restore(sess, FLAGS.initial_checkpoint)

            saver2 = tf.train.Saver(model_variables2)
            print FLAGS.initial_checkpoint2
            saver2.restore(sess, FLAGS.initial_checkpoint2)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):

                batch_images, batch_labels, batch_images_total = cuhk03_dataset_label2.read_data(
                    FLAGS.data_dir, 'train', tarin_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size, FLAGS.ID_num,
                    FLAGS.IMG_PER_ID)

                #feed_dict = {learning_rate: lr,  is_train: True , labels: batch_labels, droup_is_training: False, train_mode: True, images_total: batch_images_total} #no label   images: batch_images,

                feed_dict = {
                    learning_rate: lr,
                    is_train: True,
                    train_mode: True,
                    images_total: batch_images_total,
                    labels: batch_labels
                }

                start = time.time()

                _, _, train_loss, train_loss2 = sess.run(
                    [train, train2, loss, loss2], feed_dict=feed_dict)

                print(
                    'Step: %d, Learning rate: %f, Train loss: %f , Train loss2: %f'
                    % (i, lr, train_loss, train_loss2))

                gtoloss, gp, gn = sess.run([loss_triplet, P1, N1],
                                           feed_dict=feed_dict)
                print 'global hard: ', gtoloss
                print 'global P1: ', gp
                print 'global N1: ', gn

                toloss, p, n = sess.run([local_loss_triplet, LP1, LN1],
                                        feed_dict=feed_dict)
                print 'local hard: ', toloss
                print 'local P: ', p
                print 'local N: ', n

                mul, p2, n2 = sess.run(
                    [mul_loss, loss_triplet2, local_loss_triplet2],
                    feed_dict=feed_dict)
                print 'mul loss: ', mul
                print 'loss_triplet2: ', p2
                print 'local_loss_triplet2: ', n2

                end = time.time()
                elapsed = end - start
                print "Time taken: ", elapsed, "seconds."

                #lr = FLAGS.learning_rate / ((2) ** (i/160000)) * 0.1
                lr = FLAGS.learning_rate * ((0.0001 * i + 1)**-0.75)
                if i % 100 == 0:

                    checkpoint_saver.save(sess, FLAGS.logs_dir + 'model.ckpt',
                                          i)

                if i % 20 == 0:
                    result = sess.run(merged, feed_dict=feed_dict)
                    writer.add_summary(result, i)
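
Example #6 trains two backbones (resnet_v1_50 and resnet_v1_101) jointly in
a deep-mutual-learning setup: each classifier is pulled toward the other's
softmax, and the triplet distance matrices are tied by mul_loss. A plausible
sketch of the KL term, assuming kl_loss_compute(s, t) means
KL(softmax(t) || softmax(s)) with the target network treated as constant;
the actual helper in this codebase may differ:

import tensorflow as tf

def kl_loss_sketch(logits_s, logits_t):
    # KL(p_t || p_s): no gradient flows through the "teacher" logits.
    p_t = tf.stop_gradient(tf.nn.softmax(logits_t))
    log_p_t = tf.stop_gradient(tf.nn.log_softmax(logits_t))
    log_p_s = tf.nn.log_softmax(logits_s)
    return tf.reduce_mean(tf.reduce_sum(p_t * (log_p_t - log_p_s), axis=1))
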
Example #7
def main(argv=None):
    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1

    if FLAGS.mode == 'cmc':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')

    images = tf.placeholder(
        tf.float32, [3, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images')

    images_total = tf.placeholder(
        tf.float32, [FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images_total')

    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset_label2.get_num_id(
            FLAGS.data_dir, 'train')
        print(tarin_num_id, 'train identities')
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'val')

    images1, images2, images3 = preprocess(images, is_train)
    img_combine = tf.concat([images1, images2, images3], 0)
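    # Anchor / positive / negative batches are stacked along axis 0 so one
    # forward pass embeds all three; they are split again after block4.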

    train_mode = tf.placeholder(tf.bool)
    '''
    vgg = vgg19.Vgg19('./vgg19.npy')    #  fix    !!!!!!!!!!!!!!!!!!!!
    
    print images_total,'images_total'
    vgg.build(img_combine, train_mode)
    feat = vgg.pool5
    print feat,'1'
    feat1 ,feat2 ,feat3 = tf.split(feat, [FLAGS.batch_size, FLAGS.batch_size,FLAGS.batch_size])
    '''

    # Create the model and an embedding head.
    model = import_module('nets.' + 'resnet_v1_50')
    head = import_module('heads.' + 'fc1024')

    # Feed the image through the model. The returned `body_prefix` will be used
    # further down to load the pre-trained weights for all variables with this
    # prefix.
    endpoints, body_prefix = model.endpoints(img_combine, is_training=False)

    feat = endpoints['resnet_v1_50/block4']  # (bt,7,7,2048)
    feat1, feat2, feat3 = tf.split(
        feat, [FLAGS.batch_size, FLAGS.batch_size, FLAGS.batch_size])

    print('Build network')

    # global
    anchor_feature, positive_feature, negative_feature = global_pooling(
        feat1, feat2, feat3, weight_decay)
    #feature = global_pooling(feat,weight_decay)
    #loss_triplet ,pos_loss, neg_loss = triplet_loss(feature[:4], feature[4:8], feature[8:12], 0.3 )
    #loss_triplet,NN,PP = triplet_hard_loss(feature,FLAGS.ID_num,FLAGS.IMG_PER_ID)

    #loss = loss_triplet
    DD = compute_euclidean_distance(anchor_feature, positive_feature)
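    # DD: per-pair Euclidean distance between anchor and positive embeddings;
    # the cmc and test branches below score image pairs with it.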

    #optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    #train = optimizer.minimize(loss, global_step=global_step)
    #train_class = optimizer.minimize(loss_class, global_step=global_step)
    lr = FLAGS.learning_rate

    #config=tf.ConfigProto(log_device_placement=True)
    #config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
    # GPU
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.35

    with tf.Session(config=config) as sess:
        checkpoint_saver = tf.train.Saver(max_to_keep=0)

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            print ckpt.model_checkpoint_path
            #saver.restore(sess, ckpt.model_checkpoint_path)
            checkpoint_saver.restore(sess, ckpt.model_checkpoint_path)

        # First run: initialize variables and load ImageNet-pretrained weights.
        else:
            sess.run(tf.global_variables_initializer())
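            # NOTE: model_variables is never collected in this example (cf.
            # the tf.get_collection lookup in the earlier ones), so this Saver
            # would raise a NameError on a fresh run.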
            saver = tf.train.Saver(model_variables)
            print FLAGS.initial_checkpoint
            saver.restore(sess, FLAGS.initial_checkpoint)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):

                batch_images, batch_labels, batch_images_total = cuhk03_dataset_label2.read_data(
                    FLAGS.data_dir, 'train', tarin_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size, FLAGS.ID_num,
                    FLAGS.IMG_PER_ID)

                #feed_dict = {learning_rate: lr,  is_train: True , labels: batch_labels, droup_is_training: False, train_mode: True, images_total: batch_images_total} #no label   images: batch_images,

                feed_dict = {
                    learning_rate: lr,
                    is_train: True,
                    train_mode: True,
                    images_total: batch_images_total
                }
                #sess.run([train], feed_dict=feed_dict)
                #train_loss,posi_loss,nega_loss = sess.run([loss,positives,negatives], feed_dict=feed_dict)

                #train_loss,posi_loss,nega_loss = sess.run([loss,positives,negatives], feed_dict=feed_dict)

                #print('Step: %d, Learning rate: %f, Train loss: %f , posi:%f, nega:%f' % (i, lr, train_loss,posi_loss,nega_loss))
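                # NOTE: train, loss, NN and PP are all commented out above,
                # so this training branch is not runnable as shown.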

                _, train_loss = sess.run([train, loss], feed_dict=feed_dict)

                print('Step: %d, Learning rate: %f, Train loss: %f ' %
                      (i, lr, train_loss))

                h, p, l = sess.run([NN, PP, loss], feed_dict=feed_dict)
                print 'n:', h
                print 'p:', p
                print 'hard loss', l

                #train_loss,pos , neg , soft , tri = sess.run([loss,pos_loss,neg_loss,loss_softmax,loss_triplet], feed_dict=feed_dict)
                #print 'triplet : ',tri
                #print("pos_loss :   ",pos)
                #print("neg_loss :   ",neg)

                #print 'softmax : ',soft
                '''            
 
                local_loss ,local_p , local_n = sess.run([local_loss_triplet,local_pos_loss,local_neg_loss], feed_dict=feed_dict)          
                print 'local_loss : ',local_loss
                print 'local p : ',local_p
                print 'local n : ',local_n
                print ' '
                
                
                a, aa = sess.run([local_pos_dis,local_neg_dis], feed_dict=feed_dict) 
                print 'a',a
                print ' '
                print 'aa',aa
                '''

                lr = FLAGS.learning_rate * ((0.0001 * i + 1)**-0.75)
                if i % 100 == 0:
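                    # NOTE: saver is only bound when training from scratch
                    # (else-branch above); checkpoint_saver is the safer handle.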
                    saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)
                    # test save
                    #vgg.save_npy(sess, './big.npy')

        elif FLAGS.mode == 'val':
            total = 0.
            for _ in xrange(10):
                batch_images, batch_labels = cuhk03_dataset_label2.read_data(
                    FLAGS.data_dir, 'val', val_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {
                    images: batch_images,
                    labels: batch_labels,
                    is_train: False
                }
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in xrange(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))
            '''
            for i in xrange(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''

        elif FLAGS.mode == 'cmc':
            cmc_total = []
            do_times = 20
            cmc_sum = np.zeros((100, 100), dtype='f')
            for times in xrange(do_times):
                path = 'data'
                set = 'val'

                cmc_array = np.ones((100, 100), dtype='f')

                batch_images = []
                batch_labels = []
                index_gallery_array = np.ones((1, 100), dtype='f')
                gallery_bool = True
                probe_bool = True
                for j in xrange(100):
                    id_probe = j
                    for i in xrange(100):
                        batch_images = []
                        batch_labels = []
                        filepath = ''

                        #filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)
                        #filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, id_probe, index_probe)

                        if gallery_bool == True:
                            while True:
                                index_gallery = int(random.random() * 10)
                                index_gallery_array[0, i] = index_gallery

                                filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (
                                    path, set, i, index_gallery)
                                if not os.path.exists(filepath_gallery):
                                    continue
                                break
                        if i == 99:
                            gallery_bool = False
                        if gallery_bool == False:
                            index_gallery = index_gallery_array[0, i]
                            filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (
                                path, set, i, index_gallery)

                        if probe_bool == True:
                            while True:
                                index_probe = int(random.random() * 10)
                                filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (
                                    path, set, id_probe, index_probe)
                                if not os.path.exists(filepath_probe):
                                    continue
                                if index_gallery_array[
                                        0, id_probe] == index_probe:
                                    continue
                                probe_bool = False
                                break
                        if i == 99:
                            probe_bool = True
                        '''
                              while True:
                                    index_probe = int(random.random() * 10)
                                    filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, id_probe, index_probe)
                                    if not os.path.exists(filepath_gallery):
                                        continue
                                    if index_gallery_array[1,id_probe] == index_probe:
                                        continue
                                    break
                              '''

                        #filepath_gallery = 'data/labeled/val/0000_01.jpg'
                        #filepath_probe   = 'data/labeled/val/0000_02.jpg'

                        image1 = cv2.imread(filepath_gallery)
                        image1 = cv2.resize(image1,
                                            (IMAGE_WIDTH, IMAGE_HEIGHT))
                        image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
                        image1 = np.reshape(
                            image1,
                            (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)

                        image2 = cv2.imread(filepath_probe)
                        image2 = cv2.resize(image2,
                                            (IMAGE_WIDTH, IMAGE_HEIGHT))
                        image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
                        image2 = np.reshape(
                            image2,
                            (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)

                        test_images = np.array([image1, image2, image2])

                        #print (filepath_gallery)
                        #print (filepath_probe)
                        #print ('1111111111111111111111')

                        if i == j:
                            batch_labels = [1., 0.]
                        if i != j:
                            batch_labels = [0., 1.]
                        batch_labels = np.array(batch_labels)
                        print('test  img :', test_images.shape)

                        feed_dict = {images: test_images, is_train: False}
                        prediction = sess.run(DD, feed_dict=feed_dict)
                        #print (prediction, prediction[0][1])

                        print(filepath_gallery, filepath_probe)

                        #print(bool(not np.argmax(prediction[0])))
                        print(prediction)

                        cmc_array[j, i] = prediction

                        #print(i,j)

                        #prediction = sess.run(inference, feed_dict=feed_dict)
                        #prediction = np.argmax(prediction, axis=1)
                        #label = np.argmax(batch_labels, axis=1)

                cmc_score = cmc.cmc(cmc_array)
                cmc_sum = cmc_score + cmc_sum
                cmc_total.append(cmc_score)
                print(cmc_score)
            cmc_sum = cmc_sum / do_times
            print(cmc_sum)
            print('final cmc')
            print('\n')
            print cmc_total

        elif FLAGS.mode == 'test':
            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(
                image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            image2 = np.reshape(
                image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            test_images = np.array([image1, image2, image2])

            feed_dict = {images: test_images, is_train: False}
            #prediction, prediction2 = sess.run([DD,DD2], feed_dict=feed_dict)
            prediction = sess.run([inference], feed_dict=feed_dict)
            prediction = np.array(prediction)
            print prediction.shape
            print(np.argmax(prediction[0]) + 1)
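
The cmc branches above build a 100x100 distance matrix (row = probe, column
= gallery, with the true match on the diagonal) and hand it to cmc.cmc. A
minimal sketch of how a CMC curve can be computed from such a matrix; the
function name is illustrative, not the cmc module used above:

import numpy as np

def cmc_curve(dist):
    # dist[p, g]: distance between probe p and gallery g; the true match
    # for probe p is assumed to sit at column p.
    n = dist.shape[0]
    ranks = np.empty(n, dtype=int)
    for p in range(n):
        order = np.argsort(dist[p])            # nearest gallery first
        ranks[p] = np.where(order == p)[0][0]  # rank of the true match
    # CMC[k] = fraction of probes whose true match appears within rank k.
    return np.array([(ranks <= k).mean() for k in range(n)])
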
Example #8
def main(argv=None):
    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1
    
    if FLAGS.mode == 'cmc':
        FLAGS.batch_size = 1
        
    if FLAGS.mode == 'top1':
        FLAGS.batch_size = 100

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
   
    images = tf.placeholder(tf.float32, [3, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')
    
    images_total = tf.placeholder(tf.float32, [FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images_total')
    

    images_one = tf.placeholder(tf.float32, [1, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images_one')

    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'train')
        print(tarin_num_id, 'train identities')
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset_label2.get_num_id(FLAGS.data_dir, 'val')
  
    images1, images2,images3 = preprocess(images, is_train)
    img_combine = tf.concat([images1, images2,images3], 0)
    
    train_mode = tf.placeholder(tf.bool)

    # Create the model and an embedding head.
    model = import_module('nets.' + 'resnet_v1_50')
    head = import_module('heads.' + 'fc1024')

    # Feed the image through the model. The returned `body_prefix` will be used
    # further down to load the pre-trained weights for all variables with this
    # prefix.
    endpoints, body_prefix = model.endpoints(images_total, is_training=False)

    feat = endpoints['resnet_v1_50/block4']# (bt,7,7,2048)
    #feat1 ,feat2 ,feat3 = tf.split(feat, [FLAGS.batch_size, FLAGS.batch_size,FLAGS.batch_size])

    print('Build network')

    feat_1x1 = tf.layers.conv2d(feat, 2048, [1, 1],padding='valid',
            kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay), reuse=None, name='conv1x1')  
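    # The 1x1 conv keeps the 7x7 spatial grid but remixes the 2048 channels
    # before the part-attention pooling below.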

    anchor_feature = part_attend(feat_1x1, weight_decay)

    lr = FLAGS.learning_rate

    #config=tf.ConfigProto(log_device_placement=True)
    #config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) 
    # GPU
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.55
    
    with tf.Session(config=config) as sess:
        checkpoint_saver = tf.train.Saver(max_to_keep=0)


        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            print ckpt.model_checkpoint_path
            #saver.restore(sess, ckpt.model_checkpoint_path)
            checkpoint_saver.restore(sess, ckpt.model_checkpoint_path)
                    
        # First run: initialize variables and load ImageNet-pretrained weights.
        else:
            sess.run(tf.global_variables_initializer())
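            # NOTE: model_variables is never collected in this example, so
            # this Saver would raise a NameError on a fresh run.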
            saver = tf.train.Saver(model_variables)
            print FLAGS.initial_checkpoint
            saver.restore(sess, FLAGS.initial_checkpoint)

        
        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):

                batch_images, batch_labels, batch_images_total = cuhk03_dataset_label2.read_data(FLAGS.data_dir, 'train', tarin_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size, FLAGS.ID_num, FLAGS.IMG_PER_ID)

                # The original snippet never builds feed_dict in this loop; the
                # assignment below mirrors Example #7 and is an assumption.
                feed_dict = {
                    learning_rate: lr,
                    is_train: True,
                    train_mode: True,
                    images_total: batch_images_total
                }

                # NOTE: train, loss, NN and PP are not defined anywhere in this
                # example, so this training branch is not runnable as shown.
                _, train_loss = sess.run([train, loss], feed_dict=feed_dict)

                print('Step: %d, Learning rate: %f, Train loss: %f ' % (i, lr, train_loss))

                h, p, l = sess.run([NN, PP, loss], feed_dict=feed_dict)
                print 'n:', h
                print 'p:', p
                print 'hard loss', l

                lr = FLAGS.learning_rate * ((0.0001 * i + 1) ** -0.75)
                if i % 100 == 0:
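                    # NOTE: saver is only bound when training from scratch
                    # (else-branch above); checkpoint_saver is the safer handle.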
                    saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)
                    #vgg.save_npy(sess, './big.npy')

        elif FLAGS.mode == 'val':
            total = 0.
            for _ in xrange(10):
                batch_images, batch_labels = cuhk03_dataset_label2.read_data(FLAGS.data_dir, 'val', val_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
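                # NOTE: no labels placeholder is created in this example, so
                # the feed_dict below would raise a NameError.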
                feed_dict = {images: batch_images, labels: batch_labels, is_train: False}
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in xrange(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))

            '''
            for i in xrange(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''

        
        elif FLAGS.mode == 'cmc':
            cmc_total = []
            do_times = 20
            cmc_sum = np.zeros((100, 100), dtype='f')
            for times in xrange(do_times):
                path = 'data'
                set = 'val'

                cmc_array = np.ones((100, 100), dtype='f')

                batch_images = []
                batch_labels = []
                index_gallery_array = np.ones((1, 100), dtype='f')
                gallery_bool = True
                probe_bool = True
                for j in xrange(100):
                    id_probe = j
                    for i in xrange(100):
                        batch_images = []
                        batch_labels = []
                        filepath = ''

                        #filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)
                        #filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, id_probe, index_probe)

                        if gallery_bool == True:
                            while True:
                                index_gallery = int(random.random() * 10)
                                index_gallery_array[0, i] = index_gallery

                                filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)
                                if not os.path.exists(filepath_gallery):
                                    continue
                                break
                        if i == 99:
                            gallery_bool = False
                        if gallery_bool == False:
                            index_gallery = index_gallery_array[0, i]
                            filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)

                        if probe_bool == True:
                            while True:
                                index_probe = int(random.random() * 10)
                                filepath_probe = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, id_probe, index_probe)
                                if not os.path.exists(filepath_probe):
                                    continue
                                if index_gallery_array[0, id_probe] == index_probe:
                                    continue
                                probe_bool = False
                                break
                        if i == 99:
                            probe_bool = True

                        image1 = cv2.imread(filepath_gallery)
                        image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
                        image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
                        image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)

                        image2 = cv2.imread(filepath_probe)
                        image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
                        image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
                        image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)

                        test_images = np.array([image1, image2, image2])

                        if i == j:
                            batch_labels = [1., 0.]
                        if i != j:
                            batch_labels = [0., 1.]
                        batch_labels = np.array(batch_labels)
                        print('test  img :', test_images.shape)
                        feed_dict = {images: test_images, is_train: False}
                        prediction = sess.run(DD, feed_dict=feed_dict)
                        print(filepath_gallery, filepath_probe)
                        print(prediction)

                        cmc_array[j, i] = prediction

                cmc_score = cmc.cmc(cmc_array)
                cmc_sum = cmc_score + cmc_sum
                cmc_total.append(cmc_score)
                print(cmc_score)
            cmc_sum = cmc_sum / do_times
            print(cmc_sum)
            print('final cmc')
            print('\n')
            print cmc_total

        elif FLAGS.mode == 'top1':
            path = 'data'
            set = 'val'
            cmc_sum = np.zeros((100, 100), dtype='f')

            cmc_total = []
            do_times = 20

            for times in xrange(do_times):
                query_feature = []
                test_feature = []

                for i in range(100):
                    while True:
                        index_gallery = int(random.random() * 10)
                        index_temp = index_gallery
                        filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)
                        if not os.path.exists(filepath_gallery):
                            continue
                        break
                    image1 = cv2.imread(filepath_gallery)
                    image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
                    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
                    query_feature.append(image1)

                    while True:
                        index_gallery = int(random.random() * 10)
                        if index_temp == index_gallery:
                            continue

                        filepath_gallery = '%s/labeled/%s/%04d_%02d.jpg' % (path, set, i, index_gallery)
                        if not os.path.exists(filepath_gallery):
                            continue
                        break
                    image1 = cv2.imread(filepath_gallery)
                    image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
                    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
                    test_feature.append(image1)
                    #print filepath_gallery,'\n'
                query_feature = np.array(query_feature)
                test_feature = np.array(test_feature)

                feed_dict = {images_total: query_feature, is_train: False}
                q_feat = sess.run(anchor_feature, feed_dict=feed_dict)

                feed_dict = {images_total: test_feature, is_train: False}
                test_feat = sess.run(anchor_feature, feed_dict=feed_dict)

                cmc_array = []
                # NOTE: new graph nodes are built on every pass through this
                # loop, so the graph keeps growing; building them once outside
                # the loop would be cleaner.
                tf_q_feat = tf.constant(q_feat)
                tf_test_feat = tf.constant(test_feat)

                h = tf.placeholder(tf.int32)
                pick = tf_q_feat[h]
                tf_q_feat = tf.reshape(pick, [1, 2048])
                feat1 = tf.tile(tf_q_feat, [100, 1])
                f = tf.square(tf.subtract(feat1, tf_test_feat))
                d = tf.sqrt(tf.reduce_sum(f, 1))  # reduce over axis 1, the 2048 feature dims -> (100,) distances

                for t in range(100):
                    feed_dict = {h: t}
                    D = sess.run(d, feed_dict=feed_dict)
                    cmc_array.append(D)
                cmc_array = np.array(cmc_array)
                cmc_score = cmc.cmc(cmc_array)
                cmc_sum = cmc_score + cmc_sum
                cmc_total.append(cmc_score)
                #top1=single_query(q_feat,test_feat,labels,labels,test_num=10)
                print cmc_score
            cmc_sum = cmc_sum / do_times
            print(cmc_sum)
            print('final cmc')
            print('\n')
            print cmc_total
        
        
        elif FLAGS.mode == 'test':
            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            test_images = np.array([image1, image2,image2])

            feed_dict = {images: test_images, is_train: False}
            #prediction, prediction2 = sess.run([DD,DD2], feed_dict=feed_dict)
            prediction = sess.run([inference], feed_dict=feed_dict)
            prediction = np.array(prediction)
            print prediction.shape
            print( np.argmax(prediction[0])+1)
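
The top1 branch computes query-to-gallery distances one query at a time
through a small tf.tile graph. The same 100x100 matrix can be produced in a
single vectorized step; a NumPy sketch, with q_feat and test_feat standing
for the (100, 2048) arrays fetched above:

import numpy as np

def pairwise_dist(q_feat, test_feat):
    # ||q - t||^2 = ||q||^2 + ||t||^2 - 2 q.t, evaluated for all pairs.
    q2 = (q_feat ** 2).sum(axis=1)[:, None]      # (Q, 1)
    t2 = (test_feat ** 2).sum(axis=1)[None, :]   # (1, G)
    d2 = q2 + t2 - 2.0 * q_feat.dot(test_feat.T)
    return np.sqrt(np.maximum(d2, 0.0))          # (Q, G)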