Example #1
    def prepare(self):
        if FLAGS.mode == 'test':
            FLAGS.batch_size = 1

        self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        self.images = tf.placeholder(
            tf.float32, [2, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
            name='images')
        self.labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 2],
                                     name='labels')
        self.is_train = tf.placeholder(tf.bool, name='is_train')
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        weight_decay = 0.0005
        self.tarin_num_id = 0
        val_num_id = 0

        if FLAGS.mode == 'train':
            self.tarin_num_id = cuhk03_dataset.get_num_id(
                FLAGS.data_dir, 'train')
        elif FLAGS.mode == 'val':
            val_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'val')
        images1, images2 = self.preprocess(self.images, self.is_train)

        print('Build network')
        logits = self.network(images1, images2, weight_decay)
        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=self.labels,
                                                    logits=logits))
        self.inference = tf.nn.softmax(logits)

        optimizer = tf.train.MomentumOptimizer(self.learning_rate,
                                               momentum=0.9)
        self.train = optimizer.minimize(self.loss,
                                        global_step=self.global_step)
        lr = FLAGS.learning_rate

        os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        self.gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
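The `images` placeholder in these examples carries an image pair as one array of shape [2, batch, height, width, 3]. A minimal NumPy sketch of assembling such an array (the sizes below are assumed for illustration only; the examples take them from IMAGE_HEIGHT, IMAGE_WIDTH and FLAGS.batch_size):

import numpy as np

# Assumed sizes for illustration; not taken from the example code.
IMAGE_HEIGHT, IMAGE_WIDTH, BATCH_SIZE = 160, 60, 4
left = np.zeros((BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 3), dtype=np.float32)   # first image of each pair
right = np.zeros((BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 3), dtype=np.float32)  # second image of each pair
pairs = np.stack([left, right], axis=0)
assert pairs.shape == (2, BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 3)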
Example #2
def main(argv=None):
    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    images = tf.placeholder(tf.float32, [2, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')
    labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 2], name='labels')
    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'train')
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'val')
    images1, images2 = preprocess(images, is_train)

    print('Build network')
    logits = network(images1, images2, weight_decay)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    inference = tf.nn.softmax(logits)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    train = optimizer.minimize(loss, global_step=global_step)
    lr = FLAGS.learning_rate

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            saver.restore(sess, ckpt.model_checkpoint_path)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in range(step, FLAGS.max_steps + 1):
                batch_images, batch_labels = cuhk03_dataset.read_data(FLAGS.data_dir, 'train', tarin_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {learning_rate: lr, images: batch_images,
                    labels: batch_labels, is_train: True}
                sess.run(train, feed_dict=feed_dict)
                train_loss = sess.run(loss, feed_dict=feed_dict)
                print('Step: %d, Learning rate: %f, Train loss: %f' % (i, lr, train_loss))

                lr = FLAGS.learning_rate * ((0.0001 * i + 1) ** -0.75)
                if i % 1000 == 0:
                    saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)
        elif FLAGS.mode == 'val':
            total = 0.
            for _ in range(10):
                batch_images, batch_labels = cuhk03_dataset.read_data(FLAGS.data_dir, 'val', val_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {images: batch_images, labels: batch_labels, is_train: False}
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in range(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))


        elif FLAGS.mode == 'test':
            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            test_images = np.array([image1, image2])

            feed_dict = {images: test_images, is_train: False}
            prediction = sess.run(inference, feed_dict=feed_dict)
            print(bool(not np.argmax(prediction[0])))
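The test branch repeats the same load/resize/colour-convert/reshape sequence for both inputs; a small helper capturing it (`load_test_image` is a hypothetical name, not part of the original code):

import cv2
import numpy as np

def load_test_image(path, width, height):
    """Read an image, resize to (width, height), convert BGR to RGB, and
    reshape to the (1, height, width, 3) float array the graph expects."""
    img = cv2.imread(path)
    img = cv2.resize(img, (width, height))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return np.reshape(img, (1, height, width, 3)).astype(float)

# e.g. image1 = load_test_image(FLAGS.image1, IMAGE_WIDTH, IMAGE_HEIGHT)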
Example #3
def main(argv=None):
    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    images = tf.placeholder(
        tf.float32, [2, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images')
    labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 2], name='labels')
    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'train')
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'val')
    images1, images2 = preprocess(images, is_train)

    print('Build network')
    logits = network(images1, images2, weight_decay)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    inference = tf.nn.softmax(logits)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    train = optimizer.minimize(loss, global_step=global_step)
    lr = FLAGS.learning_rate

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            saver.restore(sess, ckpt.model_checkpoint_path)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):
                batch_images, batch_labels = cuhk03_dataset.read_data(
                    FLAGS.data_dir, 'train', tarin_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {
                    learning_rate: lr,
                    images: batch_images,
                    labels: batch_labels,
                    is_train: True
                }
                sess.run(train, feed_dict=feed_dict)
                train_loss = sess.run(loss, feed_dict=feed_dict)
                print('Step: %d, Learning rate: %f, Train loss: %f' %
                      (i, lr, train_loss))

                lr = FLAGS.learning_rate * ((0.0001 * i + 1)**-0.75)
                if i % 1000 == 0:
                    saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)
        elif FLAGS.mode == 'val':
            total = 0.
            for _ in xrange(10):
                batch_images, batch_labels = cuhk03_dataset.read_data(
                    FLAGS.data_dir, 'val', val_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {
                    images: batch_images,
                    labels: batch_labels,
                    is_train: False
                }
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in xrange(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))
            '''
            for i in xrange(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''
        elif FLAGS.mode == 'test':
            #            image_path1='20180516_155201_CH05_pic_recog/person/'
            #           image_path2='../persons/'#'20180516_155203_CH19_pic_recog/person/'
            #            save_path1='20180516_sameperson/CH05/'
            #           save_path2='20180718/Preview_192.168.7.27_0_20180718_220140_3056687/'#'20180516_sameperson/CH19/'
            #            if not os.path.exists(save_path1):
            #                os.makedirs(save_path1)
            #          if not os.path.exists(save_path2):
            #             os.makedirs(save_path2)
            #            image_files1=os.listdir(image_path1)
            #        image_files2=os.listdir(image_path2)
            #            flen1=len(image_files1)
            #       flen2=len(image_files2)
            #            print(flen1)
            #      print(flen2)

            print('--------end---------')

            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(
                image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            image2 = np.reshape(
                image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            test_images = np.array([image1, image2])

            merged_summary_op = tf.summary.merge_all()
            writer = tf.summary.FileWriter("./logs/", sess.graph)
            #  print("test_images : "+str(test_images))
            #image_shaped_input = tf.reshape(test_images, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])
            #tf.summary.image('input1', image_shaped_input, 2)
            #        merged = tf.summary.merge_all()
            #           feed_dict = {images: test_images, is_train: False}
            #            summary=sess.run(merged, feed_dict=feed_dict)
            feed_dict = {images: test_images, is_train: False}
            summary, prediction = sess.run([merged_summary_op, inference],
                                           feed_dict=feed_dict)
            print(bool(not np.argmax(prediction[0])))
            writer.add_summary(summary)
            writer.close()
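The training branches update the learning rate with an inverse-power decay, lr = base * (1 + 0.0001 * step) ** -0.75. A standalone sketch of that schedule (the base rate 0.01 is an assumed value; the examples read it from FLAGS.learning_rate):

base_lr = 0.01  # assumed value for illustration
for step in (0, 1000, 10000, 100000):
    lr = base_lr * ((0.0001 * step + 1) ** -0.75)
    print('step %6d -> lr %.6f' % (step, lr))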
Example #4
def main(argv=None):
    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    images = tf.placeholder(tf.float32, [2, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')
    labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 2], name='labels')
    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'train')
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'val')
    images1, images2 = preprocess(images, is_train)

    print('Build network')
    logits = network(images1, images2, weight_decay)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    inference = tf.nn.softmax(logits)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    train = optimizer.minimize(loss, global_step=global_step)
    lr = FLAGS.learning_rate

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            saver.restore(sess, ckpt.model_checkpoint_path)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):
                batch_images, batch_labels = cuhk03_dataset.read_data(FLAGS.data_dir, 'train', tarin_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {learning_rate: lr, images: batch_images,
                    labels: batch_labels, is_train: True}
                sess.run(train, feed_dict=feed_dict)
                train_loss = sess.run(loss, feed_dict=feed_dict)
                print('Step: %d, Learning rate: %f, Train loss: %f' % (i, lr, train_loss))

                lr = FLAGS.learning_rate * ((0.0001 * i + 1) ** -0.75)
                if i % 1000 == 0:
                    saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)
        elif FLAGS.mode == 'val':
            total = 0.
            for _ in xrange(10):
                batch_images, batch_labels = cuhk03_dataset.read_data(FLAGS.data_dir, 'val', val_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {images: batch_images, labels: batch_labels, is_train: False}
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in xrange(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))

            '''
            for i in xrange(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''
        elif FLAGS.mode == 'test':
#            image_path1='20180516_155201_CH05_pic_recog/person/'
          #  image_path2='../persons/'#'20180516_155203_CH19_pic_recog/person/'
#            save_path1='20180516_sameperson/CH05/'
          #  save_path2='20180718/Preview_192.168.7.27_0_20180718_220140_3056687/'#'20180516_sameperson/CH19/'
#            if not os.path.exists(save_path1):
#                os.makedirs(save_path1)
          #  if not os.path.exists(save_path2):
          #      os.makedirs(save_path2)
#            image_files1=os.listdir(image_path1)
          #  image_files2=os.listdir(image_path2)
#            flen1=len(image_files1)
          #  flen2=len(image_files2)
#            print(flen1)
          #  print(flen2)
#            flags1=[0]*(flen1)
#            flags2=[0]*(flen2)

#            index1_1=1#4115#1
#            maxindex1=9230#4116#9230
#            initpath1='20180516_155201_CH05_pic_recog/initperson/'
#            init_persons1=os.listdir(initpath1)
#            initp1={}
#            for i in range(len(init_persons1)):
#                initp1[i]=initpath1+init_persons1[i]
#                new_path=save_path1+str(i)
#                if not os.path.exists(new_path):
#                    os.makedirs(new_path)
#                shutil.copyfile(initpath1+init_persons1[i],new_path+'/'+init_persons1[i])
#            if not os.path.exists(save_path1+'nones'):
#                os.makedirs(save_path1+'nones')
#            while index1_1<maxindex1:
#                maxindexes=[]
#                maxscores=[]
#                im2_names=[]
#                for j in range(flen1):
#                    index1_2=int(image_files1[j].split('.jpg')[0].split('_')[-1])
#                    if index1_2==(index1_1+1):
#                       im2_names.append(image_files1[j])
#                for nm2 in range(len(im2_names)):
#                   im2=image_path1+im2_names[nm2]
#                   im2_scores={}
#                   maxindex=-1
#                   maxscore=0.0
#                   for nm1 in range(len(initp1)):
#                       im1=initp1[nm1]
#                       y1_1=int(im1.split('_')[-6])
#                       x1_1=int(im1.split('_')[-5])
#                       y1_2=int(im1.split('_')[-4])
#                       x1_2=int(im1.split('_')[-3])
#                       y1_0=(y1_1+y1_2)/2.0
#                       x1_0=(x1_1+x1_2)/2.0
#                       y2_1=int(im2.split('_')[-6])
#                       x2_1=int(im2.split('_')[-5])
#                       y2_2=int(im2.split('_')[-4])
#                       x2_2=int(im2.split('_')[-3])
#                       y2_0=(y2_1+y2_2)/2.0
#                       x2_0=(x2_1+x2_2)/2.0
#                       dist=math.sqrt(math.pow(y1_0-y2_0,2)+math.pow(x1_0-x2_0,2))
#                       predtemp=0.0
#                       if dist>50:
#                           predtemp=0.0
#                       else:
#                           predtemp=cmpims(im1,im2,inference,images,is_train,sess)
#                       im2_scores[nm1]=predtemp
#                       if predtemp>maxscore and predtemp>0.9:
#                          maxindex=nm1
#                          maxscore=predtemp
#                   maxindexes.append(maxindex)
#                   maxscores.append(im2_scores)
#                while True:
#                    moreindexes=[]
#                    for a in maxindexes:
#                        if maxindexes.count(a)>1  and (not a in moreindexes):
#                            moreindexes.append(a)
#                    if len(moreindexes)==0:
#                        break
#                    if len(moreindexes)==1 and moreindexes[0]==-1:
#                        break

#                    for a in moreindexes:
#                        flag=0
#                        list_index=[]
#                        for n in range(maxindexes.count(a)):
#                            sec=flag
#                            flag=maxindexes[flag:].index(a)
#                            list_index.append(flag+sec)
#                            flag=list_index[-1:][0]+1
                        
#                        print str(a)+" : "+str(list_index)
#                        temp_index=list_index[0]
#                        for m in list_index:
#                            if(maxscores[m].get(a)>maxscores[temp_index].get(a)):
#                                temp_index=m
#                        for m in list_index:
#                            if m!=temp_index:  
#                                nextindex=findNextMax(maxscores[m],maxscores[m].get(a))
#                                if nextindex==-1:
#                                    maxindexes[m]=-1
#                                elif maxscores[m][nextindex] < 0.9:
#                                    maxindexes[m]=-1
#                                else:
#                                    maxindexes[m]=nextindex
#                for m in range(len(maxindexes)):
#                    maxindex=maxindexes[m]
    
#                    if maxindex!=-1:
#                       shutil.copyfile(image_path1+im2_names[m],save_path1+str(maxindex)+'/'+im2_names[m])
#                       initp1[maxindex]=save_path1+str(maxindex)+'/'+im2_names[m]
#                    else:
#                       shutil.copyfile(image_path1+im2_names[m],save_path1+'nones/'+im2_names[m]) 
#                index1_1=index1_1+1


            print('--------end---------')


            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            test_images = np.array([image1, image2])

            feed_dict = {images: test_images, is_train: False}
            prediction = sess.run(inference, feed_dict=feed_dict)  # single fetch; the original tried to unpack a second, undefined pool1_2 tensor
            print(bool(not np.argmax(prediction[0])))
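Each row of `inference` is a two-way softmax, and these examples treat index 0 as the "same person" class (see the commented val-mode comparison against label == 0), so `bool(not np.argmax(prediction[0]))` is True exactly when class 0 has the higher probability. A minimal check with a made-up output:

import numpy as np

prediction = np.array([[0.8, 0.2]])        # example softmax output, class 0 = same person
print(bool(not np.argmax(prediction[0])))  # True: argmax is 0, the pair is judged the same identity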
Example #5
def main(argv=None):
   

    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1

    if FLAGS.mode == 'data':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    images = tf.placeholder(tf.float32, [2, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name='images')
    labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 2], name='labels')
    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'train')
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'val')
    
    images1, images2 = preprocess(images, is_train)

    print('=======================Build Network=======================')
    logits = network(images1, images2, weight_decay)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    inference = tf.nn.softmax(logits)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    train = optimizer.minimize(loss, global_step=global_step)
    lr = FLAGS.learning_rate

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('==================================Restore model==================================')
            saver.restore(sess, ckpt.model_checkpoint_path)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in range(step, FLAGS.max_steps + 1):
                batch_images, batch_labels = cuhk03_dataset.read_data(FLAGS.data_dir, 'train', tarin_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {learning_rate: lr, images: batch_images,
                    labels: batch_labels, is_train: True}
                sess.run(train, feed_dict=feed_dict)
                train_loss = sess.run(loss, feed_dict=feed_dict)
                print('Step: %d, Learning rate: %f, Train loss: %f' % (i, lr, train_loss))

                lr = FLAGS.learning_rate * ((0.0001 * i + 1) ** -0.75)
                if i % 1000 == 0:
                    saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)
        elif FLAGS.mode == 'val':
            total = 0.
            for _ in range(10):
                batch_images, batch_labels = cuhk03_dataset.read_data(FLAGS.data_dir, 'val', val_num_id,
                    IMAGE_WIDTH, IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {images: batch_images, labels: batch_labels, is_train: False}
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in range(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))

            '''
            for i in range(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''
        elif FLAGS.mode == 'test':
            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            
            
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
            
            f = plt.figure()
            f.add_subplot(1,2, 1)
            plt.imshow(image1)
            f.add_subplot(1,2, 2)
            plt.imshow(image2)
            plt.show()
            print("===============================Show Images==================================================")
            
            start = time.time()

            image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            
            
            
            test_images = np.array([image1, image2])
            test_images2 = np.array([image2, image1])
            
            feed_dict = {images: test_images, is_train: False}
            feed_dict2 = {images: test_images2, is_train: False}
            
            #print(feed_dict)
            
            prediction = sess.run(inference, feed_dict=feed_dict)
            prediction2 = sess.run(inference, feed_dict=feed_dict2)
            
            print("=======================Prediction1=======================")
            print(prediction)
            print(bool(not np.argmax(prediction[0])))
            #print(prediction[0])
            print("=======================Prediction2=======================")
            print(prediction2)
            print(bool(not np.argmax(prediction2[0])))
            
            end = time.time()
            print("Time in seconds: ")
            print(end - start)
        
        elif FLAGS.mode == 'data':
            print("path_test:",FLAGS.path_test)

            files = sorted(glob.glob('/home/oliver/Documentos/person-reid/video3_4/*.png'))
            print(len(files))

            image1 = cv2.imread(FLAGS.image1)
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            
            plt.imshow(image1)
            plt.show()
            
            '''
            image2 = cv2.imread(FLAGS.image2)
            image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)

            f = plt.figure()
            f.add_subplot(1,2, 1)
            plt.imshow(image1)
            f.add_subplot(1,2, 2)
            plt.imshow(image2)
            plt.show()
            
            print("===============================Show Images==================================================")
            '''
            start = time.time()
            image1 = np.reshape(image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            #image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            #list_pred=[]
            #list_bool=[]
            list_all = []
            for x in files:
                image2 = cv2.imread(x)
                image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT))
                image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
                image2 = np.reshape(image2, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
                test_images = np.array([image1, image2])
                feed_dict = {images: test_images, is_train: False}
                prediction = sess.run(inference, feed_dict=feed_dict)
                #print(bool(not np.argmax(prediction[0])))
                #list_bool.append(bool(not np.argmax(prediction[0])))
                #list_pred.append(prediction[0])
                if bool(not np.argmax(prediction[0])):
                    tupl = (x, prediction[0][0], prediction[0][1])
                    list_all.append(tupl)
            list_all.sort(key=sortSecond, reverse=True)
            
            end = time.time()
            print("Time in seconds: ")
            print(end - start)

            #print (list_all)
            print ("size list: ", len(list_all))
            ####3
            #cv2.namedWindow('Person-ReID', cv2.WINDOW_FULLSCREEN)
            #cv2.resizeWindow('Person-ReID', 480, 320)
            ####
            i = 0
            list_reid = []
            for e in list_all:
                temp_img = cv2.imread(e[0])
                temp_img = cv2.cvtColor(temp_img, cv2.COLOR_BGR2RGB)
                fpath, fname = os.path.split(e[0])
                if (i > 15 ):
                    break
                #plt.imshow(temp_img)
                #plt.show()
                #cv2.namedWindow('Person-ReID', cv2.WINDOW_NORMAL)                
                #cv2.imshow('Person-ReID', temp_img)                
                cv2.imwrite("output_query/"+fname, temp_img)
                #cv2.waitKey(1)
                path_f, name_f = os.path.split(e[0])
                splits_coords = name_f.rsplit('_')
                #print("coord: ",splits_coords)
                list_reid.append(( int(splits_coords[1]), splits_coords[2], splits_coords[3], splits_coords[4], splits_coords[5]))
                i = i +1
                print (i, e[0]," - ", e[1], " - ", e[2])
            list_reid.sort(key=sortFirst)  # sort the coordinates by frame number
            print (list_reid)

            f_frames = sorted(glob.glob('/home/oliver/Documentos/person-reid/frames/video3/*.png'))
            j = 0
            cv2.namedWindow('Person-ReID', cv2.WINDOW_NORMAL)                
            cv2.resizeWindow('Person-ReID', 640, 480)
            flag_draw = False
            k = 0
            ### Draw the matched detections onto the original frames
            for frame in f_frames:
                imgFrame = cv2.imread(frame , cv2.IMREAD_UNCHANGED)
                frame_p, frame_n = os.path.split(frame)
                temp_f = frame_n.rsplit('.')
                #cv2.imshow('Person-ReID', imgFrame)
                #cv2.waitKey(1)
                #print(int(temp_f[0]))
                if(j < len(list_reid)):
                    if (int(temp_f[0]) == list_reid[j][0]):
                        # mark this detection as a TRUE match
                        print(int(temp_f[0]), "--matched--", j, " ", list_reid[j])
                        #cv2.polylines(imgFrame , [np.int0([list_reid[j][1], list_reid[j][2], list_reid[j][3], list_reid[j][4]]).reshape((-1, 1, 2))], True, (0, 255, 0), 3)
                        #cv2.rectangle(imgFrame,(int(list_reid[j][4]), int(list_reid[j][3])) , (int(list_reid[j][2]),int(list_reid[j][1])), (0, 255, 0), 3)
                        #cv2.rectangle(imgFrame,(int(list_reid[j][3]), int(list_reid[j][4])),(int(list_reid[j][1]), int(list_reid[j][2])), (0, 255, 0), 3)
                        #color = cv2.cvtColor(np.uint8([[[num_random, 128, 200]]]),cv2.COLOR_HSV2RGB).squeeze().tolist()
                        #####################
                        #color = cv2.cvtColor(np.uint8([[[0, 128, 200]]]),cv2.COLOR_HSV2RGB).squeeze().tolist()
                        cv2.rectangle(imgFrame, (int(list_reid[j][3]), int(list_reid[j][1])) , (int(list_reid[j][4]),int(list_reid[j][2])) , (0,255,0), 10)
                        #cv2.imwrite('outReid/'+temp_f[0]+'.png',imgFrame)
                        flag_draw = True
                        k = 0
                        j=j+1
                    #else:
                        #cv2.imwrite('outReid/'+temp_f[0]+'.png',imgFrame)
                    #    cv2.imshow('Person-ReID', imgFrame)
                    #    cv2.waitKey(1)
                #else:
                    #cv2.imshow('Person-ReID', imgFrame)
                    #cv2.waitKey(1)
                    #cv2.imwrite('outReid/'+temp_f[0]+'.png',imgFrame)
                if (flag_draw == True):
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(imgFrame,'True',(200,200), font, 4,(0,255,0),4,cv2.LINE_AA)
                    k = k + 1
                    if (k > 15):
                        flag_draw = False
                        k = 0
                
                #cv2.imwrite('outReid/'+temp_f[0]+'.png',imgFrame)    
                cv2.imshow('Person-ReID', imgFrame)
                cv2.waitKey(1)  

            #print(e[0]," , ", e[1], "\n")
            
            #i=0
            #for x in list_bool:
            #    if x==True:
            #        print(files[i],list_pred[i],list_bool[i])
            #    i=i+1

            #test_images = np.array([image1, image2])
            #test_images2 = np.array([image2, image1])
            
            #feed_dict = {images: test_images, is_train: False}
            #feed_dict2 = {images: test_images2, is_train: False}
            
            #print(feed_dict)
            
            #prediction = sess.run(inference, feed_dict=feed_dict)
            #prediction2 = sess.run(inference, feed_dict=feed_dict2)
            
            print("=======================Prediction List=======================")
Example #6
def main():
    if FLAGS.mode == 'test':
        FLAGS.batch_size = 1

    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    images = tf.placeholder(
        tf.float32, [2, FLAGS.batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3],
        name='images')
    labels = tf.placeholder(tf.float32, [FLAGS.batch_size, 2], name='labels')
    is_train = tf.placeholder(tf.bool, name='is_train')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    weight_decay = 0.0005
    tarin_num_id = 0
    val_num_id = 0

    if FLAGS.mode == 'train':
        tarin_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'train')
    elif FLAGS.mode == 'val':
        val_num_id = cuhk03_dataset.get_num_id(FLAGS.data_dir, 'val')
    images1, images2 = preprocess(images, is_train)

    print('Build network')
    logits = network(images1, images2, weight_decay)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    inference = tf.nn.softmax(logits)

    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    train = optimizer.minimize(loss, global_step=global_step)
    lr = FLAGS.learning_rate

    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore model')
            saver.restore(sess, ckpt.model_checkpoint_path)

        if FLAGS.mode == 'train':
            step = sess.run(global_step)
            for i in xrange(step, FLAGS.max_steps + 1):
                batch_images, batch_labels = cuhk03_dataset.read_data(
                    FLAGS.data_dir, 'train', tarin_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {
                    learning_rate: lr,
                    images: batch_images,
                    labels: batch_labels,
                    is_train: True
                }
                sess.run(train, feed_dict=feed_dict)
                train_loss = sess.run(loss, feed_dict=feed_dict)
                print('Step: %d, Learning rate: %f, Train loss: %f' %
                      (i, lr, train_loss))

                lr = FLAGS.learning_rate * ((0.0001 * i + 1)**-0.75)
                if i % 1000 == 0:
                    saver.save(sess, FLAGS.logs_dir + 'model.ckpt', i)
        elif FLAGS.mode == 'val':
            total = 0.
            for _ in xrange(10):
                batch_images, batch_labels = cuhk03_dataset.read_data(
                    FLAGS.data_dir, 'val', val_num_id, IMAGE_WIDTH,
                    IMAGE_HEIGHT, FLAGS.batch_size)
                feed_dict = {
                    images: batch_images,
                    labels: batch_labels,
                    is_train: False
                }
                prediction = sess.run(inference, feed_dict=feed_dict)
                prediction = np.argmax(prediction, axis=1)
                label = np.argmax(batch_labels, axis=1)

                for i in xrange(len(prediction)):
                    if prediction[i] == label[i]:
                        total += 1
            print('Accuracy: %f' % (total / (FLAGS.batch_size * 10)))
            '''
            for i in xrange(len(prediction)):
                print('Prediction: %s, Label: %s' % (prediction[i] == 0, labels[i] == 0))
                image1 = cv2.cvtColor(batch_images[0][i], cv2.COLOR_RGB2BGR)
                image2 = cv2.cvtColor(batch_images[1][i], cv2.COLOR_RGB2BGR)
                image = np.concatenate((image1, image2), axis=1)
                cv2.imshow('image', image)
                key = cv2.waitKey(0)
                if key == 1048603:  # ESC key
                    break
            '''
        elif FLAGS.mode == 'test':
            images_prob = {}

            test_query = file_name("/workspace/zyf/person_data/test_query")
            test_reference = file_name(
                "/workspace/zyf/person_data/test_reference")
            image1 = cv2.imread(test_query[1])
            image1 = cv2.resize(image1, (IMAGE_WIDTH, IMAGE_HEIGHT))
            image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
            image1 = np.reshape(
                image1, (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
            for i in range(len(test_reference)):
                image_reference = cv2.imread(test_reference[i])
                image_reference = cv2.resize(image_reference,
                                             (IMAGE_WIDTH, IMAGE_HEIGHT))
                image_reference = cv2.cvtColor(image_reference,
                                               cv2.COLOR_BGR2RGB)
                image_reference = np.reshape(
                    image_reference,
                    (1, IMAGE_HEIGHT, IMAGE_WIDTH, 3)).astype(float)
                test_images = np.array([image1, image_reference])
                feed_dict = {images: test_images, is_train: False}
                prediction = sess.run(inference, feed_dict=feed_dict)
                images_prob.setdefault(test_reference[i], (prediction[0])[0])

            images_sorted = sorted(images_prob.iteritems(),
                                   key=lambda asd: asd[1],
                                   reverse=True)
            print(images_sorted[0:15])
            print(test_query[1])


# if __name__ == '__main__':
#     tf.app.run()
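Example #6 relies on a `file_name` helper that is not shown in this excerpt; a plausible, hypothetical sketch consistent with how its result is indexed (a list of full file paths) is:

import os

def file_name(directory):
    # Hypothetical helper: full paths of all entries in `directory`, sorted for stable indexing.
    return sorted(os.path.join(directory, f) for f in os.listdir(directory))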