Example #1
def export():
    with tf.Graph().as_default():
        #TODO(xuesen) for serving
        serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)

        jpegs = tf_example['image/encoded']
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        # Run inference.
        feature = vgg.inference(images)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, 'model/inshop.sgd.adam')
            # Export inference model.
            init_op = tf.group(tf.tables_initializer(), name='init_op')

            #TODO() Export inference model using regression_signature?
            feat_signature = exporter.regression_signature(
                input_tensor=serialized_tf_example, output_tensor=feature)
            named_graph_signature = {
                'inputs': exporter.generic_signature({'images': jpegs}),
                'outputs': exporter.generic_signature({'feats': feature})
            }
            model_exporter = exporter.Exporter(saver)
            model_exporter.init(default_graph_signature=feat_signature,
                                init_op=init_op,
                                named_graph_signatures=named_graph_signature)
            model_exporter.export('model/vgg_serving', tf.constant(150000),
                                  sess)
            print('Successfully exported model to model/vgg_serving.')
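For reference, a client could build the serialized tf.Example this graph parses like so (a minimal sketch; the image file name is hypothetical):

import tensorflow as tf

with open('query.jpg', 'rb') as f:  # hypothetical input image
    jpeg_bytes = f.read()
example = tf.train.Example(features=tf.train.Features(feature={
    'image/encoded': tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[jpeg_bytes])),
}))
# Feed a batch of serialized protos to the 'tf_example' placeholder:
# sess.run(feature, feed_dict={serialized_tf_example: [example.SerializeToString()]})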
Example #2
def image_test_custom():
  """Train CIFAR-10 for a number of steps."""
  with tf.Graph().as_default():
    img_name = '9_363_3.BMP'
    img_normal = imread('E:/final Project/3.4.19/croped vids/matlab custom/' + img_name)

    img_normal = np.transpose(img_normal, [1, 0, 2])
    img_normal_in = np.reshape(img_normal,[1,160,160,3])

    image = tf.placeholder(dtype=tf.float32,shape=(1,160,160,3))
    
    graph = vgg.inference(image, gpu, tf.constant(False))
    logits = graph['s']
    logits = tf.transpose(logits)
    logits_r = tf.reshape(logits,[vgg.batch_size])
    saver = tf.train.Saver(tf.global_variables())
    config = tf.ConfigProto(allow_soft_placement = True)
    sess = tf.Session(config=config)  
    ckpt = tf.train.latest_checkpoint('D:/tensorflow_fs/vgg19_train_holes/')
    init = tf.global_variables_initializer()
    sess.run(init)
    saver.restore(sess, ckpt)

    out_normal = sess.run(logits, feed_dict={image: img_normal_in})[0][0] 



    print('custom: ',out_normal)

    print('done')
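The transpose/reshape pair above assumes the BMP is exactly 160x160 RGB; an equivalent, shape-checked way to add the batch dimension (a sketch with a stand-in array):

import numpy as np

img = np.zeros((160, 160, 3), dtype=np.float32)  # stand-in for the image read above
img = np.transpose(img, [1, 0, 2])               # swap H and W, as in the example
img_in = img[None, ...]                          # same result as np.reshape(img, [1, 160, 160, 3])
assert img_in.shape == (1, 160, 160, 3)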
Example #3
def main():

    network = importlib.import_module(net_name)
    with tf.Graph().as_default():
        with tf.Session() as sess:
            input_tensor = tf.placeholder(dtype=tf.float32,
                                          shape=[1, 96, 96, 3],
                                          name='input')
            #images_placeholder = tf.placeholder(name='input', shape=[None, 96, 96, 3], dtype=tf.float32)
            # Load the model metagraph and checkpoint
            ckpt_file_path = os.path.join(
                "./ToBeConvertModels/checkpoint/0.449233.ckpt")
            # Build the inference graph

            predict_labels = vgg.inference(input_tensor, phase_train=False)

            saver = tf.train.Saver()
            tf.get_default_session().run(tf.global_variables_initializer())
            tf.get_default_session().run(tf.local_variables_initializer())
            saver.restore(tf.get_default_session(), ckpt_file_path)
            # Retrieve the protobuf graph definition and fix the batch norm nodes
            input_graph_def = sess.graph.as_graph_def()
            # Freeze the graph def
            output_graph_def = freeze_graph_def(sess, input_graph_def, [
                "vgg_16/yaw_fc8/BiasAdd", "vgg_16/pitch_fc8/BiasAdd",
                "vgg_16/roll_fc8/BiasAdd"
            ])
        # Serialize and dump the output graph to the filesystem
        with tf.gfile.GFile("./ConvertedModels/output.pb", 'wb') as f:
            f.write(output_graph_def.SerializeToString())
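Once output.pb is written, the frozen graph can be loaded for inference roughly like this (a sketch; the tensor names follow the placeholder and output nodes above, and the zero input is a stand-in):

import numpy as np
import tensorflow as tf

with tf.gfile.GFile('./ConvertedModels/output.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as g:
    tf.import_graph_def(graph_def, name='')
    inp = g.get_tensor_by_name('input:0')
    yaw = g.get_tensor_by_name('vgg_16/yaw_fc8/BiasAdd:0')
    with tf.Session(graph=g) as sess:
        print(sess.run(yaw, feed_dict={inp: np.zeros((1, 96, 96, 3), np.float32)}))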
Example #4
def export():
    with tf.Graph().as_default():
        # Build inference model.
        # Please refer to Tensorflow inception model for details.

        # Input transformation.
        jpegs = tf.placeholder(tf.string)
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        print(images)  # debug: show shape/dtype of the preprocessed batch tensor
        # Run inference.
        feature = vgg.inference(images)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, 'model/inshop.sgd.adam')
            # Export inference model.
            init_op = tf.group(tf.tables_initializer(), name='init_op')

            model_exporter = exporter.Exporter(saver)
            signature = exporter.classification_signature(
                input_tensor=jpegs, classes_tensor=None, scores_tensor=feature)
            model_exporter.init(default_graph_signature=signature,
                                init_op=init_op)
            model_exporter.export('model', tf.constant(150000), sess)
            print('Successfully exported model to model/.')
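Unlike example #1, this version feeds raw JPEG strings straight into the jpegs placeholder; inside the session block above, a query would look like this (the file name is hypothetical):

with open('query.jpg', 'rb') as f:  # hypothetical image
    feat = sess.run(feature, feed_dict={jpegs: [f.read()]})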
Example #5
def image_list_test():
  """Train CIFAR-10 for a number of steps."""
  with tf.Graph().as_default():
    image = tf.placeholder(dtype=tf.float32,shape=(1,160,160,3))
    
    graph = vgg.inference(image, gpu, tf.constant(False))
    logits = graph['s']
    logits = tf.transpose(logits)
    logits_r = tf.reshape(logits,[vgg.batch_size])
    saver = tf.train.Saver(tf.global_variables())
    config = tf.ConfigProto(allow_soft_placement = True)
    sess = tf.Session(config=config)  
    ckpt = tf.train.latest_checkpoint('D:/tensorflow_fs/vgg19_train_holes/')
    init = tf.global_variables_initializer()
    sess.run(init)
    saver.restore(sess, ckpt)
    image_list = os.listdir('E:/final Project/3.4.19/croped vids/matlab normal/')
    normal_result = []
    top_result = []
    middle_result = []
    bottom_result = []
    for img_name in image_list:

      img_normal = imread('E:/final Project/3.4.19/croped vids/matlab normal/' + img_name)
      img_top = imread('E:/final Project/3.4.19/croped vids/matlab top/' + img_name)
      img_middle = imread('E:/final Project/3.4.19/croped vids/matlab middle/' + img_name)
      img_bottom = imread('E:/final Project/3.4.19/croped vids/matlab bottom/' + img_name)


      img_normal = np.transpose(img_normal, [1, 0, 2])
      img_normal_in = np.reshape(img_normal,[1,160,160,3])

      img_top = np.transpose(img_top, [1, 0, 2])
      img_top_in = np.reshape(img_top,[1,160,160,3])

      img_middle = np.transpose(img_middle, [1, 0, 2])
      img_middle_in = np.reshape(img_middle,[1,160,160,3])

      img_bottom = np.transpose(img_bottom, [1, 0, 2])
      img_bottom_in = np.reshape(img_bottom,[1,160,160,3])

      out_normal = sess.run(logits, feed_dict={image: img_normal_in})[0][0] 
      out_top = sess.run(logits, feed_dict={image: img_top_in})[0][0] 
      out_middle = sess.run(logits, feed_dict={image: img_middle_in})[0][0] 
      out_bottom = sess.run(logits, feed_dict={image: img_bottom_in})[0][0] 

      normal_result.append(out_normal)
      top_result.append(out_top)
      middle_result.append(out_middle)
      bottom_result.append(out_bottom)
    
    print('done')
Example #6
    def __init__(self, model_path='model/inshop.sgd.adam'):
        self.model_path = model_path
        self.x = tf.placeholder(tf.string, shape=[])
        img = tf.image.decode_jpeg(self.x, channels=3)
        img = tf.expand_dims(utils.preprocess_image(img), 0)

        self.feature = vgg.inference(img)
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        assert tf.gfile.Exists(self.model_path)
        saver = tf.train.Saver()
        print('Using model from {}'.format(self.model_path))
        saver.restore(self.sess, self.model_path)
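The enclosing class name is not part of this excerpt; assuming it is called Extractor, usage is a single run call:

extractor = Extractor()             # hypothetical class name
with open('query.jpg', 'rb') as f:  # hypothetical image
    feat = extractor.sess.run(extractor.feature,
                              feed_dict={extractor.x: f.read()})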
Example #7
def batch_test():
  """Train CIFAR-10 for a number of steps."""
  with tf.Graph().as_default():
    images, labels = vgg.inputs(eval_data='test')
    graph = vgg.inference(images,gpu)
    logits = graph['s']
    logits = tf.transpose(logits)
    # Calculate loss.
    logits_r = tf.reshape(logits,[vgg.batch_size])

    diff = vgg.ang_diff(logits_r,labels)

    true_count = tf.reduce_sum(tf.cast(tf.less(diff,25),tf.uint8))
    saver = tf.train.Saver(tf.global_variables())
       
    config = tf.ConfigProto(allow_soft_placement = True)
    sess = tf.Session(config=config)
    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)
    
    ckpt = tf.train.latest_checkpoint('D:/tensorflow_fs/vgg_19_train_cont/')
    init = tf.global_variables_initializer()
    sess.run(init)
    saver.restore(sess, ckpt)

    true_count_sum = 0  # Counts the number of correct predictions.
    diffs = np.array([])
    for i in range(FLAGS.test_batch_size):
      # Run diff directly; calling tf.unstack(diff) inside the loop would add
      # new ops to the graph on every iteration.
      true_count_, diff_ = sess.run([true_count, diff])
      true_count_sum += true_count_
      diffs = np.append(diffs, diff_)
      if diff_[0] <= 25:
        print(i, " :", diff_[0])
          
    diffs_var  = np.var(diffs)
    diffs_mean = np.mean(diffs)
        
    # Compute precision @ 1.
    precision = true_count_sum / (FLAGS.test_batch_size*FLAGS.batch_size)
    print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
    print('done')
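vgg.ang_diff is defined elsewhere in the repo; a plausible NumPy equivalent of a wrap-around angular difference (an assumption, shown only to make the 25-degree threshold concrete):

import numpy as np

def ang_diff(pred_deg, label_deg):
    # Smallest absolute difference between two headings, in [0, 180].
    d = np.abs(pred_deg - label_deg) % 360.0
    return np.minimum(d, 360.0 - d)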
Example #8
def cnn_vis():
  with tf.Graph().as_default():
    img_name = '9_363.BMP'
    img_normal = imread('E:/final Project/3.4.19/croped vids/matlab normal/' + img_name)
    image = tf.placeholder(dtype=tf.float32,shape=(1,160,160,3))      
    img_normal = np.transpose(img_normal, [1, 0, 2])
    img_normal_in = np.reshape(img_normal,[1,160,160,3])
    
    graph = vgg.inference(image, gpu, tf.constant(False))
    logits = graph['s']
    logits = tf.transpose(logits)
    logits_r = tf.reshape(logits,[vgg.batch_size])
    saver = tf.train.Saver(tf.global_variables())
    config = tf.ConfigProto(allow_soft_placement = True)
    sess = tf.Session(config=config)  
    ckpt = tf.train.latest_checkpoint('D:/tensorflow_fs/vgg19_train_holes/')
    init = tf.global_variables_initializer()
    sess.run(init)
    saver.restore(sess, ckpt)
    cnnvis({image: img_normal_in},sess)

    print('done')
Example #9
def walk():
  date = datetime.now()
  date_str = date.strftime("%d%m%y_%H%M%S")
  kml_name = date_str + '.kml'
  _step_dist = 350
  size = 5 
  lat = 31.62489921
  lon = 34.84234767
  bearing = 0.5754
  heading = 296.9
  
  kml=simplekml.Kml()
  kml.newpoint(name='0', coords=[(lon,lat)])
  kml.save(kml_name)


  with tf.Graph().as_default() as g:
    image = tf.placeholder(dtype=tf.float32,shape=(1,160,160,3))
          
    graph = vgg.inference(image, gpu, tf.constant(False))
    logits = graph['s']
    config = tf.ConfigProto(allow_soft_placement = True)
    sess = tf.InteractiveSession(config=config)   
    init = tf.global_variables_initializer()
    sess.run(init)

    summary = tf.Summary()
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('D:/tensorflow_fs/vgg_eval/', g)
    saver = tf.train.Saver(graph['v'])
    ckpt = tf.train.latest_checkpoint('D:/tensorflow_fs/vgg_19_train_test/RG_0_622/')
    saver.restore(sess, ckpt)

    point_list = []
    cord_list = []
    ang_diff_list = []
    compare_list = []

    step = 0
    while ((dist.distance((31.669251,34.741851),(lat,lon)).m > 150) and step < 40):

      next_heading = (heading + bearing)%360


      step_dist = _step_dist

      new_lat,new_lon = destinationPoint(lat, lon, step_dist, next_heading)
      points = find_closest_points(new_lat, new_lon, next_heading,size)
      
      
      cord_list += [[new_lat, new_lon, bearing, next_heading, heading]]
      
      heading = HeadingTo([lat, lon], [new_lat, new_lon])

        
      lat, lon = new_lat, new_lon

      if (points[0] == []):
        raise ValueError('did not find point')  # raising a bare string is a TypeError in Python 3
      point_list += [points]
      ang_diff_list +=[[points[0][1],points[0][2]]]

      kml.newpoint(name=str(step+1), coords=[(new_lon,new_lat)])
      kml.save(kml_name)

      bearings = [[]]*len(points)
      for p in range(len(points)):
        if (points[p] != []):
          ang_diff = points[p][2]
          file_name = points[p][0][3]
          frame_num = int(points[p][0][4])
          p_lat = float(points[p][0][0])
          p_lon = float(points[p][0][1])
          if (file_name in ['33','34','35', '46', '47', '50', '51_1', '53', '54']):
            filetype = '.mp4'
          else:
            filetype = '.mov'

          filename = 'E:/Final Project/walk/vids/DJI_00' + file_name + filetype   
          vid = imageio.get_reader(filename,'ffmpeg')

          img = vid.get_data(frame_num)
          img_rsz_t = imresize(img,[160,160,3])


          if (p == 0):
            print('vid: '+ str(file_name) + ', frame num: '+ str(frame_num) + ', filetype: ' + filetype)


          img_in = np.reshape(img_rsz_t,[1,160,160,3])

          b_c = bearing_compensation([p_lat,p_lon],[lat,lon])

          # The graph was built with tf.constant(False); there is no
          # is_training placeholder in this graph to feed.
          bearings[p] = (sess.run(logits, feed_dict={image: img_in})[0][0] - ang_diff + b_c)


      bearing_avg = np.average(bearings)
      compare_list += [bearings,bearing_avg,[z[2] for z in points]]
      bearing = bearing_avg
      step += 1


  cv2.destroyAllWindows()

  print(cord_list)
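destinationPoint and HeadingTo are project helpers; since geopy's distance module is already used above as dist, destinationPoint could plausibly be implemented with its geodesic destination (a sketch under that assumption):

from geopy import distance as dist

def destinationPoint(lat, lon, distance_m, bearing_deg):
    # Geodesic point reached from (lat, lon) after distance_m meters on bearing_deg.
    p = dist.distance(meters=distance_m).destination((lat, lon), bearing=bearing_deg)
    return p.latitude, p.longitude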
Example #10
def train():

    print(FLAGS.train_dir)

    with tf.Session() as sess:

        global_step = tf.contrib.framework.get_or_create_global_step()

        images = tf.placeholder(tf.float32,
                                shape=(FLAGS.batch_size, IMAGE_SIZE,
                                       IMAGE_SIZE, 3))
        labels = tf.placeholder(tf.int32, shape=(FLAGS.batch_size))
        indexes = tf.placeholder(tf.int32, shape=(FLAGS.batch_size))
        #mode_eval = tf.placeholder(tf.bool, shape=())
        keep_prob = tf.placeholder(tf.float32)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = vgg.inference(images, keep_prob)

        # Calculate loss.
        loss = vgg.loss(logits, labels)

        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        prediction = tf.argmax(logits, 1)

        #tf.summary.scalar('prediction', loss)

        cmatix = tf.contrib.metrics.confusion_matrix(prediction, labels)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = vgg.train(loss, global_step)

        #train = tf.train.GradientDescentOptimizer(0.00001).minimize(loss)

        tf.summary.scalar('dropout_keep_probability', keep_prob)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        # summary_writer_validation = tf.summary.FileWriter(FLAGS.validate_dir)

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())

        loss_train = np.array([])
        loss_valid = np.array([])
        precision_test = np.array([])

        steps_train = np.array([])
        steps_valid = np.array([])
        steps_precision = np.array([])

        confusion_matrix_predictions = np.array([])
        confusion_matrix_labels = np.array([])

        EPOCH = 0
        start_time_global = time.time()

        for step in range(FLAGS.max_steps):

            #if step > 100: FLAGS.__setattr__("INITIAL_LEARNING_RATE", 0.001)

            if (step % EPOCHS_NUM == 0) and step > 300:
                print("validating")

                #assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

                if step != 0: EPOCH = EPOCH + 1

                # feeding data for validation

                images_batch, labels_batch, index_batch = sess.run(
                    [images_v, labels_v, indexs_v])

                # Run only the loss op, so validation does not update weights.
                loss_value = sess.run(
                    loss,
                    feed_dict={
                        images: images_batch,
                        labels: labels_batch,
                        indexes: index_batch,
                        keep_prob: 1.0
                    })

                print('%s: loss = %.5f' % (datetime.now(), loss_value))

                loss_valid = np.concatenate((loss_valid, [loss_value]))
                steps_valid = np.concatenate((steps_valid, [EPOCH]))

            else:

                #print("here")
                #print (step)

                start_time = time.time()

                # feed data for training

                images_batch, labels_batch, index_batch = sess.run(
                    [images_t, labels_t, indexs_t])

                # Run model
                _, loss_value, summary_str = sess.run(
                    [train_op, loss, summary_op],
                    feed_dict={
                        images: images_batch,
                        labels: labels_batch,
                        indexes: index_batch,
                        keep_prob: 0.5
                    })

                duration = time.time() - start_time

                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                if step % 10 == 0:
                    #summary_str = sess.run([summary_op],
                    #                     feed_dict={images: images_batch, labels: labels_batch, indexes: index_batch, keep_prob: 0.5})
                    writer.add_summary(summary_str, step)

                if step % 200 == 0:
                    num_examples_per_step = FLAGS.batch_size
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = float(duration)

                    format_str = (
                        '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), step, loss_value,
                                        examples_per_sec, sec_per_batch))

                if (step - 2) % EPOCHS_NUM == 0:
                    loss_train = np.concatenate((loss_train, [loss_value]))
                    steps_train = np.concatenate((steps_train, [EPOCH]))

                # Save the model checkpoint periodically.
                if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                    checkpoint_path = os.path.join(FLAGS.train_dir,
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

                    np.savez(FLAGS.train_dir + '_losses.npz',
                             steps_train=steps_train,
                             loss_train=loss_train,
                             steps_valid=steps_valid,
                             loss_valid=loss_valid,
                             precision=precision_test,
                             steps_precision=steps_precision,
                             confusion_matrix_predictions=
                             confusion_matrix_predictions,
                             confusion_matrix_labels=confusion_matrix_labels)

            if EPOCH == 400:
                break

        final_time_global = time.time()

        print("Finish")

        print(final_time_global - start_time_global)

        sess.close()
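The .npz written by the checkpoint branch can be reloaded later for plotting; the keys match the np.savez call above (a short sketch):

import numpy as np

data = np.load(FLAGS.train_dir + '_losses.npz')  # same path the loop saves to
steps_train, loss_train = data['steps_train'], data['loss_train']
steps_valid, loss_valid = data['steps_valid'], data['loss_valid']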
Example #11
def train():
  """Train CIFAR-10 for a number of steps."""
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)

    num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))

    images_train, labels_train = vgg.distorted_inputs()

    images_val, labels_val = vgg.inputs(eval_data='test')

    is_training = tf.placeholder('bool', [], name='is_training')

    images, labels = tf.cond(is_training,
        lambda: (images_train, labels_train),
        lambda: (images_val, labels_val))


    # Build a Graph that computes the logits predictions from the
    # inference model.
    graph =  vgg.inference(images,gpu,is_training)
    logits = graph['s']
    params = graph['p']
    logits = tf.transpose(logits)
    # Calculate loss.
    loss = vgg.loss(logits, labels)


    logits_r = tf.reshape(logits,[vgg.batch_size])

    diff = vgg.ang_diff(logits_r,labels)

    true_count = tf.reduce_sum(tf.cast(tf.less(diff,25),tf.uint8))

    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = vgg.train(loss, global_step)

    # Create a saver.
    saver = tf.train.Saver(tf.global_variables())
    
    summary = tf.Summary()
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    # Build an initialization operation to run below.
    init = tf.global_variables_initializer()

    # Start running operations on the Graph.

    
    config = tf.ConfigProto(allow_soft_placement = True)
    #config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)
    summary_writer = tf.compat.v1.summary.FileWriter(FLAGS.eval_dir, sess.graph)

    if (FLAGS.Resume):
      ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
      if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        
        sub_saver =  tf.train.Saver(graph['v'])
        sub_saver.save(sess, FLAGS.train_dir)
      else:
        print('No checkpoint file found')
        return
    else:
      sess.run(init,{ is_training: False })
      load_weights(params,'vgg19.npy', sess)

    test_iters = 11
    total_sample_count = test_iters * FLAGS.batch_size 
    
    for step in range(FLAGS.max_steps):
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss],{ is_training: True })
      duration = time.time() - start_time
      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

      if step > 1 and step % 250 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)

        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print (format_str % (datetime.now(), step, loss_value,
                             examples_per_sec, sec_per_batch))
        summary_str = sess.run(summary_op, {is_training: False})
        summary_writer.add_summary(summary_str, step)


      
      if (step > 1 and step % 1000 == 0) or (step + 1) == FLAGS.max_steps:

        true_count_sum = 0  # Counts the number of correct predictions.
        diffs = np.array([])
        for i in range(test_iters):
          # Run diff directly; tf.unstack(diff) inside the loop would add new
          # ops to the graph on every iteration.
          true_count_, diff_ = sess.run([true_count, diff], {is_training: False})
          true_count_sum += true_count_
          diffs = np.append(diffs,diff_)
          
        diffs_var  = np.var(diffs)
        diffs_mean = np.mean(diffs)
        
        # Compute precision @ 1.
        precision = true_count_sum / total_sample_count
        print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
        summary.ParseFromString(sess.run(summary_op,{ is_training: False }))
        summary.value.add(tag='Precision @ 1', simple_value=precision)
        summary.value.add(tag='diffs_var', simple_value=diffs_var)
        summary.value.add(tag='diffs_mean', simple_value=diffs_mean)  
        summary_writer.add_summary(summary, step)
        # Save the model checkpoint periodically.
        saver.save(sess, checkpoint_path, global_step=step)
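load_weights is a repo helper; a common pattern for initializing from vgg19.npy (the widely used tensorflow-vgg format) looks like this — a sketch, assuming params maps layer names to [kernel, bias] variable pairs:

import numpy as np

def load_weights(params, path, sess):
    weights = np.load(path, encoding='latin1', allow_pickle=True).item()
    for name, (w, b) in weights.items():
        if name in params:  # skip layers the model redefines
            sess.run([params[name][0].assign(w), params[name][1].assign(b)])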
Example #12
def run_training():

    #1.create log and model saved dir according to the datetime
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    models_dir = os.path.join("saved_models", subdir, "models")
    if not os.path.isdir(models_dir):  # Create the model directory if it doesn't exist
        os.makedirs(models_dir)
    logs_dir = os.path.join("saved_models", subdir, "logs")
    if not os.path.isdir(logs_dir):  # Create the log directory if it doesn't exist
        os.makedirs(logs_dir)
    topn_models_dir = os.path.join("saved_models", subdir, "topn")#topn dir used for save top accuracy model
    if not os.path.isdir(topn_models_dir):  # Create the topn model directory if it doesn't exist
        os.makedirs(topn_models_dir)
    topn_file=open(os.path.join(topn_models_dir,"topn_acc.txt"),"a+")
    topn_file.close()


    #2.load dataset and define placeholder
    demo=TFRecordDataset(  )
    train_iterator,train_next_element=demo.generateDataset(tfrecord_path='tfrecord_dataset/train.tfrecords',batch_size=512)


    phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
    images_placeholder = tf.placeholder(name='input', shape=[None, 96, 96, 3], dtype=tf.float32)
    binned_pose_placeholder = tf.placeholder(name='binned_pose', shape=[None,3 ], dtype=tf.int64)
    cont_labels_placeholder = tf.placeholder(name='cont_labels', shape=[None,3 ], dtype=tf.float32)

    yaw,pitch,roll = vgg.inference(images_placeholder,phase_train=phase_train_placeholder)

    # Per-example cross-entropies (not logits).
    yaw_xent   = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=yaw,labels=binned_pose_placeholder[:,0])
    pitch_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pitch,labels=binned_pose_placeholder[:,1])
    roll_xent  = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=roll,labels=binned_pose_placeholder[:,2])


    loss_yaw   = tf.reduce_mean(yaw_xent)
    loss_pitch = tf.reduce_mean(pitch_xent)
    loss_roll  = tf.reduce_mean(roll_xent)


    softmax_yaw=tf.nn.softmax(yaw)
    softmax_pitch=tf.nn.softmax(pitch)
    softmax_roll=tf.nn.softmax(roll)

    yaw_predicted   =  tf.math.reduce_sum( (softmax_yaw * tf.linspace(0.0,66.0,67) ), 1 )* 3 - 99
    pitch_predicted =  tf.math.reduce_sum( (softmax_pitch * tf.linspace(0.0,66.0,67) ), 1 )* 3 - 99
    roll_predicted  =  tf.math.reduce_sum( (softmax_roll * tf.linspace(0.0,66.0,67) ), 1 )* 3 - 99



    yaw_mse_loss = tf.losses.mean_squared_error(labels=cont_labels_placeholder[:,0], predictions=yaw_predicted)
    pitch_mse_loss = tf.losses.mean_squared_error(labels=cont_labels_placeholder[:,1], predictions=pitch_predicted)
    roll_mse_loss = tf.losses.mean_squared_error(labels=cont_labels_placeholder[:,2], predictions=roll_predicted)

    alpha = 0.001

    total_loss_softmax=(loss_yaw+loss_pitch+loss_roll)
    total_loss_mse = alpha*(yaw_mse_loss+pitch_mse_loss+roll_mse_loss)
    total_loss = total_loss_softmax+total_loss_mse

    

    yaw_correct_prediction = tf.equal(tf.argmax(yaw,1),binned_pose_placeholder[:,0] )
    pitch_correct_prediction = tf.equal(tf.argmax(pitch,1),binned_pose_placeholder[:,1] )
    roll_correct_prediction = tf.equal(tf.argmax(roll,1),binned_pose_placeholder[:,2] )

    yaw_accuracy = tf.reduce_mean(tf.cast(yaw_correct_prediction, tf.float32))
    pitch_accuracy = tf.reduce_mean(tf.cast(pitch_correct_prediction, tf.float32))
    roll_accuracy = tf.reduce_mean(tf.cast(roll_correct_prediction, tf.float32))

    #adjust learning rate
    global_step = tf.Variable(0, trainable=False)
    #learning_rate = tf.train.exponential_decay(0.001,global_step,100000,0.98,staircase=True)
    learning_rate = tf.train.piecewise_constant(global_step, boundaries=[8000, 16000, 24000, 32000], values=[0.001, 0.0001, 0.0001, 0.00001, 0.000001],name='lr_schedule')



    #optimize loss and update
    #optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)
    optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
    #optimizer = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(total_loss,global_step=global_step)


    saver=tf.train.Saver(tf.trainable_variables(),max_to_keep=5)

    sess=utils.session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    saver.restore(sess, "/home/hanson/work/FaceHeadpose_TF/saved_models/20190801-135403/models/0.563564.ckpt")

    minimum_loss_value=999.0
    total_loss_value = 0.0
    for epoch in range(1000):
        sess.run(train_iterator.initializer)
        while True:
            use_time=0
            try:
                images_train, binned_pose,cont_labels = sess.run(train_next_element)
                start_time=time.time()
                input_dict={phase_train_placeholder:True,images_placeholder:images_train,binned_pose_placeholder:binned_pose,cont_labels_placeholder:cont_labels}
                
                total_loss_mse_value,total_loss_softmax_value,yaw_acc,pitch_acc,roll_acc,step,lr,train_loss,_ = sess.run([
                                        total_loss_mse,
                                        total_loss_softmax,
                                        yaw_accuracy,
                                        pitch_accuracy,
                                        roll_accuracy,
                                        global_step,
                                        learning_rate,
                                        total_loss,
                                        train_op],
                                        feed_dict=input_dict)

                total_loss_value+=train_loss
                end_time=time.time()
                use_time+=(end_time-start_time)

                # display train result
                if(step%100==0):
                    use_time=0
                    average_loss_value = total_loss_value/100.0
                    total_loss_value=0
                    print ("step:%d lr:%f sloss:%f mloss%f average_loss:%f YAW_ACC:%.2f PITCH_ACC:%.2f ROLL_ACC:%.2f epoch:%d"%(step,
                                                                                                           lr,
                                                                                                           total_loss_softmax_value,
                                                                                                           total_loss_mse_value,
                                                                                                           float(average_loss_value),
                                                                                                           yaw_acc,
                                                                                                           pitch_acc,
                                                                                                           roll_acc, 
                                                                                                           epoch) )
                    if average_loss_value<minimum_loss_value:
                        print("save ckpt")
                        filename_cpkt = os.path.join(models_dir,"%f.ckpt"%average_loss_value)
                        saver.save(sess, filename_cpkt)
                        minimum_loss_value=average_loss_value

            except tf.errors.OutOfRangeError:
                print("End of epoch ")
                break
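The expected-value decoding above (softmax weighted by tf.linspace(0.0, 66.0, 67), scaled by 3 and shifted by -99) maps 67 bins of 3 degrees onto [-99, 99]; a quick NumPy check of the endpoints:

import numpy as np

bins = np.linspace(0.0, 66.0, 67)
lowest, highest = np.eye(67)[0], np.eye(67)[66]  # one-hot softmax outputs
print(np.sum(lowest * bins) * 3 - 99)   # -99.0
print(np.sum(highest * bins) * 3 - 99)  # 99.0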
Example #13
def run_training():

    #1.create log and model saved dir according to the datetime
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    models_dir = os.path.join("saved_models", subdir, "models")
    if not os.path.isdir(
            models_dir):  # Create the model directory if it doesn't exist
        os.makedirs(models_dir)
    logs_dir = os.path.join("saved_models", subdir, "logs")
    if not os.path.isdir(
            logs_dir):  # Create the log directory if it doesn't exist
        os.makedirs(logs_dir)
    topn_models_dir = os.path.join(
        "saved_models", subdir,
        "topn")  #topn dir used for save top accuracy model
    if not os.path.isdir(
            topn_models_dir
    ):  # Create the topn model directory if it doesn't exist
        os.makedirs(topn_models_dir)
    topn_file = open(os.path.join(topn_models_dir, "topn_acc.txt"), "a+")
    topn_file.close()

    #2.load dataset and define placeholder
    conf = config.get_config()
    demo = TFRecordDataset(conf)
    train_iterator, train_next_element = demo.generateDataset(
        tfrecord_path='tfrecord_dataset/train.tfrecords',
        test_mode=0,
        batch_size=256)

    phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')
    images_placeholder = tf.placeholder(
        name='input',
        shape=[None, conf.input_img_height, conf.input_img_width, 3],
        dtype=tf.float32)
    binned_pose_placeholder = tf.placeholder(name='binned_pose',
                                             shape=[None, 3],
                                             dtype=tf.int64)
    cont_labels_placeholder = tf.placeholder(name='cont_labels',
                                             shape=[None, 3],
                                             dtype=tf.float32)

    yaw, pitch, roll = vgg.inference(images_placeholder,
                                     phase_train=phase_train_placeholder)

    loss_yaw = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=yaw, labels=binned_pose_placeholder[:, 0])
    loss_pitch = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=pitch, labels=binned_pose_placeholder[:, 1])
    loss_roll = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=roll, labels=binned_pose_placeholder[:, 2])

    softmax_yaw = tf.nn.softmax(yaw)
    softmax_pitch = tf.nn.softmax(pitch)
    softmax_roll = tf.nn.softmax(roll)

    # Reduce over the bin axis (1) so predictions stay per-example, matching
    # the usage in the previous example.
    yaw_predicted = tf.math.reduce_sum(
        (softmax_yaw * tf.linspace(0.0, 67.0, 68)), 1) * 3 - 99
    pitch_predicted = tf.math.reduce_sum(
        (softmax_pitch * tf.linspace(0.0, 67.0, 68)), 1) * 3 - 99
    roll_predicted = tf.math.reduce_sum(
        (softmax_roll * tf.linspace(0.0, 67.0, 68)), 1) * 3 - 99

    yaw_mse_loss = tf.reduce_mean(
        tf.square(yaw_predicted - cont_labels_placeholder[:, 0]))
    pitch_mse_loss = tf.reduce_mean(
        tf.square(pitch_predicted - cont_labels_placeholder[:, 1]))
    roll_mse_loss = tf.reduce_mean(
        tf.square(roll_predicted - cont_labels_placeholder[:, 2]))

    # # Total loss
    #loss_yaw   += 0.0001 * yaw_mse_loss
    #loss_pitch += 0.0001 * pitch_mse_loss
    #loss_roll  += 0.0001 * roll_mse_loss

    #reg_loss=tf.reduce_mean(0.0001 * yaw_mse_loss+ 0.0001 * pitch_mse_loss+ 0.0001 * roll_mse_loss)
    #softmax_loss=tf.reduce_mean(yaw_predicted+pitch_predicted+roll_predicted)

    # Average the per-example cross-entropies into a single scalar loss.
    total_loss = tf.reduce_mean(loss_yaw + loss_pitch + loss_roll)
    #total_loss=reg_loss+softmax_loss

    #correct_prediction = tf.equal(tf.argmax(predictions,1),labels_placeholder )
    #accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    #adjust learning rate
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(0.001,
                                               global_step,
                                               100000,
                                               0.98,
                                               staircase=True)

    #optimize loss and update
    #optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)
    optimizer = tf.train.MomentumOptimizer(learning_rate,
                                           0.9,
                                           use_nesterov=True)
    #optimizer = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
    grads = optimizer.compute_gradients(total_loss)

    #with tf.name_scope('clip_grads'):
    #grads = slim.learning.clip_gradient_norms(grads, 2 )

    train_op = optimizer.apply_gradients(grads, global_step=global_step)
    #train_op=tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True).minimize(total_loss,global_step=global_step)

    saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=5)

    sess = fu.session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    for epoch in range(conf.max_nrof_epochs):
        sess.run(train_iterator.initializer)
        while True:
            use_time = 0
            try:
                images_train, binned_pose, cont_labels = sess.run(
                    train_next_element)
                start_time = time.time()
                input_dict = {
                    phase_train_placeholder: True,
                    images_placeholder: images_train,
                    binned_pose_placeholder: binned_pose,
                    cont_labels_placeholder: cont_labels
                }
                step, lr, train_loss, _ = sess.run(
                    [global_step, learning_rate, total_loss, train_op],
                    feed_dict=input_dict)

                end_time = time.time()
                use_time += (end_time - start_time)

                #display train result
                if (step % conf.display_iter == 0):
                    print("step:%d lr:%f time:%.3f total_loss:%.3f  epoch:%d" %
                          (step, lr, use_time, float(train_loss), epoch))
                    use_time = 0

            except tf.errors.OutOfRangeError:
                print("End of epoch ")
                break
Example #14
def neurons_test():
  size = 3 
  test_points=[[31.630185, 34.821536],[31.64212421,34.79050406],[31.6483181,34.7808723],[31.6483181,34.7808723],[31.65319346,34.76873743],[31.66124538,34.7553989],[31.66512506,34.74895476]]
  #test_points=[[31.630185, 34.821536]]
  thresh = 5
  with tf.Graph().as_default() as g:
    image = tf.compat.v1.placeholder(dtype=tf.float32,shape=(1,160,160,3))      
    graph = vgg.inference(image, gpu,tf.constant(False))
    activations = graph['a']
    act_thr = {}
    for layer in activations:
      if 'fc' in layer:
        act_thr[layer] = np.zeros([360,activations[layer].shape[1]])
   
    config = tf.ConfigProto(allow_soft_placement = True)
    sess = tf.InteractiveSession(config=config)   
    init = tf.global_variables_initializer()
    sess.run(init)
    saver = tf.train.Saver(graph['v'])
    ckpt = tf.train.latest_checkpoint('D:/tensorflow_fs/vgg_19_train_test/')
    saver.restore(sess, ckpt)
    idx = 0
    for lat,lon in test_points:
      idx = idx + 1      
      heading_2_dest = HeadingTo ([lat, lon],[31.669243,34.742072])

      start = 1
      for heading in range(start,360):
     
        points = find_closest_points2(lat, lon,heading)
        if (points[0] == []):
          raise ValueError('did not find point')

        bearings = [[]]*len(points)
        locations = [[]]*len(points)
        for p in range(len(points)):
          if (points[p] != []):
            ang_diff = points[p][2]
            file_name = points[p][0][3]
            frame_num = int(points[p][0][4])
            p_lat = float(points[p][0][0])
            p_lon = float(points[p][0][1])
            locations[p] = [p_lat,p_lon]
            p_heading = int(np.floor(float(points[p][0][5])))

            if (file_name in ['33','34','35', '46', '47', '50', '51_1', '53', '54']):
              filetype = '.mp4'
            else:
              filetype = '.mov'

            filename = 'E:/Final Project/walk/vids/DJI_00' + file_name + filetype   
            vid = imageio.get_reader(filename,'ffmpeg')

            img = vid.get_data(frame_num)
            img_rsz = imresize(img,[160,160,3])
            img_rsz[:,:,1] = 0
            if (p == 1):
              print('vid: '+ str(file_name) + ', frame num: '+ str(frame_num) + ', filetype: ' + filetype)

            img_in = np.reshape(img_rsz,[1,160,160,3])         
            for layer in activations:
              if 'fc' in layer:
                # Accumulate activations per heading, normalized by the 7 test points.
                act_thr[layer][p_heading] += (sess.run(activations[layer],feed_dict={image: img_in})[0])/7
        
    for layer in activations:
      if 'fc' in layer:
        scipy.io.savemat('E:/Final Project/walk/neurons/'+layer+'.mat', mdict={layer: act_thr[layer]})        
    print(str(idx) + ' done')  
Example #15
def location_test():
  size = 8 
  lat = 31.653862
  lon = 34.768527
  heading = 10
  heading_2_dest = HeadingTo ([lat, lon],[31.669243,34.742072])
  with tf.Graph().as_default() as g:
    image = tf.placeholder(dtype=tf.float32,shape=(1,160,160,3))
          
    graph = vgg.inference(image, gpu, tf.constant(False))
    logits = graph['s']
    config = tf.ConfigProto(allow_soft_placement = True)
    sess = tf.InteractiveSession(config=config)   
    init = tf.global_variables_initializer()
    sess.run(init)  # the graph is built with tf.constant(False); is_training is not defined here
    saver = tf.train.Saver(graph['v'])
    ckpt = tf.train.latest_checkpoint('D:/tensorflow_fs/vgg19_train_holes/')
    saver.restore(sess, ckpt)

    points = find_closest_points(lat, lon,heading,size)
    if (points[0] == []):
      raise ValueError('did not find point')

    bearings = [[]]*len(points)
    locations = [[]]*len(points)
    for p in range(len(points)):
      if (points[p] != []):
        ang_diff = points[p][2]
        file_name = points[p][0][3]
        frame_num = int(points[p][0][4])
        p_lat = float(points[p][0][0])
        p_lon = float(points[p][0][1])
        locations[p] = [p_lat,p_lon]
        if (file_name in ['33','34','35', '46', '47', '50', '51_1', '53', '54']):
          filetype = '.mp4'
        else:
          filetype = '.mov'

        filename = 'E:/Final Project/walk/vids/DJI_00' + file_name + filetype   
        vid = imageio.get_reader(filename,'ffmpeg')

        img = vid.get_data(frame_num)
        img_rsz = imresize(img,[160,160,3])
        img_rsz[:,:,1] = 0
        if (p == 2):
          print('vid: '+ str(file_name) + ', frame num: '+ str(frame_num) + ', filetype: ' + filetype)


        img_in = np.reshape(img_rsz,[1,160,160,3])

        b_c = bearing_compensation([p_lat,p_lon],[lat,lon])

        bearings[p] = (sess.run(logits, feed_dict={image: img_in})[0][0] - ang_diff + b_c)




    bearing_avg = np.average(bearings)

    bearing = bearing_avg

    
    
    db_heading = float(points[2][0][5])
    ang_diff_abs = np.minimum(np.abs(db_heading - heading_2_dest), 360 - np.abs(db_heading - heading_2_dest))
    db_h_shift = (db_heading + (360 - heading_2_dest))%360
    ang_diff_sign = np.sign(db_h_shift - 180)
    ang_diff = ang_diff_abs * ang_diff_sign

    print('cord: ',points[2][0][0],',',points[2][0][1],' heading: ',db_heading,', heading to dest: ',heading_2_dest,'headings diff: ',ang_diff, 'correction: ',bearing)
    print('done')
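A worked check of the signed heading difference computed at the end (pure NumPy, values chosen for illustration):

import numpy as np

db_heading, heading_2_dest = 350.0, 10.0
ang_diff_abs = np.minimum(np.abs(db_heading - heading_2_dest),
                          360 - np.abs(db_heading - heading_2_dest))
db_h_shift = (db_heading + (360 - heading_2_dest)) % 360
print(ang_diff_abs * np.sign(db_h_shift - 180))  # 20.0; swapping the two headings gives -20.0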
Example #16
def batch_walk():
  with tf.Graph().as_default() as g:
    image = tf.placeholder(dtype=tf.float32,shape=(1,160,160,3))       
    graph = vgg.inference(image, gpu)
    logits = graph['s']
    config = tf.ConfigProto(allow_soft_placement = True)
    sess = tf.InteractiveSession(config=config)   
    init = tf.global_variables_initializer()
    sess.run(init)
    summary = tf.Summary()
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('D:/tensorflow_fs/vgg_eval/', g)
    saver = tf.train.Saver(graph['v'])
    ckpt = tf.train.latest_checkpoint('D:/tensorflow_fs/vgg19_train_holes/')
    saver.restore(sess, ckpt)
    for run in range(50):
      date = datetime.now()
      date_str = date.strftime("%d%m%y_%H%M%S")
      kml_name = date_str +'run_' + str(run) + '.kml'
      f = open("run_log_" + str(run) + '.txt', "a")
      _step_dist = 350
      size = 5 
      init_step_dist = random.randint(1000,5000)
      lat, lon = destinationPoint(31.62489921, 34.84234767, init_step_dist, 296.9) 
      bearing = 0
      heading = 296.9 + (random.randint(0,30) - 60)
      kml=simplekml.Kml()
      kml.newpoint(name='0', coords=[(lon,lat)])
      kml.save(kml_name)
      step = 0
      while ((dist.distance((31.669251,34.741851),(lat,lon)).m > 150) and step < 40):
        next_heading = (heading + bearing)%360
        step_dist = _step_dist
        new_lat,new_lon = destinationPoint(lat, lon, step_dist, next_heading)
        points = find_closest_points(new_lat, new_lon, next_heading,size)    
        heading = HeadingTo([lat, lon], [new_lat, new_lon])       
        lat, lon = new_lat, new_lon
        f.write(str(step) + ' :' + str(lat) + ',' + str(lon)+ ',' + str(heading) + '\n')
        if (points[0] == []):
          raise ValueError('did not find point')
        kml.newpoint(name=str(step+1), coords=[(new_lon,new_lat)])
        kml.save(kml_name)
        bearings = [[]]*len(points)
        for p in range(len(points)):
          if (points[p] != []):
            ang_diff = points[p][2]
            file_name = points[p][0][3]
            frame_num = int(points[p][0][4])
            p_lat = float(points[p][0][0])
            p_lon = float(points[p][0][1])
            if (file_name in ['33','34','35', '46', '47', '50', '51_1', '53', '54']):
              filetype = '.mp4'
            else:
              filetype = '.mov'

            filename = 'E:/Final Project/walk/vids/DJI_00' + file_name + filetype   
            vid = imageio.get_reader(filename,'ffmpeg')

            img = vid.get_data(frame_num)
            img_rsz = imresize(img,[160,160,3])
            img_rsz[:,:,1] = 0
            img_in = np.reshape(img_rsz,[1,160,160,3])
            b_c = bearing_compensation([p_lat,p_lon],[lat,lon])
            bearings[p] = (sess.run(logits, feed_dict={image: img_in})[0][0] - ang_diff + b_c)
            if (p == 0):
              print('vid: '+ str(file_name) + ', frame num: '+ str(frame_num) + ', filetype: ' + filetype)

        bearing_avg = np.average(bearings)
        bearing = bearing_avg
        step += 1
      f.close()
Example #17
def train():
    sys.stdout.write("\033[93m")  # yellow message

    print("Load and test model")

    sys.stdout.write("\033[0;0m")

    with tf.Session() as sess:

        global_step = tf.contrib.framework.get_or_create_global_step()

        images = tf.placeholder(tf.float32,
                                shape=(FLAGS.batch_size, IMAGE_SIZE,
                                       IMAGE_SIZE, 3))
        labels = tf.placeholder(tf.int32, shape=(FLAGS.batch_size))
        indexes = tf.placeholder(tf.int32, shape=(FLAGS.batch_size))
        # mode_eval = tf.placeholder(tf.bool, shape=())
        keep_prob = tf.placeholder(tf.float32)

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = vgg.inference(images, keep_prob)

        # Calculate loss.
        loss = vgg.loss(logits, labels)

        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        prediction = tf.argmax(logits, 1)

        #cmatix = tf.contrib.metrics.confusion_matrix(prediction, labels)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = vgg.train(loss, global_step)

        # train = tf.train.GradientDescentOptimizer(0.00001).minimize(loss)

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())

        # Restore the moving average version of the learned variables for eval.

        #variable_averages = tf.train.ExponentialMovingAverage(
        #    vgg.MOVING_AVERAGE_DECAY)
        #variables_to_restore = variable_averages.variables_to_restore()
        #saver = tf.train.Saver(variables_to_restore)

        #saver = tf.train.import_meta_graph('/home/mikelf/Datasets/T-lessV2/restore_models/model.ckpt-61000.meta')

        saver.restore(
            sess,
            "/home/mikelf/experiments/full_test/vgg_scratch/100p/checkpoint/vgg_train_rgb_16bs01lr_SGD_100p/model.ckpt-85000"
        )

        #sess.run(tf.global_variables_initializer())

        print("Model restored.")

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Build an initialization operation to run below.
        ##init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        # sess = tf.Session(config=tf.ConfigProto(
        #    log_device_placement=FLAGS.log_device_placement))

        ##sess.run(init)

        coord = tf.train.Coordinator()

        # Start the queue runners.
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        summary_writer_train = tf.summary.FileWriter(FLAGS.train_dir,
                                                     sess.graph)
        # summary_writer_validation = tf.summary.FileWriter(FLAGS.validate_dir)

        loss_train = np.array([])
        loss_valid = np.array([])
        precision_test = np.array([])

        steps_train = np.array([])
        steps_valid = np.array([])
        steps_precision = np.array([])

        EPOCH = 0
        start_time_global = time.time()

        print("getting precision on test dataset")

        # assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

        # feeding data for evaluation

        num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
        true_count = 0  # Counts the number of correct predictions.
        total_sample_count = num_iter * FLAGS.batch_size
        step = 0
        x = []
        cf_matrix_array = []
        labels_array = []

        while step < num_iter:
            images_batch, labels_batch, index_batch = sess.run(
                [images_p, labels_p, indexs_p])

            # top_k_op yields per-example correctness; prediction yields the
            # argmax class labels collected for the confusion matrix below.
            predictions, cf_matrix = sess.run(
                [top_k_op, prediction],
                feed_dict={
                    images: images_batch,
                    labels: labels_batch,
                    indexes: index_batch,
                    keep_prob: 1.0
                })

            true_count += np.sum(predictions)
            step += 1
            x.extend(index_batch)
            cf_matrix_array = np.append(cf_matrix_array, cf_matrix, axis=0)
            labels_array = np.append(labels_array, labels_batch, axis=0)

        print(cf_matrix_array.shape)

        print(len(x))
        dupes = [xa for n, xa in enumerate(x) if xa in x[:n]]
        # print(sorted(dupes))
        print(len(dupes))

        precision = true_count / total_sample_count

        print('%s: precision @ 1 = %.5f' % (datetime.now(), precision))

        precision_test = np.concatenate((precision_test, [precision]))
        steps_precision = np.concatenate((steps_precision, [EPOCH]))

        final_time_global = time.time()

        print("Finish")

        print(final_time_global - start_time_global)

        cnf_matrix = confusion_matrix(labels_array, cf_matrix_array)
        np.set_printoptions(precision=2)

        class_names = [
            '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
            '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23',
            '24', '25', '26', '27', '28', '29', '30'
        ]

        # Plot non-normalized confusion matrix
        plt.figure()
        plot_confusion_matrix(cnf_matrix,
                              classes=class_names,
                              title='Confusion matrix, without normalization')

        # Plot normalized confusion matrix
        #plt.figure()
        #plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
        #                      title='Normalized confusion matrix')

        plt.show()

        coord.request_stop()
        coord.join(threads)

        sess.close()
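This excerpt additionally relies on module-level imports that are not shown; under the usual choices they would be (an assumption — plot_confusion_matrix is the standard scikit-learn docs helper):

import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix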
Example #18
def train():
    with tf.Graph().as_default():

        data_set = cifar10.CIFAR10()
        images = data_set.load(FLAGS.data_path)

        global_step = tf.Variable(0, trainable=False)

        random_z = vgg.inputs()

        D_logits_real, D_logits_fake, D_logits_fake_for_G, \
        D_sigmoid_real, D_sigmoid_fake, D_sigmoid_fake_for_G = \
          vgg.inference(images, random_z)

        G_loss, D_loss = vgg.loss_l2(D_logits_real, D_logits_fake,
                                     D_logits_fake_for_G)

        t_vars = tf.trainable_variables()
        G_vars = [var for var in t_vars if 'g_' in var.name]
        D_vars = [var for var in t_vars if 'd_' in var.name]

        G_train_op, D_train_op = vgg.train(G_loss, D_loss, G_vars, D_vars,
                                           global_step)

        sampler = vgg.sampler(random_z)

        #summary_op = tf.merge_all_summaries()

        sess = sess_init()

        tf.train.start_queue_runners(sess=sess)

        #summary_writer = tf.train.SummaryWriter(FLAGS.log_dir, sess.graph)

        saver = tf.train.Saver()

        for step in range(1, FLAGS.max_steps + 1):
            batch_z = np.random.uniform(
                -1, 1, [FLAGS.batch_size, FLAGS.z_dim]).astype(np.float32)

            _, errD = sess.run([D_train_op, D_loss],
                               feed_dict={random_z: batch_z})

            _, errG = sess.run([G_train_op, G_loss],
                               feed_dict={random_z: batch_z})

            if step % 100 == 0:
                print("step = %d, errD = %f, errG = %f" % (step, errD, errG))

            if np.mod(step, 1000) == 0:
                samples = sess.run(sampler, feed_dict={random_z: batch_z})
                save_images(samples, [8, 8],
                            './samples/train_{:d}.bmp'.format(step))

                #      if step % 1000 == 0:
                #        summary_str = sess.run(summary_op,
                #            feed_dict={random_z: batch_z})
                #        summary_writer.add_summary(summary_str, step)

            if step % 10000 == 0:
                saver.save(
                    sess, '{0}/vgg-{1}.model'.format(FLAGS.checkpoint_dir,
                                                     step), global_step)
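sess_init and save_images are repo helpers; a minimal sess_init consistent with the sessions used elsewhere in these examples would be (a sketch, not the repo's exact code):

import tensorflow as tf

def sess_init():
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    return sess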