Example #1
def run():
    image_size = 299
    num_classes = 200
    logdir = './log'

    checkpoint_file = tf.train.latest_checkpoint(logdir)

    with tf.Graph().as_default() as graph:
        
        #images = tf.placeholder(shape=[None, image_size, image_size, 3], dtype=tf.float32, name = 'Placeholder_only')
        images = tf.placeholder("float", [1, image_size, image_size, 3], name="input")

        with slim.arg_scope(inception_v3_arg_scope()):
            logits, end_points = inception_v3(images, num_classes = num_classes, is_training = False)

        variables_to_restore = slim.get_variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        #Setup graph def
        input_graph_def = graph.as_graph_def()
        output_node_names = "InceptionV3/Predictions/Softmax"
        output_graph_name = "./frozen_model_inceptionv3.pb"

        with tf.Session() as sess:
            saver.restore(sess, checkpoint_file)

            #Exporting the graph
            print ("Exporting graph...")
            output_graph_def = graph_util.convert_variables_to_constants(
                sess,
                input_graph_def,
                output_node_names.split(","))

            with tf.gfile.GFile(output_graph_name, "wb") as f:
                f.write(output_graph_def.SerializeToString())
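The exported .pb can later be used without the slim model definition. A minimal loading sketch (an addition, assuming the `input` and `InceptionV3/Predictions/Softmax` tensor names set up above):

import numpy as np
import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile("./frozen_model_inceptionv3.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    # Import the frozen graph and look the tensors up by name
    tf.import_graph_def(graph_def, name="")
    images = graph.get_tensor_by_name("input:0")
    softmax = graph.get_tensor_by_name("InceptionV3/Predictions/Softmax:0")
    with tf.Session(graph=graph) as sess:
        dummy = np.zeros((1, 299, 299, 3), dtype=np.float32)  # stand-in input
        print(sess.run(softmax, feed_dict={images: dummy}).shape)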
Example #2
    def __init__(self, is_training, input_box, targets, scope='detector',
                 need_optim=True, clip_grd=True):
        '''
        :param is_training: placeholder toggling train/eval behaviour at run time
        :param input_box: placeholder of shape [None] + SHAPE_BOX
        :param targets: regression targets matched against the network output
        :param scope: variable scope wrapping the detector
        :param need_optim: whether to build the optimizer and train_op
        :param clip_grd: whether to clip gradients to [-1, 1]
        '''

        with tf.variable_scope(scope):
            # cnn = CNN(param=Param, phase=self.phase, keep_prob=self.keep_prob, box=self.box)
            with slim.arg_scope(icp.inception_v3_arg_scope()):
                self.pred = icp.inception_v3(input_box, output_dim=DataConfig.output_dim,
                                             is_training=is_training,scope='InceptionV3',
                                             depth_multiplier=1.)

                with tf.variable_scope('error'):
                    self.error=tf.reduce_mean(tf.reduce_sum(
                        tf.square(self.pred - targets), axis=1) /DataConfig.num_feature_need, axis=0)

            if need_optim:
                with tf.variable_scope('optimizer'):
                    # Ensures that we execute the update_ops before performing the train_step
                    # optimizer = tf.train.AdamOptimizer(0.001,epsilon=1.0)
                    optimizer = tf.train.AdamOptimizer()
                    if clip_grd:
                        gvs = optimizer.compute_gradients(self.error)
                        capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
                        self.train_op = optimizer.apply_gradients(capped_gvs)
                    else:
                        self.train_op = optimizer.minimize(self.error)
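One caveat in the clipping branch above: compute_gradients returns (None, var) pairs for variables that do not influence the loss, and tf.clip_by_value(None, ...) raises an error. A hedged variant that skips such pairs:

# Skip variables whose gradient is None before clipping
capped_gvs = [(tf.clip_by_value(g, -1., 1.), v)
              for g, v in gvs if g is not None]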
Example #3
def Build_Image_Embeddings(mode, images, train_inception):
    """Builds the image model subgraph and generates image embeddings.

    Inputs:
      self.images

    Outputs:
      self.image_embeddings
    """
    print(
        "tl : Build Image Embeddings = InceptionV3 + Dense Layer / uses SlimNetsLayer and DenseLayer instead"
    )

    with slim.arg_scope(inception_v3_arg_scope()):
        net_img_in = tl.layers.InputLayer(images, name='input_image_layer')
        network = tl.layers.SlimNetsLayer(
            layer=net_img_in,
            slim_layer=inception_v3,
            slim_args={
                'trainable': train_inception,
                'is_training': mode == 'train',
            },
            name='',
        )
    network = tl.layers.DenseLayer(
        network,
        n_units=embedding_size,
        act=tf.identity,
        W_init=initializer,
        b_init=None,  # no biases
        name='image_embedding')
    return network
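The function relies on module-level embedding_size and initializer that the snippet does not show; a plausible configuration (assumed values, not from the source) would be:

embedding_size = 512  # hypothetical embedding width
initializer = tf.random_uniform_initializer(minval=-0.08, maxval=0.08)  # hypothetical init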
Example #4
def model(x, H, reuse, is_training=True):
    if H['inception'] == 1:
        with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
            _, T = inception_v1.inception_v1(
                x,
                is_training=is_training,
                num_classes=1001,
                dropout_keep_prob=H['dense_dropout'],
                input_dropout=H['input_dropout'],
                spatial_squeeze=False,
                reuse=reuse)
        coarse_feat = T[H['coarse_feat']]

        # fine feat can be used to reinspect input
        early_feat = T[H['early_feat']]
        early_feat_channels = 480
    elif H['inception'] == 3:
        with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
            _, T = inception_v3.inception_v3(x,
                                             is_training=is_training,
                                             num_classes=1001,
                                             dropout_keep_prob=0.8,
                                             spatial_squeeze=False,
                                             reuse=reuse)
        coarse_feat = T['Mixed_5b']

        # fine feat can be used to reinspect input
        attention_lname = H.get('attention_lname', 'Mixed_3b')
        early_feat = T[attention_lname]
        early_feat_channels = 480
    else:
        raise ValueError("H['inception'] must be 1 or 3")

    return coarse_feat, early_feat, early_feat_channels
Example #5
 def testModelHasExpectedNumberOfParameters(self):
     batch_size = 5
     height, width = 299, 299
     inputs = tf.random_uniform((batch_size, height, width, 3))
     with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
         inception_v3.inception_v3_base(inputs)
     total_params, _ = slim.model_analyzer.analyze_vars(
         slim.get_model_variables())
     self.assertAlmostEqual(21802784, total_params)
Example #7
def predict(image, version='V3'):
    tf.reset_default_graph()
    
    # Process the image 
    raw_image, processed_image = process_image(image)
    print(raw_image.shape)
    class_names = imagenet.create_readable_names_for_imagenet_labels()
    
    # Create a placeholder for the images
    X = tf.placeholder(tf.float32, [None, 299, 299, 3], name="X")
    
    '''
    inception_v3 function returns logits and end_points dictionary
    logits are output of the network before applying softmax activation
    '''
    
    if version.upper() == 'V3':
        print("V3!!")
        model_ckpt_path = INCEPTION_V3_CKPT_PATH
        with tf.contrib.slim.arg_scope(inception_v3.inception_v3_arg_scope()):
            # Set the number of classes and is_training parameter  
            logits, end_points = inception_v3.inception_v3(X, num_classes=1001, is_training=False)
            
    elif version.upper() == 'V4':
        model_ckpt_path = INCEPTION_V4_CKPT_PATH
        with tf.contrib.slim.arg_scope(inception_v4.inception_v4_arg_scope()):
            # Set the number of classes and is_training parameter
            # Logits 
            logits, end_points = inception_v4.inception_v4(X, num_classes=1001, is_training=False)
            
    
    predictions = end_points.get('Predictions', 'No key named predictions')
    saver = tf.train.Saver()
    
    with tf.Session() as sess:
        print("model_ckpt_path", model_ckpt_path)
        saver.restore(sess, model_ckpt_path)
        prediction_values = predictions.eval({X: processed_image})
        
    try:
        # Add an index to predictions and then sort by probability
        prediction_values = [(i, prediction) for i, prediction in enumerate(prediction_values[0,:])]
        prediction_values = sorted(prediction_values, key=lambda x: x[1], reverse=True)
        
        # Plot the image
        #plot_color_image(raw_image)
        #plt.show()
        print("Using Inception_{} CNN\nPrediction: Probability\n".format(version))
        # Display the image and predictions 
        for i in range(10):
            predicted_class = class_names[prediction_values[i][0]]
            probability = prediction_values[i][1]
            print("{}: {:.2f}%".format(predicted_class, probability*100))
    
    # If the predictions do not come out right
    except Exception:
        print(predictions)
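Hypothetical usage of predict, assuming process_image and the INCEPTION_V*_CKPT_PATH constants are defined elsewhere in the project:

predict('elephant.jpg', version='V3')  # prints the top-10 classes with probabilities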
Example #8
def val(val_dir, checkpoint_dir='./checkpoint/'):
    # predict the result
    val_images = os.listdir(val_dir)

    # Define the model:
    with slim.arg_scope(v3.inception_v3_arg_scope()):
        out, end_points = v3.inception_v3(inputs=input_images,
                                          num_classes=CLASS_NUMBER,
                                          dropout_keep_prob=1.0,
                                          is_training=False)

    score = tf.nn.softmax(out, name='pre')
    class_id = tf.argmax(score, 1)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore the model from checkpoint %s' %
                  ckpt.model_checkpoint_path)
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            start_step = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        else:
            raise Exception('no checkpoint found')

        result = []
        loop = 0
        with open('val_pre.json', 'w') as f:
            for val_image in val_images:
                temp_dict = {}
                x = herb_input.img_resize(os.path.join(val_dir, val_image),
                                          IMAGE_GNET_SIZE)
                pre_label = sess.run(class_id,
                                     feed_dict={
                                         input_images: np.expand_dims(x,
                                                                      axis=0),
                                         keep_prob: 1
                                     })
                temp_dict['image_id'] = val_image
                temp_dict['label_id'] = pre_label.tolist()
                result.append(temp_dict)
                loop = loop + 1
                print('image %s is %d' % (val_image, pre_label[0]))

                if loop % 1000 == 1:
                    json.dump(result, f)
                    print('loop:%d, write result json, num is %d' %
                          (loop, len(result)))
                    del result[:]

            json.dump(result, f)
            print('loop end %d, write result json, num is %d' %
                  (loop, len(result)))
Example #9
    def build_model(self):
        self.input = tf.placeholder(tf.uint8, [None, None, 3])
        self.processed_image = inception_preprocessing.preprocess_image(
            self.input, self.height, self.width, is_training=False)
        self.processed_images = tf.expand_dims(self.processed_image, 0)

        with slim.arg_scope(inception_v3_arg_scope()):
            self.logits, self.end_points = inception_v3(self.processed_images,
                                                        num_classes=1001,
                                                        is_training=False)
        self.probabilities = tf.nn.softmax(self.logits)
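A usage sketch for the class above, assuming an instance `model` on which build_model has run and whose checkpoint was restored into a session `sess`:

import numpy as np

img = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # stand-in image
probs = sess.run(model.probabilities, feed_dict={model.input: img})
print(probs.argmax(axis=1))  # index of the most probable class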
Example #10
    def _buildGraph(self):
        x_in = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
        y_in = tf.placeholder(tf.int64, shape=[None])
        onehot_labels = tf.one_hot(indices=tf.cast(y_in, tf.int32), depth=self.n_class)
        is_train = tf.placeholder_with_default(False, shape=[], name="is_train")
        global_step = tf.Variable(0, trainable=False)

        if self.architecture == 'I3':
            print('Using Inception v3 architecture.')
            """
            logits, nett, _ = models.inceptionv3(x_in,
                                                 num_classes=self.n_class, is_training=is_train,
                                                 dropout_keep_prob=self.dropout, scope='InceptionV3')
            """
            with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
                logits, _ = inception_v3.inception_v3(x_in,
                                                      num_classes=self.n_class, is_training=is_train,
                                                      dropout_keep_prob=self.dropout)

        elif self.architecture == 'IR2':
            print('Using Inception-Resnet v2 architecture.')
            logits, nett, _ = models.inceptionresnetv2(x_in,
                                                       num_classes=self.n_class, is_training=is_train,
                                                       dropout_keep_prob=self.dropout, scope='InceptionResV2')
        else:
            print('Using default architecture: Inception V3.')
            logits, nett, _ = models.inceptionv3(x_in,
                                                 num_classes=self.n_class, is_training=is_train,
                                                 dropout_keep_prob=self.dropout, scope='InceptionV3')

        pred = tf.nn.softmax(logits, name="prediction")
        cost = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)

        tf.summary.scalar("{}_cost".format(self.architecture), cost)
        tf.summary.tensor_summary("{}_pred".format(self.architecture), pred)

        # optimizer based on TensorFlow version
        if int(str(tf.__version__).split('.', 3)[0]) == 2:
            opt = tf.optimizers.Adam(learning_rate=self.learning_rate)
        else:
            opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)

        train_op = opt.minimize(loss=cost, global_step=global_step)
        merged_summary = tf.summary.merge_all()

        return (x_in, y_in, is_train,
                logits, pred, cost,
                global_step, train_op, merged_summary)
Example #11
 def _build(self):
     reuse = True if self.built else None
     with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
         logits, end_points = inception_v3.inception_v3(
             self.input,
             num_classes=self.num_classes,
             is_training=False,
             reuse=reuse)
         self.built = True
     self.end_points = end_points
     self.logits = logits
     if not self.ckpt_loaded:
         saver = tf.train.Saver()
         saver.restore(self.sess, ckpt_dir + 'inception_v3.ckpt')
         self.ckpt_loaded = True
Example #12
def getvector(imagedir):
  slim = tf.contrib.slim

  batch_size = 3
  image_size = v3.inception_v3.default_image_size

  url = "http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz"
  checkpoints_dir = os.getcwd()

  if not tf.gfile.Exists(checkpoints_dir + '/inception_v3.ckpt'):
    dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)

  with tf.Graph().as_default():
    # imagedir = '/home/jiexun/Desktop/Siraj/ImageChallenge/Necessary/train/cat.0.jpg'
    image_string = tf.read_file(imagedir)
    image = tf.image.decode_jpeg(image_string, channels=3)
    
    #Trying to fix the resize
    '''
    print(image.shape)
    print(image)
    image=tf.cast(image, tf.float32)
    image=tf.image.resize_images(image, [299, 299])
    print(image.shape)
    print(image)
    image_jpg = tf.image.encode_jpeg(image)
    plt.imshow(image_jpg)
    '''
    #end of patch

    processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
    processed_images = tf.expand_dims(processed_image, 0)

    # Create the model, use the default arg scope to configure the batch norm parameters.
    print('Initializing the InceptionV3 model...')
    with slim.arg_scope(v3.inception_v3_arg_scope()):
      vector, _ = v3.inception_v3(processed_images, num_classes=1001, is_training=False)

    init_fn = slim.assign_from_checkpoint_fn(os.path.join(checkpoints_dir, 'inception_v3.ckpt'),
                                             slim.get_model_variables('InceptionV3'))
    with tf.Session() as sess:
      init_fn(sess)
      np_image, vector = sess.run([image, vector])

    # The four-level flatten assumes a 4-D activation such as
    # end_points['PreLogits'] (shape [1, 1, 1, 2048]); the 2-D logits returned
    # by inception_v3 above would not iterate this deep.
    a = np.asarray([x for xs in vector for xss in xs for xsss in xss for x in xsss])
    a = np.reshape(a, (1, 2048))  # keep the result; np.reshape is not in-place

  return a
Example #13
def create_combined_network(n_components):
    """
    Combines the Inception v3 network with my own
    :param n_components:
    :return:
    """
    print('Creating inception_v3 network')
    input_images = tf.placeholder('float32', [None, 299, 299, 3])
    transformed_inputs = tf_transform_input_img(input_images)
    with slim.arg_scope(inception_v3_arg_scope()):
        logits, end_points = inception_v3(transformed_inputs,
                                          num_classes=6012,
                                          is_training=False)
    bottleneck = slim.flatten((end_points['PreLogits'] + 0.3) * (1.0 / 8.0))
    print('Creating lower network')
    network = create_network(n_components, bottleneck)

    return Network(input_images, network.output, network.end_points)
Example #14
def test(test_dir, checkpoint_dir='./checkpoint/'):
    # predict the result
    test_images = os.listdir(test_dir)
    '''new'''
    # Define the model:
    with slim.arg_scope(v3.inception_v3_arg_scope()):
        out, end_points = v3.inception_v3(inputs=input_images,
                                          num_classes=CLASS_NUMBER,
                                          dropout_keep_prob=1.0,
                                          is_training=False)

    score = tf.nn.softmax(out, name='pre')
    class_id = tf.argmax(score, 1)
    '''new'''

    with tf.Session() as sess:
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore the model from checkpoint %s' %
                  ckpt.model_checkpoint_path)
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise Exception('no checkpoint found')

        result = []
        for test_image in test_images:
            temp_dict = {}
            x = herb_input.img_resize(os.path.join(test_dir, test_image),
                                      IMAGE_GNET_SIZE)
            pre_label = sess.run(class_id,
                                 feed_dict={
                                     input_images: np.expand_dims(x, axis=0),
                                     keep_prob: 1
                                 })
            temp_dict['image_id'] = test_image
            temp_dict['label_id'] = pre_label.tolist()
            result.append(temp_dict)
            print('image %s is %d' % (test_image, pre_label[0]))

        with open('submit.json', 'w') as f:
            json.dump(result, f)
            print('write result json, num is %d' % len(result))
Example #15
    def __init__(
            self,
            chkpointpath='../models/inceptionv3/inception_v3.ckpt',
            lblmetadatapath='../models/inceptionv3/imagenet_class_index.json'):

        tf.reset_default_graph()
        self.graph = tf.Graph()
        self.chkpntpath = chkpointpath
        self.labelmetadatapath = lblmetadatapath
        self.num_classes = 1001  # 0 is background or null class
        self.label_dict = {}
        if not tf.io.gfile.exists(self.chkpntpath):
            raise ValueError("There is no checkpoint at the input path")
        with self.graph.as_default():
            self.input_batch = tf.placeholder(tf.float32,
                                              shape=(None, 299, 299, 3))
            with slim.arg_scope(inception_v3_arg_scope()):
                _, self.end_points = inception_v3(self.input_batch,
                                                  is_training=False,
                                                  num_classes=self.num_classes)
                self.session = tf.Session(graph=self.graph)
                self.saver = tf.train.Saver()
                self.saver.restore(self.session, self.chkpntpath)

            self.logits = self.graph.get_tensor_by_name(
                'InceptionV3/Logits/SpatialSqueeze:0')
            self.trainable_variables = tf.trainable_variables()

        if not tf.io.gfile.exists(self.labelmetadatapath):
            raise ValueError("There is no label file at the input path.")

        # process labels in appropriate dictionary
        with open(self.labelmetadatapath) as json_file:
            data = json.load(json_file)
            shift = 0
            if self.num_classes == 1001:
                self.label_dict = {0: ["background", "background"]}
                shift = 1
            for key in data:
                self.label_dict[int(key) + shift] = data[key]
Example #16
def main(_):
    if not tf.gfile.Exists(FLAGS.log_dir):
        tf.gfile.MakeDirs(FLAGS.log_dir)

    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.DEBUG)

        # Select the dataset
        dataset = hico.get_split('train', FLAGS.dataset_dir)
        data_provider = slim.dataset_data_provider.DatasetDataProvider(
                        dataset, 
                        num_readers=4,
                        common_queue_capacity=20 * FLAGS.batch_size, 
                        common_queue_min=10 * FLAGS.batch_size)

        image, label = data_provider.get(['image', 'label'])

        label = tf.decode_raw(label, tf.float32)
        
        label = tf.reshape(label, [FLAGS.num_classes])

        # Preprocess images
        image = inception_preprocessing.preprocess_image(image, image_size, image_size,
                is_training=True)

        # Training batches and queue
        images, labels = tf.train.batch(
                [image, label],
                batch_size = FLAGS.batch_size,
                num_threads = 1,
                capacity = 5 * FLAGS.batch_size)
        
        # Create the model
        with slim.arg_scope(inception_v3_arg_scope()):
            logits, _ = inception_v3(images, num_classes = FLAGS.num_classes, is_training=True)
        
        predictions = tf.nn.sigmoid(logits, name='prediction')
        
        cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits = logits, labels = labels)
        loss = tf.reduce_mean(cross_entropy)

        # Add summaries
        tf.summary.scalar('loss', loss)

        # Fine-tune only the new layers
        trainable_scopes = ['InceptionV3/Logits', 'InceptionV3/AuxLogits']
        scopes = [scope.strip() for scope in trainable_scopes]
        variables_to_train = []
        for scope in scopes:
            variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
            variables_to_train.extend(variables)
        

        optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)

        train_op = slim.learning.create_train_op(loss, optimizer, variables_to_train=variables_to_train)

        num_batches = math.ceil(data_provider.num_samples()/float(FLAGS.batch_size)) 
        num_steps = FLAGS.epochs * int(num_batches)
        
        slim.learning.train(
            train_op,
            logdir=FLAGS.log_dir,
            init_fn=get_init_fn(FLAGS.checkpoint),
            number_of_steps=num_steps,
            save_summaries_secs=300,
            save_interval_secs=300
        )
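get_init_fn is referenced but not shown in the snippet; since only the logits layers are retrained, a typical slim definition (an assumption) restores every other variable from the checkpoint:

def get_init_fn(checkpoint_path):
    # Hypothetical helper: restore all weights except the retrained layers
    exclude = ['InceptionV3/Logits', 'InceptionV3/AuxLogits']
    variables_to_restore = slim.get_variables_to_restore(exclude=exclude)
    return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore)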
Example #17
def train(train_dir, annotations, max_step, checkpoint_dir='./checkpoint/'):
    max_acc = 0.0
    # train the model
    plant_data = herb_input.plant_data_fn(train_dir, annotations)
    val_data = herb_input.plant_data_fn(VAL_DIR, VAL_ANNO)
    val_size = val_data.data_counts()

    # Define the model:
    with slim.arg_scope(v3.inception_v3_arg_scope()):
        out, end_points = v3.inception_v3(inputs=input_images,
                                          num_classes=CLASS_NUMBER,
                                          dropout_keep_prob=keep_prob,
                                          is_training=is_training)

    # Specify the loss function: losses defined through tf.losses are added to
    # the loss collection automatically, so no explicit add_loss() is needed.
    tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels,
                                    logits=out)  # add the cross-entropy loss
    # slim.losses.add_loss(my_loss)
    loss = tf.losses.get_total_loss(add_regularization_losses=True)  # add the regularization losses
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(one_hot_labels, 1)),
                tf.float32))

    # define the tensorboard summary info
    try:
        image_summary = tf.image_summary
        scalar_summary = tf.scalar_summary
        histogram_summary = tf.histogram_summary
        merge_summary = tf.merge_summary
        SummaryWriter = tf.train.SummaryWriter
    except:
        image_summary = tf.summary.image
        scalar_summary = tf.summary.scalar
        histogram_summary = tf.summary.histogram
        merge_summary = tf.summary.merge
        SummaryWriter = tf.summary.FileWriter

    # Specify the optimization scheme:
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=LEARNINGRATE)

    # When the model contains `batch_norm` layers, the moving `average` and
    # `variance` statistics of every layer must be updated. Those updates are
    # not part of the regular training step, so they have to be triggered
    # manually: `tf.get_collection` gathers all the update `op`s
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # and TensorFlow's control flow runs the update ops before the train step
    with tf.control_dependencies(update_ops):
        # create_train_op that ensures that when we evaluate it to get the loss,
        # the update_ops are done and the gradient updates are computed.
        # train_op = slim.learning.create_train_op(total_loss=loss,optimizer=optimizer)
        train_op = slim.learning.create_train_op(total_loss=loss,
                                                 optimizer=optimizer)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore the model from checkpoint %s' %
                  ckpt.model_checkpoint_path)
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            start_step = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        else:
            sess.run(tf.global_variables_initializer())
            start_step = 0
            print('start training from new state')
        logger = herb_input.train_log(LOGNAME)

        for step in range(start_step, start_step + max_step):
            start_time = time.time()
            x, y = plant_data.next_batch(BATCH_SIZE, IMAGE_GNET_SIZE)
            _, train_loss = sess.run(
                [train_op, loss],
                feed_dict={
                    input_images: x,
                    input_labels: y,
                    keep_prob: 0.5,
                    is_training: True
                })
            if step % 50 == 0:
                #outputs = sess.run(out, feed_dict={input_images: x, input_labels: y,
                #                                                keep_prob: 1.0, is_training: False})
                train_accuracy = sess.run(accuracy,
                                          feed_dict={
                                              input_images: x,
                                              input_labels: y,
                                              keep_prob: 1.0,
                                              is_training: False
                                          })
                #train_loss = sess.run(cross_entropy, feed_dict={features: x, labels: y, keep_prob: 1})

                # summary
                loss_summary = scalar_summary('loss', train_loss)
                acc_summary = scalar_summary('accuracy', train_accuracy)
                merged = merge_summary([loss_summary, acc_summary])
                summary = sess.run(merged,
                                   feed_dict={
                                       input_images: x,
                                       input_labels: y,
                                       keep_prob: 1.0,
                                       is_training: False
                                   })
                # `writer` is assumed to be a SummaryWriter created at module level
                writer.add_summary(summary, step)

                duration = time.time() - start_time
                logger.info(
                    "step %d: training accuracy %g, loss is %g (%0.3f sec)" %
                    (step, train_accuracy, train_loss, duration))
                #print(outputs)
            if step % 1000 == 1:
                saver.save(sess, CHECKFILE, global_step=step)
                print('writing checkpoint at step %s' % step)

            if VAL_OPEN and step % 5000 == 1:
                # validate accuracy on the validation set
                print('step into validation, data_size:%d' % val_size)
                mean_loss, mean_acc = vali_evaluation(sess, val_data, loss,
                                                      accuracy, val_size)
                print("%s: Step [%d]  val Loss : %f, val accuracy :  %g" %
                      (datetime.now(), step, mean_loss, mean_acc))

                if mean_acc > max_acc and mean_acc > 0.7:
                    # save the model with the highest validation accuracy
                    max_acc = mean_acc
                    best_models = os.path.join(
                        MODEL_DIR, 'best_models_{:.4f}.ckpt'.format(max_acc))
                    print('{}------save:{}'.format(datetime.now(),
                                                   best_models))
                    saver.save(sess, best_models)
                    # copy the best model to the designated directory
                    if BESTMODEL_DIR:
                        if not os.path.exists(BESTMODEL_DIR):
                            os.makedirs(BESTMODEL_DIR)
                        shutil.copy(best_models + '.meta', BESTMODEL_DIR)
                        shutil.copy(best_models + '.index', BESTMODEL_DIR)
                        shutil.copy(best_models + '.data-00000-of-00001',
                                    BESTMODEL_DIR)
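Worth noting (an observation, not from the snippet): slim.learning.create_train_op already runs the UPDATE_OPS collection by default, so the explicit control dependency above is redundant, though harmless. The bare-TensorFlow equivalent of the pattern is roughly:

# Minimal sketch with a hypothetical `loss` tensor, without slim
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)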
Example #18
def calWholeAcc(train_dir, annotations, checkpoint_dir='./checkpoint/'):
    # calculate train accuracy
    plant_data = herb_input.plant_data_fn(train_dir, annotations)
    x, y = plant_data.whole_batch(IMAGE_GNET_SIZE)

    # Define the model:
    with slim.arg_scope(v3.inception_v3_arg_scope()):
        out, end_points = v3.inception_v3(inputs=input_images,
                                          num_classes=CLASS_NUMBER,
                                          dropout_keep_prob=keep_prob,
                                          is_training=is_training)

    # Specify the loss function: losses defined through tf.losses are added to
    # the loss collection automatically, so no explicit add_loss() is needed.
    tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels,
                                    logits=out)  # add the cross-entropy loss
    # slim.losses.add_loss(my_loss)
    loss = tf.losses.get_total_loss(add_regularization_losses=True)  # add the regularization losses
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(one_hot_labels, 1)),
                tf.float32))

    # Specify the optimization scheme:
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=LEARNINGRATE)

    # When the model contains `batch_norm` layers, the moving `average` and
    # `variance` statistics of every layer must be updated. Those updates are
    # not part of the regular training step, so they have to be triggered
    # manually: `tf.get_collection` gathers all the update `op`s
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # and TensorFlow's control flow runs the update ops before the train step
    with tf.control_dependencies(update_ops):
        # create_train_op that ensures that when we evaluate it to get the loss,
        # the update_ops are done and the gradient updates are computed.
        # train_op = slim.learning.create_train_op(total_loss=loss,optimizer=optimizer)
        train_op = slim.learning.create_train_op(total_loss=loss,
                                                 optimizer=optimizer)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore the model from checkpoint %s' %
                  ckpt.model_checkpoint_path)
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            start_step = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        else:
            sess.run(tf.global_variables_initializer())
            start_step = 0
            print('start training from new state')
        logger = herb_input.train_log(LOGNAME)

        start_time = time.time()
        train_accuracy = sess.run(accuracy,
                                  feed_dict={
                                      input_images: x,
                                      input_labels: y,
                                      keep_prob: 1.0,
                                      is_training: False
                                  })
        duration = time.time() - start_time
        logger.info("whole training accuracy %g(%0.3f sec)" %
                    (train_accuracy, duration))
Example #19
def main(_):
    
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)

        # Select the dataset
        dataset = hico.get_split('test', FLAGS.dataset_dir)
        data_provider = slim.dataset_data_provider.DatasetDataProvider(
                        dataset, 
                        num_readers=1,
                        common_queue_capacity=20 * FLAGS.batch_size, 
                        common_queue_min=10 * FLAGS.batch_size,
                        shuffle=False)

        image, label = data_provider.get(['image', 'label'])
        
        label = tf.decode_raw(label, tf.float32)
        
        label = tf.reshape(label, [FLAGS.num_classes])

        
        # Preprocess images
        image = inception_preprocessing.preprocess_image(image, image_size, image_size,
                is_training=False)

        # Training batches and queue
        images, labels = tf.train.batch(
                [image, label],
                batch_size = FLAGS.batch_size,
                num_threads = 1,
                capacity = 5 * FLAGS.batch_size,
                allow_smaller_final_batch=True)
        
       
        # Create the model
        with slim.arg_scope(inception_v3_arg_scope()):
            logits, _ = inception_v3(images, num_classes = FLAGS.num_classes, is_training=False)
        
        predictions = tf.nn.sigmoid(logits, name='prediction')
        
        cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits = logits, labels = labels)
        loss = tf.reduce_mean(cross_entropy)

        correct_prediction = tf.equal(tf.round(predictions), labels)
        
        # Mean accuracy over all labels:
        # http://stackoverflow.com/questions/37746670/tensorflow-multi-label-accuracy-calculation
        accuracy = tf.cast(correct_prediction, tf.float32)
        mean_accuracy = tf.reduce_mean(accuracy)


        checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path,
            slim.get_variables_to_restore())

        num_batches = math.ceil(data_provider.num_samples()/float(FLAGS.batch_size))

        prediction_list = []
        label_list = []
        count = 0
        
        with tf.Session() as sess:
            with slim.queues.QueueRunners(sess):
                sess.run(tf.local_variables_initializer())
                init_fn(sess)
                
                for step in range(int(num_batches)):
                    np_loss, np_accuracy, np_logits, np_prediction, np_labels = sess.run(
                            [loss, mean_accuracy, logits, predictions, labels]) 
                    
                    prediction_list.append(np_prediction)
                    label_list.append(np_labels)
                    
                    count += np_labels.shape[0]
                    
                    print('Step {}, count {}, loss: {}'.format(step,count,  np_loss))
                    
        
        prediction_arr = np.concatenate(prediction_list, axis=0)
        label_arr = np.concatenate(label_list, axis=0)
        
        mAP = calculate_mAP(prediction_arr, label_arr)
        print('mAP score: {}'.format(mAP))
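The same multi-label mean accuracy can be re-checked offline from the collected arrays; a small numpy sketch:

import numpy as np

# Threshold the sigmoid outputs at 0.5 and average over every label position
acc = np.mean((prediction_arr > 0.5) == label_arr.astype(bool))
print('mean per-label accuracy: {:.4f}'.format(acc))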
Example #20
def main():
    # Load the preprocessed data.
    processed_data = np.load(INPUT_DATA)
    training_images = processed_data[0]
    n_training_example = len(training_images)
    training_labels = processed_data[1]
    validation_images = processed_data[2]
    validation_labels = processed_data[3]
    testing_images = processed_data[4]
    testing_labels = processed_data[5]

    # Define the inputs for inception-v3: `images` holds the input images and
    # `labels` the label corresponding to each image.
    images = tf.placeholder(
        tf.float32, [None, 299, 299, 3], 
        name='input_images')
    labels = tf.placeholder(tf.int64, [None], name='labels')
    
    # Define the inception-v3 model. Google only publishes the trained
    # parameter values, so the inception-v3 network structure has to be
    # defined in this code. Because the model uses dropout, one model is
    # needed for training and another for testing.
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
        train_logits, _ = inception_v3.inception_v3(
            images, num_classes=N_CLASSES, is_training=True)
        # reuse must be set to True when defining the model used for testing.
        test_logits, _ = inception_v3.inception_v3(
            images, num_classes=N_CLASSES, is_training=False, reuse=True)
    
    
    trainable_variables = get_trainable_variables()
    print(trainable_variables)
    
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=train_logits, 
        labels=tf.one_hot(labels, N_CLASSES))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
        cross_entropy_mean,
        var_list=trainable_variables)
    
    # Compute the accuracy.
    with tf.name_scope('evaluation'):
        correct_prediction = tf.equal(tf.argmax(test_logits, 1), labels)
        evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    
    # 
    loader = tf.train.Saver(get_tuned_variables())
    saver = tf.train.Saver()
    with tf.variable_scope("InceptionV3", reuse = True):
        check1 = tf.get_variable("Conv2d_1a_3x3/weights")
        check2 = tf.get_variable("Logits/Conv2d_1c_1x1/weights")
        
    with tf.Session() as sess:
        # Initialize the variables that were not loaded from the checkpoint.
        init = tf.global_variables_initializer()
        sess.run(init)
        print(sess.run(check1))
        print(sess.run(check2))

        # Load the model already trained by Google.
        print('Loading tuned variables from %s' % CKPT_FILE)
        loader.restore(sess, CKPT_FILE)
            
        start = 0
        end = BATCH
        for i in range(STEPS):
            print(sess.run(check1))
            print(sess.run(check2))
            
            _, loss = sess.run([train_step, cross_entropy_mean], feed_dict={
                images: training_images[start:end], 
                labels: training_labels[start:end]})

            if i % 100 == 0 or i + 1 == STEPS:
                saver.save(sess, TRAIN_FILE, global_step=i)
                validation_accuracy = sess.run(evaluation_step, feed_dict={
                    images: validation_images, labels: validation_labels})
                print('Step %d: Training loss is %.1f%% Validation accuracy = %.1f%%' % (
                    i, loss * 100.0, validation_accuracy * 100.0))
            
            start = end
            if start == n_training_example:
                start = 0
            
            end = start + BATCH
            if end > n_training_example: 
                end = n_training_example
            
        # Finally, evaluate the accuracy on the test data.
        test_accuracy = sess.run(evaluation_step, feed_dict={
            images: testing_images, labels: testing_labels})
        print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
Example #21
def main(_):
    # Placeholder to feed the image
    images = tf.placeholder(tf.float32, [None, None, None, 3])

    # get the logits and bottlenecks for each image
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, end_points = inception.inception_v3(images, num_classes=1001, is_training=False)
        bottlenecks = end_points['PreLogits']
        bottlenecks = tf.squeeze(bottlenecks)

    # List of the classes
    list_classes = os.listdir(os.path.join(FLAGS.rgb_dir, 'train'))
    list_classes = [x for x in list_classes if 'DS' not in x ]

    # Create the feature directories for each class (under 'train', matching
    # the save paths used below) plus one for the test images
    for c in list_classes:
        if not os.path.exists(os.path.join(FLAGS.inception_bottlenecks_dir, 'train', c)):
            os.makedirs(os.path.join(FLAGS.inception_bottlenecks_dir, 'train', c))
    if not os.path.exists(os.path.join(FLAGS.inception_bottlenecks_dir, 'test_stg1')):
        os.makedirs(os.path.join(FLAGS.inception_bottlenecks_dir, 'test_stg1'))

    print("All the classes: "+str(list_classes))

    # Saver to restore the Inception v3 pretrained model on Imagenet
    saver = tf.train.Saver(max_to_keep=None)

    # Start the session
    with tf.Session() as sess:
        # Restore the pretrained model
        print("Restoring inception v3 model from: " + FLAGS.ckpt_path)
        saver.restore(sess, FLAGS.ckpt_path)
        print("Restored!")

        # Loop over all the directories (video)
        for label in list_classes:
            list_img = os.listdir(os.path.join(FLAGS.rgb_dir, 'train', label))
            list_img = [x for x in list_img if 'DS' not in x]
            print("Class: " + label + " has " + str(len(list_img)) + " images.")
            for img in list_img:
                print(img)
                # Catch the files from the directory one by one and convert them to np arrays
                img_filename = os.path.join(FLAGS.rgb_dir, 'train', label, img)
                image = Image.open(img_filename)
                image = image.resize((299, 299))
                image_data = np.array(image)
                # Inception v3 preprocessing
                image_data = image_data / 255.0
                image_data = image_data - 0.5
                image_data = image_data * 2
                image_data = image_data.reshape((1, 299, 299, 3))

                # Catch the features
                bottlenecks_v = sess.run(bottlenecks, {images: image_data})

                # Save bottlenecks
                txtfile_bottlenecks = os.path.join(FLAGS.inception_bottlenecks_dir, 'train', label, img + '.txt')
                np.savetxt(txtfile_bottlenecks, bottlenecks_v)

        # Finally do the same for test images
        list_test_img = os.listdir(os.path.join(FLAGS.rgb_dir, 'test_stg1'))
        list_test_img = [x for x in list_test_img if 'DS' not in x]
        for img in list_test_img:
            print(img)
            # Catch the files from the directory one by one and convert them to np arrays
            img_filename = os.path.join(FLAGS.rgb_dir, 'test_stg1', img)
            image = Image.open(img_filename)
            image = image.resize((299, 299))
            image_data = np.array(image)
            image_data = image_data / 255.0
            image_data = image_data - 0.5
            image_data = image_data * 2
            image_data = image_data.reshape((1, 299, 299, 3))

            # Catch the bottleneck features
            bottlenecks_v = sess.run(bottlenecks, {images: image_data})

            # Save bottlenecks
            txtfile_bottlenecks = os.path.join(FLAGS.inception_bottlenecks_dir, 'test_stg1', img + '.txt')
            np.savetxt(txtfile_bottlenecks, bottlenecks_v)


    ################################################
    ## Merge features/labels to one big .txt file ##
    ################################################
    # Training set
    print("Creating training set")
    list_features = []
    list_labels = []
    for c in list_classes:
        print(c)
        list_img_features = os.listdir(os.path.join(FLAGS.inception_bottlenecks_dir,'train', c))
        for img_features in list_img_features:
            features = np.loadtxt(os.path.join(FLAGS.inception_bottlenecks_dir,'train', c, img_features))
            list_features.append(features)
            list_labels.append(list_classes.index(c))
    array_features = np.asarray(list_features)
    array_labels = np.asarray(list_labels)
    # Save to txt files
    np.savetxt('./fish_features.txt', array_features)
    np.savetxt('./fish_labels.txt', array_labels)

    # Test set
    print("Creating test set")
    list_features = []
    list_img_name = []
    list_img_features = os.listdir(os.path.join(FLAGS.inception_bottlenecks_dir,'test_stg1'))
    for img_features in list_img_features:
        features = np.loadtxt(os.path.join(FLAGS.inception_bottlenecks_dir,'test_stg1', img_features))
        list_features.append(features)
        list_img_name.append(img_features.split('.')[0] + '.jpg')
    array_features = np.asarray(list_features)
    np.savetxt('./fish_features_test.txt', array_features)

    # Write the name of each picture for submission
    with open('./pic_names_test.txt', 'w') as thefile:
        for item in list_img_name:
            thefile.write("%s\n" % item)
Example #22
# Load the labels
labels = np.loadtxt(LABEL_FILE, str, delimiter='\t')
labels_en = np.loadtxt(LABEL_FILE_EN, str, delimiter='\t')

# Define input_images as the image data
input_images = tf.placeholder(
    dtype=tf.float32,
    shape=[None, IMAGE_GNET_SIZE, IMAGE_GNET_SIZE, 3],
    name='input')
# Define the dropout keep probability
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

# Define the model:
print("Define the model--------------------")
with slim.arg_scope(v3.inception_v3_arg_scope()):
    out, end_points = v3.inception_v3(inputs=input_images,
                                      num_classes=CLASS_NUMBER,
                                      dropout_keep_prob=1.0,
                                      is_training=False)

sess = 0
scores = tf.nn.softmax(out, name='pre')
values, indices = tf.nn.top_k(scores, 3)


# Load the model and create the Session
def create_sess():
    global sess
    sess = tf.Session()
    saver = tf.train.Saver()
Example #23
def get_transformed_data(img_paths,
                         size,
                         checkpoint='./data/model.ckpt',
                         num_classes=6012,
                         batch_size=32,
                         name_regex=None):
    """
    Loads the supplied image_paths, runs them through the trained Inception_v3
    network and returns a dictionary with each image's bottleneck and a resized
    copy of the image.

    :param img_paths: iterable containing the paths to the images to be loaded
    :param size: image width and height in pixels
    :param checkpoint: location of the Inception_v3 weights
    :param num_classes:
    :param batch_size:
    :param name_regex: regex to obtain image name from image path
    :return:
    """
    if not os.path.exists(checkpoint):
        tf.logging.fatal(
            'Checkpoint %s does not exist. See README.md for more information',
            checkpoint)
    name_regex = name_regex if name_regex is not None else re.compile(
        '.*\/.*\/(.*\.jpg)')
    g = tf.Graph()
    with g.as_default():
        input_images = tf.placeholder('float32', [None, 299, 299, 3])
        transformed_inputs = tf_transform_input_img(input_images)

        with slim.arg_scope(inception_v3_arg_scope()):
            logits, end_points = inception_v3(transformed_inputs,
                                              num_classes=num_classes,
                                              is_training=False)

        bottleneck = end_points['PreLogits']
        saver = tf_saver.Saver()
        data = {
            'image_names': [],
            'images': np.empty((len(img_paths), size, size, 3),
                               dtype=np.uint8),
            'bottlenecks': np.empty((len(img_paths), 2048), dtype=np.float32)
        }
        i = 0
        with tf.Session() as sess:
            saver.restore(sess, checkpoint)
            for c in chunks(img_paths, batch_size):
                batch = []
                inner_i = 0
                for file_name in c:
                    name = name_regex.match(file_name).group(1)
                    try:
                        image = Image.open(file_name)
                    except Exception:
                        print('Could not open image {}'.format(file_name))
                        continue
                    img, resized_img = preprocess_image(image, size)
                    batch.append(img)
                    data['images'][i + inner_i] = resized_img
                    data['image_names'].append(name)
                    inner_i += 1
                feed_dict = {input_images: np.asarray(batch)}
                # Run the evaluation on the image
                bottleneck_eval = sess.run(bottleneck, feed_dict=feed_dict)
                inner_i = 0
                for bn in bottleneck_eval:
                    # Resize the bottlenecks to the 0 - 1 range
                    data['bottlenecks'][i + inner_i] = (np.squeeze(bn) +
                                                        0.3) * (1.0 / 8.0)
                    inner_i += 1
                i += inner_i
            if i < len(img_paths):
                for key in ['images', 'bottlenecks']:
                    prev_shape = list(data[key].shape)
                    prev_shape[0] = i
                    data[key].resize(prev_shape)
            return data
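The chunks helper used in the batching loop is not included in the snippet; a conventional definition (an assumption) is:

def chunks(lst, n):
    # Yield successive n-sized slices of lst
    for i in range(0, len(lst), n):
        yield lst[i:i + n]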
Example #24
def run():
    #Create the log directory here. It must be done here, otherwise importing would trigger it needlessly.
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    #======================= TRAINING PROCESS =========================
    #Now we start to construct the graph and build our model
    with tf.Graph().as_default() as graph:
        tf.logging.set_verbosity(tf.logging.INFO) #Set the verbosity to INFO level

        #First create the dataset and load one batch
        dataset = get_split('train', dataset_dir, file_pattern=file_pattern)
        images, _, labels = load_batch(dataset, batch_size=batch_size)

        #Know the number of steps to take before decaying the learning rate, and the number of batches per epoch
        num_batches_per_epoch = dataset.num_samples // batch_size
        num_steps_per_epoch = num_batches_per_epoch #Because one step is one batch processed
        decay_steps = int(num_epochs_before_decay * num_steps_per_epoch)

        #Create the model inference
        with slim.arg_scope(inception_v3_arg_scope()):
            logits, end_points = inception_v3(images, num_classes = dataset.num_classes, is_training = True)

        #Perform one-hot-encoding of the labels (Try one-hot-encoding within the load_batch function!)
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)

        #Performs the equivalent to tf.nn.sparse_softmax_cross_entropy_with_logits but enhanced with checks
        loss = tf.losses.softmax_cross_entropy(onehot_labels = one_hot_labels, logits = logits)
        total_loss = tf.losses.get_total_loss()    #obtain the regularization losses as well

        #Create the global step for monitoring the learning_rate and training.
        global_step = get_or_create_global_step()

        #Define your exponentially decaying learning rate
        lr = tf.train.exponential_decay(
            learning_rate = initial_learning_rate,
            global_step = global_step,
            decay_steps = decay_steps,
            decay_rate = learning_rate_decay_factor,
            staircase = True)

        #Now we can define the optimizer that takes on the learning rate
        optimizer = tf.train.AdamOptimizer(learning_rate = lr)
        #optimizer = tf.train.RMSPropOptimizer(learning_rate = lr, momentum=0.9)

        #Create the train_op.
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        #State the metrics that you want to track. The predictions are not one-hot encoded.
        predictions = tf.argmax(end_points['Predictions'], 1)
        probabilities = end_points['Predictions']
        accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(predictions, labels)
        metrics_op = tf.group(accuracy_update, probabilities)


        #Now finally create all the summaries you need to monitor and group them into one summary op.
        tf.summary.scalar('losses/Total_Loss', total_loss)
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('learning_rate', lr)
        my_summary_op = tf.summary.merge_all()

        #Now we need to create a training step function that runs both the train_op, metrics_op and updates the global_step concurrently.
        def train_step(sess, train_op, global_step):
            '''
            Simply runs a session for the three arguments provided and gives a logging on the time elapsed for each global step
            '''
            #Check the time for each sess run
            start_time = time.time()
            total_loss, global_step_count, _ = sess.run([train_op, global_step, metrics_op])
            time_elapsed = time.time() - start_time

            #Run the logging to print some results
            logging.info('global step %s: loss: %.4f (%.2f sec/step)', global_step_count, total_loss, time_elapsed)

            return total_loss, global_step_count


        #Define your supervisor for running a managed session. Do not run the summary_op automatically or else it will consume too much memory
        sv = tf.train.Supervisor(logdir = log_dir, summary_op = None)

        #Run the managed session
        with sv.managed_session() as sess:
            for step in range(num_steps_per_epoch * num_epochs):
                #At the start of every epoch, show the vital information:
                if step % num_batches_per_epoch == 0:
                    logging.info('Epoch %s/%s', step/num_batches_per_epoch + 1, num_epochs)
                    learning_rate_value, accuracy_value = sess.run([lr, accuracy])
                    logging.info('Current Learning Rate: %s', learning_rate_value)
                    logging.info('Current Streaming Accuracy: %s', accuracy_value)

                    # optionally, print your logits and predictions for a sanity check that things are going fine.
                    logits_value, probabilities_value, predictions_value, labels_value = sess.run([logits, probabilities, predictions, labels])
                    print('logits: \n', logits_value[:5])
                    print('Probabilities: \n', probabilities_value[:5])
                    print('predictions: \n', predictions_value[:100])
                    print('Labels:\n:', labels_value[:100])

                #Log the summaries every 10 step.
                if step % 10 == 0:
                    loss, _ = train_step(sess, train_op, sv.global_step)
                    summaries = sess.run(my_summary_op)
                    sv.summary_computed(sess, summaries)
                    
                #If not, simply run the training step
                else:
                    loss, _ = train_step(sess, train_op, sv.global_step)

            #We log the final training loss and accuracy
            logging.info('Final Loss: %s', loss)
            logging.info('Final Accuracy: %s', sess.run(accuracy))

            #Once all the training has been done, save the log files and checkpoint model
            logging.info('Finished training! Saving model to disk now.')
Example #25
def run():
    #Create log_dir for evaluation information
    if not os.path.exists(log_eval):
        os.mkdir(log_eval)

    #Just construct the graph from scratch again
    with tf.Graph().as_default() as graph:
        tf.logging.set_verbosity(tf.logging.INFO)
        #Get the dataset first and load one batch of validation images and labels tensors. Set is_training as False so as to use the evaluation preprocessing
        dataset = get_split('validation', dataset_dir)
        images, raw_images, labels = load_batch(dataset,
                                                batch_size=batch_size,
                                                is_training=False)

        #Create some information about the training steps
        num_batches_per_epoch = dataset.num_samples / batch_size
        num_steps_per_epoch = num_batches_per_epoch

        #Now create the inference model but set is_training=False
        with slim.arg_scope(inception_v3_arg_scope()):
            logits, end_points = inception_v3(images,
                                              num_classes=dataset.num_classes,
                                              is_training=False)

        # Get all the variables to restore from the checkpoint file and create the saver to restore them
        variables_to_restore = slim.get_variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        def restore_fn(sess):
            return saver.restore(sess, checkpoint_file)

        #Just define the metrics to track without the loss or whatsoever
        probabilities = end_points['Predictions']
        predictions = tf.argmax(probabilities, 1)

        accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(
            predictions, labels)
        metrics_op = tf.group(accuracy_update)

        #Create the global step and an increment op for monitoring
        global_step = get_or_create_global_step()
        global_step_op = tf.assign(
            global_step, global_step + 1
        )  #no apply_gradient method so manually increasing the global_step

        #Create a evaluation step function
        def eval_step(sess, metrics_op, global_step):
            '''
            Simply takes in a session, runs the metrics op and some logging information.
            '''
            start_time = time.time()
            _, global_step_count, accuracy_value = sess.run(
                [metrics_op, global_step_op, accuracy])
            time_elapsed = time.time() - start_time

            #Log some information
            logging.info(
                'Global Step %s: Streaming Accuracy: %.4f (%.2f sec/step)',
                global_step_count, accuracy_value, time_elapsed)

            return accuracy_value

        #Define some scalar quantities to monitor
        tf.summary.scalar('Validation_Accuracy', accuracy)
        my_summary_op = tf.summary.merge_all()

        #Get your supervisor
        sv = tf.train.Supervisor(logdir=log_eval,
                                 summary_op=None,
                                 init_fn=restore_fn)

        #Now we are ready to run in one session
        with sv.managed_session() as sess:
            for step in range(int(num_batches_per_epoch * num_epochs)):
                #print vital information every start of the epoch as always
                if step % num_batches_per_epoch == 0:
                    logging.info('Epoch: %s/%s',
                                 step / num_batches_per_epoch + 1, num_epochs)
                    logging.info('Current Streaming Accuracy: %.4f',
                                 sess.run(accuracy))

                #Compute summaries every 10 steps and continue evaluating
                if step % 10 == 0:
                    eval_step(sess,
                              metrics_op=metrics_op,
                              global_step=sv.global_step)
                    summaries = sess.run(my_summary_op)
                    sv.summary_computed(sess, summaries)

                #Otherwise just run as per normal
                else:
                    eval_step(sess,
                              metrics_op=metrics_op,
                              global_step=sv.global_step)

            #At the end of all the evaluation, show the final accuracy
            logging.info('Final Streaming Accuracy: %.4f', sess.run(accuracy))

            #Now we want to visualize the last batch's images just to see what our model has predicted
            raw_images, labels, predictions, probabilities = sess.run(
                [raw_images, labels, predictions, probabilities])
            for i in range(10):
                image, label, prediction, probability = raw_images[i], labels[
                    i], predictions[i], probabilities[i]
                prediction_name, label_name = dataset.labels_to_name[
                    prediction], dataset.labels_to_name[label]
                text = 'Prediction: %s \n Ground Truth: %s \n Probability: %s' % (
                    prediction_name, label_name, probability[prediction])
                img_plot = plt.imshow(image)

                #Set up the plot and hide axes
                plt.title(text)
                img_plot.axes.get_yaxis().set_ticks([])
                img_plot.axes.get_xaxis().set_ticks([])
                plt.show()

            logging.info(
                'Model evaluation has completed! Visit TensorBoard for more information regarding your evaluation.'
            )
            sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
Example #26
# -*- coding:utf-8 -*-

import tensorflow as tf
import inception_v3 as iv3

FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("ckpt_file", 'ckpt/inception_v3.ckpt',
                       "Inception-v3 checkpoint file.")
tf.flags.DEFINE_string('log_dir', 'logs/', "TensorBoard log directory.")
tf.flags.DEFINE_string('output_dir', './ckpt/', "Output directory.")
tf.flags.DEFINE_string('output_file', 'inception_v3.pb', "Output file name.")

# Load Inception-v3
input_img = tf.placeholder(tf.float32, [None, 299, 299, 3], name='input_image')
arg_scope = iv3.inception_v3_arg_scope()
with tf.contrib.slim.arg_scope(arg_scope):
    logits, end_points = iv3.inception_v3(inputs=input_img,
                                          is_training=False,
                                          num_classes=1001)

# Get the computation graph
graph = tf.get_default_graph()

# Write out the graph so it can be inspected in TensorBoard
writer = tf.summary.FileWriter(FLAGS.log_dir, graph)
writer.close()

# Export in .pb format
tf.train.write_graph(graph, FLAGS.output_dir, FLAGS.output_file)
saver = tf.train.Saver()
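Note that tf.train.write_graph above saves only the graph structure, not the weights. Baking the checkpointed weights into the .pb would additionally require freezing the variables, along the lines of Example #1 (a sketch; assumes the checkpoint restores cleanly into this graph):

from tensorflow.python.framework import graph_util

with tf.Session() as sess:
    saver.restore(sess, FLAGS.ckpt_file)
    frozen = graph_util.convert_variables_to_constants(
        sess, graph.as_graph_def(), ['InceptionV3/Predictions/Softmax'])
    with tf.gfile.GFile(FLAGS.output_dir + 'frozen_' + FLAGS.output_file, 'wb') as f:
        f.write(frozen.SerializeToString())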
Example #27
 def __init__(self, **kwargs):
     super().__init__('inceptionv3ens4adv.ckpt', 'InceptionV3Ens4Adv', \
             inception_v3.inception_v3_arg_scope(), \
             inception_v3.inception_v3, 1, **kwargs)