Example #1
def flower_train():
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)

        dataset = flowers.get_split('train', flower_data_dir)
        images, _, labels = load_batch(dataset)

        logits = my_cnn(images,
                        num_classes=dataset.num_classes,
                        is_training=True)

        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
        slim.losses.softmax_cross_entropy(logits, one_hot_labels)
        total_loss = slim.losses.get_total_loss()

        tf.summary.scalar('losses/Total Loss', total_loss)

        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        final_loss = slim.learning.train(train_op,
                                         logdir=train_dir,
                                         number_of_steps=100,
                                         save_summaries_secs=1)

    print('Finished training... Final batch loss %d' % final_loss)
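Most of the examples in this collection call a load_batch helper that only appears in fragments later on (Examples #19, #20 and #22). For orientation, here is a minimal sketch of what such a helper might look like, returning the (images, images_raw, labels) triple the snippets expect; the default sizes and the simple per-image standardization are assumptions, not the original authors' preprocessing.

import tensorflow as tf

slim = tf.contrib.slim

def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False):
    """Hypothetical helper: one batch of (preprocessed images, raw images, labels)."""
    data_provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset, common_queue_capacity=32, common_queue_min=8)
    image_raw, label = data_provider.get(['image', 'label'])

    # Raw image, only resized, kept around for visualization.
    image_raw = tf.image.resize_images(image_raw, [height, width])

    # Network input: a plain float conversion plus per-image standardization is
    # assumed here; the original helper may use a model-specific preprocessing.
    image = tf.image.per_image_standardization(tf.to_float(image_raw))

    images, images_raw, labels = tf.train.batch(
        [image, image_raw, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=2 * batch_size)
    return images, images_raw, labels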
Example #2
def main(_):

    # 1. change the dataset
    # dataset = imagenet.get_split('train', FLAGS.data_dir)
    dataset = flowers.get_split(FLAGS.data_split, FLAGS.data_dir)

    model = InceptionModel(checkpoints_file=FLAGS.checkpoint_file_path)

    # 2. set the model to inference mode (the original training-mode call is kept below for reference)
    # op, graph = model.build(dataset, image_height=224, image_width=224, num_classes=1000, is_training=True)
    op, graph = model.build(dataset,
                            image_height=224,
                            image_width=224,
                            num_classes=1000,
                            is_training=False)

    # 3. comment out the actual training code
    # slim.learning.train(
    #     op,
    #     logdir=train_dir,
    #     init_fn=model.init_fn,
    #     number_of_steps=100)

    # 4. dump model to the specified path
    from bigdl.util.tf_utils import dump_model
    dump_model(path=FLAGS.dump_model_path,
               ckpt_file=FLAGS.checkpoint_file_path,
               graph=graph)
Example #3
def eval():
    # This might take a few minutes.
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.DEBUG)
        
        dataset = flowers.get_split('validation', flowers_data_dir)
        images, _, labels = load_batch(dataset)
        
        logits,_ = squeezenet.squeezenet(images, num_classes=dataset.num_classes, is_training=False)
        predictions = tf.argmax(logits, 1)
        
        # Define the metrics:
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
            'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
            'eval/Recall@5': slim.metrics.streaming_recall_at_k(logits, labels, 5),
        })
    
        num_batches = math.ceil(dataset.num_samples / 32)
        print('Running evaluation Loop...')
        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        metric_values = slim.evaluation.evaluate_once(
            master='',
            checkpoint_path=checkpoint_path,
            logdir=train_dir,
            num_evals=num_batches,
            eval_op=names_to_updates.values(),
            final_op=names_to_values.values())
    
        names_to_values = dict(zip(names_to_values.keys(), metric_values))
        for name in names_to_values:
            print('%s: %f' % (name, names_to_values[name]))
Example #4
def train():

    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        
        dataset = flowers.get_split('train', flowers_data_dir)
        images, _, labels = load_batch(dataset)
      
        # Create the model:
        logits, _ = squeezenet.squeezenet(images, num_classes=dataset.num_classes, is_training=True)
     
        # Specify the loss function:
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
        slim.losses.softmax_cross_entropy(logits, one_hot_labels)
        total_loss = slim.losses.get_total_loss()
    
        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total Loss', total_loss)
      
        # Specify the optimizer and create the train op:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train_op = slim.learning.create_train_op(total_loss, optimizer)
    
        # Run the training:
        final_loss = slim.learning.train(
          train_op,
          logdir=train_dir,
          number_of_steps=100, # For speed, we just do 1 epoch
          save_interval_secs=600,
          save_summaries_secs=6000,
          log_every_n_steps=1)
      
        print('Finished training. Final batch loss %d' % final_loss)
Example #5
def flower_eval():
    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = flowers.get_split('train', flower_data_dir)
    images, _, labels = load_batch(dataset)

    logits = my_cnn(images, num_classes=dataset.num_classes, is_training=False)
    predictions = tf.argmax(logits, 1)

    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'eval/Accuracy':
        slim.metrics.streaming_accuracy(predictions, labels),
        'eval/Recall@5':
        slim.metrics.streaming_recall_at_k(logits, labels, 5)
    })

    print('Running evaluation loop...')
    checkpoint_path = tf.train.latest_checkpoint(train_dir)
    metric_values = slim.evaluation.evaluate_once(
        master='',
        checkpoint_path=checkpoint_path,
        logdir=train_dir,
        eval_op=names_to_updates.values(),
        final_op=names_to_values.values())

    names_to_values = dict(zip(names_to_values.keys(), metric_values))
    for name in names_to_values:
        print('%s: %f' % (name, names_to_values[name]))
Example #6
    def adapt_pretrain_imagenet_to_flower():
        import os
        from datasets import flowers
        from nets import inception
        from preprocessing import inception_preprocessing

        slim = tf.contrib.slim
        image_size = inception.inception_v1.default_image_size

        def get_init_fn():
            checkpoint_exclude_scopes = [
                'InceptionV1/Logits', 'InceptionV1/AuxLogits'
            ]
            exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]
            variables_to_restore = []
            for var in slim.get_model_variables():
                excluded = False
                for exclusion in exclusions:
                    if var.op.name.startswith(exclusion):
                        excluded = True
                        break
                if not excluded:
                    variables_to_restore.append(var)

            return slim.assign_from_checkpoint_fn(
                os.path.join(checkpoint_dir, 'inception_v1.ckpt'),
                variables_to_restore)

        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)
            dataset = flowers.get_split('train', flower_data_dir)
            images, _, labels = load_batch(dataset,
                                           height=image_size,
                                           width=image_size)

            with slim.arg_scope(inception.inception_v1_arg_scope()):
                logits, _ = inception.inception_v1(
                    images, num_classes=dataset.num_classes, is_training=True)

                one_hot_labels = slim.one_hot_encoding(labels,
                                                       dataset.num_classes)
                slim.losses.softmax_cross_entropy(logits, one_hot_labels)
                total_loss = slim.losses.get_total_loss()

                tf.summary.scalar('losses/Total Loss', total_loss)

                optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
                train_op = slim.learning.create_train_op(total_loss, optimizer)

                final_loss = slim.learning.train(train_op,
                                                 logdir=train_dir,
                                                 init_fn=get_init_fn(),
                                                 number_of_steps=2)

        print("Finished training. Las batch loss %f" % final_loss)
Example #7
    def eval_adapt_pretrained_to_flower():
        import numpy as np
        import tensorflow as tf
        import matplotlib.pyplot as plt
        from datasets import flowers
        from nets import inception

        slim = tf.contrib.slim

        image_size = inception.inception_v1.default_image_size
        batch_size = 8

        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)
            dataset = flowers.get_split('train', flower_data_dir)
            images, images_raw, labels = load_batch(dataset,
                                                    height=image_size,
                                                    width=image_size)

            with slim.arg_scope(inception.inception_v1_arg_scope()):
                logits, _ = inception.inception_v1(
                    images, num_classes=dataset.num_classes, is_training=True)
                probabilities = tf.nn.softmax(logits)

                checkpoint_path = tf.train.latest_checkpoint(train_dir)
                init_fn = slim.assign_from_checkpoint_fn(
                    checkpoint_path, slim.get_variables_to_restore())
                with tf.Session() as sess:
                    with slim.queues.QueueRunners(sess):
                        sess.run(tf.initialize_local_variables())
                        init_fn(sess)
                        np_probabilities, np_images_raw, np_labels = sess.run(
                            [probabilities, images_raw, labels])

                        for i in xrange(batch_size):
                            image = np_images_raw[i, :, :, :]
                            true_label = np_labels[i]
                            predicted_label = np.argmax(np_probabilities[i, :])
                            predicted_name = dataset.labels_to_names[
                                predicted_label]
                            true_name = dataset.labels_to_names[true_label]

                            plt.figure()
                            plt.imshow(image.astype(np.uint8))
                            plt.title('Ground Truth: [%s], Prediction [%s]' %
                                      (true_name, predicted_name))
                            plt.axis('off')
                            plt.show()
Example #8
    def use_fined_model(self):
        image_size = inception.inception_v4.default_image_size
        batch_size = 3
        flowers_data_dir = "../../data/flower"
        train_dir = '/tmp/inception_finetuned/'

        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)

            dataset = flowers.get_split('train', flowers_data_dir)
            images, images_raw, labels = self.load_batch(dataset,
                                                         height=image_size,
                                                         width=image_size)

            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v4_arg_scope()):
                logits, _ = inception.inception_v4(
                    images, num_classes=dataset.num_classes, is_training=True)

            probabilities = tf.nn.softmax(logits)

            checkpoint_path = tf.train.latest_checkpoint(train_dir)
            init_fn = slim.assign_from_checkpoint_fn(
                checkpoint_path, slim.get_variables_to_restore())

            with tf.Session() as sess:
                with slim.queues.QueueRunners(sess):
                    sess.run(tf.initialize_local_variables())
                    init_fn(sess)
                    np_probabilities, np_images_raw, np_labels = sess.run(
                        [probabilities, images_raw, labels])

                    for i in range(batch_size):
                        image = np_images_raw[i, :, :, :]
                        true_label = np_labels[i]
                        predicted_label = np.argmax(np_probabilities[i, :])
                        predicted_name = dataset.labels_to_names[
                            predicted_label]
                        true_name = dataset.labels_to_names[true_label]

                        plt.figure()
                        plt.imshow(image.astype(np.uint8))
                        plt.title('Ground Truth: [%s], Prediction [%s]' %
                                  (true_name, predicted_name))
                        plt.axis('off')
                        plt.show()
                return
Example #9
def train():
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)  # show INFO logs

        dataset = flowers.get_split(
            'train', flowers_data_dir
        )  # TODO Add directory of dataset, Check for format of dataset!
        images, _, labels = load_batch(dataset,
                                       height=image_size,
                                       width=image_size)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(resnet_v2.resnet_arg_scope()):
            logits, _ = resnet_v2.resnet_v2_50(
                images, num_classes=dataset.num_classes,
                is_training=True)  # TODO Choose Model (50, 101, 152, ...)

        # Specify the loss function:
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)

        tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels, logits=logits)

        total_loss = tf.losses.get_total_loss()

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total Loss', total_loss)

        #TODO Testing learning rate decay
        #starter_learning_rate = 0.01
        #learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
        #                                   100000, 0.96, staircase=True)

        # Specify the optimizer and create the train op:
        optimizer = tf.train.MomentumOptimizer(
            learning_rate=learning_rate, momentum=0.9
        )  # TODO add lowering of learning_rate, add weight decay (resnet_utils: weight_decay -> 0.0002)
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        # Run the training:
        final_loss = slim.learning.train(train_op,
                                         logdir=train_dir,
                                         init_fn=get_init_fn(),
                                         number_of_steps=1030)

    print('Finished training. Last batch loss %f' % final_loss)
Example #10
def eval():
    # This might take a few minutes.
    with tf.Graph().as_default():
        #tf.logging.set_verbosity(tf.logging.INFO)

        dataset = flowers.get_split(
            'train', flowers_data_dir
        )  # TODO Add directory of dataset, Check for format of dataset!
        images, images_raw, labels = load_batch(
            dataset, height=image_size, width=image_size
        )  # TODO load_batch really necessary? Processing all!

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(resnet_v2.resnet_arg_scope()):
            logits, _ = resnet_v2.resnet_v2_50(
                images, num_classes=dataset.num_classes,
                is_training=True)  # TODO Choose Model (50, 101, 152, ...)

        predictions = tf.argmax(logits, 1)

        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path, slim.get_variables_to_restore())

        # Define the metrics:
        names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
            'eval/Accuracy':
            slim.metrics.streaming_accuracy(predictions, labels),
            'eval/Recall@5':
            slim.metrics.streaming_sparse_recall_at_k(logits, labels, 5)
        })

        print('Running evaluation Loop...')
        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        metric_values = slim.evaluation.evaluate_once(
            master='',
            checkpoint_path=checkpoint_path,
            logdir=train_dir,
            eval_op=list(names_to_updates.values()),
            final_op=list(names_to_values.values()))

        names_to_values = dict(zip(names_to_values.keys(), metric_values))
        for name in names_to_values:
            print('%s: %f' % (name, names_to_values[name]))
Example #11
def display_some_data():

    with tf.Graph().as_default():
        dataset = flowers.get_split('train', flower_data_dir)
        data_provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset, common_queue_capacity=32, common_queue_min=1)
        image, label = data_provider.get(['image', 'label'])
        with tf.Session() as sess:
            with slim.queues.QueueRunners(sess):
                for i in xrange(4):
                    np_image, np_label = sess.run([image, label])
                    height, width, _ = np_image.shape
                    name = dataset.labels_to_names[np_label]

                    plt.figure()
                    plt.imshow(np_image)
                    plt.title('%s, %d x %d' % (name, height, width))
                    plt.axis('off')
                    plt.show()
Example #12
    def fine_tune_inception(self):
        train_dir = '/tmp/inception_finetuned/'
        image_size = inception.inception_v4.default_image_size
        checkpoint_path = "../../data/trained_models/inception_v4/inception_v4.ckpt"
        flowers_data_dir = "../../data/flower"

        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)

            dataset = flowers.get_split('train', flowers_data_dir)
            images, _, labels = self.load_batch(dataset,
                                                height=image_size,
                                                width=image_size)

            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v4_arg_scope()):
                logits, _ = inception.inception_v4(
                    images, num_classes=dataset.num_classes, is_training=True)

            # Specify the loss function:
            one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
            total_loss = slim.losses.softmax_cross_entropy(
                logits, one_hot_labels)
            #             total_loss = slim.losses.get_total_loss(add_regularization_losses=False)
            #             total_loss = slim.losses.get_total_loss()

            # Create some summaries to visualize the training process:
            tf.summary.scalar('losses/Total_Loss', total_loss)

            # Specify the optimizer and create the train op:
            optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
            train_op = slim.learning.create_train_op(total_loss, optimizer)

            # Run the training:
            number_of_steps = math.ceil(dataset.num_samples / 32) * 1
            final_loss = slim.learning.train(
                train_op,
                logdir=train_dir,
                init_fn=self.get_init_fn(checkpoint_path),
                number_of_steps=number_of_steps)

            print('Finished training. Last batch loss %f' % final_loss)
        return
Example #13
def fine_tune():
    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)

        dataset = flowers.get_split('train', flowers_data_dir)
        images, _, labels = load_batch(dataset,
                                       height=image_size,
                                       width=image_size)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, _ = inception.inception_v1(images,
                                               num_classes=dataset.num_classes,
                                               is_training=True)

        # Specify the loss function:
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
        slim.losses.softmax_cross_entropy(logits, one_hot_labels)
        total_loss = slim.losses.get_total_loss()

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total Loss', total_loss)

        # Specify the optimizer and create the train op:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        # Run the training:
        final_loss = slim.learning.train(
            train_op,
            logdir=train_dir,
            init_fn=get_init_fn(),
            number_of_steps=1000,  # For speed, we just do 1 epoch
            save_interval_secs=600,
            save_summaries_secs=6000,
            log_every_n_steps=1,
        )

    print('Finished training. Last batch loss %f' % final_loss)
Example #14
def main(_):

    # 1. change the dataset
    # dataset = imagenet.get_split('train', FLAGS.data_dir)
    dataset = flowers.get_split(FLAGS.data_split, FLAGS.data_dir)

    model = InceptionModel(checkpoints_file=FLAGS.checkpoint_file_path)

    # 2. set the model to inference mode (the original training-mode call is kept below for reference)
    # op, graph = model.build(dataset, image_height=224, image_width=224, num_classes=1000, is_training=True)
    op, graph = model.build(dataset, image_height=224, image_width=224, num_classes=1000, is_training=False)

    # 3. comment out the actual training code
    # slim.learning.train(
    #     op,
    #     logdir=train_dir,
    #     init_fn=model.init_fn,
    #     number_of_steps=100)

    # 4. dump model to the specified path
    from bigdl.util.tf_utils import dump_model
    dump_model(path=FLAGS.dump_model_path, ckpt_file=FLAGS.checkpoint_file_path, graph=graph)
Example #15
def main(args):
    # load the dataset
    dataset = flowers.get_split('train', FLAGS.data_dir)
    # dataset = cifar10.get_split('train', FLAGS.data_dir)
    # load batch of dataset
    images, image_raw, labels = load_flower_batch(dataset,
                                                  FLAGS.batch_size,
                                                  is_training=True)
    #images, labels = load_mnist_batch(
    #    dataset,
    #    FLAGS.batch_size,
    #    is_training=True)
    # run the image through the model
    # predictions = lenet(images)
    logits, end_points = inception_v3(images, dataset.num_classes)

    # get the cross-entropy loss
    one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
    slim.losses.softmax_cross_entropy(logits, one_hot_labels)
    total_loss = slim.losses.get_total_loss()
    tf.summary.scalar('loss', total_loss)

    # use RMSProp to optimize
    optimizer = tf.train.RMSPropOptimizer(0.001, 0.9)

    # create train op
    train_op = slim.learning.create_train_op(total_loss,
                                             optimizer,
                                             summarize_gradients=True)

    # run training
    slim.learning.train(train_op,
                        FLAGS.train_log,
                        save_summaries_secs=30,
                        number_of_steps=5000,
                        save_interval_secs=60)
Example #16
def run(config):
    # Specify where the Model, trained on ImageNet, was saved.

    # This might take a few minutes.
    train_summary_dir = os.path.join("/data/summary/flowers/",
                                     config.MODEL_NAME,
                                     str(config.TRAIN_LEARNING_RATE), "train")
    checkpoint_dir = os.path.join('/data/checkpoints/flowers/',
                                  config.MODEL_NAME,
                                  str(config.TRAIN_LEARNING_RATE))

    # Create the log directory here; creating it at import time would do so unnecessarily.
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)

    # ======================= TRAINING PROCESS =========================
    # Now we start to construct the graph and build our model
    with tf.Graph().as_default() as graph:
        tf.logging.set_verbosity(
            tf.logging.INFO)  # Set the verbosity to INFO level

        # First create the dataset and load one batch
        train_dataset = flowers.get_split('train', config.TRAIN_TF_RECORDS)
        images, labels = dataset.load_batch(train_dataset,
                                            batch_size=config.TRAIN_BATCH_SIZE,
                                            width=config.INPUT_WIDTH,
                                            is_training=True)

        # Compute the number of batches per epoch and the number of steps to take before decaying the learning rate
        num_batches_per_epoch = int(train_dataset.num_samples /
                                    config.TRAIN_BATCH_SIZE)
        num_steps_per_epoch = num_batches_per_epoch  # Because one step is one batch processed
        decay_steps = int(config.TRAIN_EPOCHS_BEFORE_DECAY *
                          num_steps_per_epoch)

        # Create the model inference
        net_fn = nets_factory.get_network_fn(
            config.PRETAIN_MODEL,
            train_dataset.num_classes,
            weight_decay=config.L2_WEIGHT_DECAY,
            is_training=True)

        logits, end_points = net_fn(images)

        # Define the scopes that you want to exclude for restoration
        variables_to_restore = slim.get_variables_to_restore(
            exclude=config.EXCLUDE_NODES)

        # Performs the equivalent to tf.nn.sparse_softmax_cross_entropy_with_logits but enhanced with checks
        entropy_loss = tf.losses.sparse_softmax_cross_entropy(labels=labels,
                                                              logits=logits)
        tf.summary.scalar('losses/entropy_loss', entropy_loss)

        regular_loss = tf.losses.get_regularization_loss()
        tf.summary.scalar('losses/regular_loss', regular_loss)

        # obtain the regularization losses as well
        total_loss = tf.losses.get_total_loss()
        tf.summary.scalar('losses/total_loss', total_loss)

        # # Specify the loss function, this will add regulation loss as well:
        # one_hot_labels = slim.one_hot_encoding(labels, 5)
        # slim.losses.softmax_cross_entropy(logits, one_hot_labels)
        # total_loss = slim.losses.get_total_loss()

        # Create the global step for monitoring the learning_rate and training.
        global_step = tf.train.get_or_create_global_step()

        # Define your exponentially decaying learning rate
        lr = tf.train.exponential_decay(
            learning_rate=config.TRAIN_LEARNING_RATE,
            global_step=global_step,
            decay_steps=decay_steps,
            decay_rate=config.TRAIN_RATE_DECAY_FACTOR,
            staircase=True)

        # Now we can define the optimizer that takes on the learning rate
        optimizer = tf.train.AdamOptimizer(learning_rate=lr)

        # Create the train_op.
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        # Define the metrics to track. The predictions are class indices, not one-hot encoded.
        predictions = tf.argmax(end_points['Predictions'], 1)
        probabilities = end_points['Predictions']
        accuracy, accuracy_update = tf.metrics.accuracy(labels, predictions)
        metrics_op = tf.group(accuracy_update, probabilities)

        # Now finally create all the summaries you need to monitor and group them into one summary op.
        # precision, recall, f1, _ = score(labels, predictions)
        # tf.summary.scalar('precision', np.mean(precision))
        # tf.summary.scalar('Recall', np.mean(recall))
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('learning_rate', lr)
        my_summary_op = tf.summary.merge_all()

        # Now we need to create a training step function that runs both the train_op,
        # metrics_op and updates the global_step concurrently.
        def train_step(sess_, train_op_, global_step_):
            '''
            Simply runs a session for the three arguments provided and gives a logging on the time elapsed for each global step
            '''
            # Check the time for each sess run
            start_time = time.time()
            total_loss_, global_step_count, _ = sess_.run(
                [train_op_, global_step_, metrics_op])
            time_elapsed = time.time() - start_time

            # Run the logging to print some results
            print('global step {}: loss: {:.4f} ({:.2f} sec/step)'.format(
                global_step_count, total_loss_, time_elapsed))

            return total_loss_, global_step_count

        # Now we create a saver function that actually restores the variables from a checkpoint file in a sess
        saver = tf.train.Saver(variables_to_restore)

        def restore_fn(sess_):
            return saver.restore(sess_, config.PRETAIN_MODEL_PATH)

        train_summ_writer = tf.summary.FileWriter(train_summary_dir, graph)

        # Define your supervisor for running a managed session.
        # Do not run the summary_op automatically or else it will consume too much memory
        sv = tf.train.Supervisor(save_model_secs=30,
                                 logdir=checkpoint_dir,
                                 summary_op=None,
                                 init_fn=restore_fn,
                                 summary_writer=train_summ_writer)

        # Run the managed session
        with sv.managed_session() as sess:
            for step in range(num_steps_per_epoch *
                              config.TRAIN_EPOCHS_COUNT):
                # At the start of every epoch, show the vital information:
                if step % num_batches_per_epoch == 0:
                    print('Epoch {}/{}'.format(
                        step / num_batches_per_epoch + 1,
                        config.TRAIN_EPOCHS_COUNT))
                    learning_rate_value, accuracy_value = sess.run(
                        [lr, accuracy])
                    print('Current Learning Rate: {:f}'.format(
                        learning_rate_value))
                    print('Current Streaming Accuracy: {:f}'.format(
                        accuracy_value))

                    # optionally, print your logits and predictions for a sanity check that things are going fine.
                    logits_value, probabilities_value, predictions_value, labels_value = sess.run(
                        [logits, probabilities, predictions, labels])
                    print('predictions: \n', predictions_value)
                    print('Labels:\n', labels_value)

                # Log the summaries every 10 step.
                if step % 10 == 0:
                    loss, _ = train_step(sess, train_op, sv.global_step)
                    summaries = sess.run(my_summary_op)
                    sv.summary_computed(sess, summaries)

                # If not, simply run the training step
                else:
                    loss, _ = train_step(sess, train_op, sv.global_step)

            # We log the final training loss and accuracy
            print('Final Loss: {:f}'.format(loss))
            print('Final Accuracy: {:f}'.format(sess.run(accuracy)))

            # Once all the training has been done, save the log files and checkpoint model
            print('Finished training! Saving model to disk now.')
            sv.saver.save(sess, sv.save_path)
Example #17
import tensorflow as tf
from datasets import flowers

slim = tf.contrib.slim
DATA_DIR = '/data/image-data/flowers_slim'
# Selects the 'validation' dataset.
dataset = flowers.get_split('validation', DATA_DIR)

# Creates a TF-Slim DataProvider which reads the dataset in the background
# during both training and testing.
provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
[image, label] = provider.get(['image', 'label'])
print(image, label)
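The snippet above only builds the image and label tensors; nothing is actually read until a session with the input queue runners is running. A minimal follow-up sketch (not part of the original example, mirroring the pattern of Example #11) to pull one concrete sample:

with tf.Session() as sess:
    with slim.queues.QueueRunners(sess):  # start the DataProvider's queue runners
        np_image, np_label = sess.run([image, label])
        print(np_image.shape, np_label)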
Example #18
def train():
    """Train CIFAR-10 for a number of steps."""
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Create a variable to count the number of train() calls. This equals the
        # number of batches processed * FLAGS.num_gpus.
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)

        # Create the optimizer.
        opt = tf.train.AdamOptimizer(1e-4)

        isTrain_ph = tf.placeholder(tf.bool, shape=None, name="is_train")
        # images = tf.placeholder(tf.float32, [None, 224, 224, 3])
        # labels = tf.placeholder(tf.float32, shape=[1024])

        dataset = flowers.get_split(FLAGS.subset, FLAGS.data_dir)
        provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            shuffle=True,
            common_queue_capacity=2 * FLAGS.batch_size,
            common_queue_min=FLAGS.batch_size)
        [image, label] = provider.get(['image', 'label'])
        image = preprocessing.preprocess_image(image,
                                               224,
                                               224,
                                               is_training=True)
        images, labels = tf.train.batch([image, label],
                                        batch_size=FLAGS.batch_size,
                                        num_threads=4,
                                        capacity=5 * FLAGS.batch_size)

        with tf.variable_scope(tf.get_variable_scope()) as scope:
            loss = cpu_loss(images, labels, scope, isTrain_ph)
            # Calculate the gradients for the batch of data on this CIFAR tower.
            grads = opt.compute_gradients(loss)

        # Apply the gradients to adjust the shared variables.
        # apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        train_op = opt.apply_gradients(grads)

        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())
        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()

        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU
        # implementations.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict={isTrain_ph: False})
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration / FLAGS.num_gpus

                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))

            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Example #19
                                                     width,
                                                     is_training=is_training)

    # Batch it up.
    images_, labels_ = tf.train.batch([image, label],
                                      batch_size=batch_size,
                                      num_threads=4,
                                      capacity=4 * batch_size,
                                      allow_smaller_final_batch=True)

    return images_, labels_


if __name__ == '__main__':
    flowers_data_dir = "/data/flowers"
    train_dataset = flowers.get_split('validation', flowers_data_dir)
    images, labels = load_batch(train_dataset,
                                200,
                                is_training=False,
                                epochs=1)

    total_train_samples = 0

    print(images)
    with tf.Session() as sess:
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)
        with slim.queues.QueueRunners(sess):
            try:
                for step in range(1000):
Example #20
    image_raw = tf.image.resize_images(image_raw, [height, width])
    image_raw = tf.image.convert_image_dtype(image_raw, tf.float32)

    image_raw, labels = tf.train.batch([image_raw, label],
                                       batch_size=batch_size,
                                       num_threads=1,
                                       capacity=2 * batch_size)
    return image_raw, labels


g = tf.Graph()
with g.as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    from datasets import flowers
    dataset = flowers.get_split("train", "./data/flower_photos")
    images, labels = load_batch(dataset)

    with slim.arg_scope(model.inception_resnet_v2_arg_scope()):
        pre, _ = model.inception_resnet_v2(images, num_classes=5)

    probabilities = tf.nn.softmax(pre)
    one_hot_labels = slim.one_hot_encoding(labels, num_classes=5)

    print(one_hot_labels.shape)
    print(probabilities.shape)

    slim.losses.softmax_cross_entropy(probabilities, one_hot_labels)
    total_loss = slim.losses.get_total_loss()

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
Example #21
def run():
    # Create the log directory here; creating it at import time would do so unnecessarily.
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    # ======================= TRAINING PROCESS =========================
    # Now we are ready to construct the graph: set the logging level to INFO
    # (which gives us sufficient information for training purposes) and load our dataset.
    # https://blog.gtwang.org/programming/tensorflow-read-write-tfrecords-data-format-tutorial/
    '''Why use tf.Graph().as_default()?
       A default graph is always registered, so every op and variable is placed into it.
       tf.Graph().as_default() instead creates a new graph and places everything declared
       inside its scope into that graph. If there is only one graph this makes no difference,
       but it is good practice: once you work with several graphs it is easier to see where
       ops and variables live, and it guarantees that after future refactoring the operations
       still belong to the graph you chose initially.
    '''
    with tf.Graph().as_default() as graph:

        # Call datasets/flowers.py, not the function defined above.
        dataset = flowers.get_split('train', flowers_data_dir)
        images, _, labels = load_batch(dataset, batch_size=batch_size)

        # Compute the number of batches per epoch and the number of steps to take before decaying the learning rate
        num_batches_per_epoch = int(dataset.num_samples / batch_size)
        num_steps_per_epoch = num_batches_per_epoch  # because one step processes one batch
        decay_steps = int(num_epochs_before_decay * num_steps_per_epoch)

        # Create the pre-trained model inference.
        # The arg_scope initializes the model with the necessary parameters.
        with slim.arg_scope(inception_resnet_v2_arg_scope()):
            logits, end_points = inception_resnet_v2(
                images, num_classes=dataset.num_classes, is_training=True)
        '''
            What are logits?
            logit = w*x + b, where x is the input, w the weight and b the bias.
            A logit is the output of a neuron before the activation function is applied.

            Define the scopes that you want to exclude from restoration (so they are trained from scratch).
            When training on grayscale images you would also have to exclude the initial input
            convolutional layer, which assumes an RGB image with 3 channels (set channels=3 for the
            image decoder in get_split). In total, these are the 3 scopes you can exclude:
            InceptionResnetV2/AuxLogits
            InceptionResnetV2/Logits
            InceptionResnetV2/Conv2d_1a_3x3 (optional, for grayscale images)
        '''
        exclude = ['InceptionResnetV2/Logits', 'InceptionResnetV2/AuxLogits']
        variables_to_restore = slim.get_variables_to_restore(exclude=exclude)

        # Perform one-hot encoding of the labels (this could also be done inside the load_batch function).
        '''
        Why one-hot labels? They are needed to compute the cross-entropy loss.
        The main benefits are:
        1. The classifier no longer has to treat the label as a single categorical attribute.
        2. The label is expanded into separate features.
        '''

        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)

        # Performs the equivalent of tf.nn.softmax_cross_entropy_with_logits but with additional checks
        # Compute the softmax cross-entropy loss
        loss = tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels,
                                               logits=logits)
        total_loss = tf.losses.get_total_loss(
        )  #obtain the regularization losses as well

        # The total loss is defined as the cross-entropy loss plus all of the weight regularization losses.
        # Create the global step variable for monitoring the learning_rate and training.
        global_step = get_or_create_global_step()

        #Define your exponentially decaying learning rate
        lr = tf.train.exponential_decay(learning_rate=initial_learning_rate,
                                        global_step=global_step,
                                        decay_steps=decay_steps,
                                        decay_rate=learning_rate_decay_factor,
                                        staircase=True)

        #Now we can define the optimizer that takes on the learning rate
        optimizer = tf.train.AdamOptimizer(learning_rate=lr)
        '''
            create_train_op performs additional functions such as gradient clipping or scaling to
            prevent exploding or vanishing gradients. This is preferred over a plain
            Optimizer.minimize call, which simply combines compute_gradients and apply_gradients
            without any gradient processing in between.
        '''
        train_op = slim.learning.create_train_op(total_loss, optimizer)
        ''' 
            Now we simply get the predictions through extracting the probabilities predicted 
            from end_points['Predictions'], 
            and perform an argmax function that returns us the index of the highest probability,
            which is also the class label.
        '''
        #return an index of the Max Value of array ->  https://blog.csdn.net/UESTC_C2_403/article/details/72232807
        #  https://blog.csdn.net/qq575379110/article/details/70538051
        predictions = tf.argmax(end_points['Predictions'], 1)
        probabilities = end_points['Predictions']  #

        accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(
            predictions, labels)
        metrics_op = tf.group(accuracy_update, probabilities)

        #Now finally create all the summaries you need to monitor and group them into one summary op.
        tf.summary.scalar('losses/Total_Loss', total_loss)
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('learning_rate', lr)
        my_summary_op = tf.summary.merge_all()

        # Now we need a training step function that runs the train_op and metrics_op and updates the global_step concurrently.
        # train_step takes in a session and runs all these ops together.
        def train_step(sess, train_op, global_step):
            '''
            Simply runs a session for the three arguments provided and gives a logging on the time elapsed for each global step
            '''
            #Check the time for each sess run
            start_time = time.time()
            '''Invoke sess.run to execute the three ops (train_op, global_step, metrics_op) and return the loss value and the global step count.'''
            total_loss, global_step_count, _ = sess.run(
                [train_op, global_step, metrics_op])

            time_elapsed = time.time() - start_time
            logging.info('global step %s: loss: %.4f (%.2f sec/step)',
                         global_step_count, total_loss, time_elapsed)

            return total_loss, global_step_count

        # Now we create a saver that restores the variables from a checkpoint file inside a session.
        # We have already defined the variables to restore; they must be assigned into the model
        # every time, since we load the pre-trained inception_resnet_v2_2016_08_30 model.

        saver = tf.train.Saver(variables_to_restore)

        def restore_fn(sess):
            return saver.restore(sess, checkpoint_file)

        # Define the supervisor for running a managed session. Do not run the summary_op automatically or it will consume too much memory.
        # A supervisor is especially useful when training models for many days:
        # it handles the summary writer and the initialization of global and local variables.
        sv = tf.train.Supervisor(logdir=log_dir,
                                 summary_op=None,
                                 init_fn=restore_fn)

        '''Why use a managed session?
            It automatically restores from the latest checkpoint, periodically saves
            checkpoints via its saver, and lets us write summaries with summary_computed.
        '''
        # Run the managed session
        with sv.managed_session() as sess:
            for step in range(num_steps_per_epoch * num_epochs):
                #At the start of every epoch, show the vital information:
                if (step % num_batches_per_epoch) == 0:
                    print('Epoch %s/%s' %
                          (step / num_batches_per_epoch + 1, num_epochs))
                    learning_rate_value, accuracy_value = sess.run(
                        [lr, accuracy])
                    print('Current Learning Rate: %s' % (learning_rate_value))
                    print('Current Streaming Accuracy: %s' % (accuracy_value))

                    # optionally, print your logits and predictions for a sanity check that things are going fine.
                    logits_value, probabilities_value, predictions_value, labels_value = sess.run(
                        [logits, probabilities, predictions, labels])
                    print('logits: \n', logits_value)
                    print('Probabilities: \n', probabilities_value)
                    print('predictions: \n', predictions_value)
                    print('Labels:\n:', labels_value)

                #Log the summaries every 10 step.
                if (step % 10) == 0:
                    loss, _ = train_step(sess, train_op, sv.global_step)
                    summaries = sess.run(my_summary_op)
                    sv.summary_computed(sess, summaries)

                #If not, simply run the training step
                else:
                    loss, _ = train_step(sess, train_op, sv.global_step)

            #We log the final training loss and accuracy
            print('Final Loss: %s' % loss)
            print('Final Accuracy: %s' % sess.run(accuracy))

            #Once all the training has been done, save the log files and checkpoint model
            print('Finished training! Saving model to disk now.')
            # saver.save(sess, "./flowers_model.ckpt")
            sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
Example #22
                                                 common_queue_capacity=32,
                                                 common_queue_min=1)
    image_raw, label = data_provider.get(['image', 'label'])
    image_raw = tf.image.resize_images(image_raw, [height, width])
    image_raw = tf.image.convert_image_dtype(image_raw, tf.float32)

    images_raw, labels = tf.train.batch([image_raw, label],
                                        batch_size=batch_size,
                                        num_threads=1,
                                        capacity=2 * batch_size)
    return images_raw, labels


with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)
    dataset = flowers.get_split("train", flowers_data_dir)
    images, labels = load_batch(dataset)

    probabilities = model.Slim_cnn(images, 5)
    probabilities = tf.nn.softmax(probabilities.net)

    one_hot_labels = slim.one_hot_encoding(labels, 5)
    slim.losses.softmax_cross_entropy(probabilities, one_hot_labels)
    total_loss = slim.losses.get_total_loss()

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train_op = slim.learning.create_train_op(total_loss, optimizer)

    final_loss = slim.learning.train(train_op,
                                     logdir=save_model,
                                     number_of_steps=100)
Example #23
    image_raw = tf.image.resize_images(image_raw, [height, width])
    image_raw = tf.image.convert_image_dtype(image_raw, tf.float32)

    images_raw, labels = tf.train.batch([image_raw, label],
                                        batch_size=batch_size,
                                        num_threads=1,
                                        capacity=2 * batch_size)
    return images_raw, labels


g = tf.Graph()
with g.as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    from datasets import flowers
    dataset = flowers.get_split('train', global_variable.flowers_data_dir)
    images, labels = load_batch(dataset)

    with slim.arg_scope(model.inception_resnet_v2_arg_scope()):
        pre, _ = model.inception_resnet_v2(images, num_classes=5)
    probabilities = tf.nn.softmax(pre)

    one_hot_labels = slim.one_hot_encoding(labels, num_classes=5)

    print(one_hot_labels.shape)
    print(probabilities.shape)

    slim.losses.softmax_cross_entropy(probabilities, one_hot_labels)
    total_loss = slim.losses.get_total_loss()

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
Example #24
 def get_input(self, split_name, is_training=True, batch_size=32):
     flowers_data_dir = './data/flowers'
     self.dataset = flowers.get_split(split_name, flowers_data_dir)
     return self.load_batch(self.dataset, batch_size=batch_size, is_training=is_training)
Example #25
 def _input_fn():
     dataset = flowers.get_split(params['split_name'], params['data_dir'])
     images, image_raw, labels = load_flower_batch(
         dataset, params['batch_size'], is_training=params['is_training'])
     return images, labels
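This _input_fn follows the (features, labels) contract of tf.estimator input functions. Below is a hedged sketch of how such a function might be wired into an Estimator; my_model_fn, the params values and the model_dir are placeholders for illustration and are not taken from the original code, and load_flower_batch is assumed to be the project's own helper as in the example above.

import tensorflow as tf
from datasets import flowers  # TF-Slim flowers dataset, as in the example above

def my_model_fn(features, labels, mode, params):
    # Hypothetical model_fn (training mode only): one linear layer, just to make the wiring concrete.
    logits = tf.layers.dense(tf.layers.flatten(features), params['num_classes'])
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    train_op = tf.train.AdamOptimizer(1e-3).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

params = {'split_name': 'train', 'data_dir': '/data/flowers',
          'batch_size': 32, 'is_training': True, 'num_classes': 5}

def _input_fn():
    # Same body as the example above; load_flower_batch is the project's own helper.
    dataset = flowers.get_split(params['split_name'], params['data_dir'])
    images, image_raw, labels = load_flower_batch(
        dataset, params['batch_size'], is_training=params['is_training'])
    return images, labels

estimator = tf.estimator.Estimator(model_fn=my_model_fn,
                                   model_dir='/tmp/flowers_estimator',
                                   params=params)
estimator.train(input_fn=_input_fn, steps=1000)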
Example #26
    image_raw, label = data_provider.get(['image', 'label'])
    image_raw = tf.image.resize_images(image_raw, [height, width])
    image_raw = tf.image.convert_image_dtype(image_raw,tf.float32)

    images_raw, labels = tf.train.batch(
        [image_raw, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=2 * batch_size)
    return images_raw, labels

with tf.Graph().as_default():

    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = flowers.get_split('train', flowers_data_dir)
    images, labels = load_batch(dataset)

    probabilities = model.Slim_cnn(images, 5)
    probabilities = tf.nn.softmax(probabilities.net)

    one_hot_labels = slim.one_hot_encoding(labels, 5)
    slim.losses.softmax_cross_entropy(probabilities, one_hot_labels)
    total_loss = slim.losses.get_total_loss()

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train_op = slim.learning.create_train_op(total_loss, optimizer)

    final_loss = slim.learning.train(
        train_op,
        logdir=save_model,
Example #27
import tensorflow as tf
from datasets import flowers

slim = tf.contrib.slim

# Selects 'validation' dataset.
dataset = flowers.get_split('validation', './flowers_data')

# Creates a TF-Slim DataProvider which reads the dataset in the background
# during both training and testing
provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
[image, label] = provider.get(['image', 'label'])
Example #28
def train():
    # Specify where the Model, trained on ImageNet, was saved.
    inception_v1_model_dir = "/data/inception/v1"

    # This might take a few minutes.
    TRAIN_SUMMARY_DIR = "/data/summary/flowers/train"
    l_rate = 0.0001
    CHECKPOINT_DIR = '/data/checkpoints/flowers/'
    model_name = "slim_inception_v1_ft"
    flowers_data_dir = "/data/flowers"
    batch_size = 64

    checkpoint_dir = os.path.join(CHECKPOINT_DIR, model_name, str(l_rate))
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    print('Will save model to %s' % checkpoint_dir)

    with tf.Graph().as_default() as graph:
        tf.logging.set_verbosity(tf.logging.INFO)

        image_size = inception.inception_v1.default_image_size

        train_dataset = flowers.get_split('train', flowers_data_dir)
        images, labels = dataset.load_batch(train_dataset,
                                            batch_size,
                                            height=image_size,
                                            width=image_size,
                                            is_training=True)

        tf.summary.image('images/train', images)

        # Create the model:
        net_fn = nets_factory.get_network_fn("inception_v1",
                                             train_dataset.num_classes,
                                             is_training=True)
        logits, end_points = net_fn(images)

        # Specify the loss function:
        # one_hot_labels = slim.one_hot_encoding(labels, 5)
        tf.losses.sparse_softmax_cross_entropy(labels, logits)
        total_loss = tf.losses.get_total_loss()

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/total_loss', total_loss)

        # with tf.name_scope('accuracy'):
        #     with tf.name_scope('prediction'):
        #         correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        #     with tf.name_scope('accuracy'):
        #         accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        #     tf.summary.scalar('accuracy', accuracy)
        correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

        global_step = slim.get_or_create_global_step()

        learning_rate = tf.train.exponential_decay(l_rate,
                                                   global_step,
                                                   100,
                                                   0.7,
                                                   staircase=True)

        # Specify the optimizer and create the train op:
        # optimizer = tf.train.AdamOptimizer(learning_rate, global_step=global_step)
        # train_op = slim.learning.create_train_op(total_loss, optimizer)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        train_summ_writer = tf.summary.FileWriter(
            os.path.join(TRAIN_SUMMARY_DIR, model_name, str(l_rate),
                         datetime.datetime.now().strftime("%Y%m%d-%H%M")),
            graph)

        # Run the training:
        final_loss = slim.learning.train(
            train_op,
            global_step=global_step,
            logdir=checkpoint_dir,
            number_of_steps=300,  # For speed, we just do 1 epoch
            save_interval_secs=10,
            save_summaries_secs=1,
            init_fn=get_init_fn(inception_v1_model_dir),
            summary_writer=train_summ_writer)

        print('Finished training. Final batch loss %d' % final_loss)
Example #29
@author: saurabh
"""

import tensorflow as tf
from datasets import flowers
import numpy as np
import skimage.io as io

val_img = []
val_lab = []
cnt = 0

slim = tf.contrib.slim

# Selects the 'validation' dataset.
dataset = flowers.get_split('validation', 'test_slm/flowers')


provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
[image, label] = provider.get(['image', 'label'])

with tf.Session() as sess:
    sess.run([
        tf.local_variables_initializer(),
        tf.global_variables_initializer(),
    ])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
      while not coord.should_stop():
Example #30
import tensorflow as tf
from datasets import flowers

slim = tf.contrib.slim

# Selects the 'validation' dataset.
dataset = flowers.get_split('validation',
                            '/home/navallo/Documents/data/flowers')

# Creates a TF-Slim DataProvider which reads the dataset in the background
# during both training and testing.
provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
[image, label] = provider.get(['image', 'label'])
print([label])
sess = tf.InteractiveSession()
init_op = tf.initialize_all_variables()

node1 = label
print(node1)
#node1.eval()
Example #31
        for exclusion in exclusions:
            if var.op.name.startswith(exclusion):
                break
        else:
            variables_to_restore.append(var)

    return slim.assign_from_checkpoint_fn(
        './pre_trained_models/inception_v1.ckpt', variables_to_restore)


train_dir = './models/inception_finetuned_model/'

with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)

    dataset = flowers.get_split('train', "./data/flowers")
    images, _, labels = load_batch(dataset,
                                   height=image_size,
                                   width=image_size)

    # Create the model, use the default arg scope to configure the batch norm parameters.
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        logits, _ = inception.inception_v1(images,
                                           num_classes=dataset.num_classes,
                                           is_training=True)

    # Specify the loss function:
    one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
    slim.losses.softmax_cross_entropy(logits, one_hot_labels)
    total_loss = slim.losses.get_total_loss()