def test():
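    """Smoke-tests the UCF11 input pipeline.

    Reads one preprocessed batch through a FIFO queue, computes Sobel
    gradients on a clip and writes the per-channel responses to disk as
    grayscale images.
    """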
    print('Test read data')
    dataset = ucf11.get_split('test', 'data/UCF11-tfrecord')
    input, label = ucf11.build_data(dataset, 'train', batch_size=BATCH_SIZE)

    new_input = off_preprocessing.preprocess_image(input,
                                                   240,
                                                   240,
                                                   is_training=True)

    example_queue = tf.FIFOQueue(
        3 * BATCH_SIZE,
        dtypes=[tf.float32, tf.uint8, tf.int32],
        shapes=[_OUTPUT_SHAPES, _ORIGINAL_OUTPUT_SHAPES, []])
    num_threads = 1
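    # The queue holds (preprocessed clip, original clip, label) triples and is
    # filled by the background queue-runner threads registered below.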

    example_queue_op = example_queue.enqueue([new_input, input, label])
    tf.train.add_queue_runner(
        tf.train.queue_runner.QueueRunner(example_queue,
                                          enqueue_ops=[example_queue_op] *
                                          num_threads))

    new_inputs, inputs, labels = example_queue.dequeue_many(BATCH_SIZE)

    new_images = tf.unstack(new_inputs, axis=0)

    print(new_images[0])

    fx, fy = sobel(new_images[0])

    spax = tf.unstack(fx, axis=0)
    spay = tf.unstack(fy, axis=0)

    print(fx)
    print(fy)
    print(spax)

    with tf.Session() as sess:
        tf.train.start_queue_runners(sess)
        sx, sy, l = sess.run([spax[0], spay[0], labels])
        for i in range(sx.shape[2]):
            image.imsave('%d_fx%d.png' % (l[0], i), sx[:, :, i], cmap='gray')
            image.imsave('%d_fy%d.png' % (l[0], i), sy[:, :, i], cmap='gray')
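

# For reference, a minimal sketch of what a `sobel()` helper like the one used
# in test() could look like. This is an illustrative assumption, not the
# repository's implementation: it applies fixed 3x3 Sobel kernels per channel
# via a depthwise convolution, and assumes the channel dimension of `frames`
# is statically known.
def _sobel_sketch(frames):
    """Per-channel Sobel responses for a stack of frames.

    Args:
        frames: float32 tensor of shape [num_frames, height, width, channels].

    Returns:
        fx, fy: horizontal and vertical gradient maps, same shape as `frames`.
    """
    kx = tf.constant([[-1., 0., 1.],
                      [-2., 0., 2.],
                      [-1., 0., 1.]], dtype=tf.float32)
    ky = tf.transpose(kx)
    channels = frames.get_shape().as_list()[-1]
    # Depthwise filters: [kernel_h, kernel_w, in_channels, channel_multiplier].
    kx = tf.tile(tf.reshape(kx, [3, 3, 1, 1]), [1, 1, channels, 1])
    ky = tf.tile(tf.reshape(ky, [3, 3, 1, 1]), [1, 1, channels, 1])
    fx = tf.nn.depthwise_conv2d(frames, kx, [1, 1, 1, 1], padding='SAME')
    fy = tf.nn.depthwise_conv2d(frames, ky, [1, 1, 1, 1], padding='SAME')
    return fx, fy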


def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    #######################
    # Config model_deploy #
    #######################
    deploy_config = model_deploy.DeploymentConfig(
        num_clones=FLAGS.num_clones,
        clone_on_cpu=FLAGS.clone_on_cpu,
        replica_id=FLAGS.task,
        num_replicas=FLAGS.worker_replicas,
        num_ps_tasks=FLAGS.num_ps_tasks)

    # Create global_step
    with tf.device(deploy_config.variables_device()):
        global_step = tf.train.get_or_create_global_step()

    ######################
    # Select the dataset #
    ######################
    # dataset = dataset_factory.get_dataset(
    #     FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    dataset = ucf11.get_split(FLAGS.dataset_split_name, FLAGS.dataset_dir)
    ######################
    # Select the network #
    ######################
    network_fn = nets_factory.get_network_fn(FLAGS.model_name,
                                             num_classes=(dataset.num_classes -
                                                          FLAGS.labels_offset),
                                             weight_decay=FLAGS.weight_decay,
                                             is_training=True)

    #####################################
    # Select the preprocessing function #
    #####################################
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        preprocessing_name, is_training=True)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################
    train_image_size = FLAGS.train_image_size or network_fn.default_image_size

    input, label = ucf11.build_data(dataset)
    input = image_preprocessing_fn(input, train_image_size, train_image_size)

    inputs, labels = tf.train.batch(
        [input, label],
        batch_size=FLAGS.batch_size,
        num_threads=FLAGS.num_preprocessing_threads,
        capacity=5 * FLAGS.batch_size)
    labels = slim.one_hot_encoding(labels,
                                   dataset.num_classes - FLAGS.labels_offset)
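    # Prefetch batches so each model clone can dequeue without blocking on the
    # input pipeline.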
    batch_queue = slim.prefetch_queue.prefetch_queue([inputs, labels],
                                                     capacity=2 *
                                                     deploy_config.num_clones)

    ####################
    # Define the model #
    ####################
    def clone_fn(batch_queue):
        """Allows data parallelism by creating multiple clones of network_fn."""
        inputs, labels = batch_queue.dequeue()
        images = tf.unstack(inputs, axis=1)

        # with tf.variable_scope('Inception', reuse=True):
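        # Run the two consecutive frames of the pair through the backbone; the
        # intermediate end points feed the OFF sub-network below.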
        logits1, end_points1 = network_fn(images[0])
        logits2, end_points2 = network_fn(images[1])

        # Feature with size maximum k
        f_k_1 = [
            end_points1['Conv2d_1a_7x7'],
        ]
        f_k_2 = [
            end_points2['Conv2d_1a_7x7'],
        ]

        # Feature with size k/2
        f_k2_1 = [
            end_points1['MaxPool_2a_3x3'],
            end_points1['Conv2d_2b_1x1'],
            end_points1['Conv2d_2c_3x3'],
        ]
        f_k2_2 = [
            end_points2['MaxPool_2a_3x3'],
            end_points2['Conv2d_2b_1x1'],
            end_points2['Conv2d_2c_3x3'],
        ]

        # Feature with size k/4
        f_k4_1 = [
            end_points1['MaxPool_3a_3x3'],
            end_points1['Mixed_3b'],
            end_points1['Mixed_3c'],
        ]
        f_k4_2 = [
            end_points2['MaxPool_3a_3x3'],
            end_points2['Mixed_3b'],
            end_points2['Mixed_3c'],
        ]
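        # These feature groups (full, 1/2 and 1/4 resolution) from both frames
        # feed the OFF sub-network.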

        logits_off, end_point_off = off(
            f_k_1,
            f_k_2,
            f_k2_1,
            f_k2_2,
            f_k4_1,
            f_k4_2,
            num_classes=dataset.num_classes - FLAGS.labels_offset,
            resnet_model_name=FLAGS.resnet_model_name,
            resnet_weight_decay=FLAGS.resnet_weight_decay)

        logits_gen = tf.reduce_mean(tf.stack([logits1, logits2], axis=2),
                                    axis=2)
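        # logits_gen averages the two frames' RGB logits; it is fused below
        # with the OFF logits by element-wise multiplication.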

        logits = tf.multiply(logits_gen, logits_off, name='logits')

        #############################
        # Specify the loss function #
        #############################
        if FLAGS.use_off_sub_loss:
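            # Auxiliary, down-weighted losses on the intermediate OFF tiers.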
            logits_tier1 = end_point_off['logits_tier1']
            logits_tier2 = end_point_off['logits_tier2']

            tf.losses.softmax_cross_entropy(
                logits=logits_tier1,
                onehot_labels=labels,
                label_smoothing=FLAGS.label_smoothing,
                weights=0.2,
                scope='off_tier1_loss')

            tf.losses.softmax_cross_entropy(
                logits=logits_tier2,
                onehot_labels=labels,
                label_smoothing=FLAGS.label_smoothing,
                weights=0.4,
                scope='off_tier2_loss')

        tf.losses.softmax_cross_entropy(logits=logits,
                                        onehot_labels=labels,
                                        label_smoothing=FLAGS.label_smoothing,
                                        weights=1.0)
        return dict(end_points1, **end_point_off)

    clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
    first_clone_scope = deploy_config.clone_scope(0)
    # Gather update_ops from the first clone. These contain, for example,
    # the updates for the batch_norm variables created by network_fn.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)

    # Gather initial summaries.
    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # Add summaries for end_points.
    end_points = clones[0].outputs
    for end_point in end_points:
        x = end_points[end_point]
        summaries.add(tf.summary.histogram('activations/' + end_point, x))
        summaries.add(
            tf.summary.scalar('sparsity/' + end_point, tf.nn.zero_fraction(x)))

    # Add summaries for losses.
    for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
        summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))

    # Add summaries for variables.
    for variable in slim.get_model_variables():
        summaries.add(tf.summary.histogram(variable.op.name, variable))

    # # Accuracy
    # predictions = tf.argmax(end_points['Logits'], axis=1)
    # truth = tf.argmax(labels, axis=1)
    # truth = tf.squeeze(truth)
    # # accuracy, accuracy_update = tf.metrics.accuracy(truth, predictions)
    # #
    # # update_ops.append(accuracy_update)
    # # summaries.add(tf.summary.scalar('Accuracy', accuracy))
    #
    # names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    #     'Accuracy': slim.metrics.streaming_accuracy(predictions, truth),
    #     'Recall_5': slim.metrics.streaming_recall_at_k(
    #         end_points['Logits'], truth, 5),
    # })
    #
    # # Print the summaries to screen.
    # for name, value in names_to_values.items():
    #     summary_name = '%s' % name
    #     op = tf.summary.scalar(summary_name, value, collections=[])
    #     op = tf.Print(op, [value], summary_name)
    #     summaries.add(op)
    #
    # update_ops.append(names_to_updates)

    #################################
    # Configure the moving averages #
    #################################
    if FLAGS.moving_average_decay:
        moving_average_variables = slim.get_model_variables()
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay, global_step)
    else:
        moving_average_variables, variable_averages = None, None

    #########################################
    # Configure the optimization procedure. #
    #########################################
    with tf.device(deploy_config.optimizer_device()):
        learning_rate = _configure_learning_rate(dataset.num_samples,
                                                 global_step)
        optimizer = _configure_optimizer(learning_rate)
        summaries.add(tf.summary.scalar('learning_rate', learning_rate))

    if FLAGS.sync_replicas:
        # If sync_replicas is enabled, the averaging will be done in the chief
        # queue runner.
        optimizer = tf.train.SyncReplicasOptimizer(
            opt=optimizer,
            replicas_to_aggregate=FLAGS.replicas_to_aggregate,
            variable_averages=variable_averages,
            variables_to_average=moving_average_variables,
            replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
            total_num_replicas=FLAGS.worker_replicas)
    elif FLAGS.moving_average_decay:
        # Update ops executed locally by trainer.
        update_ops.append(variable_averages.apply(moving_average_variables))

    # Variables to train.
    variables_to_train = _get_variables_to_train()

    # Compute the total loss and the gradients for all clones.
    total_loss, clones_gradients = model_deploy.optimize_clones(
        clones, optimizer, var_list=variables_to_train)
    # Add total_loss to summary.
    summaries.add(tf.summary.scalar('total_loss', total_loss))

    # Create gradient updates.
    grad_updates = optimizer.apply_gradients(clones_gradients,
                                             global_step=global_step)
    update_ops.append(grad_updates)

    update_op = tf.group(*update_ops)
    train_tensor = control_flow_ops.with_dependencies([update_op],
                                                      total_loss,
                                                      name='train_op')

    # Add the summaries from the first clone. These contain the summaries
    # created by model_fn and either optimize_clones() or _gather_clone_loss().
    summaries |= set(
        tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))

    # Merge all summaries together.
    summary_op = tf.summary.merge(list(summaries), name='summary_op')

    ###########################
    # Kicks off the training. #
    ###########################
    slim.learning.train(
        train_tensor,
        logdir=FLAGS.train_dir,
        master=FLAGS.master,
        is_chief=(FLAGS.task == 0),
        init_fn=_get_init_fn(),
        summary_op=summary_op,
        number_of_steps=FLAGS.max_number_of_steps,
        log_every_n_steps=FLAGS.log_every_n_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        sync_optimizer=optimizer if FLAGS.sync_replicas else None,
        session_config=session_config)

Example 3

def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    # with tf.Graph().as_default():
    tf_global_step = slim.get_or_create_global_step()

    ######################
    # Select the dataset #
    ######################
    dataset = ucf11.get_split(FLAGS.dataset_split_name, FLAGS.dataset_dir)

    ####################
    # Select the model #
    ####################
    network_fn = nets_factory.get_network_fn(FLAGS.model_name,
                                             num_classes=(dataset.num_classes -
                                                          FLAGS.labels_offset),
                                             is_training=False)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################
    input, label = ucf11.build_data(dataset, is_training=False)
    label -= FLAGS.labels_offset

    #####################################
    # Select the preprocessing function #
    #####################################
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        preprocessing_name, is_training=False)

    eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size

    input = image_preprocessing_fn(input, eval_image_size, eval_image_size)

    inputs, labels = tf.train.batch(
        [input, label],
        batch_size=FLAGS.batch_size,
        num_threads=FLAGS.num_preprocessing_threads,
        capacity=5 * FLAGS.batch_size)

    ####################
    # Define the model #
    ####################
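    # Unstack the clip along the time axis into its individual frames.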
    images = tf.unstack(inputs, axis=1)

    def off_rgb(image1, image2, name):
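        """Fuses the RGB-stream and OFF-stream logits for one frame pair."""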
        logits1, endpoints1 = network_fn(image1)
        logits2, endpoints2 = network_fn(image2)

        # Backbone features grouped into the three spatial scales (k, k/2 and
        # k/4) consumed by the OFF sub-network.
        f_k_1, f_k2_1, f_k4_1 = get_basic_feature(endpoints1)
        f_k_2, f_k2_2, f_k4_2 = get_basic_feature(endpoints2)

        logits_off, end_point_off = off(
            f_k_1,
            f_k_2,
            f_k2_1,
            f_k2_2,
            f_k4_1,
            f_k4_2,
            num_classes=dataset.num_classes - FLAGS.labels_offset,
            resnet_model_name=FLAGS.resnet_model_name,
            resnet_weight_decay=0.0,
            is_training=False)

        logits_gen = tf.reduce_mean(tf.stack([logits1, logits2], axis=2),
                                    axis=2)

        logits = tf.multiply(logits_gen, logits_off, name='logits' + name)

        return logits

    logits_arr = []

    max_range = 1 if FLAGS.mode == 'fast' else 10
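    # In 'fast' mode only one frame pair is scored; otherwise the fused
    # predictions of 10 consecutive frame pairs are averaged.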

    for i in range(max_range):
        logits = off_rgb(images[i], images[i + 1], str(i))
        logits_arr.append(logits)

    logits = tf.reduce_mean(tf.stack(logits_arr, axis=2), axis=2)

    if FLAGS.moving_average_decay:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay, tf_global_step)
        variables_to_restore = variable_averages.variables_to_restore(
            slim.get_model_variables())
        variables_to_restore[tf_global_step.op.name] = tf_global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    predictions = tf.argmax(logits, 1)
    labels = tf.squeeze(labels)

    # Define the metrics:
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'Accuracy':
        slim.metrics.streaming_accuracy(predictions, labels),
        'Recall_5':
        slim.metrics.streaming_recall_at_k(logits, labels, 5),
        # 'Confusion': _streaming_confusion_matrix(labels, predictions, dataset.num_classes)
        # get_streaming_metrics(predictions, labels, dataset.num_classes),
    })

    # Print the summaries to screen.
    for name, value in names_to_values.items():
        # if name != 'Confusion':
        summary_name = 'eval/%s' % name
        op = tf.summary.scalar(summary_name, value, collections=[])
        op = tf.Print(op, [value], summary_name)
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
        # else:
        #     summary_name = 'eval/%s' % name
        #     c_image = tf.reshape(tf.cast(value, tf.float32), [1, 11, 11, 1])
        #     tf.summary.image('confusion_image', c_image)
        #     op = tf.summary.tensor_summary(summary_name, c_image, collections=[])
        #     op = tf.Print(op, [value], summary_name, summarize=121)
        #     tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

    # TODO(sguada) use num_epochs=1
    if FLAGS.max_num_batches:
        num_batches = FLAGS.max_num_batches
    else:
        # This ensures that we make a single pass over all of the data.
        num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
        checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)

    # update_ops = []
    # update_ops.extend(list(names_to_updates.values()))
    # update_ops.extend(confusion_op)
    # update_op = tf.group(*update_ops)

    slim.evaluation.evaluate_once(master=FLAGS.master,
                                  checkpoint_path=checkpoint_path,
                                  logdir=FLAGS.eval_dir,
                                  num_evals=num_batches,
                                  eval_op=list(names_to_updates.values()),
                                  variables_to_restore=variables_to_restore)

Example 4

def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    # with tf.Graph().as_default():
    tf_global_step = slim.get_or_create_global_step()

    ######################
    # Select the dataset #
    ######################
    dataset = ucf11.get_split(FLAGS.dataset_split_name, FLAGS.dataset_dir)

    ####################
    # Select the model #
    ####################
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=(dataset.num_classes - FLAGS.labels_offset),
        is_training=False)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################
    input, label = ucf11.build_data(dataset)
    label -= FLAGS.labels_offset

    #####################################
    # Select the preprocessing function #
    #####################################
    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        preprocessing_name,
        is_training=False)

    eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size

    input = image_preprocessing_fn(input, eval_image_size, eval_image_size)

    inputs, labels = tf.train.batch(
        [input, label],
        batch_size=FLAGS.batch_size,
        num_threads=FLAGS.num_preprocessing_threads,
        capacity=5 * FLAGS.batch_size)

    ####################
    # Define the model #
    ####################
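    # Single-frame RGB baseline: classify the clip from its first frame only.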
    images = tf.unstack(inputs, axis=1)
    logits, end_points = network_fn(images[0])

    if FLAGS.moving_average_decay:
        variable_averages = tf.train.ExponentialMovingAverage(
            FLAGS.moving_average_decay, tf_global_step)
        variables_to_restore = variable_averages.variables_to_restore(
            slim.get_model_variables())
        variables_to_restore[tf_global_step.op.name] = tf_global_step
    else:
        variables_to_restore = slim.get_variables_to_restore()

    predictions = tf.nn.softmax(logits)
    predictions = tf.argmax(predictions, axis=1)
    labels = tf.squeeze(labels)

    # Define the metrics:
    names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
        'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
        'Recall_5': slim.metrics.streaming_recall_at_k(
            logits, labels, 5),
    })

    # Print the summaries to screen.
    for name, value in names_to_values.items():
        summary_name = 'eval/%s' % name
        op = tf.summary.scalar(summary_name, value, collections=[])
        op = tf.Print(op, [value], summary_name)
        tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

    # TODO(sguada) use num_epochs=1
    if FLAGS.max_num_batches:
        num_batches = FLAGS.max_num_batches
    else:
        # This ensures that we make a single pass over all of the data.
        num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))

    if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
    else:
        checkpoint_path = FLAGS.checkpoint_path

    tf.logging.info('Evaluating %s' % checkpoint_path)

    slim.evaluation.evaluate_once(
        master=FLAGS.master,
        checkpoint_path=checkpoint_path,
        logdir=FLAGS.eval_dir,
        num_evals=num_batches,
        eval_op=list(names_to_updates.values()),
        variables_to_restore=variables_to_restore)