Example #1
import tensorflow as tf  # TF 1.x API (tf.placeholder / tf.Session)

# `models`, `dataset`, and `display_results` are helpers from the
# surrounding module (not shown in this excerpt).
def classify(model_data_path, image_paths):
    '''Classify the given images using GoogleNet.'''

    # Get the data specifications for the GoogleNet model
    spec = models.get_data_spec(model_class=models.GoogleNet)

    # Create a placeholder for the input images
    input_node = tf.placeholder(tf.float32,
                                shape=(None, spec.crop_size, spec.crop_size,
                                       spec.channels))

    # Construct the network
    net = models.GoogleNet({'data': input_node})

    with tf.Session() as sesh:
        # Load the converted parameters
        print('Loading the model')
        net.load(model_data_path, sesh)
        # Load the input images
        print('Loading the images')
        input_images = dataset.load_images(image_paths, spec).eval()
        # Perform a forward pass through the network to get the class probabilities
        print('Classifying')
        probs = sesh.run(net.get_output(),
                         feed_dict={input_node: input_images})
        display_results(image_paths, probs)
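A minimal invocation sketch (hypothetical wiring; the surrounding repo presumably provides its own CLI). It assumes `model_data_path` points to the converted parameter file that `net.load` expects:

if __name__ == '__main__':
    import sys
    if len(sys.argv) < 3:
        print('Usage: classify.py <model_data_path> <image> [<image> ...]')
        sys.exit(1)
    classify(sys.argv[1], sys.argv[2:])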
Example #2
import numpy as np
import tensorflow as tf  # TF 1.x API

# `models` and `dataset` are helpers from the surrounding module.
# Module-level store filled in by classify(): image path -> feature vector.
video_feature = {}

def classify(model_data_path, image_paths):
    '''Classify the given images using GoogleNet.'''

    # Get the data specifications for the GoogleNet model
    spec = models.get_data_spec(model_class=models.GoogleNet)

    # Create a placeholder for the input images
    input_node = tf.placeholder(tf.float32,
                                shape=(None, spec.crop_size, spec.crop_size,
                                       spec.channels))

    # Construct the network
    net = models.GoogleNet({'data': input_node})

    # Create an image producer (loads and processes images in parallel)
    image_producer = dataset.ImageProducer(image_paths=image_paths,
                                           data_spec=spec)

    with tf.Session() as sesh:
        # Start the image processing workers
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sesh, coordinator=coordinator)

        # Load the converted parameters
        print('Loading the model')
        net.load(model_data_path, sesh)

        # Load the input images
        print('Loading the images')
        indices, input_images = image_producer.get(sesh)

        # Instead of computing class probabilities (kept below, commented
        # out, for reference), run a forward pass only up to the final
        # pooling layer to extract one feature vector per image.
        print('Extracting features')
        # probs = sesh.run(net.get_output(), feed_dict={input_node: input_images})
        # display_results([image_paths[i] for i in indices], probs)

        feature_tensor = sesh.graph.get_tensor_by_name('pool5_7x7_s1:0')
        features = sesh.run(feature_tensor,
                            feed_dict={input_node: input_images})
        # Drop the singleton spatial dimensions: (N, 1, 1, C) -> (N, C)
        features = np.squeeze(features)

        # Map each image path to its feature vector (`indices` gives the
        # producer's ordering of image_paths).
        for i, j in enumerate(indices):
            video_feature[image_paths[j]] = features[i]

        # print(features.shape)

        # Stop the worker threads
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)
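Since the snippet above accumulates results in the module-level `video_feature` dict, here is a hedged follow-up sketch for persisting them to disk (the helper name and default path are illustrative):

import numpy as np

def save_features(out_path='features.npy'):
    # Persist the {image_path: feature_vector} mapping built by classify().
    # Recover it later with: np.load(out_path, allow_pickle=True).item()
    np.save(out_path, video_feature)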
Example #3
import time

import tensorflow as tf  # TF 1.x API

# `models`, `dataset`, and `display_results` are helpers from the
# surrounding module (not shown in this excerpt).
def classify(model_data_path, image_paths):
    '''Classify the given images using GoogleNet.'''

    # Get the data specifications for the GoogleNet model
    spec = models.get_data_spec(model_class=models.GoogleNet)

    # Create a placeholder for the input images
    input_node = tf.placeholder(tf.float32,
                                shape=(None, spec.crop_size, spec.crop_size,
                                       spec.channels))

    # Construct the network
    net = models.GoogleNet({'data': input_node})

    # Create an image producer (loads and processes images in parallel)
    image_producer = dataset.ImageProducer(image_paths=image_paths,
                                           data_spec=spec)

    with tf.Session() as sesh:
        # Start the image processing workers
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sesh, coordinator=coordinator)

        # Load the converted parameters
        print('Loading the model')
        net.load(model_data_path, sesh)

        # Load the input images
        print('Loading the images')
        indices, input_images = image_producer.get(sesh)

        # Perform a timed forward pass through the network
        print('Timing in seconds:')
        start_time = time.time()
        probs = sesh.run(net.get_output(),
                         feed_dict={input_node: input_images})
        duration = time.time() - start_time
        print(duration)
        display_results([image_paths[i] for i in indices], probs)

        # Stop the worker threads
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)
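A single timed run as above also includes one-off costs (graph finalization, first-run memory allocation). A minimal sketch of a hypothetical helper that warms up once and averages over several runs for a steadier measurement:

import time

def time_forward_pass(sess, output_tensor, feed_dict, n_runs=10):
    # Warm-up run absorbs one-time graph/setup overhead.
    sess.run(output_tensor, feed_dict=feed_dict)
    start = time.time()
    for _ in range(n_runs):
        sess.run(output_tensor, feed_dict=feed_dict)
    return (time.time() - start) / n_runs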