# Assumed imports (module names such as `processor` are guesses; adjust to your project):
import glob
import operator
import random

import numpy as np
from keras.models import load_model

from UCFdata import DataSet          # the DataSet helper used throughout these examples
from processor import process_image  # assumed home of process_image


def main(nb_images=5):
    """Spot-check `nb_images` images."""
    data = DataSet()
    model = load_model('data/checkpoints/inception.057-1.16.hdf5')  # replace with your own checkpoint filename

    # Get all our test images.
    images = glob.glob('./data/test/**/*.jpg')

    for _ in range(nb_images):
        print('-'*80)
        # Get a random row.
        sample = random.randint(0, len(images) - 1)
        image = images[sample]

        # Turn the image into an array.
        print(image)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)
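        # Add a batch dimension: (299, 299, 3) -> (1, 299, 299, 3), since model.predict expects a batch.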

        # Predict.
        predictions = model.predict(image_arr)

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(), key=operator.itemgetter(1), reverse=True)
        
        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            if i > 4:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))


# Assumes the same imports as the snippet above, plus `import os`.
def main(nb_images=5):
    """Spot-check `nb_images` images."""
    data = DataSet()

    # Load the trained model saved by CNN_train_UCF101.py.
    checkpoint = sorted(
        os.listdir('data/checkpoints/'))[-1]  # get the last checkpoint
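    # Note: sorted() is lexicographic, so this picks the newest checkpoint only if
    # filenames embed zero-padded epoch numbers (e.g. the '.{epoch:03d}' naming pattern).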
    filename = os.path.join('data/checkpoints/', checkpoint)
    model = load_model(filename)

    # Get all our test images.
    images = glob.glob('./data/test/**/*.jpg')

    for _ in range(nb_images):
        print('-' * 80)
        # Get a random row.
        sample = random.randint(0, len(images) - 1)
        image = images[sample]

        # Turn the image into an array.
        print(image)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)
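        # predictions has shape (1, num_classes); row 0 holds the per-class scores.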

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1),
                            reverse=True)

        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            if i > 4:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
Example #3
from UCFdata import DataSet
import numpy as np
import tensorflow as tf
from keras.utils import np_utils
import cv2 as cv

data = DataSet()

all_labels = range(101)  # one label index per UCF101 action class


def read_video_data(cap):
    video_data = []
    for _ in range(100):
        ret, frame = cap.read()
        if ret:
            frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            video_data.append(frame)
        else:
            video_data.append(np.zeros(shape=(240, 320)))
    return video_data
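
# Minimal usage sketch (hypothetical clip path; adjust to your own data layout):
#   cap = cv.VideoCapture('data/v_ApplyEyeMakeup_g01_c01.avi')
#   frames = read_video_data(cap)  # 100 grayscale frames, zero-padded if the clip is shorter
#   cap.release()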


def load_data_batch(data, all_labels, begin, batch_size):
    train_data_load = list()
    train_label_load = list()
    for i in range(begin, begin + batch_size):
        filename = 'data/' + data.data[i][0] + '/' + data.data[i][
Example #4
extract all 101 classes. For instance, set class_limit = 8 to just
extract features for the first 8 (alphabetical) classes in the dataset.
Then set the same number when training models.
"""
import numpy as np
import os.path
from UCFdata import DataSet
from extractor import Extractor
from tqdm import tqdm

# Set defaults.
seq_length = 40  # Adjust as needed; use the same value when training the models.
class_limit = None  # Number of classes to extract. Can be 1-101 or None for all.

# Get the dataset.
data = DataSet(seq_length=seq_length, class_limit=class_limit)

# Get the feature-extraction model (Extractor is imported from extractor.py above).
model = Extractor()

# Loop through data.
pbar = tqdm(total=len(data.data))
for video in data.data:

    # Get the path to the sequence for this video.
    path = os.path.join('data', 'sequences', video[2] + '-' + str(seq_length) + \
        '-features')  # numpy will auto-append .npy

    # Check if we already have it.
    if os.path.isfile(path + '.npy'):
        pbar.update(1)
Example #5
def train(data_type,
          seq_length,
          model,
          saved_model=None,
          class_limit=None,
          image_shape=None,
          load_to_memory=False,
          batch_size=32,
          nb_epoch=100):
    # Save the model.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join('data', 'checkpoints', model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)
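    # With e.g. model='lstm' and data_type='features', this writes files named like
    # 'lstm-features.012-1.250.hdf5' (zero-padded epoch, then validation loss).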

    # TensorBoard
    tb = TensorBoard(log_dir=os.path.join('data', 'logs', model))

    # Stop when we stop learning.
    early_stopper = EarlyStopping(patience=50)
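    # (patience=50: stop once the monitored validation metric fails to improve for 50 epochs.)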

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', model + '-' + 'training-' + \
        str(timestamp) + '.log'))

    # Get the data and process it.
    if image_shape is None:  # LSTM and MLP
        data = DataSet(seq_length=seq_length, class_limit=class_limit)
    else:  # other models
        data = DataSet(seq_length=seq_length,
                       class_limit=class_limit,
                       image_shape=image_shape)

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size
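    # For example, with a hypothetical ~10,000 clips in data.data and batch_size=32,
    # this gives (10000 * 0.7) // 32 = 218 steps per epoch.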

    if load_to_memory:
        # Get data.
        print("Get data  from sequences ")
        X, y = data.get_all_sequences_in_memory('train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Get generators.
        print("Get data from generator")
        generator = data.frame_generator(batch_size, 'train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)
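    # ResearchModels is assumed to build the architecture named by `model` (e.g. the
    # LSTM or MLP mentioned above) with len(data.classes) outputs, resuming from
    # `saved_model` when one is provided.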

    # Fit!
    if load_to_memory:
        # Use standard fit.
        print("Use standard fit")
        rm.model.fit(X,
                     y,
                     batch_size=batch_size,
                     validation_data=(X_test, y_test),
                     verbose=1,
                     callbacks=[tb, early_stopper, csv_logger],
                     epochs=nb_epoch)
    else:
        # Use fit generator.
        print("Use fit generator")
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=4)
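

# Minimal usage sketch (hypothetical settings; match them to your own setup):
#   train('features', seq_length=40, model='lstm', saved_model=None,
#         class_limit=None, image_shape=None, load_to_memory=False,
#         batch_size=32, nb_epoch=100)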