Example #1
def preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty):
    X, Y = zip(*dataset)

    # Map each string to a fixed-length sequence of vocabulary indices
    X = np.array([string_to_int(i, Tx, human_vocab) for i in X])
    Y = [string_to_int(t, Ty, machine_vocab) for t in Y]

    # One-hot encode every index sequence
    Xoh = np.array(list(map(lambda x: to_categorical(x, num_classes=len(human_vocab)), X)))
    Yoh = np.array(list(map(lambda x: to_categorical(x, num_classes=len(machine_vocab)), Y)))

    return X, np.array(Y), Xoh, Yoh
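For reference, to_categorical appends a one-hot axis of length num_classes to whatever integer array it receives, which is exactly what produces Xoh and Yoh above. A minimal, self-contained sketch with made-up sizes (using TensorFlow's bundled Keras):

import numpy as np
from tensorflow.keras.utils import to_categorical

X = np.array([[3, 1, 0], [2, 2, 1]])    # two sequences of 3 token ids
Xoh = to_categorical(X, num_classes=4)  # one-hot, shape (2, 3, 4)
print(Xoh.shape)                        # (2, 3, 4)
print(Xoh[0, 0])                        # [0. 0. 0. 1.]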
Example #2
def to_test_train(avg_fname, all_frames, all_counts, train_ratio=0.8):
    print(all_frames.shape)
    assert len(all_frames) == len(
        all_counts), 'Frame length should equal counts length'

    nb_classes = all_counts.max() + 1
    X = all_frames

    mean = np.mean(X, axis=0)
    np.save(avg_fname, mean)

    # Shuffle the rows with a random permutation
    N = len(X)
    p = np.random.permutation(len(all_counts))
    p = p[0:N]  # no-op here: the assert above guarantees len(all_counts) == N
    Y = np_utils.to_categorical(all_counts, nb_classes)
    assert len(X) == len(
        Y), 'Len of X (%d) not equal to len of Y (%d)' % (len(X), len(Y))
    X, Y = X[p], Y[p]
    X -= mean

    # Split into train and test sets
    def split(arr):
        ind = int(len(arr) * train_ratio)
        return arr[:ind], arr[ind:]

    X_train, X_test = split(X)
    Y_train, Y_test = split(Y)

    return X_train, X_test, Y_train, Y_test
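A hypothetical call with synthetic data, just to make the returned shapes concrete (sizes are made up; np_utils from Keras is assumed to be imported where to_test_train is defined):

import numpy as np

all_frames = np.random.rand(100, 50, 50, 3).astype('float32')
all_counts = np.random.randint(0, 3, size=100)  # labels in {0, 1, 2}
X_train, X_test, Y_train, Y_test = to_test_train(
    'avg_frame.npy', all_frames, all_counts, train_ratio=0.8)
print(X_train.shape, Y_train.shape)  # e.g. (80, 50, 50, 3) (80, 3)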
Example #3
def get_data_for_test(csv_fname,
                      video_fname,
                      avg_fname,
                      num_frames=None,
                      start_frame=0,
                      OBJECTS=['car'],
                      resol=(50, 50),
                      center=True,
                      dtype='float32'):
    # Load the precomputed average frame (used to center the data)
    avg_frame = np.load(avg_fname)
    all_counts = get_binary(csv_fname,
                            limit=num_frames,
                            OBJECTS=OBJECTS,
                            start=start_frame)
    all_frames = videoUtils.get_all_frames(len(all_counts),
                                           video_fname,
                                           scale=resol,
                                           start=start_frame)

    nb_classes = all_counts.max() + 1
    X = all_frames
    N = len(X)
    p = np.random.permutation(len(all_counts))
    p = p[0:N]
    Y = np_utils.to_categorical(all_counts, nb_classes)
    assert len(X) == len(
        Y), 'Len of X (%d) not equal to len of Y (%d)' % (len(X), len(Y))
    X, Y = X[p], Y[p]
    # Center the frames by subtracting the average frame
    if center:
        X -= avg_frame

    return (X, Y), nb_classes
Example #4
def keras_fmin_fnct(space):

    '''
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
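The name keras_fmin_fnct and this docstring match what hyperas generates from a user-written data() function; the space argument goes unused in the data-loading part. Assuming the usual `from keras.datasets import mnist` and `from keras.utils import np_utils` imports, a call returns the flattened, normalized MNIST split:

X_train, Y_train, X_test, Y_test = keras_fmin_fnct({})
print(X_train.shape, Y_train.shape)  # (60000, 784) (60000, 10)
print(X_test.shape, Y_test.shape)    # (10000, 784) (10000, 10)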
Example #5
    def fit(self,
            data_dir_path,
            model_dir_path,
            vgg16_include_top=True,
            data_set_name='UCF-101',
            test_size=0.3,
            random_state=42):
        self.vgg16_include_top = vgg16_include_top

        config_file_path = self.get_config_file_path(model_dir_path,
                                                     vgg16_include_top)
        weight_file_path = self.get_weight_file_path(model_dir_path,
                                                     vgg16_include_top)
        architecture_file_path = self.get_architecture_file_path(
            model_dir_path, vgg16_include_top)

        vgg16_model = VGG16(include_top=self.vgg16_include_top,
                            weights='imagenet')
        vgg16_model.compile(optimizer=SGD(),
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])
        self.vgg16_model = vgg16_model

        feature_dir_name = data_set_name + '-VGG16-Features'
        if not vgg16_include_top:
            feature_dir_name = data_set_name + '-VGG16-HiDimFeatures'
        max_frames = 0
        self.labels = dict()
        x_samples, y_samples = scan_and_extract_vgg16_features(
            data_dir_path,
            output_dir_path=feature_dir_name,
            model=self.vgg16_model,
            data_set_name=data_set_name)
        self.num_input_tokens = x_samples[0].shape[1]
        frames_list = []
        for x in x_samples:
            frames = x.shape[0]
            frames_list.append(frames)
            max_frames = max(frames, max_frames)
        self.expected_frames = int(np.mean(frames_list))
        print('max frames: ', max_frames)
        print('expected frames: ', self.expected_frames)
        for i in range(len(x_samples)):
            x = x_samples[i]
            frames = x.shape[0]
            print(x.shape)
            if frames > self.expected_frames:
                x = x[0:self.expected_frames, :]
                x_samples[i] = x
            elif frames < self.expected_frames:
                temp = np.zeros(shape=(self.expected_frames, x.shape[1]))
                temp[0:frames, :] = x
                x_samples[i] = temp
        for y in y_samples:
            if y not in self.labels:
                self.labels[y] = len(self.labels)
        print(self.labels)
        for i in range(len(y_samples)):
            y_samples[i] = self.labels[y_samples[i]]

        self.nb_classes = len(self.labels)

        y_samples = np_utils.to_categorical(y_samples, self.nb_classes)

        config = dict()
        config['labels'] = self.labels
        config['nb_classes'] = self.nb_classes
        config['num_input_tokens'] = self.num_input_tokens
        config['expected_frames'] = self.expected_frames
        config['vgg16_include_top'] = self.vgg16_include_top
        self.config = config

        np.save(config_file_path, config)

        model = self.create_model()
        with open(architecture_file_path, 'w') as f:
            f.write(model.to_json())

        Xtrain, Xtest, Ytrain, Ytest = train_test_split(
            x_samples,
            y_samples,
            test_size=test_size,
            random_state=random_state)

        train_gen = generate_batch(Xtrain, Ytrain)
        test_gen = generate_batch(Xtest, Ytest)

        train_num_batches = len(Xtrain) // BATCH_SIZE
        test_num_batches = len(Xtest) // BATCH_SIZE

        checkpoint = ModelCheckpoint(filepath=weight_file_path,
                                     save_best_only=True)
        history = model.fit_generator(generator=train_gen,
                                      steps_per_epoch=train_num_batches,
                                      epochs=NUM_EPOCHS,
                                      verbose=1,
                                      validation_data=test_gen,
                                      validation_steps=test_num_batches,
                                      callbacks=[checkpoint])
        model.save_weights(weight_file_path)

        return history
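One note on API drift: fit_generator was deprecated in TensorFlow 2.x, where model.fit accepts generators directly. The equivalent call would be:

history = model.fit(train_gen,
                    steps_per_epoch=train_num_batches,
                    epochs=NUM_EPOCHS,
                    verbose=1,
                    validation_data=test_gen,
                    validation_steps=test_num_batches,
                    callbacks=[checkpoint])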
Example #6
batch_size = 16
epochs = 10

# the data, shuffled and split between train and test sets
# (load_data is presumably mnist.load_data, given the 60000/10000 shapes below)
(x_train, y_train), (x_test, y_test) = load_data()

x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

clf = Classifier(784, 10)  # 784 is the number of pixels in an image

clf.fit(x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=(x_test, y_test))

score = clf.evaluate(x_test, y_test)

print(score)
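Classifier itself is not shown in this snippet; a minimal stand-in with the same (input_dim, num_classes) constructor and fit/evaluate surface could look like the sketch below. This is an assumption for illustration, not the original class:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

class Classifier:
    """Hypothetical stand-in: a dense softmax classifier."""
    def __init__(self, input_dim, num_classes):
        self.model = Sequential([
            Dense(128, activation='relu', input_shape=(input_dim,)),
            Dense(num_classes, activation='softmax'),
        ])
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

    def fit(self, *args, **kwargs):
        return self.model.fit(*args, **kwargs)

    def evaluate(self, *args, **kwargs):
        return self.model.evaluate(*args, **kwargs)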
Example #7
def plot_attention_map(model, input_vocabulary, inv_output_vocabulary, text, n_s=128, num=6, Tx=30, Ty=10):
    """
    Plot the attention map.

    """
    attention_map = np.zeros((10, 30))
    Ty, Tx = attention_map.shape

    s0 = np.zeros((1, n_s))
    c0 = np.zeros((1, n_s))
    layer = model.layers[num]

    encoded = np.array(string_to_int(text, Tx, input_vocabulary)).reshape((1, 30))
    encoded = np.array(list(map(lambda x: to_categorical(x, num_classes=len(input_vocabulary)), encoded)))

    f = K.function(model.inputs, [layer.get_output_at(t) for t in range(Ty)])
    r = f([encoded, s0, c0])

    for t in range(Ty):
        for t_prime in range(Tx):
            attention_map[t][t_prime] = r[t][0, t_prime, 0]

    # Normalize attention map
    #     row_max = attention_map.max(axis=1)
    #     attention_map = attention_map / row_max[:, None]

    prediction = model.predict([encoded, s0, c0])

    predicted_text = []
    for i in range(len(prediction)):
        predicted_text.append(int(np.argmax(prediction[i], axis=1)))

    predicted_text = int_to_string(predicted_text, inv_output_vocabulary)
    text_ = list(text)

    # get the lengths of the string
    input_length = len(text)
    output_length = Ty

    # Plot the attention_map
    plt.clf()
    f = plt.figure(figsize=(8, 8.5))
    ax = f.add_subplot(1, 1, 1)

    # add image
    i = ax.imshow(attention_map, interpolation='nearest', cmap='Blues')

    # add colorbar
    cbaxes = f.add_axes([0.2, 0, 0.6, 0.03])
    cbar = f.colorbar(i, cax=cbaxes, orientation='horizontal')
    cbar.ax.set_xlabel('Alpha value (Probability output of the "softmax")', labelpad=2)

    # add labels
    ax.set_yticks(range(output_length))
    ax.set_yticklabels(predicted_text[:output_length])

    ax.set_xticks(range(input_length))
    ax.set_xticklabels(text_[:input_length], rotation=45)

    ax.set_xlabel('Input Sequence')
    ax.set_ylabel('Output Sequence')

    # add grid and legend
    ax.grid()

    # f.show()

    return attention_map
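A hypothetical call, assuming a trained model and the human_vocab / inv_machine_vocab dictionaries from the same date-translation exercise are in scope (the input string is made up):

attention_map = plot_attention_map(model, human_vocab, inv_machine_vocab,
                                   "Tuesday 09 Oct 1993")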
Example #8
nb_classes = 10
# Number of training epochs
nb_epoch = 25
# Image dimensions
img_rows, img_cols = 32, 32
# Number of channels in the images: RGB
img_channels = 3

# Normalize the data
# ((X_train, y_train), (X_test, y_test) are assumed to be loaded earlier,
# e.g. via cifar10.load_data())
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

# Convert the labels to categorical (one-hot) form
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Build a sequential model
model = Sequential()
# First convolutional layer
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=(32, 32, 3), activation='relu'))
# Second convolutional layer
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
# First pooling layer
model.add(MaxPooling2D(pool_size=(2, 2)))
# Dropout regularization layer
model.add(Dropout(0.25))

# Third convolutional layer
Example #9
def data():
    '''
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test

