def load_dataset_tensors():
    """ 
    Retonar os tensors para serem treinados com o Keras
    """
    print("Carregando os dados...")
    train_files, targets_train = get_dataset("train")
    valid_files, targets_valid = get_dataset("val")

    # pre-process the data for Keras
    print("Convertendo para tensor...")
    train_tensors = paths_to_tensor(train_files)
    valid_tensors = paths_to_tensor(valid_files)

    return train_tensors, targets_train, valid_tensors, targets_valid
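
# Note: `paths_to_tensor` comes from the local `util` module and is not shown
# in these examples. A minimal sketch of what it presumably does; the 224x224
# target size and float dtype are assumptions:
from keras.preprocessing.image import load_img, img_to_array
import numpy as np

def paths_to_tensor(img_paths, target_size=(224, 224)):
    # Load each image, resize it, and convert it to an (H, W, 3) array
    arrays = [img_to_array(load_img(path, target_size=target_size))
              for path in img_paths]
    # Stack everything into a single (N, H, W, 3) tensor for Keras
    return np.stack(arrays).astype('float32')
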
def load_train_tensors():
    """ 
    Retonar os tensors para serem treinados com o Keras (Só treinamento)
    """
    print("Carregando os dados...")
    train_files, targets_train = get_dataset("train", True)

    # pre-process the data for Keras
    print("Convertendo para tensor...")
    train_tensors = paths_to_tensor(train_files)

    return train_tensors, targets_train

def load_dataset_tensors():
    """
    Load the images and convert them to tensors for training the deep network.
    """
    files1 = util.get_all_files_names("output/Dia/")
    files2 = util.get_all_files_names("output/Noite/")
    files3 = util.get_all_files_names("output/Trasnsicao/")

    files_names = files1 + files2 + files3
    labels = [0] * len(files1) + [1] * len(files2) + [2] * len(files3)

    files_train, files_val, targets_train, targets_valid = train_test_split(
        files_names, labels, test_size=.2)

    train_tensors = util.paths_to_tensor(files_train)
    train_tensors = preprocess_input(train_tensors)

    valid_tensors = util.paths_to_tensor(files_val)
    valid_tensors = preprocess_input(valid_tensors)

    # One-hot encode the three classes (Dia=0, Noite=1, Transicao=2)
    targets_train = np_utils.to_categorical(util.np.array(targets_train), 3)
    targets_valid = np_utils.to_categorical(util.np.array(targets_valid), 3)

    return train_tensors, targets_train, valid_tensors, targets_valid
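
# A usage sketch for load_dataset_tensors above, feeding the returned tensors
# into a small Keras model. The architecture and hyperparameters here are
# illustrative assumptions, not part of the original examples.
from keras.models import Sequential
from keras.layers import Conv2D, GlobalAveragePooling2D, Dense

train_tensors, targets_train, valid_tensors, targets_valid = \
    load_dataset_tensors()

model = Sequential([
    Conv2D(32, (3, 3), activation='relu',
           input_shape=train_tensors.shape[1:]),
    GlobalAveragePooling2D(),
    Dense(3, activation='softmax'),  # 3 classes: Dia / Noite / Transicao
])
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_tensors, targets_train,
          validation_data=(valid_tensors, targets_valid),
          epochs=5, batch_size=32)
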
def load_test_tensors(val=False):
    """ 
    Retonar os tensors de test
    """

    print("Carregando os dados...")
    if val:
        test_files, targets_test = get_dataset("val", True)
    else:
        test_files, targets_test = get_dataset("test", True)

    # pre-process the data for Keras
    print("Convertendo para tensor...")
    test_tensors = paths_to_tensor(test_files)

    return test_tensors, targets_test
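
# `get_dataset` is defined elsewhere in this project. A minimal sketch of its
# presumed behavior, returning (file paths, targets) for a split; the
# directory layout and the name/meaning of the second flag are assumptions:
import os
import numpy as np

def get_dataset(split, flat=False):
    # Assumed layout: data/<split>/<class_name>/<images>; `flat` is kept
    # only for signature compatibility with the calls above
    base = os.path.join("data", split)
    files, targets = [], []
    for label, class_dir in enumerate(sorted(os.listdir(base))):
        class_path = os.path.join(base, class_dir)
        for name in sorted(os.listdir(class_path)):
            files.append(os.path.join(class_path, name))
            targets.append(label)
    return files, np.array(targets)
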
def load_dataset_tensors(nexet=False):
    """
    Carrega as imagens e converte para tenors para treinar a deep
    """
    if nexet:
        files_names = pdn.get_all_files_nexet()[0:10000]
    else:
        files1 = util.get_all_files_names("output/Dia/")
        files2 = util.get_all_files_names("output/Noite/")
        files3 = util.get_all_files_names("output/Trasnsicao/")

        files_names = files1 + files2 + files3

    x = util.paths_to_tensor(files_names)
    x = pi(x)  # `pi` presumably aliases Keras' preprocess_input on import

    return x
def get_CNN_features(files_names=None):
    """
    Extrai features com CNNs pre-treinadas e os retorna
    """
    model = ResNet50(include_top=False, weights='imagenet')
    # model = InceptionV3(include_top=False, weights='imagenet')
    # model = Xception(include_top=False, weights='imagenet')

    if files_names is None:
        files1 = util.get_all_files_names("output/Dia/")
        files2 = util.get_all_files_names("output/Noite/")
        files3 = util.get_all_files_names("output/Trasnsicao/")

        files_names = files1 + files2 + files3

    tensors = util.paths_to_tensor(files_names)
    tensors = preprocess_input(tensors)
    pred = model.predict(tensors).reshape(len(files_names), 2048)

    return pred
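
# A usage sketch for get_CNN_features: training a classical classifier on the
# extracted features. scikit-learn's LogisticRegression is an illustrative
# choice, not something the original examples use; the labels are rebuilt the
# same way as in load_dataset_tensors above.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

files1 = util.get_all_files_names("output/Dia/")
files2 = util.get_all_files_names("output/Noite/")
files3 = util.get_all_files_names("output/Trasnsicao/")
files_names = files1 + files2 + files3
labels = [0] * len(files1) + [1] * len(files2) + [2] * len(files3)

features = get_CNN_features(files_names)  # (N, 2048) ResNet50 features
x_train, x_val, y_train, y_val = train_test_split(features, labels,
                                                  test_size=.2)
clf = LogisticRegression(max_iter=1000).fit(x_train, y_train)
print("validation accuracy:", clf.score(x_val, y_val))
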

# Fragment of a separate example: pre-generating augmented batches with
# ImageDataGenerator. `train_files`, `valid_files`, the target lists,
# `n_examples`, `block_size` and `files_names` are defined elsewhere in the
# original project.
for i_file in [0]:
    files = [train_files, valid_files][i_file]
    target = [train_targets, valid_targets][i_file]

    print("Initializing data augmentation...")
    datagen_train = ImageDataGenerator(
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        # rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')
    tensors = paths_to_tensor(files)
    datagen_train.fit(tensors)

    j = 0
    for X_batch, y_batch in datagen_train.flow(
            tensors, target, batch_size=n_examples[i_file]):
        print("Generating batch %d" % j)
        j += 1  # flow() loops forever, so count batches and break explicitly

        count = int(np.ceil(len(X_batch) / block_size))
        print(len(X_batch), count)
        # for i in range(count):
        #     print("{0} block: {1}/{2}".format(files_names[i_file][0], i, count))
        #     begin = i * block_size
        #     end = begin + block_size
        #     if end > len(X_batch):
        #         end = len(X_batch)
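
# A sketch of how the open-ended flow() loop above could be bounded: collect
# a fixed number of augmented batches, then stop. `n_batches` is an
# illustrative parameter, not from the original code.
def take_batches(datagen, tensors, target, batch_size, n_batches):
    batches = []
    for i, (X_batch, y_batch) in enumerate(
            datagen.flow(tensors, target, batch_size=batch_size)):
        if i >= n_batches:
            break  # flow() cycles forever; stop after n_batches
        batches.append((X_batch, y_batch))
    return batches
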
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.applications.resnet50 import ResNet50, preprocess_input
from PIL import ImageFile
from joblib import Parallel, delayed
from pandas import DataFrame as df  # used as df(pred) below
# get_dataset is assumed to live in util alongside paths_to_tensor
from util import paths_to_tensor, get_dataset

import numpy as np
import multiprocessing
if __name__ == "__main__":
    print("Carregando os dados...")
    valid_files, valid_targets = get_dataset("val")

    # pre-process the data for Keras
    print("Convertendo para tensor...")
    valid_tensors = paths_to_tensor(valid_files).astype('float32') / 255.0

    model = Sequential()
    model.add(
        ResNet50(weights='imagenet',
                 include_top=False,
                 input_shape=(224, 224, 3)))
    model.add(Flatten(name='flatten'))
    model.add(Dense(83, activation='softmax'))

    model.compile(optimizer='rmsprop',
                  # softmax over 83 classes calls for categorical, not
                  # binary, cross-entropy
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    ### Load the model weights with the best validation loss.
    model.load_weights('saved_models/weights.best.ResNet50.hdf5')
    train_files, train_targets = get_dataset("train")
    valid_files, valid_targets = get_dataset("val")

    files_names = ["dataset_train3.csv", "dataset_val3.csv"]
    block_size = 500

    # 0 - train, 1 - validation
    for i_file in [0, 1]:

        files = [train_files, valid_files][i_file]
        count = int(np.ceil(len(files) / block_size))

        for i in range(count):
            print("{0} lotte: {1}/{2}".format(files_names[i_file], i, count))

            begin = i * block_size
            end = begin + block_size
            if end > len(files):
                end = len(files)

            # Process one block of images
            tensors = paths_to_tensor(files[begin:end])
            tensors = preprocess_input(tensors)
            # predict() returns shape (end - begin, 83) for this model, so
            # no reshape is needed
            pred = model.predict(tensors)

            # Append this block to the CSV, writing the header only once,
            # for the first block
            df(pred).to_csv(path + files_names[i_file],
                            mode='a',
                            header=(i == 0),
                            index=False)
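
# A follow-up sketch: reading the dumped CSVs back for downstream use. pandas
# and the file names above are the only assumptions carried over.
import pandas as pd

x_train = pd.read_csv("dataset_train3.csv").values
x_val = pd.read_csv("dataset_val3.csv").values
print(x_train.shape, x_val.shape)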