Example #1
# Imports and module-level names assumed from context (CHECKPOINT_PATH,
# FRAME_DATA_PATH, VIDEOS_PER_CLASS, FRAMES_PER_VIDEO, LossHistory,
# get_model and normalize are defined elsewhere in the project):
import math
import os
import random

import numpy as np
import tensorflow as tf
from tensorflow import keras
from PIL import Image

from utils import compressed_pickle, decompress_pickle
from ourGenerator import OurGenerator

def main():
    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=CHECKPOINT_PATH,
                        save_best_only=True, save_weights_only=False, verbose=1)
    history_checkpoint = LossHistory()
    labels = decompress_pickle('labels.pickle.pbz2')
    partition = decompress_pickle('partition.pickle.pbz2')
    training_generator = OurGenerator(partition['train'], labels)
    validation_generator = OurGenerator(partition['val'], labels)
    model = get_model()
    # model.fit accepts generators directly; fit_generator is deprecated.
    history = model.fit(training_generator,
                        validation_data=validation_generator,
                        epochs=10,
                        callbacks=[cp_callback, history_checkpoint])
    model.save('models/ours_3_10epochs_model')
    compressed_pickle('history/ours_3_10epochs.pickle', history.history)
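
# compressed_pickle and decompress_pickle are imported from utils but not
# shown. A minimal sketch, assuming the common bz2-pickle recipe that the
# .pbz2 extension suggests; the project's actual implementation may differ.
import bz2
import pickle

def compressed_pickle(title, data):
    # Write data as a bz2-compressed pickle; the .pbz2 suffix is appended,
    # matching the calls above ('partition.pickle' is later read back as
    # 'partition.pickle.pbz2').
    with bz2.BZ2File(title + '.pbz2', 'w') as f:
        pickle.dump(data, f)

def decompress_pickle(file):
    # Load an object written by compressed_pickle.
    with bz2.BZ2File(file, 'rb') as f:
        return pickle.load(f)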
def setupInput():
    class_to_int = decompress_pickle('class_to_int.pickle.pbz2')
    labels = {}
    ids = []
    counter = 0
    for class_dir in os.listdir(FRAME_DATA_PATH):
        class_dir_path = os.path.join(FRAME_DATA_PATH, class_dir)
        if not os.path.isdir(class_dir_path):
            continue
        label = class_to_int[class_dir]
        all_videos = os.listdir(class_dir_path)
        num_to_choose = VIDEOS_PER_CLASS
        if len(all_videos) < VIDEOS_PER_CLASS:
            num_to_choose = len(all_videos)
        # random.sample picks without replacement; random.choices would allow
        # duplicates and leak chosen videos into remaining_videos.
        chosen_videos = random.sample(all_videos, k=num_to_choose)
        remaining_videos = list(set(all_videos) - set(chosen_videos))
        for video_dir in chosen_videos:
            video_dir_path = os.path.join(class_dir_path, video_dir)
            if not os.path.isdir(video_dir_path):
                continue
            frames = []
            # Sort so frames are sampled in temporal order (os.listdir order
            # is arbitrary); assumes zero-padded frame filenames.
            all_frames = sorted(os.listdir(video_dir_path))
            num_frames_choose = FRAMES_PER_VIDEO
            if len(all_frames) < FRAMES_PER_VIDEO:
                num_frames_choose = len(all_frames)
            chosen_frames_indices = np.linspace(0, len(all_frames) - 1, num = num_frames_choose)
            for frame_index in chosen_frames_indices:
                frame_index = math.floor(frame_index)
                frame_file = all_frames[frame_index]
                frame_path = os.path.join(video_dir_path, frame_file)
                if not os.path.isfile(frame_path):
                    continue
                image = Image.open(frame_path)
                image = normalize(image)
                frames.append(image)

            sample_id = 'id-' + str(counter)  # renamed from `id` to avoid shadowing the builtin
            ids.append(sample_id)
            compressed_pickle('new_inputs/' + sample_id + '.pickle', frames)
            counter += 1
            labels[sample_id] = label

        print('saved inputs for ' + class_dir)
        compressed_pickle('new_remaining/' + class_dir + '.pickle', remaining_videos)


    # random.sample keeps the splits disjoint; random.choices samples with
    # replacement, so IDs could repeat and overlap across splits.
    training_ids = random.sample(ids, k=int(np.floor(len(ids) * 2 / 3)))
    rest = list(set(ids) - set(training_ids))
    val_ids = random.sample(rest, k=int(np.floor(len(rest) * 2 / 3)))
    test_ids = list(set(rest) - set(val_ids))
    partition = {'train': training_ids, 'val': val_ids, 'test': test_ids}
    compressed_pickle('partition.pickle', partition)
    compressed_pickle('labels.pickle', labels)
    # Note: this is an instance method of OurGenerator, pasted here without
    # its class (see the class sketch at the end of this example).
    def __data_generation(self, list_IDs_temp):
        # Generates one batch of batch_size samples: X has shape
        # (n_samples, *dim, n_channels), or (n_samples, 800, 2048) when the
        # features come from a pretrained backbone.
        if self.use_pretrained:
            X = np.empty((self.batch_size, 800, 2048))
            y = np.empty((self.batch_size,), dtype=int)
            for i, ID in enumerate(list_IDs_temp):
                # Store sample
                x = np.array(
                    decompress_pickle('new_inputs/' + ID + '.pickle.pbz2'))
                x = keras.applications.resnet.preprocess_input(x)
                x = self.model.predict(x)
                x = x.reshape(-1, 2048)
                X[i, ] = x

                # Store class
                y[i] = self.labels[ID]

            return X, keras.utils.to_categorical(y, num_classes=self.n_classes)

        else:
            X = np.empty((self.batch_size, *self.dim, self.n_channels))
            y = np.empty((self.batch_size), dtype=int)

            # Generate data
            for i, ID in enumerate(list_IDs_temp):
                # Store sample
                X[i, ] = np.array(
                    decompress_pickle('new_inputs/' + ID + '.pickle.pbz2'))

                # Store class
                y[i] = self.labels[ID]

            return X, keras.utils.to_categorical(y, num_classes=self.n_classes)
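
# __data_generation above takes self, so it belongs on OurGenerator. A
# minimal sketch of the surrounding class, assuming the standard
# keras.utils.Sequence pattern; the constructor signature and defaults are
# guesses, not the original code.
class OurGenerator(keras.utils.Sequence):
    def __init__(self, list_IDs, labels, batch_size=32, dim=(10, 240, 320),
                 n_channels=3, n_classes=10, shuffle=True,
                 use_pretrained=False, model=None):
        self.list_IDs = list_IDs
        self.labels = labels
        self.batch_size = batch_size
        self.dim = dim
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.use_pretrained = use_pretrained
        self.model = model  # pretrained backbone used by __data_generation
        self.on_epoch_end()

    def __len__(self):
        # Number of batches per epoch.
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        # Pick the IDs for this batch and delegate to __data_generation.
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        return self.__data_generation(list_IDs_temp)

    def on_epoch_end(self):
        # Reshuffle the sample order between epochs.
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    # __data_generation (shown above) would sit here.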
Example #4
from utils import decompress_pickle, get_correct_classification_rate, get_correction_worked_rate
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np

def pltcolor(lst):
    # Map each unique value in lst to one of the ten Tableau colors and
    # return one color per element ("paleta" = palette, "unicos" = uniques).
    paleta = list(mcolors.TABLEAU_COLORS.items())
    unicos = np.unique(lst)
    paleta = {str(unicos[i]): paleta[i][0] for i in range(len(unicos))}
    return [paleta[str(i)] for i in lst]

reports = decompress_pickle("reports.pbz2")

reports_keys = list(reports.keys())
metrics_keys = list(reports[reports_keys[0]].keys())


p_error = []
detection_ratio = []
len_hamming_message = []
correction_worked = []

for _, report in reports.items():
    p_error.append(report["p_error"][0])
    detection_ratio.append(get_correct_classification_rate(report))
    len_hamming_message.append(report["len_hamming_message"][0])
    correction_worked.append(get_correction_worked_rate(report))
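
# pltcolor is presumably used to color scatter points by a discrete variable.
# A hedged usage sketch with the lists collected above; which metric goes on
# which axis is an assumption, not from the original script:
colors = pltcolor(len_hamming_message)
plt.scatter(p_error, detection_ratio, c=colors)
plt.xscale('log')
plt.xlabel('p_error')
plt.ylabel('correct classification rate')
plt.show()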

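# A second script follows in this example: it re-extracts ResNet50 features
# for the test split and measures the saved model's accuracy.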
import keras
from utils import compressed_pickle, decompress_pickle, plotLearningCurve
from ourGenerator import OurGenerator
from keras.applications.resnet50 import ResNet50
import numpy as np

model = keras.models.load_model('models/ours_7_10epochs_model', compile=True)

labels = decompress_pickle('old/labels.pickle.pbz2')
partition = decompress_pickle('old/partition.pickle.pbz2')

resnet = ResNet50(include_top=False,
                  weights="imagenet",
                  input_tensor=None,
                  input_shape=(240, 320, 3),
                  pooling=None)

correct_count = 0
incorrect_count = 0

for ID in partition['test']:
    x = np.array(decompress_pickle('old/new_inputs/' + ID + '.pickle.pbz2'))
    x = keras.applications.resnet.preprocess_input(x)
    x = resnet.predict(x)
    # Add a batch dimension of 1: training reshaped each sample to
    # (800, 2048), so one video should become (1, 800, 2048), not 32 slices.
    x = x.reshape(1, -1, 2048)
    prediction = model.predict(x)[0]
    prediction = np.argmax(prediction)
    label = labels[ID]
    if label == prediction:
        correct_count += 1
        print('correct')
    else:
        incorrect_count += 1

print('test accuracy:', correct_count / (correct_count + incorrect_count))
Example #6
# This snippet starts mid-script; the imports below are assumed from context
# (Experiment, plot_confussion_matrix, percentages_erros and
# dictionaries_to_lists are project-local helpers, and n_trials and file are
# set earlier in the original script):
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

from utils import compressed_pickle, decompress_pickle

# The line that opened this indented block is also missing; the flag and
# guard below are assumed stand-ins (run_experiments is a hypothetical name):
run_experiments = True
open_reports = True
if run_experiments:
    probs = 1 / np.logspace(0, 4, 10)
    lengths = np.logspace(1, 5, 10, dtype=int)
    experiments = []
    for n in tqdm(lengths, ncols=100):
        for p in probs:
            current_experiment = Experiment(p, n, n_trials)
            current_experiment.make_report()
            experiments.append(current_experiment)

    reports = {repr(experiment): experiment.report for experiment in experiments}
    compressed_pickle(file, reports)
    open_reports = False
    del experiments

if open_reports:  # if the "reports" variable was not assigned above
    reports = decompress_pickle(file)

percentage_more_than_one = []

for name, report in reports.items():
    plot_confussion_matrix(report, title=name, save_as=f'cms/{name}')
    plt.close("all")

error_percentage = []
for _, report in reports.items():
    error_percentage.append(percentages_erros(report))
error_percentage = dictionaries_to_lists(error_percentage)
error_percentage['theoretical'] = np.array([i[0] for i in error_percentage['theoretical']])
error_percentage['empirical'] = np.array(error_percentage['empirical'])
plt.plot(error_percentage['theoretical'], error_percentage['empirical'], '+')
plt.xlabel("porcentaje con más de un error teórico")