def class_to_int():
    # Map each class directory name to an integer label and pickle the mapping.
    class_to_int = {}
    class_count = 0
    for class_dir in os.listdir(FRAME_DATA_PATH):
        class_dir_path = os.path.join(FRAME_DATA_PATH, class_dir)
        if not os.path.isdir(class_dir_path):
            continue
        class_to_int[class_dir] = class_count
        class_count += 1
    compressed_pickle('class_to_int.pickle', class_to_int)
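The compressed_pickle and decompress_pickle helpers used throughout these examples are defined elsewhere in the source project. A minimal sketch consistent with the '.pbz2' filenames read back below, assuming the usual bz2 + pickle recipe (the real helpers may differ):

import bz2
import pickle

def compressed_pickle(title, data):
    # Write `data` as a bz2-compressed pickle; the helper is assumed to append
    # the '.pbz2' suffix so that 'x.pickle' is later read back as 'x.pickle.pbz2'.
    with bz2.BZ2File(title + '.pbz2', 'wb') as f:
        pickle.dump(data, f)

def decompress_pickle(file):
    # Load an object written by compressed_pickle.
    with bz2.BZ2File(file, 'rb') as f:
        return pickle.load(f)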
Example #2
def main():
    # Keep a checkpoint of the best model seen so far during training.
    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=CHECKPOINT_PATH,
                                                     save_best_only=True,
                                                     save_weights_only=False,
                                                     verbose=1)
    history_checkpoint = LossHistory()
    labels = decompress_pickle('labels.pickle.pbz2')
    partition = decompress_pickle('partition.pickle.pbz2')
    training_generator = OurGenerator(partition['train'], labels)
    validation_generator = OurGenerator(partition['val'], labels)
    model = get_model()
    # fit_generator is deprecated in TF 2.x; model.fit accepts generators directly.
    history = model.fit(training_generator,
                        validation_data=validation_generator,
                        epochs=10,
                        callbacks=[cp_callback, history_checkpoint])
    model.save('models/ours_3_10epochs_model')
    compressed_pickle('history/ours_3_10epochs.pickle', history.history)
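
# LossHistory is referenced above but not defined in this example. A minimal
# sketch of such a callback, assuming it only records the loss values exposed
# in `logs` (the actual class may track more metrics):
class LossHistory(tf.keras.callbacks.Callback):
    def on_train_begin(self, logs=None):
        self.batch_losses = []
        self.epoch_losses = []

    def on_train_batch_end(self, batch, logs=None):
        self.batch_losses.append((logs or {}).get('loss'))

    def on_epoch_end(self, epoch, logs=None):
        self.epoch_losses.append((logs or {}).get('loss'))
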
def setupInput():
    # Build per-video frame stacks, pickle them, and create the train/val/test
    # split together with the id -> label mapping.
    class_to_int = decompress_pickle('class_to_int.pickle.pbz2')
    labels = {}
    ids = []
    counter = 0
    for class_dir in os.listdir(FRAME_DATA_PATH):
        class_dir_path = os.path.join(FRAME_DATA_PATH, class_dir)
        if not os.path.isdir(class_dir_path):
            continue
        label = class_to_int[class_dir]
        all_videos = os.listdir(class_dir_path)
        num_to_choose = min(VIDEOS_PER_CLASS, len(all_videos))
        # Sample without replacement so the chosen and remaining videos are disjoint.
        chosen_videos = random.sample(all_videos, k=num_to_choose)
        remaining_videos = list(set(all_videos) - set(chosen_videos))
        for video_dir in chosen_videos:
            video_dir_path = os.path.join(class_dir_path, video_dir)
            if not os.path.isdir(video_dir_path):
                continue
            frames = []
            all_frames = os.listdir(video_dir_path)
            num_frames_choose = min(FRAMES_PER_VIDEO, len(all_frames))
            # Take frame indices evenly spaced over the whole video.
            chosen_frames_indices = np.linspace(0, len(all_frames) - 1, num=num_frames_choose)
            for frame_index in chosen_frames_indices:
                frame_index = math.floor(frame_index)
                frame_file = all_frames[frame_index]
                frame_path = os.path.join(video_dir_path, frame_file)
                if not os.path.isfile(frame_path):
                    continue
                image = Image.open(frame_path)
                image = normalize(image)
                frames.append(image)

            sample_id = 'id-' + str(counter)  # avoid shadowing the built-in id()
            ids.append(sample_id)
            compressed_pickle('new_inputs/' + sample_id + '.pickle', frames)
            counter += 1
            labels[sample_id] = label

        print('saved inputs for ' + class_dir)
        compressed_pickle('new_remaining/' + class_dir + '.pickle', remaining_videos)


    # random.sample draws without replacement; random.choices would repeat ids
    # and make the set differences below drop too many samples.
    training_ids = random.sample(ids, k=int(np.floor(len(ids) * 2 / 3)))
    rest = list(set(ids) - set(training_ids))
    val_ids = random.sample(rest, k=int(np.floor(len(rest) * 2 / 3)))
    test_ids = list(set(rest) - set(val_ids))
    partition = {'train': training_ids, 'val': val_ids, 'test': test_ids}
    compressed_pickle('partition.pickle', partition)
    compressed_pickle('labels.pickle', labels)
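
OurGenerator (used in main above) is also defined elsewhere in the source project. A minimal sketch of a compatible generator, assuming each sample was written by setupInput as 'new_inputs/<id>.pickle.pbz2', that normalize returns one NumPy array per frame, and that N_CLASSES and the batch size of 8 are illustrative placeholders:

class OurGenerator(tf.keras.utils.Sequence):
    def __init__(self, list_ids, labels, batch_size=8, n_classes=N_CLASSES):
        self.list_ids = list_ids
        self.labels = labels
        self.batch_size = batch_size
        self.n_classes = n_classes

    def __len__(self):
        # Number of batches per epoch.
        return int(np.ceil(len(self.list_ids) / self.batch_size))

    def __getitem__(self, index):
        batch_ids = self.list_ids[index * self.batch_size:(index + 1) * self.batch_size]
        X, y = [], []
        for sample_id in batch_ids:
            # Each pickle holds the list of normalized frames saved by setupInput().
            frames = decompress_pickle('new_inputs/' + sample_id + '.pickle.pbz2')
            X.append(np.stack(frames))
            y.append(self.labels[sample_id])
        return np.stack(X), tf.keras.utils.to_categorical(y, num_classes=self.n_classes)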
Example #4
# Run the experiments (or reuse a cached run) and plot one confusion matrix per report.
file = 'reports.pbz2'
open_reports = True
n_trials = 1000

if not os.path.isfile(file):  # if the saved experiments are not there yet
    probs = 1 / np.logspace(0, 4, 10)
    lengths = np.logspace(1, 5, 10, dtype=int)
    experiments = []
    for n in tqdm(lengths, ncols=100):
        for p in probs:
            current_experiment = Experiment(p, n, n_trials)
            current_experiment.make_report()
            experiments.append(current_experiment)

    reports = {repr(experiment): experiment.report for experiment in experiments}
    compressed_pickle(file, reports)
    open_reports = False
    del experiments

if open_reports:  # if the "reports" variable has not been assigned above
    reports = decompress_pickle(file)

percentage_more_than_one = []

for name, report in reports.items():
    plot_confussion_matrix(report, title=name, save_as=f'cms/{name}')
    plt.close("all")

error_percentage = []
for _, report in reports.items():
    error_percentage.append(percentages_erros(report))
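
percentages_erros and plot_confussion_matrix come from the project's own utilities and are not shown here. Under the assumption that each report is (or contains) a confusion-matrix-like array, an error percentage of this kind could be computed as follows (hypothetical helper, not the project's actual implementation):

def error_percentage_from_confusion_matrix(cm):
    # Share of off-diagonal (misclassified) counts, in percent.
    cm = np.asarray(cm, dtype=float)
    total = cm.sum()
    return 100.0 * (total - np.trace(cm)) / total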