Example #1
    def test_read_from_file(self):
        """ Read from a text and json files """
        data = run.read_from_file("test_users.txt")
        self.assertIn("test_user1", data)
        self.assertIn("test_user2", data)
        self.assertIn("test_user3", data)
        self.assertNotIn("test_user4", data)
        print("test_read_from_file Read from TEXT file -- PASS")

        data = run.read_from_file("test_json.json")
        self.assertIn("ObiWanKenobi.png", data)
        self.assertIn("answer", data)
        self.assertIn("ahead of the curve", data)
        self.assertNotIn("bulldozer", data)
        print("test_read_from_file Read from JSON file -- PASS")

        data_json = json.loads(data)
        for each in data_json:
            self.assertIn(
                each["source"],
                "AheadOfTheCurve.png ObiWanKenobi.png DrawingTheShortStraw.png"
            )
            self.assertIn(
                each["answer"],
                "Obi Wan Kenobi ahead of the curve drawing the short straw")
            self.assertIn(str(each["id"]), "1 2 3")
            self.assertNotIn(each["source"],
                             "BowTie.png bulldozer.png CoverLetter.png")
            self.assertNotIn(each["answer"], "biw tie bulldozer coverletter")
            self.assertNotIn(str(each["id"]), "4 5 6")
        print("test_read_from_file Check contents of JSON file -- PASS")
Example #2
def analyse_preprocessing():
    print(
        "-----------------STATISTICS FOR THE PREPROCESSED VIDEOS-------------")

    stats = read_from_file("E:\preprocessing_stats")

    paths = [stat[0] for stat in stats]
    times = [stat[1] for stat in stats]
    err_rates = [stat[2] for stat in stats]

    print("NO OF VIDEOS PROCESSED : {}".format(len(paths)))

    avg_time = sum(times) / len(times)
    print("AVG TIME : {}s".format(round(avg_time, 2)))

    avg_err_rate = sum(err_rates) / len(err_rates) * 100
    print("AVG ERR RATE : {}%".format(round(avg_err_rate, 2)))

    # Visualising the data
    plt.subplot(1, 2, 1)
    plt.hist(err_rates)
    plt.xlabel("err rate")
    plt.ylabel("Freq")
    plt.title("Err Rates Across The Dataset")

    plt.subplot(1, 2, 2)
    plt.scatter(times, err_rates)
    plt.xlabel("time")
    plt.ylabel("error rate")
    plt.title("Err Rate Vs Avg Time")

    plt.show()
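analyse_preprocessing assumes that read_from_file yields an iterable of (path, processing_time, error_rate) records. A hypothetical record list showing the shapes the averages above expect (the on-disk format itself is not shown in the snippet):

# Hypothetical records matching the (path, time, err_rate) layout unpacked above.
stats = [
    ("vid_001.mp4", 12.4, 0.02),
    ("vid_002.mp4", 9.8, 0.05),
]
times = [stat[1] for stat in stats]                            # [12.4, 9.8]
avg_time = sum(times) / len(times)                             # 11.1
avg_err_rate = sum(s[2] for s in stats) / len(stats) * 100     # 3.5 (percent)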
Example #3
def analyse_preprocessed_data():
    print("-----------------VISUALISING THE PREPROCESSED VIDEOS-------------")

    data = []

    if (os.path.exists("E:\signal_data")):
        data = read_from_file("E:\signal_data")

    real_data = []
    real_count = 0

    fake_data = []
    fake_count = 0

    for ROIs, faces, features, label in data:
        if label == 0 and real_count < 5:  # label 0 = real
            real_data.append((ROIs[0], faces[0]))
            real_count += 1

        if label == 1 and fake_count < 5:  # label 1 = fake
            fake_data.append((ROIs[0], faces[0]))
            fake_count += 1

    display_faces_roi(real_data)
    display_faces_roi(fake_data)
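display_faces_roi is not defined in this snippet; a hypothetical version consistent with the (ROI, face) pairs collected above, assuming both elements are image arrays that matplotlib can render:

import matplotlib.pyplot as plt

# Hypothetical helper: show each (ROI, face) pair side by side.
def display_faces_roi(pairs):
    for roi, face in pairs:
        fig, (ax_roi, ax_face) = plt.subplots(1, 2)
        ax_roi.imshow(roi)
        ax_roi.set_title("ROI")
        ax_face.imshow(face)
        ax_face.set_title("face")
        plt.show()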
Example #4
def analyse_signal_data():
    data = []

    if (os.path.exists("E:\signal_data")):
        data = read_from_file("E:\signal_data")

    # Use a distinct name so the list of lengths doesn't shadow the unpacked chroms
    chrom_lengths = [len(chroms) for (ROIs, faces, chroms, label) in data]
    chroms_df = pd.DataFrame(chrom_lengths)
    print("---------CHROMS----------")
    print(chroms_df.describe())
Example #5
    def test_read_from_file(self):
        '''Test the read_from_file function.'''
        info = "Writing a test file."
        with open("data/test.txt", "w") as outfile:
            outfile.write(info)
        self.assertTrue(os.path.exists("data/test.txt"))
        # run.read_from_file evidently resolves the bare name inside data/
        text = run.read_from_file("test.txt")
        os.remove("data/test.txt")
        self.assertFalse(os.path.exists("data/test.txt"))
        self.assertEqual(text, info)
        print("test_read_from_file -- PASS")
Example #6
def analyse_videos():
    print(
        "-----------------STATISTICS FOR THE UNPROCESSED VIDEOS-------------")

    vid_stats = []

    if (os.path.exists("video_stats")):
        vid_stats = read_from_file("video_stats")

    analyse_attribute(vid_stats, "framerate")
    analyse_attribute(vid_stats, "no_frames")
    analyse_attribute(vid_stats, "width")
    analyse_attribute(vid_stats, "height")
Example #7
def visualise_signal_data():
    data = read_from_file("E:\signal_data")
    data = slice_data(data)

    pulse_signals = []
    labels = []

    for (ROIS, faces, features, label) in data:
        pulse_signals.append(features)
        labels.append(label)

    for pulse_signal, label in zip(pulse_signals, labels):
        if label == 1:
            plt.plot(pulse_signal)
            plt.xlabel("time")
            plt.ylabel("pulse signal")
            plt.show()
            break
Example #8
                  optimizer=opt,
                  metrics=['accuracy'])

    es = EarlyStopping(monitor='loss', patience=100)

    model.fit(X_train,
              y_train,
              validation_data=(X_test, y_test),
              epochs=10000,
              shuffle=True,
              callbacks=[es],
              batch_size=150)

    evaluate_model(model, X_test, y_test)
    save_keras_model_to_file(model, "wavenet.h5")
    return model


if __name__ == "__main__":
    data = read_from_file("E:\signal_data")
    data = slice_data(data)
    X_train, y_train, X_val, y_val, X_test, y_test = retrieve_X_and_y(data)

    # run_SVM(X_train, y_train, X_test, y_test)
    # run_CNN(X_train, y_train, X_val, y_val, X_test, y_test)
    # run_AlexNet(X_train, y_train, X_val, y_val, X_test, y_test)
    # run_CNN_RNN(X_train, y_train, X_val, y_val, X_test, y_test)
    # run_WaveNet(X_train, y_train, X_val, y_val, X_test, y_test)
    # run_RNN(X_train, y_train, X_val, y_val, X_test, y_test)
    # run_deep_WaveNet(X_train, y_train, X_val, y_val, X_test, y_test)
    model = run_best_WaveNet(X_train, y_train, X_val, y_val, X_test, y_test)
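evaluate_model and save_keras_model_to_file are referenced but not defined in this excerpt; hedged sketches of what they might do, using only standard Keras calls (assumptions, not the author's code):

# Hypothetical helpers consistent with the calls inside run_best_WaveNet above.
def evaluate_model(model, X_test, y_test):
    loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
    print("Test loss: {:.4f}  accuracy: {:.4f}".format(loss, accuracy))

def save_keras_model_to_file(model, filename):
    model.save(filename)  # writes HDF5 when the filename ends in .h5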