Example no. 1
    def get_data(self, audio_file, transcripts_file):
        # Load the pickled encoder inputs (audio features) and the pickled
        # decoder inputs/targets (encoded transcripts).
        encoder_input_data = load_pickle_data(audio_file)
        (decoder_input_data,
         decoder_target_data) = load_pickle_data(transcripts_file)
        # Build the timestep dictionary from the loaded arrays.
        data = self._generate_timestep_dict(encoder_input_data,
                                            decoder_input_data,
                                            decoder_target_data)
        return data
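Most of these examples call a load_pickle_data(path) helper that is not shown; the benchmark examples use a utils.load_pickle_data(size=...) variant with a different signature. A minimal sketch of the path-based helper, assuming it is a thin wrapper around the standard pickle module:

import pickle

def load_pickle_data(file_path):
    # Deserialize and return whatever object was stored in the pickle file.
    with open(file_path, "rb") as pickle_file:
        return pickle.load(pickle_file)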
Example no. 2
def plot_train_loss_acc(model_hist_path, word_level):
    # Load history
    train_hist = load_pickle_data(model_hist_path)
    if word_level:
        title = "word-level"
    else:
        title = "character-level"

    # Prepare paths
    accuracy_plot_path = model_hist_path.split(".pkl")[0] + "acc.png"
    loss_plot_path = model_hist_path.split(".pkl")[0] + "loss.png"

    # Plot and save accuracy
    plt.plot(train_hist["accuracy"])
    plt.title("Training accuracy of " + title + " recognition")
    plt.xlabel("Epochs")
    plt.ylabel("Accuracy")
    plt.legend(['train accuracy'], loc='upper left')
    plt.savefig(accuracy_plot_path)
    # plt.show()
    plt.clf()

    # Plot and save loss
    plt.plot(train_hist["loss"], "r")
    plt.title("Training loss of " + title + " recognition")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend(['train loss'], loc='upper left')
    plt.savefig(loss_plot_path)
    # plt.show()
    plt.clf()
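A small smoke test for plot_train_loss_acc, assuming the function is importable alongside matplotlib; the history values and the architecture6word.pkl file name are only illustrations:

import pickle

# Write a tiny fake training history, then plot it.
history = {"accuracy": [0.42, 0.55, 0.63], "loss": [2.1, 1.6, 1.2]}
with open("architecture6word.pkl", "wb") as hist_file:
    pickle.dump(history, hist_file)

plot_train_loss_acc("architecture6word.pkl", word_level=True)
# Expected outputs: architecture6wordacc.png and architecture6wordloss.png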
Example no. 3
    def on_epoch_end(self, epoch, logs=None):
        # Saving training history
        # Check if directory exists
        directory_path = settings.TRAINED_MODELS_PATH + self.model_name
        if not file_exists(directory_path):
            create_dir(directory_path)

        # Word level history
        if self.word_level:
            hist_path = (settings.TRAINED_MODELS_PATH + self.model_name + "/" +
                         self.model_name + "word.pkl")
            average_accuracy = 0
            if file_exists(hist_path):
                acc_loss_history = load_pickle_data(hist_path)
            else:
                acc_loss_history = dict()
                acc_loss_history["accuracy"] = []
                acc_loss_history["loss"] = []

            # Average accuracy over the six decoder output layers
            for i in range(0, 6):
                accuracy = "decoder_dense" + str(i) + "_acc"
                average_accuracy += logs[accuracy]

            average_accuracy /= 6.0

            acc_loss_history["accuracy"].append(average_accuracy)
            acc_loss_history["loss"].append(logs["loss"])

        # Character level history
        else:
            hist_path = (settings.TRAINED_MODELS_PATH + self.model_name + "/" +
                         self.model_name + "char.pkl")
            if file_exists(hist_path):
                acc_loss_history = load_pickle_data(hist_path)
            else:
                acc_loss_history = dict()
                acc_loss_history["accuracy"] = []
                acc_loss_history["loss"] = []

            acc_loss_history["accuracy"].append(logs["acc"])
            acc_loss_history["loss"].append(logs["loss"])

        generate_pickle_file(acc_loss_history, hist_path)
        plot_train_loss_acc(hist_path, self.word_level)

        self.model.save(self.model_path)
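For context, a sketch of how a callback like this might be attached to Keras training. The class name TrainingHistoryCallback, its constructor arguments, and the fit() inputs are assumptions; the excerpt only shows the on_epoch_end hook.

# Hypothetical wiring for the callback above: it saves the history, plots it,
# and checkpoints the model at the end of every epoch.
history_callback = TrainingHistoryCallback(model_name="architecture6",
                                           model_path="architecture6word.h5",
                                           word_level=True)
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=64, epochs=50,
          callbacks=[history_callback])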
Example no. 4
def _get_train_test_data(train_ratio=0.8, padding=False):
    """
    Splits dataset into train and test according to a ratio
    :param train_ratio: float
    :param padding: Boolean
    :return: List of InputAudio, List of InputAudio
    """
    # Note: both branches currently load the same padded pickle file, so the
    # padding flag has no effect on which dataset is read.
    if padding is False:
        data = load_pickle_data(PICKLE_PAD_FILE_PATH)
    else:
        data = load_pickle_data(PICKLE_PAD_FILE_PATH)

    # Split by index: the first train_ratio fraction goes to train, the rest to test.
    train_length = int(len(data) * train_ratio)
    train_data = data[:train_length]
    test_data = data[train_length:]

    return train_data, test_data
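A minimal usage sketch, assuming the padded pickle file already exists at PICKLE_PAD_FILE_PATH:

train_data, test_data = _get_train_test_data(train_ratio=0.8, padding=True)
print("train samples:", len(train_data), "test samples:", len(test_data))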
Example no. 5
    def __init__(self, model_path, latent_dim):
        self.model = models.load_model(model_path)
        self.encoder_states = None
        self.latent_dim = latent_dim

        # Getting dataset and training information
        general_info = load_pickle_data(
            settings.DATASET_WORD_INFERENCE_INFORMATION_PATH)
        settings.MFCC_FEATURES_LENGTH = general_info[0]
        settings.TOTAL_SAMPLES_NUMBER = general_info[1]
        settings.WORD_SET = general_info[2]
        settings.LONGEST_WORD_LENGTH = general_info[3]
        settings.CHARACTER_SET = general_info[4]
        settings.WORD_TARGET_LENGTH = general_info[5]

        self.encoder_model = None
        self.decoder_model = None
        self.get_encoder_decoder_baseline()
Example no. 6
def _get_train_test_data_partition(dataset_path, train_ratio=0.8):
    """
    Splits a pickled dataset partition into train and test according to a ratio
    :param dataset_path: String, path to the pickled dataset partition
    :param train_ratio: float
    :return: List of InputAudio, List of InputAudio
    """

    data = load_pickle_data(dataset_path)
    # Split by index: the first train_ratio fraction goes to train, the rest to test.
    train_length = int(len(data) * train_ratio)
    train_data = data[:train_length]
    test_data = data[train_length:]

    return train_data, test_data
Example no. 7
def bench(size, batch_size, filename):
    if os.path.isfile(filename):
        with open("../benchmarks/pid", "w+") as pid_file:
            pid_file.write(str(os.getpid()))
    else:
        con, meta = connect('ganga', 'ganga', 'jobs')
        JOBS, BLOBS = create_tables(con, meta)

        jobs, blobs = utils.load_pickle_data(size=size)
        add_jobs_batch(con, JOBS=JOBS, jobs=jobs, batch_size=batch_size)

        add_blobs_batch(con, BLOBS=BLOBS, blobs=blobs, batch_size=batch_size)

        query_jobs_all(con, "jobs")
        query_blobs_all(con, "blobs")

        if not os.path.isdir("../benchmarks"):
            os.makedirs("../benchmarks")

        with open(filename, "w+") as results_file:
            json.dump(utils.TIMES, results_file)
        with open("../benchmarks/pid", "w+") as pid_file:
            pid_file.write(str(os.getpid()))
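A hypothetical invocation of the benchmark above; the size, batch size, and output file name are placeholders that follow the ../benchmarks directory layout used in the function:

# Load the pickled jobs/blobs, insert them in batches, run the queries,
# and dump the collected timings to a JSON file under ../benchmarks/.
bench(size=1000, batch_size=100, filename="../benchmarks/jobs_1000.json")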
Example no. 8
def bench(size, batch_size, filename):
    if os.path.isfile(filename):
        with open("../benchmarks/pid", "w+") as pid_file:
            pid_file.write(str(os.getpid()))
    else:
        jobs, blobs = utils.load_pickle_data(size=size)
        print("starting to connect")
        session, _ = connect_cassandra()
        session = create_tables_cassandra(session)

        add_jobs_cassandra(session, jobs=jobs, batch_size=batch_size)
        add_blobs_cassandra(session, blobs=blobs, batch_size=batch_size)

        # query_jobs_all_cassandra(session, "JOB")
        # query_blobs_all_cassandra(session, "BLOB")

        if not os.path.isdir("../benchmarks"):
            os.makedirs("../benchmarks")

        with open(filename, "w+") as results_file:
            json.dump(utils.TIMES, results_file)
        with open("../benchmarks/pid", "w+") as pid_file:
            pid_file.write(str(os.getpid()))
Example no. 9
    def __init__(self, word_level, architecture, latent_dim):

        model_name = "architecture" + str(architecture)
        if word_level:
            model_path = (settings.TRAINED_MODELS_PATH + model_name + "/" +
                          model_name + "word.h5")
        else:
            model_path = (settings.TRAINED_MODELS_PATH + model_name + "/" +
                          model_name + "char.h5")

        print(model_path)

        self.model = models.load_model(model_path)
        self.encoder_states = None
        self.latent_dim = latent_dim
        general_info = load_pickle_data(
            settings.DATASET_CHAR_INFERENCE_INFORMATION_PATH)
        settings.MFCC_FEATURES_LENGTH = general_info[0]
        settings.CHARACTER_SET = general_info[2]
        self.encoder_model = None
        self.decoder_model = None

        if architecture == 6:
            self._get_encoder_decoder_model_baseline()
        else:
            self._get_encoder_decoder_model_cnn()
Example no. 10
def upload_dataset_partition(train_ratio=0.95, word_level=False, partitions=8):
    """
    Generates, for each dataset partition:
    train ==> encoder inputs, decoder inputs, decoder targets
    test  ==> encoder inputs, decoder inputs, decoder targets
    :param train_ratio: float
    :param word_level: Boolean
    :param partitions: int
    """
    print("PREPARING PARTITIONED DATASET")
    if empty_directory(
            settings.AUDIO_CHAR_SPLIT_TRAIN_PATH) or empty_directory(
                settings.AUDIO_WORD_SPLIT_TRAIN_PATH):

        get_dataset_information(word_level, train_ratio=train_ratio)

        list_datasets = get_files(settings.PICKLE_PARTITIONS_PATH)

        for dataset_number, dataset_file in enumerate(list_datasets):

            # Generate train/test directories for this dataset partition
            if word_level:
                split_paths = [settings.AUDIO_WORD_SPLIT_TRAIN_PATH,
                               settings.AUDIO_WORD_SPLIT_TEST_PATH,
                               settings.TRANSCRIPTS_ENCODING_WORD_SPLIT_TRAIN_PATH,
                               settings.TRANSCRIPTS_ENCODING_WORD_SPLIT_TEST_PATH]
            else:
                split_paths = [settings.AUDIO_CHAR_SPLIT_TRAIN_PATH,
                               settings.AUDIO_CHAR_SPLIT_TEST_PATH,
                               settings.TRANSCRIPTS_ENCODING_CHAR_SPLIT_TRAIN_PATH,
                               settings.TRANSCRIPTS_ENCODING_CHAR_SPLIT_TEST_PATH]

            for base_path in split_paths:
                path = base_path + "dataset" + str(dataset_number) + "/"
                if not file_exists(path):
                    os.mkdir(path)

            # Split the partition into train and test data; the split is controlled by the train_ratio parameter
            train_data, test_data = _get_train_test_data_partition(
                dataset_path=dataset_file, train_ratio=train_ratio)

            if word_level:
                train_audio, train_transcripts = _get_audio_transcripts_word_level(
                    train_data)
                # train_audio, train_transcripts = print_suspicious_characters(train_data)
                test_audio, test_transcripts = _get_audio_transcripts_word_level(
                    test_data)

            else:
                train_audio, train_transcripts = _get_audio_transcripts_character_level(
                    train_data)
                # train_audio, train_transcripts = print_suspicious_characters(train_data)
                test_audio, test_transcripts = _get_audio_transcripts_character_level(
                    test_data)

            _generate_spllited_encoder_input_data_partition(
                train_audio,
                word_level=word_level,
                dataset_number=dataset_number,
                partitions=partitions)
            # train_encoder_input = _get_encoder_input_data(audio_data=train_audio)
            _generate_spllited_encoder_input_data_partition(
                test_audio,
                word_level=word_level,
                dataset_number=dataset_number,
                test=True,
                partitions=partitions)

            generate_decoder_input_target(transcripts=train_transcripts,
                                          word_level=word_level,
                                          partitions=partitions,
                                          dataset_number=dataset_number)

            generate_decoder_input_target(transcripts=test_transcripts,
                                          word_level=word_level,
                                          partitions=partitions,
                                          dataset_number=dataset_number,
                                          test=True)

    else:
        if not file_exists(settings.DATASET_CHAR_INFORMATION_PATH):
            get_dataset_information(0, train_ratio=train_ratio)

        if not file_exists(settings.DATASET_WORD_INFORMATION_PATH):
            get_dataset_information(1, train_ratio=train_ratio)

        if word_level:
            general_info = load_pickle_data(
                settings.DATASET_WORD_INFORMATION_PATH)
            settings.MFCC_FEATURES_LENGTH = general_info[0]
            settings.TOTAL_SAMPLES_NUMBER = general_info[1]
            settings.WORD_SET = general_info[2]
            settings.LONGEST_WORD_LENGTH = general_info[3]
            settings.CHARACTER_SET = general_info[4]
            settings.WORD_TARGET_LENGTH = general_info[5]

        else:
            general_info = load_pickle_data(
                settings.DATASET_CHAR_INFORMATION_PATH)
            settings.MFCC_FEATURES_LENGTH = general_info[0]
            settings.TOTAL_SAMPLES_NUMBER = general_info[1]
            settings.CHARACTER_SET = general_info[2]
            print(settings.CHARACTER_SET)
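A minimal usage sketch; the arguments simply repeat the defaults except for word_level:

# Build word-level train/test partitions and write the encoder/decoder pickles
# for each dataset partition.
upload_dataset_partition(train_ratio=0.95, word_level=True, partitions=8)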
Example no. 11
    def get_test_data(self, audio_file, transcripts_file):
        # Load the pickled encoder inputs and the pickled decoder inputs/targets
        # used for evaluation.
        encoder_input_data = load_pickle_data(audio_file)
        (decoder_input_data,
         decoder_target_data) = load_pickle_data(transcripts_file)
        return encoder_input_data, decoder_input_data, decoder_target_data