    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return eer
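
# Editor's sketch: the fragment above begins mid-function. A full evaluate()
# helper typically collects per-utterance scores for bona fide (target) and
# spoofed (non-target) files before the rocch call. Everything below is an
# assumption-laden sketch, not the original code: it assumes numpy is
# imported as np, the loader yields (input, ..., label) tuples with the label
# at index 2 (as in the loops further down this page), label 1 marks bona
# fide speech, and net is a neat-python recurrent network with
# reset()/activate().
def evaluate_sketch(net, data_loader):
    target_scores = []
    non_target_scores = []
    for data in data_loader:
        inputs, label = data[0], data[2]
        net.reset()  # clear recurrent state between utterances
        score = net.activate(inputs)[0]  # single output neuron used as score
        if label == 1:
            target_scores.append(score)
        else:
            non_target_scores.append(score)
    pmiss, pfa = rocch(np.array(target_scores), np.array(non_target_scores))
    return rocch2eer(pmiss, pfa)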


if __name__ == '__main__':
    # Determine path to configuration file. This path manipulation is
    # here so that the script will run successfully regardless of the
    # current working directory.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'neat.cfg')

    train_loader = ASVDatasetshort(None, do_lfcc=True, index_list=index_train,
                                   do_standardize=True)
    test_loader = ASVDataset(None, is_train=False, is_eval=False, index_list=index_test,
                             do_lfcc=True, do_standardize=True)

    winner, config, stats = run(config_path, n_generation, train_loader, spoofed_class)
    make_visualize(winner, config, stats)

    winner_net = neat.nn.RecurrentNetwork.create(winner, config)

    train_eer = evaluate(winner_net, train_loader)
    eer = evaluate(winner_net, test_loader)

    print("\n")
    print("**** training equal error rate = {}  ****".format(train_eer))

    print("\n")
    print("**** equal error rate = {}  ****".format(eer))
Example #2
def load_single_data(batch_size=50,
                     length=4 * 16000,
                     num_data=10000,
                     data_type="train",
                     custom_path="./data",
                     multi_proc=True,
                     balanced=True,
                     option=OPTION,
                     metadata=False,
                     return_dataset=False):
    is_train = data_type in ["train", "train_short"]

    local_dir = os.path.dirname(__file__)

    # Cache file: the name encodes every preprocessing parameter so that a
    # change in any of them forces a rebuild.
    pp_path = os.path.join(
        local_dir, "data/preprocessed/{}_{}_{}_{}_{}_{}.torch".format(
            data_type, option, num_data, metadata, WIN_LEN, HOP_LEN))

    if os.path.exists(pp_path):
        data = torch.load(pp_path)
        data.set_balance(balanced and is_train)
        if return_dataset:
            return data
        dataloader = DataLoader(data,
                                batch_size=batch_size,
                                num_workers=4,
                                shuffle=is_train,
                                drop_last=is_train)
        return dataloader

    os.makedirs(os.path.join(local_dir, 'data/preprocessed'), exist_ok=True)

    if data_type == "train":
        data = ASVDataset(length=length,
                          is_train=True,
                          is_eval=False,
                          nb_samples=num_data,
                          random_samples=True,
                          metadata=metadata,
                          custom_path=custom_path)
    elif data_type == "train_short":
        data = ASVDatasetshort(length=length,
                               random_samples=True,
                               metadata=metadata,
                               custom_path=custom_path)
    elif data_type == "dev":
        data = ASVDataset(length=length,
                          is_train=False,
                          is_eval=False,
                          nb_samples=num_data,
                          random_samples=True,
                          metadata=metadata,
                          custom_path=custom_path)
    elif data_type == "eval":
        data = ASVDataset(length=length,
                          is_train=False,
                          is_eval=True,
                          nb_samples=num_data,
                          random_samples=True,
                          metadata=metadata,
                          custom_path=custom_path)
    else:
        raise ValueError("data type {} not recognized".format(data_type))

    print("preprocessing_tools {} set".format(data_type))
    pp_data = PreprocessedASVDataset(data,
                                     multi_proc=multi_proc,
                                     balanced=is_train and balanced,
                                     option=option)
    torch.save(pp_data, pp_path)

    if return_dataset:
        return pp_data

    dataloader = DataLoader(pp_data,
                            batch_size=batch_size,
                            num_workers=4,
                            shuffle=is_train,
                            drop_last=is_train)

    return dataloader
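
# Editor's note: typical use of load_single_data. The first call builds and
# caches the preprocessed set under data/preprocessed/; later calls with the
# same (data_type, option, num_data, metadata, WIN_LEN, HOP_LEN) key reload
# the cached .torch file instead of re-running preprocessing, e.g.:
#
#     train_loader = load_single_data(batch_size=50, data_type="train")
#     dev_data = load_single_data(data_type="dev", return_dataset=True)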
Example #3
index_test = []
index_validation = []
for i in range(len(dev_border) - 1):
    index_test += rd.sample(range(dev_border[i], dev_border[i + 1]), 100)

    if i == 0:
        index_validation += rd.sample(
            range(dev_border[i], dev_border[i + 1]), 300)
    else:
        index_validation += rd.sample(
            range(dev_border[i], dev_border[i + 1]), 50)
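
# Editor's note: with the dev_border used elsewhere on this page (eight
# boundaries, hence seven intervals), this draws 100 test indices per
# interval (700 total), plus 300 validation indices from the first interval
# and 50 from each of the remaining six (600 total).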

train_loader = ASVDatasetshort(None, nb_samples=nb_samples_train)
test_loader = ASVDataset(None,
                         is_train=False,
                         is_eval=False,
                         index_list=index_test)
validation_loader = ASVDataset(None,
                               is_train=False,
                               is_eval=False,
                               index_list=index_validation)


class Anti_spoofing_Evaluator(neat.parallel.ParallelEvaluator):
    def __init__(self,
                 num_workers,
                 eval_function,
                 data,
                 val_data,
                 config,
                 gc_eval,
Example #4
    # Display the winning genome.
    print('\nBest genome:\n{!s}'.format(winner_))

    return winner_, config_, stats_


if __name__ == '__main__':
    # Determine path to configuration file. This path manipulation is
    # here so that the script will run successfully regardless of the
    # current working directory.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'neat.cfg')

    train_loader = ASVDatasetshort(None, nb_samples=nb_samples_train)
    test_loader = ASVDataset(None,
                             is_train=False,
                             is_eval=False,
                             index_list=index_test)

    winner, config, stats = run(config_path, n_generation)
    make_visualize(winner, config, stats)

    winner_net = neat.nn.RecurrentNetwork.create(winner, config)

    train_accuracy, train_eer = evaluate_acc_eer(winner_net, train_loader)
    accuracy, eer = evaluate_acc_eer(winner_net, test_loader)

    print("\n")
    print("**** training accuracy = {}  ****".format(train_accuracy))
    print("**** training equal error rate = {}  ****".format(train_eer))

    print("\n")
Example #5
    return silence, nb_frames, nb_elements


if __name__ == '__main__':
    # Determine path to configuration file. This path manipulation is
    # here so that the script will run successfully regardless of the
    # current working directory.
    local_dir = os.path.dirname(__file__)

    nb_samples = 1

    dev_border = [0, 2548, 6264, 9980, 13696, 17412, 21128, 22296]
    index_test = []
    for i in range(len(dev_border) - 1):
        index_test += rd.sample(
            range(dev_border[i], dev_border[i + 1]), nb_samples)

    test_loader = ASVDataset(None,
                             is_train=False,
                             is_eval=False,
                             index_list=index_test)

    silence_prob, nb_frames, nb_elements = detect_speech(
        test_loader[0][0], "bonafide.wav")

    t = np.linspace(0, nb_frames * nb_elements, nb_frames)
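    # t: sample positions of the nb_frames analysis frames
    # (nb_elements samples per frame).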

    sns.set(style="darkgrid")
    plt.plot(t, silence_prob, 'g')
    plt.show()
Example #6
"""
NEAT APPLIED TO ASVspoof 2019
"""

nb_samples_train = 10  # number of audio files used for training
nb_samples_test = 10  # number of audio files used for testing

# indices of the audio files used for training
index_train = list(range(5)) + list(range(2590, 2595))

n_processes = 10  # multiprocessing.cpu_count()  # number of workers to use for evaluating the fitness
n_generation = 300  # number of generations

train_loader = ASVDataset(length=None,
                          is_train=True,
                          is_eval=False,
                          index_list=index_train)
test_loader = ASVDataset(length=None,
                         is_train=False,
                         is_eval=False,
                         index_list=index_train)
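# Note: index_list reuses index_train here, so this "test" loader draws the
# same index positions, only from the dev partition
# (is_train=False, is_eval=False) rather than the training partition.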

trainloader = []
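# Editor's note: each utterance is reduced to one flat feature vector:
# librosa.feature.mfcc returns an (n_mfcc, n_frames) matrix, np.ravel
# flattens it row-major, and whiten (presumably scipy.cluster.vq.whiten)
# rescales the result to unit variance before it reaches the network.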
for data in train_loader:
    inputs, output = data[0], data[2]
    inputs = np.ravel(librosa.feature.mfcc(y=inputs, sr=SAMPLING_RATE))
    inputs = whiten(inputs)
    trainloader.append((inputs, output))

testloader = []
for data in test_loader:
    inputs, output = data[0], data[2]
    inputs = np.ravel(librosa.feature.mfcc(y=inputs, sr=SAMPLING_RATE))
    inputs = whiten(inputs)
    testloader.append((inputs, output))

if __name__ == '__main__':
    # Load pre-trained genomes for the ensemble (net_b is loaded but not
    # included in the list below).
    net_best = pickle.load(open('best_genome_eoc_batch_128_c3', 'rb'))
    net_ = pickle.load(open('best_genome_eoc_64_cqt_c3', 'rb'))
    net_b = pickle.load(open('best_genome_eoc_batch_128_nfft_1024', 'rb'))

    net = [net_best, net_]

    # Build one recurrent phenotype per genome in the ensemble.
    aggregate_net = [
        neat.nn.RecurrentNetwork.create(genome, config) for genome in net
    ]

    eval_512 = pickle.load(open('dataset_eval_mfcc_512', 'rb'))
    eval_1024 = pickle.load(open('dataset_eval_mfcc_1024', 'rb'))
    eval_2048 = pickle.load(open('dataset_eval_mfcc_2048', 'rb'))

    eval_cqt = ASVDataset(is_train=False,
                          is_eval=True,
                          do_chroma_cqt=True,
                          nb_samples=80000)

    eval_dataset = [eval_2048, eval_cqt]

    eer = evaluate_different_dataset(aggregate_net, eval_dataset)
    """print("\n")
    print("**** equal error rate train = {}  ****".format(train_eer))

    print("\n")
    print("**** equal error rate dev = {}  ****".format(dev_eer))"""

    print("\n")
    print("**** equal error rate = {}  ****".format(eer))
    """
    test_seen_classes = []
Example #7
    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return eer


if __name__ == '__main__':
    # Determine path to configuration file. This path manipulation is
    # here so that the script will run successfully regardless of the
    # current working directory.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'neat.cfg')

    train_set = ASVDataset(is_train=True,
                           is_eval=False,
                           nb_samples=25380,
                           do_mfcc=True)
    dev_set = ASVDataset(is_train=False,
                         is_eval=False,
                         nb_samples=24844,
                         do_mfcc=True)
    eval_set = ASVDataset(is_train=False,
                          is_eval=True,
                          nb_samples=80000,
                          do_mfcc=True)

    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_path)
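    # neat.Config bundles the default genome / reproduction / species-set /
    # stagnation implementations with the parameters parsed from neat.cfg;
    # it is the same object RecurrentNetwork.create(genome, config) takes
    # alongside a genome elsewhere on this page.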

    winner = pickle.load(
Example #8
    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return eer


if __name__ == '__main__':
    # Determine path to configuration file. This path manipulation is
    # here so that the script will run successfully regardless of the
    # current working directory.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'neat.cfg')

    # n_fft_list = [512, 1024, 2048]
    train_loader = ASVDatasetshort(None, nb_samples=nb_samples_train,
                                   do_lfcc=True, do_standardize=True)
    dev_loader = ASVDataset(None, is_train=False, is_eval=False,
                            nb_samples=25000, do_lfcc=True,
                            do_standardize=True)
    eval_loader = ASVDataset(None, nb_samples=72000, random_samples=True,
                             is_train=False, is_eval=True,
                             do_standardize=True, do_lfcc=True)

    eer_list = []
    eer_list_eval = []
    winner_list = []
    stats_list = []

    for iterations in range(20):
        print("iterations number =", iterations)
        winner, config, stats = run(config_path, n_generation)
        winner_net = neat.nn.RecurrentNetwork.create(winner, config)
        visualize.plot_stats(stats, ylog=False, view=True)