def evaluate(net, data_loader):
    """
    Compute the accuracy and the equal error rate (EER) for the multi-class setup.
    :param net: network
    :param data_loader: test dataset; each item provides the inputs in data[0] and the class label in data[2]
    :return: accuracy and eer
    """
    correct = 0
    total = 0
    net.reset()
    target_scores = []
    non_target_scores = []
    for data in tqdm(data_loader):
        inputs, output = data[0], data[2]
        mask, scores = gate_activation_ce(net, inputs)
        selected_score = scores[mask]
        if selected_score.size == 0:
            # no frame selected by the gate: fall back to a uniform distribution over the 7 classes
            scores = 1 / 7 * np.ones(7)
        else:
            xo = np.sum(selected_score, axis=0) / selected_score.size
            scores = softmax(xo)
        total += 1
        correct += (scores.argmax() == output)
        if output == 0:
            target_scores.append(scores[0])
        else:
            non_target_scores.append(scores[0])

    target_scores = np.array(target_scores)
    non_target_scores = np.array(non_target_scores)

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return float(correct) / total, eer
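# Note: the `softmax` used above is not defined in this snippet; it is assumed to be a
# standard softmax over the class scores (e.g. scipy.special.softmax). A minimal,
# numerically stable sketch under that assumption:
import numpy as np

def softmax(x):
    """Softmax over the last axis (assumed helper, not part of the original code)."""
    z = x - np.max(x, axis=-1, keepdims=True)  # shift for numerical stability
    e = np.exp(z)
    return e / np.sum(e, axis=-1, keepdims=True)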
def evaluate(net, data_loader):
    """
    compute the eer equal error rate
    :param net: network
    :param data_loader: test dataset, contains audio files in a numpy array format
    :return eer
    """
    target_scores = []
    non_target_scores = []
    for data in tqdm(data_loader):
        net.reset()
        sample_input, output = data[0], data[1]
        xo = gate_lfcc(net, sample_input)
        if output == 1:
            target_scores.append(xo)
        else:
            non_target_scores.append(xo)

    target_scores = np.array(target_scores)
    non_target_scores = np.array(non_target_scores)

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return eer
def evaluate_acc_eer(net, data_loader):
    """
    compute the eer equal error rate and the accuracy
    :param net: network
    :param data_loader: test dataset, contains audio files in a numpy array format
    :return eer and accuracy
    """
    correct = 0
    total = 0
    net.reset()
    target_scores = []
    non_target_scores = []
    for data in tqdm(data_loader):
        sample_input, output = data[0], data[1]
        sample_input = whiten(sample_input)
        mask, score = gate_activation(net, sample_input)
        selected_score = score[mask]
        if selected_score.size == 0:
            xo = 0.5
        else:
            xo = np.sum(selected_score) / selected_score.size
        total += 1
        correct += ((xo > 0.5) == output)
        if output == 1:
            target_scores.append(xo)
        else:
            non_target_scores.append(xo)

    target_scores = np.array(target_scores)
    non_target_scores = np.array(non_target_scores)

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return float(correct) / total, eer
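# Note: `whiten` is used above but not defined in these snippets. It is assumed to be a
# per-feature standardization of the input (scipy.cluster.vq.whiten, which only divides by
# the standard deviation, is another possibility). A minimal sketch under the first assumption:
import numpy as np

def whiten(x):
    """Zero-mean, unit-variance normalization along the first axis (assumed helper)."""
    return (x - np.mean(x, axis=0)) / (np.std(x, axis=0) + 1e-8)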
def evaluate_eer_acc(g,
                     conf,
                     data,
                     backprop=False,
                     use_gate=True,
                     loading_bar=True):
    """
    returns the equal error rate and the accuracy
    """

    predictions, targets = feed_and_predict(data,
                                            g,
                                            conf,
                                            backprop,
                                            use_gate=use_gate,
                                            loading_bar=loading_bar)

    accuracy = np.mean(np.abs(predictions - targets) < 0.5)

    target_scores = predictions[targets == 1]
    non_target_scores = predictions[targets == 0]

    target_scores = np.array(target_scores)
    non_target_scores = np.array(non_target_scores)

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return eer, accuracy
def eval_eer_gc(g, config, batch):
    jitter = 1e-8
    net = neat_local.nn.RecurrentNet.create(g,
                                            config,
                                            device="cpu",
                                            dtype=torch.float32)
    net.reset()
    input, output = batch  # input: batch x t x BIN; output: batch
    input = input.transpose(0, 1)  # input: t x batch x BIN
    batch_size = output.shape[0]
    norm = torch.zeros(batch_size)
    contribution = torch.zeros(batch_size)
    for input_t in input:
        xo = net.activate(input_t)  # batch x 2
        score = xo[:, 1]  # batch
        confidence = xo[:, 0]  # batch
        contribution += score * confidence  # batch
        norm += confidence  # batch

    predictions = contribution / (norm + jitter)  # batch

    target_scores = predictions[output == 1].numpy()
    non_target_scores = predictions[output == 0].numpy()

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)
    return eer
def eval_genomes(genomes, config_):
    """
    Most important part of NEAT since it is here that we adapt NEAT to our problem.
    We tell what is the phenotype of a genome and how to calculate its fitness (same idea than a loss)
    :param config_: config from the config file
    :param genomes: list of all the genomes to get evaluated
    """
    for _, genome in tqdm(genomes):
        net = neat.nn.RecurrentNetwork.create(genome, config_)
        target_scores = []
        non_target_scores = []
        for data in trainloader:
            inputs, output = data[0], data[1]
            net.reset()
            mask, score = gate_activation(net, inputs)
            selected_score = score[mask]
            if selected_score.size == 0:
                xo = 0.5
            else:
                xo = np.sum(selected_score) / selected_score.size
            if output == 1:
                target_scores.append(xo)
            else:
                non_target_scores.append(xo)

        target_scores = np.array(target_scores)
        non_target_scores = np.array(non_target_scores)

        pmiss, pfa = rocch(target_scores, non_target_scores)
        eer = rocch2eer(pmiss, pfa)
        genome.fitness = 2 * (.5 - eer)  # fitness in [0, 1]: 1 for a perfect EER of 0, 0 for a chance-level EER of 0.5
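# eval_genomes has the (genomes, config) signature expected by neat-python's Population.run.
# A typical serial training loop, assuming `config_path` points to a valid NEAT config file
# and `trainloader` is defined at module level as in the snippet above:
import neat

if __name__ == "__main__":
    config_ = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                          neat.DefaultSpeciesSet, neat.DefaultStagnation,
                          config_path)  # config_path is assumed to be defined elsewhere
    population = neat.Population(config_)
    population.add_reporter(neat.StdOutReporter(True))
    winner = population.run(eval_genomes, 100)  # evolve for 100 generations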
def evaluate(net, data_loader):
    """
    Compute the accuracy and the equal error rate (EER) using gate_average scoring.
    :param net: network
    :param data_loader: test dataset, contains audio files in a numpy array format
    :return: target scores, non-target scores, accuracy and eer
    """
    correct = 0
    total = 0
    net.reset()
    target_scores = []
    non_target_scores = []
    for data in tqdm(data_loader):
        inputs, output = data[0], data[1]
        """
        score_weight, score = gate_activation(net, inputs)
        selected_score = score[mask]
        selected_score = gate_activation(net, inputs)
        if selected_score.size == 0:
            xo = 0.5
        else:
            xo = np.sum(selected_score) / selected_score.size
        """
        selected_score = gate_average(net, inputs)
        xo = np.sum(selected_score) / selected_score.size
        total += 1
        correct += ((xo > 0.5) == output)
        if output == 1:
            target_scores.append(xo)
        else:
            non_target_scores.append(xo)

    target_scores = np.array(target_scores)
    non_target_scores = np.array(non_target_scores)

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return target_scores, non_target_scores, float(correct) / total, eer
def compute_eer(target_scores, non_target_scores):
    """
    Return the equal error rate from the scores
    :param target_scores: numpy array scores of the bonafide files
    :param non_target_scores: numpy array scores of the spoofed files
    :return: eer equal error rate
    """
    target_scores = np.array(target_scores)
    non_target_scores = np.array(non_target_scores)
    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)
    return eer
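# Illustrative use of compute_eer with made-up scores (bonafide scores should be high and
# spoofed scores low for a well-trained model):
example_eer = compute_eer([0.9, 0.8, 0.7, 0.65], [0.4, 0.35, 0.6, 0.1])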
def evaluate_genome(g, conf, batches):
    tgs = []
    ntgs = []
    for batch in batches:
        tg, ntg = evaluate_batch(g, conf, batch)
        tgs.append(tg)
        ntgs.append(ntg)
    target_scores = np.concatenate(tgs)
    non_target_scores = np.concatenate(ntgs)

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return eer
def eval_genome_eer(g, conf, batch, backprop=False, use_gate=True):
    """
    Same than eval_genomes() but for 1 genome. This function is used for parallel evaluation.
    The input is already preprocessed with shape batch_size x t x bins
    t: index of the windows used for the pre-processing
    bins: number of features extracted --> corresponds to the number of input neurons of the recurrent net
    Here the fitness function is the equal error rate
    """

    # inputs: batch_size x t x bins
    # outputs: batch_size
    inputs, targets = batch
    # inputs: t x batch_size x bins
    inputs = inputs.transpose(0, 1)

    net = neat_local.nn.RecurrentNet.create(g,
                                            conf,
                                            device="cpu",
                                            dtype=torch.float32)
    assert not backprop
    net.reset(len(targets))

    contribution = torch.zeros(len(targets))
    norm = torch.zeros(len(targets))
    for input_t in inputs:
        # input_t: batch_size x bins

        xo = net.activate(input_t)  # batch_size x 2
        score = xo[:, 1]
        confidence = xo[:, 0] if use_gate else torch.ones_like(score)
        contribution += score * confidence  # batch_size
        norm += confidence  # batch_size

    jitter = 1e-8
    prediction = contribution / (norm + jitter)  # batch_size

    target_scores = prediction[targets == 1].numpy()  # select scores where target == 1 (bonafide)
    non_target_scores = prediction[targets == 0].numpy()  # select scores where target == 0 (spoofed)

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return 2 * (.5 - eer)  # map the EER (0 is best, 0.5 is chance level) to a fitness in [0, 1]
def eval_genome(genome, config, batch_data):
    """
    Most important part of NEAT since it is here that we adapt NEAT to our problem.
    We tell what is the phenotype of a genome and how to calculate its fitness
    (same idea than a loss)
    :param config: config from the config file
    :param genome: one genome to get evaluated
    :param batch_data: data to use to evaluate the genomes
    :return fitness: returns the fitness of the genome
    this version is intented to use ParallelEvaluator and should be much faster
    """
    net = neat.nn.RecurrentNetwork.create(genome, config)
    target_scores = []
    non_target_scores = []
    for data in batch_data:
        inputs, output = data[0], data[1]
        inputs = whiten(inputs)
        net.reset()
        """
        score_weight, score = gate_activation(net, inputs)
        selected_score = score[mask]
        selected_score = gate_activation(net, inputs)
        if selected_score.size == 0:
            xo = 0.5
        else:
            xo = np.sum(selected_score) / selected_score.size
        """
        xo = gate_mfcc(net, inputs)
        if output == 1:
            target_scores.append(xo)
        else:
            non_target_scores.append(xo)
            
    target_scores = np.array(target_scores)
    non_target_scores = np.array(non_target_scores)

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return 2 * (.5 - eer)  # map the EER (0 is best, 0.5 is chance level) to a fitness in [0, 1]
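# eval_genome returns the fitness instead of setting genome.fitness, which matches the
# contract of neat-python's ParallelEvaluator. A possible wiring, assuming `batch_data`,
# `config` and `population` are already built as in the surrounding snippets:
import neat
from functools import partial
from multiprocessing import cpu_count

pe = neat.ParallelEvaluator(cpu_count(), partial(eval_genome, batch_data=batch_data))
winner = population.run(pe.evaluate, 100)  # 100 generations, genomes evaluated in parallel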
def evaluate(g, conf, data):
    """
    returns the equal error rate, the accuracy (both multi class and binary class) and the confusion matrix
    """
    predictions, targets = feed_and_predict_ce(data, g, conf)
    print(predictions)
    target_scores = predictions[targets == 0]
    non_target_scores = predictions[targets > 0]
    pmiss, pfa = rocch(target_scores[:, 0], non_target_scores[:, 0])
    eer = rocch2eer(pmiss, pfa)

    predictions = np.argmax(predictions, axis=1)
    accuracy = (predictions == targets).sum() / len(data)

    c_matrix = confusion_matrix(targets, predictions, normalize="pred")

    predictions[predictions > 0] = 1
    targets[targets > 0] = 1

    anti_spoofing_accuracy = (predictions == targets).sum() / len(data)

    return eer, accuracy, anti_spoofing_accuracy, c_matrix
def eer_gc(genome, config, validation_set):
    """
    function to use for selecting the grand xhampion of each generation
    :param genome: genome
    one genome to get evaluated
    :param config: file
    configuration file
    :param validation_set: ASVDataset
    data use for validation
    :return:
    """
    net = neat.nn.RecurrentNetwork.create(genome, config)
    target_scores = []
    non_target_scores = []
    for data in tqdm(validation_set):
        inputs, output = data[0], data[1]
        inputs = whiten(inputs)
        net.reset()
        mask, score = gate_activation(net, inputs)
        selected_score = score[mask]
        if selected_score.size == 0:
            xo = 0.5
        else:
            xo = np.sum(selected_score) / selected_score.size
        if output == 1:
            target_scores.append(xo)
        else:
            non_target_scores.append(xo)

    target_scores = np.array(target_scores)
    non_target_scores = np.array(non_target_scores)

    pmiss, pfa = rocch(target_scores, non_target_scores)
    eer = rocch2eer(pmiss, pfa)

    return eer