Example No. 1
# NOTE: the enclosing function head is not shown in this excerpt; the lines below
# are inferred from `return preprocessor_` and the call
# `preprocessor(downsampling, whitening=True)` further down.
def preprocessor(downsampling, whitening=False):
    def preprocessor_(batch):
        (i_1, i_2), labels = batch
        # Keep every `downsampling`-th sample along the time axis
        i_1 = i_1[:, ::downsampling, :]
        i_2 = i_2[:, ::downsampling, :]
        if whitening:
            i_1, i_2 = whiten(i_1), whiten(i_2)

        return [i_1, i_2], labels

    return preprocessor_


whiten_downsample = preprocessor(downsampling, whitening=True)

###################
# Create datasets #
###################
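# The validation split is loaded with stochastic=False so the same segments are
# scored at every evaluation.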
train = LibriSpeechDataset(training_set, n_seconds)
valid = LibriSpeechDataset(validation_set, n_seconds, stochastic=False)
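# Lazily apply whitening + downsampling to each verification batch.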
train_generator = (whiten_downsample(batch)
                   for batch in train.yield_verification_batches(batchsize))
valid_generator = (whiten_downsample(batch)
                   for batch in valid.yield_verification_batches(batchsize))

################
# Define model #
################
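# A single convolutional encoder is shared between both branches of the siamese
# network and trained with the contrastive loss.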
encoder = get_baseline_convolutional_encoder(model_n_filters,
                                             model_embedding_dimension)
siamese = build_siamese_net(encoder, (input_length, 1))
opt = Adam(clipnorm=1.)
siamese.compile(loss=contrastive_loss, optimizer=opt, metrics=['accuracy'])
Example No. 2
val_metrics = ['pooled_eer', 'accuracy', 'micro_f1']

# Derived parameters
input_length = int(window_size / downsampling)
param_str = 'siamese__filters_{}__embed_{}__drop_{}__pad={}'.format(
    filters, embedding_dimension, dropout, pad)

###################
# Create datasets #
###################
# TODO replace with Kaldi
data_dir = '/home/vano/wrkdir/datasets/LibriSpeech'
# data_dir = '/home/vano/wrkdir/projects_data/sre_2019/toy_dataset'

train = LibriSpeechDataset(data_dir, training_set, n_seconds, pad=pad)
valid = LibriSpeechDataset(data_dir,
                           validation_set,
                           n_seconds,
                           stochastic=False,
                           pad=pad)

batch_preprocessor = BatchPreProcessor('siamese',
                                       preprocess_instances(downsampling))
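# Apply the batch preprocessor lazily to every verification batch.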
train_generator = (batch_preprocessor(batch)
                   for batch in train.yield_verification_batches(batchsize))
valid_generator = (batch_preprocessor(batch)
                   for batch in valid.yield_verification_batches(batchsize))

################
# Define model #
################
Example No. 3
downsampling = 4
n_seconds = 3
validation_set = 'dev-clean'
siamese_model_path = PATH + '/models/n_seconds/siamese__nseconds_3.0__filters_128__embed_64__drop_0.0__r_0.hdf5'
classifier_model_path = PATH + '/models/baseline_classifier.hdf5'
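# Evaluation grid: k-way tasks for k = 2..20, with 1-shot and 5-shot support sets.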
k_way = list(range(2, 21, 1))
n_shot = [1, 5]
num_tasks = 1000
distance = 'dot_product'
results_path = PATH + '/logs/k-way_n-shot_accuracy_{}_{}.csv'.format(
    validation_set, distance)

###################
# Create datasets #
###################
valid = LibriSpeechDataset(validation_set, n_seconds, stochastic=False)
batch_preprocessor = BatchPreProcessor('siamese',
                                       preprocess_instances(downsampling))

#############
# Main Loop #
#############
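# Load the trained siamese verification model and the baseline classifier.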
siamese = load_model(siamese_model_path)
classifier = load_model(classifier_model_path)

with open(results_path, 'w') as f:
    print('method,n_correct,n_tasks,n_shot,k_way', file=f)

results = []
for k in k_way:
    for n in n_shot:
Example No. 4
num_evaluation_tasks = 500
n_shot_classification = 1
k_way_classification = 5


#################
# Training Loop #
#################
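# Train one siamese model per fragment length, repeating each configuration
# n_repeats times.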
for fragment_length in n_seconds:
    print('*' * 23)
    print('***** {:.1f} seconds *****'.format(fragment_length))
    print('*' * 23)
    input_length = int(LIBRISPEECH_SAMPLING_RATE * fragment_length / downsampling)  # input samples after downsampling

    # Create datasets
    train = LibriSpeechDataset(training_set, fragment_length, pad=True)
    valid = LibriSpeechDataset(validation_set, fragment_length, stochastic=False, pad=True)

    batch_preprocessor = BatchPreProcessor('siamese', preprocess_instances(downsampling))
    train_generator = (batch_preprocessor(batch) for batch in train.yield_verification_batches(batchsize))
    valid_generator = (batch_preprocessor(batch) for batch in valid.yield_verification_batches(batchsize))

    for repeat in range(n_repeats):
        # Define model
        encoder = get_baseline_convolutional_encoder(model_n_filters, model_embedding_dimension, dropout=model_dropout)
        siamese = build_siamese_net(encoder, (input_length, 1), distance_metric='uniform_euclidean')
        opt = Adam(clipnorm=1.)
        siamese.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

        # Train
        # Remaining format arguments inferred from the placeholders (embed, drop, repeat index).
        param_str = 'siamese__nseconds_{}__filters_{}__embed_{}__drop_{}__r_{}'.format(
            fragment_length, model_n_filters, model_embedding_dimension, model_dropout, repeat)
Example No. 5
@classmethod
def setUpClass(cls):
    # Load the dev-clean split once, shared by every test in the class.
    cls.dataset = LibriSpeechDataset('dev-clean', 3)