# Example #1
# Batch preprocessor that downsamples the audio and applies whitening
# (`preprocessor` and `downsampling` are defined earlier, outside this chunk).
whiten_downsample = preprocessor(downsampling, whitening=True)

###################
# Create datasets #
###################
# Training data is sampled stochastically; validation is deterministic so that
# evaluation numbers are repeatable across runs.
train = LibriSpeechDataset(training_set, n_seconds)
valid = LibriSpeechDataset(validation_set, n_seconds, stochastic=False)

# Lazily apply whitening/downsampling to every verification batch.
train_generator = map(whiten_downsample, train.yield_verification_batches(batchsize))
valid_generator = map(whiten_downsample, valid.yield_verification_batches(batchsize))

################
# Define model #
################
# Convolutional encoder; presumably shared across both branches of the
# siamese network built below — confirm in build_siamese_net.
encoder = get_baseline_convolutional_encoder(model_n_filters,
                                             model_embedding_dimension)
# Inputs are 1-D windows of `input_length` samples with a single channel.
siamese = build_siamese_net(encoder, (input_length, 1))
# Clip gradient norms at 1 to guard against exploding gradients.
opt = Adam(clipnorm=1.)
siamese.compile(loss=contrastive_loss, optimizer=opt, metrics=['accuracy'])

#################
# Training Loop #
#################
# NOTE(review): this call is truncated in this chunk — the remaining keyword
# arguments and closing paren are not visible here.
siamese.fit_generator(
    generator=train_generator,
    steps_per_epoch=evaluate_every_n_batches,
    validation_data=valid_generator,
    validation_steps=100,
    epochs=num_epochs,
    # Fan batch generation out across all available CPU cores.
    workers=multiprocessing.cpu_count(),
    # NOTE(review): plain generator expressions generally cannot be pickled
    # for worker processes — confirm use_multiprocessing=True actually works
    # with these generators.
    use_multiprocessing=True,
# Example #2
        # Map raw speaker ids to contiguous class indices, keep the column
        # shape (batch, 1), then one-hot encode to (batch, num_classes).
        y = np.array([speaker_id_mapping[i] for i in y[:, 0]])[:, np.newaxis]
        return to_categorical(y, num_classes)

    # Closure over (num_classes, speaker_id_mapping) from the enclosing
    # factory, whose `def` line lies outside this chunk.
    return label_preprocessor_


# Pair an instance preprocessor (downsampling) with a label preprocessor that
# maps raw speaker ids to one-hot class vectors.
instance_fn = preprocess_instances(downsampling)
label_fn = label_preprocessor(train.num_classes(), speaker_id_mapping)
batch_preprocessor = BatchPreProcessor('classifier', instance_fn, label_fn)

# Keras Sequence yielding preprocessed batches of `batchsize` examples.
train_generator = BatchedSequence(train, batch_preprocessor, batchsize)

################
# Define model #
################
# Convolutional encoder re-purposed as a closed-set speaker classifier over
# 1-D audio windows of `input_length` samples with a single channel.
classifier = get_baseline_convolutional_encoder(filters, embedding_dimension,
                                                (input_length, 1))
# Add output classification layer
classifier.add(Dense(train.num_classes(), activation='softmax'))

# Clip gradient norms at 1 to guard against exploding gradients.
opt = Adam(clipnorm=1.)
classifier.compile(loss='categorical_crossentropy',
                   optimizer=opt,
                   metrics=['accuracy'])
#plot_model(classifier, show_shapes=True, to_file=PATH + '/plots/classifier.png')
# Model.summary() prints the layer table itself and returns None; wrapping it
# in print() emitted a spurious trailing "None" line.
classifier.summary()

#################
# Training Loop #
#################
# NOTE(review): this call is truncated in this chunk — the remaining keyword
# arguments and closing paren are not visible here.
classifier.fit_generator(
    generator=train_generator,
# Example #3
                           # Continuation of a constructor call (presumably
                           # LibriSpeechDataset) whose opening line lies
                           # outside this chunk.
                           n_seconds,
                           stochastic=False,
                           pad=pad)

# Siamese verification batches need only instance preprocessing — pairs come
# pre-labelled, so no label preprocessor is supplied.
batch_preprocessor = BatchPreProcessor('siamese',
                                       preprocess_instances(downsampling))

# Lazily preprocess verification batches for training and validation.
train_generator = map(batch_preprocessor, train.yield_verification_batches(batchsize))
valid_generator = map(batch_preprocessor, valid.yield_verification_batches(batchsize))

################
# Define model #
################
# Convolutional encoder with dropout; presumably shared across both branches
# of the siamese network — confirm in build_siamese_net.
encoder = get_baseline_convolutional_encoder(filters,
                                             embedding_dimension,
                                             dropout=dropout)
# Inputs are 1-D windows of `input_length` samples with a single channel;
# branch embeddings are compared with a uniform euclidean distance.
siamese = build_siamese_net(encoder, (input_length, 1),
                            distance_metric='uniform_euclidean')
# Clip gradient norms at 1 to guard against exploding gradients.
opt = Adam(clipnorm=1.)
siamese.compile(loss='binary_crossentropy',
                optimizer=opt,
                metrics=['accuracy'])
# plot_model(siamese, show_shapes=True, to_file=PATH + '/plots/siamese.png')
# Model.summary() prints the layer table itself and returns None; wrapping it
# in print() emitted a spurious trailing "None" line.
siamese.summary()

#################
# Training Loop #
#################
# NOTE(review): the callbacks list is truncated in this chunk — its entries
# and closing bracket are not visible here.
callbacks = [
    # First generate custom n-shot classification metric
# Example #4
# Stochastic training data; deterministic validation data for repeatable
# evaluation numbers.
train = LibriSpeechDataset(training_set, n_seconds)
valid = LibriSpeechDataset(validation_set, n_seconds, stochastic=False)

batch_preprocessor = BatchPreProcessor('siamese',
                                       preprocess_instances(downsampling))
# NOTE(review): these lazy streams are shared by every model trained in the
# hyperparameter sweep that follows — presumably yield_verification_batches
# is endless, otherwise later models would see an exhausted stream; confirm.
train_generator = map(batch_preprocessor, train.yield_verification_batches(batchsize))
valid_generator = map(batch_preprocessor, valid.yield_verification_batches(batchsize))

#################
# Training Loop #
#################
# Grid search over encoder width, embedding size and dropout rate; a fresh
# siamese model is built and trained for each combination.
for f, emb, drop in product(filters, embedding, dropout):
    # Define model
    encoder = get_baseline_convolutional_encoder(f, emb, dropout=drop)
    siamese = build_siamese_net(encoder, (input_length, 1))
    # Gradient clipping plus learning-rate decay for training stability.
    opt = Adam(clipnorm=1., decay=2e-5)
    siamese.compile(loss='binary_crossentropy',
                    optimizer=opt,
                    metrics=['accuracy'])

    # Train
    # Tag identifying this hyperparameter combination in logs/checkpoints.
    param_str = 'siamese__filters_{}__embed_{}__drop_{}'.format(f, emb, drop)
    print(param_str)
    # NOTE(review): this call is truncated in this chunk — the remaining
    # keyword arguments and closing paren are not visible here.
    siamese.fit_generator(
        generator=train_generator,
        steps_per_epoch=evaluate_every_n_batches,
        validation_data=valid_generator,
        validation_steps=100,
        epochs=num_epochs,