Example #1
    def create_net(self):
        from .vgg_thin import resnet_2D_v1
        import keras
        import wandb  # assumed available: hyperparameters are read from wandb.config below
        from keras.layers import Conv2D, AveragePooling2D, Flatten, Dense
        from keras.models import Model, Sequential
        
        bottleneck_dim = 512
        l2_regularization = 0.01

        # Build the Thin ResNet34 trunk; resnet_2D_v1 returns the model
        # input tensor and the trunk's output feature map
        inputs, x = resnet_2D_v1(self.input, mode='train')
        
        from keras import backend
        # channels-first layout for the layers created below
        backend.set_image_data_format('channels_first')

        x_fc = Conv2D(bottleneck_dim, (7, 1),
            strides=(1, 1),
            activation='relu',
            kernel_initializer='orthogonal',
            use_bias=True, trainable=True,
            padding='same',
            kernel_regularizer=keras.regularizers.l2(l2_regularization),
            bias_regularizer=keras.regularizers.l2(l2_regularization),
            name='x_fc')(x)

        x = AveragePooling2D((1, 5), strides=(1, 1), name='avg_pool')(x_fc)
        x = Flatten()(x)

        x = keras.layers.Dense(bottleneck_dim, activation='relu',
                               kernel_initializer='orthogonal',
                               use_bias=True, trainable=True,
                               kernel_regularizer=keras.regularizers.l2(l2_regularization),
                               bias_regularizer=keras.regularizers.l2(l2_regularization),
                               name='fc6')(x)

        # Wrap the project's final layers in a small Sequential head
        dense_model = Sequential()
        add_final_layers(dense_model, self.config)

        x = dense_model(x)
        model = Model(inputs, x)

        adam = keras.optimizers.Adam(
            lr=wandb.config.learning_rate, # 0.0001 @ VGG
            beta_1=wandb.config.beta_1,
            beta_2=wandb.config.beta_2,
            epsilon=wandb.config.epsilon,
            decay=wandb.config.decay
        )

        loss_function = get_loss(self.config)
        model.compile(loss=loss_function, optimizer=adam, metrics=['accuracy'])
        model.summary()

        return model
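All of the examples on this page call two helpers, add_final_layers and get_loss, that come from the surrounding project and are not shown here. A minimal sketch of the assumed interface (the config section and keys are hypothetical placeholders, not the project's actual schema):

# Sketch only: assumed behaviour of the project helpers used by every
# example on this page; the real implementations are not shown here.
from keras.layers import Activation, Dense


def add_final_layers(model, config):
    # Assumed: appends the classification head, one unit per target class,
    # followed by a softmax.
    n_classes = config.getint('net', 'n_classes')  # hypothetical key
    model.add(Dense(n_classes))
    model.add(Activation('softmax'))


def get_loss(config):
    # Assumed: maps a config entry to a Keras loss identifier.
    return config.get('net', 'loss', fallback='categorical_crossentropy')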
Example #2
    def create_net(self):
        model = Sequential()
        model.add(
            Bidirectional(LSTM(self.n_hidden1, return_sequences=True),
                          input_shape=self.input))
        model.add(Dropout(0.50))
        model.add(Bidirectional(LSTM(self.n_hidden2)))
        model.add(Dense(self.n_speakers * 10))
        model.add(Dropout(0.25))
        model.add(Dense(self.n_speakers * 5))
        add_final_layers(model, self.config)

        loss_function = get_loss(self.config)
        adam = keras.optimizers.Adam(lr=self.adam_lr,
                                     beta_1=self.adam_beta_1,
                                     beta_2=self.adam_beta_2,
                                     epsilon=self.adam_epsilon,
                                     decay=self.adam_decay)
        model.compile(loss=loss_function, optimizer=adam, metrics=['accuracy'])
        return model
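The input_shape passed to the first Bidirectional layer, self.input, is assumed to be a (timesteps, features) tuple, which is what Keras recurrent layers expect. A self-contained illustration with hypothetical sizes:

from keras.layers import Bidirectional, Dense, LSTM
from keras.models import Sequential

# Hypothetical sizes: 100 spectrogram frames of 128 frequency bins each,
# 256 LSTM units per direction, 10 output classes.
model = Sequential()
model.add(Bidirectional(LSTM(256, return_sequences=True),
                        input_shape=(100, 128)))
model.add(Bidirectional(LSTM(256)))
model.add(Dense(10, activation='softmax'))
model.summary()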
Example #3
# add_final_layers and get_loss are assumed to be provided by the
# surrounding module (see the sketch after Example #1)
from keras.layers import BatchNormalization, Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.optimizers import Adadelta


def create_network_n_speakers(num_speakers, config):
    # Read parameters from config
    seg_size = config.getint('pairwise_kldiv', 'seg_size')
    spectrogram_height = config.getint('pairwise_kldiv', 'spectrogram_height')
    lr = config.getfloat('pairwise_kldiv', 'adadelta_learning_rate')
    rho = config.getfloat('pairwise_kldiv', 'adadelta_rho')
    epsilon = config.getfloat('pairwise_kldiv', 'adadelta_epsilon')

    # Initialize model
    model = Sequential()

    # convolution layer 1
    model.add(
        Conv2D(filters=32,
               kernel_size=(4, 4),
               activation='relu',
               input_shape=(1, seg_size, spectrogram_height),
               data_format='channels_first'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(4, 4), strides=(2, 2)))

    # convolution layer 2
    model.add(Conv2D(filters=64, kernel_size=(4, 4), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(4, 4), strides=(2, 2)))

    # dense layer
    model.add(Flatten())
    model.add(Dense(units=(num_speakers * 10), activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(rate=0.5))
    model.add(Dense(units=(num_speakers * 5), activation='relu'))
    add_final_layers(model, config)

    loss_function = get_loss(config)

    # Create Optimizer
    adadelta = Adadelta(lr=lr, rho=rho, epsilon=epsilon, decay=0.0)

    # Compile model
    model.compile(loss=loss_function, optimizer=adadelta, metrics=['accuracy'])

    return model
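A usage sketch for this function, assuming a ConfigParser whose pairwise_kldiv section carries the five keys read above (all values here are made up), and that add_final_layers and get_loss read whatever sections they need from the same object:

from configparser import ConfigParser

# Hypothetical configuration; the keys match the reads above, the values
# are placeholders.
config = ConfigParser()
config['pairwise_kldiv'] = {
    'seg_size': '100',
    'spectrogram_height': '128',
    'adadelta_learning_rate': '1.0',
    'adadelta_rho': '0.95',
    'adadelta_epsilon': '1e-07',
}

model = create_network_n_speakers(num_speakers=10, config=config)
model.summary()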
Example #4
    def create_net__classification_component(self, model):
        model.add(Dense(self.config.getint('pairwise_lstm', 'n_dense1')))
        model.add(Dropout(0.25))

        model.add(Dense(self.config.getint('pairwise_lstm', 'n_dense2')))

        # Append the final classification layers
        add_final_layers(model, self.config)

        loss_function = get_loss(self.config)

        adam = keras.optimizers.Adam(
            lr=self.config.getfloat('pairwise_lstm', 'adam_lr'),
            beta_1=self.config.getfloat('pairwise_lstm', 'adam_beta_1'),
            beta_2=self.config.getfloat('pairwise_lstm', 'adam_beta_2'),
            epsilon=self.config.getfloat('pairwise_lstm', 'adam_epsilon'),
            decay=self.config.getfloat('pairwise_lstm', 'adam_decay'))

        return model, loss_function, adam
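Unlike the other examples, this method returns the model uncompiled, together with the loss function and optimizer, so compilation happens at the call site. A plausible (assumed) caller:

# Assumed call site; `net` is a hypothetical instance of the enclosing
# class and `model` the partially built Sequential passed into it.
model, loss_function, adam = net.create_net__classification_component(model)
model.compile(loss=loss_function, optimizer=adam, metrics=['accuracy'])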
Example #5
    def create_net(self):
        model = Sequential()

        model.add(
            Bidirectional(LSTM(self.n_hidden1, return_sequences=True),
                          input_shape=self.input))
        model.add(Dropout(0.50))
        model.add(Bidirectional(LSTM(self.n_hidden2)))

        model.add(Dense(self.dense_factor * 10))
        model.add(Dropout(0.25))
        model.add(Dense(self.dense_factor * 5))
        add_final_layers(model, self.config)

        loss_function = get_loss(self.config)
        adam = keras.optimizers.Adam(lr=0.001,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=1e-08,
                                     decay=0.0)

        model.compile(loss=loss_function, optimizer=adam, metrics=['accuracy'])
        model.summary()
        return model