# NOTE: imports assume Keras 2.x; `mfom` is this project's module that
# provides the custom MFoM layers used below
from keras.layers import Input, Dense, Activation
from keras.models import Model
# from keras import constraints, regularizers as regs  # for the hooks below
import mfom


def mfom_model(in_dim, nclass):
    # input block
    feat_input = Input(shape=(in_dim, ), name='main_input')
    # layer 1
    x = Dense(10, name='dense1')(feat_input)
    x = Activation(activation='sigmoid', name='act1')(x)
    # layer 2
    x = Dense(10, name='dense2')(x)
    x = Activation(activation='sigmoid', name='act2')(x)
    # output layer
    x = Dense(nclass, name='pre_activation')(x)
    y_pred = Activation(activation='sigmoid', name='output')(x)

    # === MFoM head ===
    # misclassification layer: takes the ground-truth labels Y as an
    # auxiliary input
    y_true = Input(shape=(nclass,), name='y_true')
    psi = mfom.UvZMisclassification(name='uvz_misclass')([y_true, y_pred])

    # class loss-function layer (smooth error counter)
    # NOTE: you may want to add regularization or constraints on alpha/beta
    out = mfom.SmoothErrorCounter(
        name='smooth_error_counter',
        # alpha_constraint=constraints.min_max_norm(min_value=-4., max_value=4.),
        # alpha_regularizer=regs.l1(0.001),
        # beta_constraint=constraints.min_max_norm(min_value=-4., max_value=4.),
        # beta_regularizer=regs.l1(0.001)
    )(psi)
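    # Background (an assumption about what SmoothErrorCounter computes,
    # following the MFoM literature): psi_k scores how badly class k is
    # misclassified relative to the competing classes, and the layer maps
    # it to a smooth 0/1 error count
    #     l_k = 1 / (1 + exp(-alpha * (psi_k + beta)))
    # with trainable alpha and beta; this is why the commented hooks above
    # constrain/regularize exactly those two parameters.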

    # build the model: the labels go in as a second input
    model = Model(inputs=[y_true, feat_input], outputs=out)
    return model
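
# A minimal usage sketch, assuming `obj` is this project's objectives module
# (it provides `mfom_eer_normalized`, used in Example #3 below) and that the
# label matrix Y is fed both as the auxiliary input and as the fit target:
if __name__ == '__main__':
    import numpy as np
    import obj  # project-local objectives module (assumed import path)

    n, d, k = 256, 20, 5
    X = np.random.rand(n, d).astype('float32')
    Y = (np.random.rand(n, k) > 0.5).astype('float32')

    model = mfom_model(in_dim=d, nclass=k)
    model.compile(loss=obj.mfom_eer_normalized, optimizer='adam')
    model.fit([Y, X], Y, batch_size=32, epochs=2)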
Example #2
    def _compile_model(self, input, output, params):
        """
        Compile the network structure with the loss and optimizer
        specified in `params`.
        """
        # ===
        # choose loss
        # ===
        if params['loss'] in obj.MFOM_OBJECTIVES:
            # add the two MFoM head layers: misclassification measure
            # and smooth error counter
            y_true = Input(shape=(self.nclass,), name='y_true')
            psi = mfom.UvZMisclassification(name='uvz_misclass')([y_true, output])
            y_pred = mfom.SmoothErrorCounter(name='smooth_error_counter')(psi)

            # MFoM needs the labels as an extra input during training
            input = [y_true, input]
            output = y_pred
            loss = obj.MFOM_OBJECTIVES[params['loss']]
        elif params['loss'] == obj.mfom_eer_embed.__name__:
            loss = obj.mfom_eer_embed
        else:
            loss = params['loss']
        # ===
        # choose optimizer
        # ===
        if params['optimizer'] == 'adam':
            optimizer = Adam(lr=params['learn_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        elif params['optimizer'] == 'sgd':
            optimizer = SGD(lr=params['learn_rate'], decay=1e-6, momentum=0.9, nesterov=True)
        else:
            optimizer = params['optimizer']

        self.model = Model(inputs=input, outputs=output)
        self.model.compile(loss=loss, optimizer=optimizer)
        self.model.summary()
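
    # For reference, a hypothetical `params` dict for this method (the exact
    # keys of obj.MFOM_OBJECTIVES are project-specific; 'mfom_eer_uvz' is an
    # assumed example key):
    #   params = {'loss': 'mfom_eer_uvz', 'optimizer': 'adam', 'learn_rate': 1e-3}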
Example #3
    nclass = 14

    # input: filterbank (fbank) features, shape (feat_dim, time_step)
    feat_dim, time_step = 28, 100
    feat_input = Input(shape=(feat_dim, time_step), name='main_input')
    # put time on axis 1 so TimeDistributed iterates over frames
    x = Permute((2, 1))(feat_input)
    # NOTE: with the activation left commented out, the stacked Dense
    # layers compose to a single linear map per frame
    for _f in [256, 64]:
        x = TimeDistributed(Dense(_f))(x)
        # x = Activation(activation='elu')(x)
        x = Dropout(0.5)(x)
    x = TimeDistributed(Dense(nclass))(x)
    y_pred = Activation(activation='tanh', name='output')(x)

    # misclassification layer: takes framewise ground-truth labels Y
    y_true = Input(shape=(time_step, nclass), name='y_true')
    psi = mfom.UvZMisclassification(name='uvz_misclass')([y_true, y_pred])

    # class loss-function layer (smooth error counter)
    out = mfom.SmoothErrorCounter(name='smooth_error_counter')(psi)
    # out = BatchNormalization()(psi)

    # build and compile the model
    model = Model(inputs=[y_true, feat_input], outputs=out)
    model.compile(loss=obj.mfom_eer_normalized,
                  optimizer='Adadelta')  # Adam, Adadelta
    model.summary()
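
    # NOTE: `generate_dataset` used below is a project-local helper that is
    # not shown in this snippet; a hypothetical stand-in consistent with the
    # call site (random fbank-like frames, binary multi-label targets) is:
    import numpy as np

    def generate_dataset(output_dim, num_examples, feat_dim=28):
        X = np.random.rand(num_examples, feat_dim).astype('float32')
        Y = (np.random.rand(num_examples, output_dim) > 0.5).astype('float32')
        return X, Y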

    # train
    all_X, all_Y = [], []
    for i in range(10000 // time_step):
        X, Y = generate_dataset(output_dim=nclass, num_examples=time_step)