Example #1
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras import regularizers
from keras.initializers import glorot_uniform
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import accuracy_score

# MultiAdam, Stopwatch, one_hot, unhot, and one_hot_decision_function are
# project-specific helpers assumed to be importable from the surrounding
# code base.


def make_mlp(train_data, val_data, test_data, pretrained_weights, args):
    (X_train, y_train) = train_data
    (X_val, y_val) = val_data
    (X_test, y_test) = test_data
    (W, hb) = pretrained_weights
    # when pretrained weights/biases are given, use them to initialize the
    # first Dense layer so the MLP is fine-tuned rather than trained from
    # scratch
    dense_params = {}
    if W is not None and hb is not None:
        dense_params['weights'] = (W, hb)

    # define and initialize MLP model
    mlp = Sequential([
        Dense(args.n_hidden,
              input_shape=(784, ),
              kernel_regularizer=regularizers.l2(args.mlp_l2),
              kernel_initializer=glorot_uniform(seed=1111),
              **dense_params),
        Activation('sigmoid'),
        Dense(10, kernel_initializer=glorot_uniform(seed=2222)),
        Activation('softmax'),
    ])
    # MultiAdam is a project-specific Adam variant that applies per-layer
    # learning-rate multipliers (here to the two Dense layers)
    mlp.compile(optimizer=MultiAdam(lr=0.001,
                                    lr_multipliers={
                                        'dense_1': args.mlp_lrm[0],
                                        'dense_2': args.mlp_lrm[1]
                                    }),
                loss='categorical_crossentropy',
                metrics=['accuracy'])

    # train and evaluate classifier
    with Stopwatch(verbose=True) as s:
        early_stopping = EarlyStopping(monitor=args.mlp_val_metric,
                                       patience=12,
                                       verbose=2)
        reduce_lr = ReduceLROnPlateau(monitor=args.mlp_val_metric,
                                      factor=0.2,
                                      verbose=2,
                                      patience=6,
                                      min_lr=1e-5)
        callbacks = [early_stopping, reduce_lr]
        try:
            mlp.fit(X_train,
                    one_hot(y_train, n_classes=10),
                    epochs=args.mlp_epochs,
                    batch_size=args.mlp_batch_size,
                    shuffle=False,
                    validation_data=(X_val, one_hot(y_val, n_classes=10)),
                    callbacks=callbacks)
        except KeyboardInterrupt:
            # allow training to be cut short manually; the partially trained
            # model is still evaluated and saved below
            pass

    # decode softmax probabilities back to integer class labels
    y_pred = mlp.predict(X_test)
    y_pred = unhot(one_hot_decision_function(y_pred), n_classes=10)
    print("Test accuracy: {:.4f}".format(accuracy_score(y_test, y_pred)))

    # save predictions, targets, and fine-tuned weights
    np.save(args.mlp_save_prefix + 'y_pred.npy', y_pred)
    np.save(args.mlp_save_prefix + 'y_test.npy', y_test)
    W_finetuned, _ = mlp.layers[0].get_weights()
    np.save(args.mlp_save_prefix + 'W_finetuned.npy', W_finetuned)
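
These examples lean on a few small helpers that are not part of Keras or
scikit-learn (one_hot, one_hot_decision_function, unhot, Stopwatch). The
following is a minimal sketch of their assumed behavior, written to match how
they are called above; the project's actual implementations may differ.

import time

import numpy as np


def one_hot(y, n_classes=None):
    # encode integer labels as one-hot rows
    y = np.asarray(y).astype(int)
    n_classes = n_classes or int(y.max()) + 1
    return np.eye(n_classes)[y]


def one_hot_decision_function(y_proba):
    # turn predicted probabilities into a one-hot argmax matrix
    z = np.zeros_like(y_proba)
    z[np.arange(len(z)), y_proba.argmax(axis=1)] = 1.
    return z


def unhot(y_one_hot, n_classes=None):
    # decode one-hot rows back to integer labels; n_classes is accepted only
    # for signature symmetry with one_hot
    return np.asarray(y_one_hot).argmax(axis=1)


class Stopwatch(object):
    # context manager that reports elapsed wall-clock time when verbose
    def __init__(self, verbose=False):
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc):
        self.elapsed = time.time() - self.start
        if self.verbose:
            print("Elapsed time: {:.3f} sec".format(self.elapsed))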
Example #2
                                      patience=6,
                                      min_lr=1e-5)
        callbacks = [early_stopping, reduce_lr]
        try:
            mlp.fit(X_train,
                    one_hot(y_train, n_classes=10),
                    epochs=args.mlp_epochs,
                    batch_size=args.mlp_batch_size,
                    shuffle=False,
                    validation_data=(X_val, one_hot(y_val, n_classes=10)),
                    callbacks=callbacks)
        except KeyboardInterrupt:
            pass

    y_pred = mlp.predict(X_test)
    y_pred = unhot(one_hot_decision_function(y_pred), n_classes=10)
    print "Test accuracy: {:.4f}".format(accuracy_score(y_test, y_pred))

    # save predictions, targets, and fine-tuned weights
    np.save(args.mlp_save_prefix + 'y_pred.npy', y_pred)
    np.save(args.mlp_save_prefix + 'y_test.npy', y_test)
    W_finetuned, _ = mlp.layers[0].get_weights()
    np.save(args.mlp_save_prefix + 'W_finetuned.npy', W_finetuned)


def main():
    # training settings
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # general
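    # NOTE: the rest of the parser setup is truncated in this excerpt.
    # Judging from the attributes make_mlp reads, it must also define
    # MLP-related flags roughly like the following (a hypothetical sketch;
    # the default values below are placeholders, not the project's actual
    # defaults):
    parser.add_argument('--n-hidden', type=int, default=512,
                        help='number of hidden units in the first Dense layer')
    parser.add_argument('--mlp-l2', type=float, default=1e-5,
                        help='L2 penalty on the hidden layer weights')
    parser.add_argument('--mlp-lrm', type=float, nargs=2, default=[1., 1.],
                        help='LR multipliers for dense_1 and dense_2')
    parser.add_argument('--mlp-epochs', type=int, default=100)
    parser.add_argument('--mlp-batch-size', type=int, default=128)
    parser.add_argument('--mlp-val-metric', type=str, default='val_acc',
                        help='metric monitored by early stopping / LR schedule')
    parser.add_argument('--mlp-save-prefix', type=str, default='mlp_',
                        help='filename prefix for saved predictions and weights')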