# Example 1 imports (assumed from the surrounding CANDLE benchmark module;
# candle and p1b2 are project-local helpers)
import numpy as np

import candle
import p1b2
from keras.models import model_from_json


def run(gParameters):

    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.keras')
    candle.verify_path(gParameters['save_path'])
    prefix = '{}{}'.format(gParameters['save_path'], ext)
    logfile = gParameters['logfile'] or prefix + '.log'
    #candle.set_up_logger(logfile, p1b2.logger, gParameters['verbose'])
    #p1b2.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = candle.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    (X_train, y_train), (X_test, y_test) = p1b2.load_data2(gParameters, seed)

    print("Shape X_test: ", X_test.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                       gParameters['learning_rate'],
                                       kerasDefaults)

    # Load the serialized architecture and rebuild the model
    with open('p1b2.model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)

    # load weights into new model
    loaded_model.load_weights('p1b2.model.h5')
    print("Loaded model from disk")

    # evaluate loaded model on test data
    loaded_model.compile(loss=gParameters['loss'],
                         optimizer=optimizer,
                         metrics=['accuracy'])
    y_pred = loaded_model.predict(X_test)
    scores = p1b2.evaluate_accuracy_one_hot(y_pred, y_test)
    print('Evaluation on test data:', scores)
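
# A minimal sketch of how this run() could be driven, assuming the parameter
# keys read above; the values are illustrative, and any extra keys required
# by p1b2.extension_from_parameters or p1b2.load_data2 are omitted.
example_params = {
    'save_path': './save/',
    'logfile': None,
    'rng_seed': 2017,
    'optimizer': 'sgd',
    'learning_rate': 0.01,
    'loss': 'categorical_crossentropy',
}
run(example_params)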
Example #2

# Example 2 imports (assumed from the surrounding CANDLE Keras benchmark;
# candle and p1b2 are project-local helpers)
import numpy as np

import candle
import p1b2
from keras.layers import Dense, Dropout, Input
from keras.models import Model
from keras.regularizers import l2


def run(gParameters):

    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.keras')
    logfile = gParameters['logfile'] or gParameters['output_dir'] + ext + '.log'
    p1b2.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = candle.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    #(X_train, y_train), (X_test, y_test) = p1b2.load_data(gParameters, seed)
    (X_train, y_train), (X_val, y_val), (X_test, y_test) = \
        p1b2.load_data_one_hot(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)
    print("Shape y_train: ", y_train.shape)
    print("Shape y_val: ", y_val.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_train --> Min: ", np.min(y_train), ", max: ",
          np.max(y_train))
    print("Range y_val --> Min: ", np.min(y_val), ", max: ", np.max(y_val))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    input_dim = X_train.shape[1]
    input_vector = Input(shape=(input_dim, ))
    output_dim = y_train.shape[1]

    # Initialize weights and learning rule
    initializer_weights = candle.build_initializer(
        gParameters['initialization'], kerasDefaults, seed)
    initializer_bias = candle.build_initializer('constant', kerasDefaults, 0.)

    activation = gParameters['activation']

    # Define MLP architecture
    layers = gParameters['dense']

    if layers is not None:
        # Accept a scalar or any sequence for the hidden-layer spec
        if not isinstance(layers, list):
            layers = [layers] if np.isscalar(layers) else list(layers)
        x = input_vector
        for l in layers:
            x = Dense(l,
                      activation=activation,
                      kernel_initializer=initializer_weights,
                      bias_initializer=initializer_bias,
                      kernel_regularizer=l2(gParameters['penalty']),
                      activity_regularizer=l2(gParameters['penalty']))(x)
            if gParameters['drop']:
                x = Dropout(gParameters['drop'])(x)
        output = Dense(output_dim,
                       activation=activation,
                       kernel_initializer=initializer_weights,
                       bias_initializer=initializer_bias)(x)
    else:
        output = Dense(output_dim,
                       activation=activation,
                       kernel_initializer=initializer_weights,
                       bias_initializer=initializer_bias)(input_vector)

    # Build MLP model
    mlp = Model(inputs=input_vector, outputs=output)
    p1b2.logger.debug('Model: {}'.format(mlp.to_json()))

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                       gParameters['learning_rate'],
                                       kerasDefaults)

    # Compile and display model
    mlp.compile(loss=gParameters['loss'],
                optimizer=optimizer,
                metrics=['accuracy'])
    mlp.summary()

    # Seed random generator for training
    np.random.seed(seed)

    mlp.fit(X_train,
            y_train,
            batch_size=gParameters['batch_size'],
            epochs=gParameters['epochs'],
            validation_data=(X_val, y_val))

    # model save
    #save_filepath = "model_mlp_W_" + ext
    #mlp.save_weights(save_filepath)

    # Evaluate model on test set
    y_pred = mlp.predict(X_test)
    scores = p1b2.evaluate_accuracy_one_hot(y_pred, y_test)
    print('Evaluation on test data:', scores)
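
# The save step above is commented out; this hypothetical helper sketches how
# the trained model could be persisted as the 'p1b2.model.json' /
# 'p1b2.model.h5' pair that Example #1 reloads (file names assumed to match).
def save_mlp(mlp, json_path='p1b2.model.json', weights_path='p1b2.model.h5'):
    # Write the architecture as JSON and the weights as HDF5, the two
    # artifacts that model_from_json() and load_weights() expect when loading
    with open(json_path, 'w') as json_file:
        json_file.write(mlp.to_json())
    mlp.save_weights(weights_path)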
Example #3

# Example 3 imports (assumed from the surrounding CANDLE MXNet benchmark;
# p1_common, p1_common_mxnet, and the parser helper are project-local)
import numpy as np
import mxnet as mx

import p1b2
import p1_common
import p1_common_mxnet
from p1b2 import get_p1b2_parser  # assumed location of the CLI parser


def main():

    # Get command-line parameters
    parser = get_p1b2_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b2.read_config_file(args.config_file)
    #print ('Params:', fileParameters)
    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.mx')
    logfile = args.logfile if args.logfile else args.save + ext + '.log'
    p1b2.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    #(X_train, y_train), (X_val, y_val), (X_test, y_test) = p1b2.load_data(gParameters, seed)
    (X_train, y_train), (X_val, y_val), (X_test, y_test) = \
        p1b2.load_data_one_hot(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)
    print("Shape y_train: ", y_train.shape)
    print("Shape y_val: ", y_val.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_train --> Min: ", np.min(y_train), ", max: ",
          np.max(y_train))
    print("Range y_val --> Min: ", np.min(y_val), ", max: ", np.max(y_val))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    # Wrap the arrays in MXNet iterators for training, validation, and test
    train_iter = mx.io.NDArrayIter(X_train,
                                   y_train,
                                   gParameters['batch_size'],
                                   shuffle=gParameters['shuffle'])
    val_iter = mx.io.NDArrayIter(X_val, y_val, gParameters['batch_size'])
    test_iter = mx.io.NDArrayIter(X_test, y_test, gParameters['batch_size'])

    net = mx.sym.Variable('data')
    out = mx.sym.Variable('softmax_label')
    num_classes = y_train.shape[1]

    # Initialize weights and learning rule
    initializer_weights = p1_common_mxnet.build_initializer(
        gParameters['initialization'], kerasDefaults)
    initializer_bias = p1_common_mxnet.build_initializer(
        'constant', kerasDefaults, 0.)
    init = mx.initializer.Mixed(['bias', '.*'],
                                [initializer_bias, initializer_weights])

    activation = gParameters['activation']

    # Define MLP architecture
    layers = gParameters['dense']

    if layers is not None:
        # Accept a scalar or any sequence for the hidden-layer spec
        if not isinstance(layers, list):
            layers = [layers] if np.isscalar(layers) else list(layers)
        for l in layers:
            net = mx.sym.FullyConnected(data=net, num_hidden=l)
            net = mx.sym.Activation(data=net, act_type=activation)
            if gParameters['drop']:
                net = mx.sym.Dropout(data=net, p=gParameters['drop'])

    net = mx.sym.FullyConnected(data=net, num_hidden=num_classes)
    net = mx.sym.SoftmaxOutput(data=net, label=out)

    # Display model
    p1_common_mxnet.plot_network(net, 'net' + ext)

    devices = mx.cpu()
    if gParameters['gpus']:
        devices = [mx.gpu(i) for i in gParameters['gpus']]

    # Build MLP model
    mlp = mx.mod.Module(symbol=net, context=devices)

    # Define optimizer
    optimizer = p1_common_mxnet.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    metric = p1_common_mxnet.get_function(gParameters['loss'])()

    # Seed random generator for training
    mx.random.seed(seed)

    mlp.fit(train_iter,
            eval_data=val_iter,
            # eval_metric=metric,
            optimizer=optimizer,
            num_epoch=gParameters['epochs'],
            initializer=init)

    # model save
    #save_filepath = "model_mlp_" + ext
    #mlp.save(save_filepath)

    # Evaluate model on test set
    y_pred = mlp.predict(test_iter).asnumpy()
    #print ("Shape y_pred: ", y_pred.shape)
    scores = p1b2.evaluate_accuracy_one_hot(y_pred, y_test)
    print('Evaluation on test data:', scores)
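
# Standard entry point so the script can be run directly from the shell.
if __name__ == '__main__':
    main()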