def run(gParameters):

    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.keras')
    candle.verify_path(gParameters['save_path'])
    prefix = '{}{}'.format(gParameters['save_path'], ext)
    logfile = gParameters['logfile'] if gParameters[
        'logfile'] else prefix + '.log'
    #candle.set_up_logger(logfile, p1b2.logger, gParameters['verbose'])
    #p1b2.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = candle.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    (X_train, y_train), (X_test, y_test) = p1b2.load_data2(gParameters, seed)

    print("Shape X_test: ", X_test.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                       gParameters['learning_rate'],
                                       kerasDefaults)

    # Load the model architecture from JSON and create the model
    with open('p1b2.model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)

    # Load weights into the new model
    loaded_model.load_weights('p1b2.model.h5')
    print("Loaded model from disk")

    # Evaluate the loaded model on test data
    loaded_model.compile(loss=gParameters['loss'],
                         optimizer=optimizer,
                         metrics=['accuracy'])
    y_pred = loaded_model.predict(X_test)
    scores = p1b2.evaluate_accuracy_one_hot(y_pred, y_test)
    print('Evaluation on test data:', scores)
Example #2
def run(gParameters):

    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.keras')
    logfile = gParameters['logfile'] if gParameters[
        'logfile'] else gParameters['output_dir'] + ext + '.log'
    p1b2.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = candle.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    #(X_train, y_train), (X_test, y_test) = p1b2.load_data(gParameters, seed)
    (X_train,
     y_train), (X_val,
                y_val), (X_test,
                         y_test) = p1b2.load_data_one_hot(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)
    print("Shape y_train: ", y_train.shape)
    print("Shape y_val: ", y_val.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_train --> Min: ", np.min(y_train), ", max: ",
          np.max(y_train))
    print("Range y_val --> Min: ", np.min(y_val), ", max: ", np.max(y_val))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    input_dim = X_train.shape[1]
    input_vector = Input(shape=(input_dim, ))
    output_dim = y_train.shape[1]

    # Initialize weights and learning rule
    initializer_weights = candle.build_initializer(
        gParameters['initialization'], kerasDefaults, seed)
    initializer_bias = candle.build_initializer('constant', kerasDefaults, 0.)

    activation = gParameters['activation']

    # Define MLP architecture
    layers = gParameters['dense']

    if layers is not None:
        if not isinstance(layers, list):
            layers = list(layers)
        for i, l in enumerate(layers):
            if i == 0:
                x = Dense(l,
                          activation=activation,
                          kernel_initializer=initializer_weights,
                          bias_initializer=initializer_bias,
                          kernel_regularizer=l2(gParameters['penalty']),
                          activity_regularizer=l2(
                              gParameters['penalty']))(input_vector)
            else:
                x = Dense(l,
                          activation=activation,
                          kernel_initializer=initializer_weights,
                          bias_initializer=initializer_bias,
                          kernel_regularizer=l2(gParameters['penalty']),
                          activity_regularizer=l2(gParameters['penalty']))(x)
            if gParameters['drop']:
                x = Dropout(gParameters['drop'])(x)
        output = Dense(output_dim,
                       activation=activation,
                       kernel_initializer=initializer_weights,
                       bias_initializer=initializer_bias)(x)
    else:
        output = Dense(output_dim,
                       activation=activation,
                       kernel_initializer=initializer_weights,
                       bias_initializer=initializer_bias)(input_vector)

    # Build MLP model
    mlp = Model(outputs=output, inputs=input_vector)
    p1b2.logger.debug('Model: {}'.format(mlp.to_json()))

    # Define optimizer
    optimizer = candle.build_optimizer(gParameters['optimizer'],
                                       gParameters['learning_rate'],
                                       kerasDefaults)

    # Compile and display model
    mlp.compile(loss=gParameters['loss'],
                optimizer=optimizer,
                metrics=['accuracy'])
    mlp.summary()

    # Seed random generator for training
    np.random.seed(seed)

    mlp.fit(X_train,
            y_train,
            batch_size=gParameters['batch_size'],
            epochs=gParameters['epochs'],
            validation_data=(X_val, y_val))

    # model save
    #save_filepath = "model_mlp_W_" + ext
    #mlp.save_weights(save_filepath)
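    # A minimal save sketch (an assumption, not part of the original flow): writing
    # the architecture as JSON plus the weights as HDF5 would produce the files that
    # the loading example above expects ('p1b2.model.json' / 'p1b2.model.h5').
    # Kept commented out, like the original save block:
    #with open('p1b2.model.json', 'w') as json_file:
    #    json_file.write(mlp.to_json())
    #mlp.save_weights('p1b2.model.h5')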

    # Evaluate model on test set
    y_pred = mlp.predict(X_test)
    scores = p1b2.evaluate_accuracy_one_hot(y_pred, y_test)
    print('Evaluation on test data:', scores)
Example #3
def main():

    # Get command-line parameters
    parser = get_p1b2_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b2.read_config_file(args.config_file)
    #print ('Params:', fileParameters)
    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.mx')
    logfile = args.logfile if args.logfile else args.save + ext + '.log'
    p1b2.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    #(X_train, y_train), (X_val, y_val), (X_test, y_test) = p1b2.load_data(gParameters, seed)
    (X_train,
     y_train), (X_val,
                y_val), (X_test,
                         y_test) = p1b2.load_data_one_hot(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)
    print("Shape y_train: ", y_train.shape)
    print("Shape y_val: ", y_val.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_train --> Min: ", np.min(y_train), ", max: ",
          np.max(y_train))
    print("Range y_val --> Min: ", np.min(y_val), ", max: ", np.max(y_val))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    # Set input and target to X_train
    train_iter = mx.io.NDArrayIter(X_train,
                                   y_train,
                                   gParameters['batch_size'],
                                   shuffle=gParameters['shuffle'])
    val_iter = mx.io.NDArrayIter(X_val, y_val, gParameters['batch_size'])
    test_iter = mx.io.NDArrayIter(X_test, y_test, gParameters['batch_size'])

    net = mx.sym.Variable('data')
    out = mx.sym.Variable('softmax_label')
    num_classes = y_train.shape[1]

    # Initialize weights and learning rule
    initializer_weights = p1_common_mxnet.build_initializer(
        gParameters['initialization'], kerasDefaults)
    initializer_bias = p1_common_mxnet.build_initializer(
        'constant', kerasDefaults, 0.)
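    # Mixed initializer: parameters whose names match 'bias' use the constant bias
    # initializer; all remaining parameters ('.*') use the weight initializer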
    init = mx.initializer.Mixed(['bias', '.*'],
                                [initializer_bias, initializer_weights])

    activation = gParameters['activation']

    # Define MLP architecture
    layers = gParameters['dense']

    if layers is not None:
        if not isinstance(layers, list):
            layers = list(layers)
        for i, l in enumerate(layers):
            net = mx.sym.FullyConnected(data=net, num_hidden=l)
            net = mx.sym.Activation(data=net, act_type=activation)
            if gParameters['drop']:
                net = mx.sym.Dropout(data=net, p=gParameters['drop'])

    net = mx.sym.FullyConnected(data=net, num_hidden=num_classes)
    net = mx.symbol.SoftmaxOutput(data=net, label=out)

    # Display model
    p1_common_mxnet.plot_network(net, 'net' + ext)

    devices = mx.cpu()
    if gParameters['gpus']:
        devices = [mx.gpu(i) for i in gParameters['gpus']]

    # Build MLP model
    mlp = mx.mod.Module(symbol=net, context=devices)

    # Define optimizer
    optimizer = p1_common_mxnet.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    metric = p1_common_mxnet.get_function(gParameters['loss'])()

    # Seed random generator for training
    mx.random.seed(seed)

    mlp.fit(
        train_iter,
        eval_data=val_iter,
        #            eval_metric=metric,
        optimizer=optimizer,
        num_epoch=gParameters['epochs'],
        initializer=init)

    # model save
    #save_filepath = "model_mlp_" + ext
    #mlp.save(save_filepath)
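    # A minimal checkpoint sketch (an assumption, not the original flow): the Module
    # API can persist the symbol and parameters with save_checkpoint, e.g.
    #mlp.save_checkpoint("model_mlp" + ext, gParameters['epochs'])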

    # Evaluate model on test set
    y_pred = mlp.predict(test_iter).asnumpy()
    #print ("Shape y_pred: ", y_pred.shape)
    scores = p1b2.evaluate_accuracy_one_hot(y_pred, y_test)
    print('Evaluation on test data:', scores)
Example #4
def main():
    # Get command-line parameters
    parser = get_p1b2_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b2.read_config_file(args.config_file)
    #print ('Params:', fileParameters)

    # Correct for arguments set by default by the neon parser:
    # if a parameter was not given on the command line, fall back to the config-file
    # value instead of the neon parser default; if it was given on the command line,
    # keep the command-line value.
    # This applies to the conflicting parameters: batch_size, epochs and rng_seed
    if not any("--batch_size" in ag or "-z" in ag for ag in sys.argv):
        args.batch_size = fileParameters['batch_size']
    if not any("--epochs" in ag or "-e" in ag for ag in sys.argv):
        args.epochs = fileParameters['epochs']
    if not any("--rng_seed" in ag or "-r" in ag for ag in sys.argv):
        args.rng_seed = fileParameters['rng_seed']

    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Determine verbosity level
    loggingLevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=loggingLevel, format='')
    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.neon')

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    #(X_train, y_train), (X_test, y_test) = p1b2.load_data(gParameters, seed)
    (X_train, y_train), (X_val,
                         y_val), (X_test,
                                  y_test) = p1b2.load_data(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)
    print("Shape y_train: ", y_train.shape)
    print("Shape y_val: ", y_val.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_train --> Min: ", np.min(y_train), ", max: ",
          np.max(y_train))
    print("Range y_val --> Min: ", np.min(y_val), ", max: ", np.max(y_val))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    input_dim = X_train.shape[1]
    num_classes = int(np.max(y_train)) + 1
    output_dim = num_classes  # The backend represents classes one-hot internally, but requires integer class labels as input

    # Re-generate the backend after consolidating parsing and file config
    gen_backend(backend=args.backend,
                rng_seed=seed,
                device_id=args.device_id,
                batch_size=gParameters['batch_size'],
                datatype=gParameters['data_type'],
                max_devices=args.max_devices,
                compat_mode=args.compat_mode)

    train = ArrayIterator(X=X_train, y=y_train, nclass=num_classes)
    val = ArrayIterator(X=X_val, y=y_val, nclass=num_classes)
    test = ArrayIterator(X=X_test, y=y_test, nclass=num_classes)

    # Initialize weights and learning rule
    initializer_weights = p1_common_neon.build_initializer(
        gParameters['initialization'], kerasDefaults, seed)
    initializer_bias = p1_common_neon.build_initializer(
        'constant', kerasDefaults, 0.)

    activation = p1_common_neon.get_function(gParameters['activation'])()

    # Define MLP architecture
    layers = []
    reshape = None

    for layer in gParameters['dense']:
        if layer:
            layers.append(
                Affine(nout=layer,
                       init=initializer_weights,
                       bias=initializer_bias,
                       activation=activation))
        if gParameters['dropout']:
            layers.append(Dropout(keep=(1 - gParameters['dropout'])))

    layers.append(
        Affine(nout=output_dim,
               init=initializer_weights,
               bias=initializer_bias,
               activation=activation))

    # Build MLP model
    mlp = Model(layers=layers)

    # Define cost and optimizer
    cost = GeneralizedCost(p1_common_neon.get_function(gParameters['loss'])())
    optimizer = p1_common_neon.build_optimizer(gParameters['optimizer'],
                                               gParameters['learning_rate'],
                                               kerasDefaults)

    callbacks = Callbacks(mlp, eval_set=val, metric=Accuracy(), eval_freq=1)

    # Seed random generator for training
    np.random.seed(seed)

    mlp.fit(train,
            optimizer=optimizer,
            num_epochs=gParameters['epochs'],
            cost=cost,
            callbacks=callbacks)

    # model save
    #save_fname = "model_mlp_W_" + ext
    #mlp.save_params(save_fname)

    # Evaluate model on test set
    print('Model evaluation by neon: ', mlp.eval(test, metric=Accuracy()))
    y_pred = mlp.get_outputs(test)
    #print ("Shape y_pred: ", y_pred.shape)
    scores = p1b2.evaluate_accuracy(p1_common.convert_to_class(y_pred), y_test)
    print('Evaluation on test data:', scores)