Example No. 1
def initialize_parameters():
    # Get command-line parameters
    parser = get_combo_parser()
    args = parser.parse_args()
    # Get parameters from configuration file
    file_params = combo.read_config_file(args.config_file)
    # Consolidate parameter set. Command-line parameters overwrite file configuration
    params = p1_common.args_overwrite_config(args, file_params)
    # print(params)
    return params
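
The consolidation comment above describes the behavior shared by all of these examples. As a rough illustration only (a hypothetical reimplementation, not the actual p1_common code), args_overwrite_config can be thought of as layering every command-line value that was actually set over the file configuration:

def args_overwrite_config_sketch(args, file_params):
    # Hypothetical stand-in: start from the file configuration,
    # then let any non-None command-line attribute take precedence.
    params = dict(file_params)
    for key, value in vars(args).items():
        if value is not None:
            params[key] = value
    return params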
Example No. 2
def initialize_parameters():
    # Get command-line parameters
    parser = get_nt3_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = read_config_file(args.config_file)
    #print ('Params:', fileParameters)
    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    return gParameters
Example No. 3

def main():

    # Get command-line parameters
    parser = get_p1b1_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b1.read_config_file(args.config_file)
    #print ('Params:', fileParameters)
    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Construct extension to save model
    ext = p1b1.extension_from_parameters(gParameters, '.pt')
    logfile = args.logfile if args.logfile else args.save + ext + '.log'
    p1b1.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    X_train, X_val, X_test = p1b1.load_data(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))

    # Set input and target to X_train
    train_data = torch.from_numpy(X_train)
    train_tensor = data.TensorDataset(train_data, train_data)
    train_iter = data.DataLoader(train_tensor,
                                 batch_size=gParameters['batch_size'],
                                 shuffle=gParameters['shuffle'])

    # Validation set
    val_data = torch.from_numpy(X_val)
    val_tensor = torch.utils.data.TensorDataset(val_data, val_data)
    val_iter = torch.utils.data.DataLoader(
        val_tensor,
        batch_size=gParameters['batch_size'],
        shuffle=gParameters['shuffle'])

    # Test set
    test_data = torch.from_numpy(X_test)
    test_tensor = torch.utils.data.TensorDataset(test_data, test_data)
    test_iter = torch.utils.data.DataLoader(
        test_tensor,
        batch_size=gParameters['batch_size'],
        shuffle=gParameters['shuffle'])

    input_dim = X_train.shape[1]
    output_dim = input_dim

    # Define Autoencoder architecture
    layers = gParameters['dense']
    activation = p1_common_pytorch.build_activation(gParameters['activation'])
    loss_fn = p1_common_pytorch.get_function(gParameters['loss'])
    '''
    N1 = layers[0]
    NE = layers[1]

    net = nn.Sequential(
      nn.Linear(input_dim,N1),
      activation,
      nn.Linear(N1,NE),
      activation,
      nn.Linear(NE,N1),
      activation,
      nn.Linear(N1,output_dim),
      activation,
    )
    '''

    # Build the autoencoder incrementally as a Sequential of named modules
    net = nn.Sequential()

    if layers is not None:
        if not isinstance(layers, list):
            layers = list(layers)
        # Encoder Part
        for i, l in enumerate(layers):
            if i == 0:
                net.add_module('in_dense', nn.Linear(input_dim, l))
                net.add_module('in_act', activation)
                insize = l
            else:
                net.add_module('en_dense%d' % i, nn.Linear(insize, l))
                net.add_module('en_act%d' % i, activation)
                insize = l

        # Decoder Part
        for i, l in reversed(list(enumerate(layers))):
            if i < len(layers) - 1:
                net.add_module('de_dense%d' % i, nn.Linear(insize, l))
                net.add_module('de_act%d' % i, activation)
                insize = l

    net.add_module('out_dense', nn.Linear(insize, output_dim))
    net.add_module('out_act', activation)

    # Initialize weights
    for m in net.modules():
        if isinstance(m, nn.Linear):
            p1_common_pytorch.build_initializer(m.weight,
                                                gParameters['initialization'],
                                                kerasDefaults)
            p1_common_pytorch.build_initializer(m.bias, 'constant',
                                                kerasDefaults, 0.0)

    # Display model
    print(net)

    # Define optimizer
    optimizer = p1_common_pytorch.build_optimizer(net,
                                                  gParameters['optimizer'],
                                                  gParameters['learning_rate'],
                                                  kerasDefaults)

    # Seed random generator for training
    torch.manual_seed(seed)

    #use_gpu = torch.cuda.is_available()
    use_gpu = False

    freq_log = 1
    for epoch in range(gParameters['epochs']):
        train_loss = 0  # reset each epoch so the printed average is per-epoch
        for batch, (in_train, _) in enumerate(train_iter):
            in_train = Variable(in_train)
            #print(in_train.data.shape())
            if use_gpu:
                in_train = in_train.cuda()
            optimizer.zero_grad()
            output = net(in_train)

            loss = loss_fn(output, in_train)
            loss.backward()
            train_loss += loss.item()
            optimizer.step()
            if batch % freq_log == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch * len(in_train), len(train_iter.dataset),
                    100. * batch / len(train_iter),
                    loss.item()))
        print('====> Epoch: {} Average loss: {:.4f}'.format(
            epoch, train_loss / len(train_iter.dataset)))

        # model save
        #save_filepath = "model_ae_" + ext
        #ae.save(save_filepath)

        # Evaluate model on validation set
        for i, (in_val, _) in enumerate(val_iter):
            in_val = Variable(in_val)
            X_pred = net(in_val).data.numpy()
            if i == 0:
                in_all = in_val.data.numpy()
                out_all = X_pred
            else:
                in_all = np.append(in_all, in_val.data.numpy(), axis=0)
                out_all = np.append(out_all, X_pred, axis=0)

        #print ("Shape in_all: ", in_all.shape)
        #print ("Shape out_all: ", out_all.shape)

        scores = p1b1.evaluate_autoencoder(in_all, out_all)
        print('Evaluation on validation data:', scores)

    # Evaluate model on test set
    for i, (in_test, _) in enumerate(test_iter):
        in_test = Variable(in_test)
        X_pred = net(in_test).data.numpy()
        if i == 0:
            in_all = in_test.data.numpy()
            out_all = X_pred
        else:
            in_all = np.append(in_all, in_test.data.numpy(), axis=0)
            out_all = np.append(out_all, X_pred, axis=0)

    #print ("Shape in_all: ", in_all.shape)
    #print ("Shape out_all: ", out_all.shape)

    scores = p1b1.evaluate_autoencoder(in_all, out_all)
    print('Evaluation on test data:', scores)

    diff = in_all - out_all
    plt.hist(diff.ravel(), bins='auto')
    plt.title("Histogram of Errors with 'auto' bins")
    plt.savefig('histogram_mx.pdf')
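
The encoder/decoder loops above mirror the 'dense' spec into a symmetric autoencoder. A self-contained sketch of the same construction (the layer widths and ReLU activation here are illustrative, not values from the benchmark config):

import torch.nn as nn

def build_symmetric_ae(input_dim, dense):
    net = nn.Sequential()
    insize = input_dim
    # Encoder: input_dim -> dense[0] -> ... -> dense[-1] (the bottleneck)
    for i, l in enumerate(dense):
        net.add_module('en_dense%d' % i, nn.Linear(insize, l))
        net.add_module('en_act%d' % i, nn.ReLU())
        insize = l
    # Decoder: walk the spec backwards, skipping the bottleneck width itself
    for i, l in reversed(list(enumerate(dense))):
        if i < len(dense) - 1:
            net.add_module('de_dense%d' % i, nn.Linear(insize, l))
            net.add_module('de_act%d' % i, nn.ReLU())
            insize = l
    net.add_module('out_dense', nn.Linear(insize, input_dim))
    return net

# dense=[2000, 600] yields Linear(1000, 2000) -> Linear(2000, 600)
#                       -> Linear(600, 2000) -> Linear(2000, 1000)
print(build_symmetric_ae(1000, [2000, 600]))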
Example No. 4
def main():

    # Get command-line parameters
    parser = get_p1b2_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b2.read_config_file(args.config_file)
    #print ('Params:', fileParameters)
    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.mx')
    logfile = args.logfile if args.logfile else args.save + ext + '.log'
    p1b2.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    #(X_train, y_train), (X_val, y_val), (X_test, y_test) = p1b2.load_data(gParameters, seed)
    (X_train,
     y_train), (X_val,
                y_val), (X_test,
                         y_test) = p1b2.load_data_one_hot(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)
    print("Shape y_train: ", y_train.shape)
    print("Shape y_val: ", y_val.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_train --> Min: ", np.min(y_train), ", max: ",
          np.max(y_train))
    print("Range y_val --> Min: ", np.min(y_val), ", max: ", np.max(y_val))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    # Set input and target to X_train
    train_iter = mx.io.NDArrayIter(X_train,
                                   y_train,
                                   gParameters['batch_size'],
                                   shuffle=gParameters['shuffle'])
    val_iter = mx.io.NDArrayIter(X_val, y_val, gParameters['batch_size'])
    test_iter = mx.io.NDArrayIter(X_test, y_test, gParameters['batch_size'])

    net = mx.sym.Variable('data')  #X')
    out = mx.sym.Variable('softmax_label')  #y')
    num_classes = y_train.shape[1]

    # Initialize weights and learning rule
    initializer_weights = p1_common_mxnet.build_initializer(
        gParameters['initialization'], kerasDefaults)
    initializer_bias = p1_common_mxnet.build_initializer(
        'constant', kerasDefaults, 0.)
    init = mx.initializer.Mixed(['bias', '.*'],
                                [initializer_bias, initializer_weights])

    activation = gParameters['activation']

    # Define MLP architecture
    layers = gParameters['dense']

    if layers is not None:
        if not isinstance(layers, list):
            layers = list(layers)
        for i, l in enumerate(layers):
            net = mx.sym.FullyConnected(data=net, num_hidden=l)
            net = mx.sym.Activation(data=net, act_type=activation)
            if gParameters['drop']:
                net = mx.sym.Dropout(data=net, p=gParameters['drop'])

    net = mx.sym.FullyConnected(data=net, num_hidden=num_classes)  # 1)
    net = mx.symbol.SoftmaxOutput(data=net, label=out)

    # Display model
    p1_common_mxnet.plot_network(net, 'net' + ext)

    devices = mx.cpu()
    if gParameters['gpus']:
        devices = [mx.gpu(i) for i in gParameters['gpus']]

    # Build MLP model
    mlp = mx.mod.Module(symbol=net, context=devices)

    # Define optimizer
    optimizer = p1_common_mxnet.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    metric = p1_common_mxnet.get_function(gParameters['loss'])()

    # Seed random generator for training
    mx.random.seed(seed)

    mlp.fit(
        train_iter,
        eval_data=val_iter,
        #            eval_metric=metric,
        optimizer=optimizer,
        num_epoch=gParameters['epochs'],
        initializer=init)

    # model save
    #save_filepath = "model_mlp_" + ext
    #mlp.save(save_filepath)

    # Evaluate model on test set
    y_pred = mlp.predict(test_iter).asnumpy()
    #print ("Shape y_pred: ", y_pred.shape)
    scores = p1b2.evaluate_accuracy_one_hot(y_pred, y_test)
    print('Evaluation on test data:', scores)
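
evaluate_accuracy_one_hot receives one-hot targets and per-class scores; a minimal numpy sketch of that comparison (a hypothetical stand-in, not the p1b2 source):

import numpy as np

def accuracy_one_hot_sketch(y_pred, y_test):
    # Both arrays are (n_samples, n_classes); compare argmax class indices.
    match = np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)
    return {'accuracy': float(np.mean(match))}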
Example No. 5

def main():
    # Get command-line parameters
    parser = get_p1b3_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b3.read_config_file(args.config_file)
    #print ('Params:', fileParameters)

    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Determine verbosity level
    loggingLevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=loggingLevel, format='')
    # Construct extension to save model
    ext = p1b3.extension_from_parameters(gParameters, '.neon')

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Build dataset loader object
    loader = p1b3.DataLoader(
        seed=seed,
        dtype=gParameters['datatype'],
        val_split=gParameters['validation_split'],
        test_cell_split=gParameters['test_cell_split'],
        cell_features=gParameters['cell_features'],
        drug_features=gParameters['drug_features'],
        feature_subsample=gParameters['feature_subsample'],
        scaling=gParameters['scaling'],
        scramble=gParameters['scramble'],
        min_logconc=gParameters['min_logconc'],
        max_logconc=gParameters['max_logconc'],
        subsample=gParameters['subsample'],
        category_cutoffs=gParameters['category_cutoffs'])

    net = mx.sym.Variable('concat_features')
    out = mx.sym.Variable('growth')

    # Initialize weights and learning rule
    initializer_weights = p1_common_mxnet.build_initializer(
        gParameters['initialization'], kerasDefaults)
    initializer_bias = p1_common_mxnet.build_initializer(
        'constant', kerasDefaults, 0.)
    init = mx.initializer.Mixed(['bias', '.*'],
                                [initializer_bias, initializer_weights])

    activation = gParameters['activation']

    # Define model architecture

    if 'dense' in gParameters:  # Build dense layers
        for layer in gParameters['dense']:
            if layer:
                net = mx.sym.FullyConnected(data=net, num_hidden=layer)
                net = mx.sym.Activation(data=net, act_type=activation)
            if gParameters['drop']:
                net = mx.sym.Dropout(data=net, p=gParameters['drop'])
    else:  # Build convolutional layers
        net = mx.sym.Reshape(data=net,
                             shape=(gParameters['batch_size'], 1,
                                    loader.input_dim, 1))
        for i in range(0, len(gParameters['conv']), 3):
            nb_filter = gParameters['conv'][i]
            filter_len = gParameters['conv'][i + 1]
            stride = gParameters['conv'][i + 2]
            if nb_filter <= 0 or filter_len <= 0 or stride <= 0:
                break
            net = mx.sym.Convolution(data=net,
                                     num_filter=nb_filter,
                                     kernel=(filter_len, 1),
                                     stride=(stride, 1))
            net = mx.sym.Activation(data=net, act_type=activation)
            if gParameters['pool']:
                net = mx.sym.Pooling(data=net,
                                     pool_type="max",
                                     kernel=(gParameters['pool'], 1),
                                     stride=(1, 1))
        net = mx.sym.Flatten(data=net)

    net = mx.sym.FullyConnected(data=net, num_hidden=1)
    net = mx.symbol.LinearRegressionOutput(data=net, label=out)

    # Display model
    p1_common_mxnet.plot_network(net, 'net' + ext)

    # Define mxnet data iterators
    train_samples = int(loader.n_train)
    val_samples = int(loader.n_val)

    if 'train_samples' in gParameters:
        train_samples = gParameters['train_samples']
    if 'val_samples' in gParameters:
        val_samples = gParameters['val_samples']

    train_iter = ConcatDataIter(loader,
                                batch_size=gParameters['batch_size'],
                                num_data=train_samples)
    val_iter = ConcatDataIter(loader,
                              partition='val',
                              batch_size=gParameters['batch_size'],
                              num_data=val_samples)

    devices = mx.cpu()
    if gParameters['gpus']:
        devices = [mx.gpu(i) for i in gParameters['gpus']]

    mod = mx.mod.Module(net,
                        data_names=('concat_features', ),
                        label_names=('growth', ),
                        context=devices)

    # Define optimizer
    optimizer = p1_common_mxnet.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    # Seed random generator for training
    mx.random.seed(seed)

    freq_log = 1

    #initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
    mod.fit(train_iter,
            eval_data=val_iter,
            eval_metric=gParameters['loss'],
            optimizer=optimizer,
            num_epoch=gParameters['epochs'],
            initializer=init,
            epoch_end_callback=mx.callback.Speedometer(
                gParameters['batch_size'], 20))
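
The convolution branch above reads the 'conv' parameter as a flat list of (num_filters, filter_length, stride) triplets. A small decoding sketch with a made-up spec:

conv = [32, 20, 1, 64, 10, 1]  # hypothetical flat spec: two conv layers
for i in range(0, len(conv), 3):
    nb_filter, filter_len, stride = conv[i:i + 3]
    if nb_filter <= 0 or filter_len <= 0 or stride <= 0:
        break  # a non-positive entry terminates the layer spec
    print('conv layer: %d filters, length %d, stride %d'
          % (nb_filter, filter_len, stride))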
Example No. 6

def main():

    # Get command-line parameters
    parser = get_p1b2_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b2.read_config_file(args.config_file)
    #print ('Params:', fileParameters)
    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.keras')
    logfile = args.logfile if args.logfile else args.save + ext + '.log'
    p1b2.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    #(X_train, y_train), (X_test, y_test) = p1b2.load_data(gParameters, seed)
    (X_train,
     y_train), (X_val,
                y_val), (X_test,
                         y_test) = p1b2.load_data_one_hot(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)
    print("Shape y_train: ", y_train.shape)
    print("Shape y_val: ", y_val.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_train --> Min: ", np.min(y_train), ", max: ",
          np.max(y_train))
    print("Range y_val --> Min: ", np.min(y_val), ", max: ", np.max(y_val))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    input_dim = X_train.shape[1]
    input_vector = Input(shape=(input_dim, ))
    output_dim = y_train.shape[1]

    # Initialize weights and learning rule
    initializer_weights = p1_common_keras.build_initializer(
        gParameters['initialization'], kerasDefaults, seed)
    initializer_bias = p1_common_keras.build_initializer(
        'constant', kerasDefaults, 0.)

    activation = gParameters['activation']

    # Define MLP architecture
    layers = gParameters['dense']

    if layers is not None:
        if not isinstance(layers, list):
            layers = list(layers)
        for i, l in enumerate(layers):
            if i == 0:
                x = Dense(l,
                          activation=activation,
                          kernel_initializer=initializer_weights,
                          bias_initializer=initializer_bias,
                          kernel_regularizer=l2(gParameters['penalty']),
                          activity_regularizer=l2(
                              gParameters['penalty']))(input_vector)
            else:
                x = Dense(l,
                          activation=activation,
                          kernel_initializer=initializer_weights,
                          bias_initializer=initializer_bias,
                          kernel_regularizer=l2(gParameters['penalty']),
                          activity_regularizer=l2(gParameters['penalty']))(x)
            if gParameters['drop']:
                x = Dropout(gParameters['drop'])(x)
        output = Dense(output_dim,
                       activation=activation,
                       kernel_initializer=initializer_weights,
                       bias_initializer=initializer_bias)(x)
    else:
        output = Dense(output_dim,
                       activation=activation,
                       kernel_initializer=initializer_weights,
                       bias_initializer=initializer_bias)(input_vector)

    # Build MLP model
    mlp = Model(outputs=output, inputs=input_vector)
    p1b2.logger.debug('Model: {}'.format(mlp.to_json()))

    # Define optimizer
    optimizer = p1_common_keras.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    # Compile and display model
    mlp.compile(loss=gParameters['loss'],
                optimizer=optimizer,
                metrics=['accuracy'])
    mlp.summary()

    # Seed random generator for training
    np.random.seed(seed)

    mlp.fit(X_train,
            y_train,
            batch_size=gParameters['batch_size'],
            epochs=gParameters['epochs'],
            validation_data=(X_val, y_val))

    # model save
    #save_filepath = "model_mlp_W_" + ext
    #mlp.save_weights(save_filepath)

    # Evaluate model on test set
    y_pred = mlp.predict(X_test)
    scores = p1b2.evaluate_accuracy_one_hot(y_pred, y_test)
    print('Evaluation on test data:', scores)
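
The i == 0 branch in the Dense loop above exists only to start the functional chain at input_vector; seeding the running tensor with the input removes it. A stripped-down sketch of the same chaining pattern (widths and rates are illustrative):

from keras.layers import Input, Dense, Dropout
from keras.models import Model

inp = Input(shape=(100,))      # hypothetical input width
x = inp
for l in [64, 32]:             # hypothetical dense spec
    x = Dense(l, activation='relu')(x)
    x = Dropout(0.1)(x)
out = Dense(10, activation='softmax')(x)
mlp = Model(inputs=inp, outputs=out)
mlp.summary()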
Example No. 7
def main():
    # Get command-line parameters
    parser = get_p1b1_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b1.read_config_file(args.config_file)
    #print ('Params:', fileParameters)

    # Correct for arguments set by default by the neon parser
    # (i.e. fall back to the config-file value instead of the neon parser
    # default, unless the value was actually given on the command line)
    # This applies to the conflicting parameters: batch_size, epochs and rng_seed
    if not any("--batch_size" in ag or "-z" in ag for ag in sys.argv):
        args.batch_size = fileParameters['batch_size']
    if not any("--epochs" in ag or "-e" in ag for ag in sys.argv):
        args.epochs = fileParameters['epochs']
    if not any("--rng_seed" in ag or "-r" in ag for ag in sys.argv):
        args.rng_seed = fileParameters['rng_seed']

    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Determine verbosity level
    loggingLevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=loggingLevel, format='')
    # Construct extension to save model
    ext = p1b1.extension_from_parameters(gParameters, '.neon')

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    X_train, X_val, X_test = p1b1.load_data(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))

    input_dim = X_train.shape[1]
    output_dim = input_dim

    # Re-generate the backend after consolidating parsing and file config
    gen_backend(backend=args.backend,
                rng_seed=seed,
                device_id=args.device_id,
                batch_size=gParameters['batch_size'],
                datatype=gParameters['datatype'],
                max_devices=args.max_devices,
                compat_mode=args.compat_mode)

    # Set input and target to X_train
    train = ArrayIterator(X_train)
    val = ArrayIterator(X_val)
    test = ArrayIterator(X_test)

    # Initialize weights and learning rule
    initializer_weights = p1_common_neon.build_initializer(
        gParameters['initialization'], kerasDefaults)
    initializer_bias = p1_common_neon.build_initializer(
        'constant', kerasDefaults, 0.)

    activation = p1_common_neon.get_function(gParameters['activation'])()

    # Define Autoencoder architecture
    layers = []
    reshape = None

    # Autoencoder
    layers_params = gParameters['dense']

    if layers_params is not None:
        if not isinstance(layers_params, list):
            layers_params = list(layers_params)
        # Encoder Part
        for i, l in enumerate(layers_params):
            layers.append(
                Affine(nout=l,
                       init=initializer_weights,
                       bias=initializer_bias,
                       activation=activation))
        # Decoder Part
        for i, l in reversed(list(enumerate(layers_params))):
            if i < len(layers_params) - 1:
                layers.append(
                    Affine(nout=l,
                           init=initializer_weights,
                           bias=initializer_bias,
                           activation=activation))

    layers.append(
        Affine(nout=output_dim,
               init=initializer_weights,
               bias=initializer_bias,
               activation=activation))

    # Build Autoencoder model
    ae = Model(layers=layers)

    # Define cost and optimizer
    cost = GeneralizedCost(p1_common_neon.get_function(gParameters['loss'])())
    optimizer = p1_common_neon.build_optimizer(gParameters['optimizer'],
                                               gParameters['learning_rate'],
                                               kerasDefaults)

    callbacks = Callbacks(ae, eval_set=val, eval_freq=1)

    # Seed random generator for training
    np.random.seed(seed)

    ae.fit(train,
           optimizer=optimizer,
           num_epochs=gParameters['epochs'],
           cost=cost,
           callbacks=callbacks)

    # model save
    #save_fname = "model_ae_W" + ext
    #ae.save_params(save_fname)

    # Compute errors
    X_pred = ae.get_outputs(test)
    scores = p1b1.evaluate_autoencoder(X_pred, X_test)
    print('Evaluation on test data:', scores)

    diff = X_pred - X_test
    # Plot histogram of errors comparing input and output of autoencoder
    plt.hist(diff.ravel(), bins='auto')
    plt.title("Histogram of Errors with 'auto' bins")
    plt.savefig('histogram_neon.png')
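
p1b1.evaluate_autoencoder summarizes how well the reconstruction matches the input. Its exact metrics are not shown here; a plausible sketch would combine mean squared error with a correlation score:

import numpy as np

def evaluate_autoencoder_sketch(x_pred, x_true):
    # Hypothetical stand-in: elementwise MSE plus Pearson correlation
    mse = float(np.mean((x_pred - x_true) ** 2))
    corr = float(np.corrcoef(x_pred.ravel(), x_true.ravel())[0, 1])
    return {'mse': mse, 'corr': corr}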
Example No. 8
def main():
    # Get command-line parameters
    parser = get_p1b2_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b2.read_config_file(args.config_file)
    #print ('Params:', fileParameters)

    # Correct for arguments set by default by the neon parser
    # (i.e. fall back to the config-file value instead of the neon parser
    # default, unless the value was actually given on the command line)
    # This applies to the conflicting parameters: batch_size, epochs and rng_seed
    if not any("--batch_size" in ag or "-z" in ag for ag in sys.argv):
        args.batch_size = fileParameters['batch_size']
    if not any("--epochs" in ag or "-e" in ag for ag in sys.argv):
        args.epochs = fileParameters['epochs']
    if not any("--rng_seed" in ag or "-r" in ag for ag in sys.argv):
        args.rng_seed = fileParameters['rng_seed']

    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Determine verbosity level
    loggingLevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=loggingLevel, format='')
    # Construct extension to save model
    ext = p1b2.extension_from_parameters(gParameters, '.neon')

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    #(X_train, y_train), (X_test, y_test) = p1b2.load_data(gParameters, seed)
    (X_train, y_train), (X_val,
                         y_val), (X_test,
                                  y_test) = p1b2.load_data(gParameters, seed)

    print("Shape X_train: ", X_train.shape)
    print("Shape X_val: ", X_val.shape)
    print("Shape X_test: ", X_test.shape)
    print("Shape y_train: ", y_train.shape)
    print("Shape y_val: ", y_val.shape)
    print("Shape y_test: ", y_test.shape)

    print("Range X_train --> Min: ", np.min(X_train), ", max: ",
          np.max(X_train))
    print("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))
    print("Range y_train --> Min: ", np.min(y_train), ", max: ",
          np.max(y_train))
    print("Range y_val --> Min: ", np.min(y_val), ", max: ", np.max(y_val))
    print("Range y_test --> Min: ", np.min(y_test), ", max: ", np.max(y_test))

    input_dim = X_train.shape[1]
    num_classes = int(np.max(y_train)) + 1
    output_dim = num_classes  # the backend one-hot encodes the classes (but requires integer class labels as input)

    # Re-generate the backend after consolidating parsing and file config
    gen_backend(backend=args.backend,
                rng_seed=seed,
                device_id=args.device_id,
                batch_size=gParameters['batch_size'],
                datatype=gParameters['data_type'],
                max_devices=args.max_devices,
                compat_mode=args.compat_mode)

    train = ArrayIterator(X=X_train, y=y_train, nclass=num_classes)
    val = ArrayIterator(X=X_val, y=y_val, nclass=num_classes)
    test = ArrayIterator(X=X_test, y=y_test, nclass=num_classes)

    # Initialize weights and learning rule
    initializer_weights = p1_common_neon.build_initializer(
        gParameters['initialization'], kerasDefaults, seed)
    initializer_bias = p1_common_neon.build_initializer(
        'constant', kerasDefaults, 0.)

    activation = p1_common_neon.get_function(gParameters['activation'])()

    # Define MLP architecture
    layers = []
    reshape = None

    for layer in gParameters['dense']:
        if layer:
            layers.append(
                Affine(nout=layer,
                       init=initializer_weights,
                       bias=initializer_bias,
                       activation=activation))
        if gParameters['dropout']:
            layers.append(Dropout(keep=(1 - gParameters['dropout'])))

    layers.append(
        Affine(nout=output_dim,
               init=initializer_weights,
               bias=initializer_bias,
               activation=activation))

    # Build MLP model
    mlp = Model(layers=layers)

    # Define cost and optimizer
    cost = GeneralizedCost(p1_common_neon.get_function(gParameters['loss'])())
    optimizer = p1_common_neon.build_optimizer(gParameters['optimizer'],
                                               gParameters['learning_rate'],
                                               kerasDefaults)

    callbacks = Callbacks(mlp, eval_set=val, metric=Accuracy(), eval_freq=1)

    # Seed random generator for training
    np.random.seed(seed)

    mlp.fit(train,
            optimizer=optimizer,
            num_epochs=gParameters['epochs'],
            cost=cost,
            callbacks=callbacks)

    # model save
    #save_fname = "model_mlp_W_" + ext
    #mlp.save_params(save_fname)

    # Evaluate model on test set
    print('Model evaluation by neon: ', mlp.eval(test, metric=Accuracy()))
    y_pred = mlp.get_outputs(test)
    #print ("Shape y_pred: ", y_pred.shape)
    scores = p1b2.evaluate_accuracy(p1_common.convert_to_class(y_pred), y_test)
    print('Evaluation on test data:', scores)
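
Here get_outputs returns per-class scores, so p1_common.convert_to_class must collapse them to integer labels before the accuracy comparison. A minimal sketch, assuming an argmax rule (hypothetical, not the p1_common source):

import numpy as np

def convert_to_class_sketch(y_scores):
    # (n_samples, n_classes) scores -> (n_samples,) integer class labels
    return np.argmax(y_scores, axis=1)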
Example No. 9

def main():

    # Get command-line parameters
    parser = get_p1b1_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b1.read_config_file(args.config_file)
    #print ('Params:', fileParameters)
    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Construct extension to save model
    ext = p1b1.extension_from_parameters(gParameters, '.mx')
    logfile = args.logfile if args.logfile else args.save + ext + '.log'
    p1b1.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Load dataset
    X_train, X_val, X_test = p1b1.load_data(gParameters, seed)
    
    print ("Shape X_train: ", X_train.shape)
    print ("Shape X_val: ", X_val.shape)
    print ("Shape X_test: ", X_test.shape)

    print ("Range X_train --> Min: ", np.min(X_train), ", max: ", np.max(X_train))
    print ("Range X_val --> Min: ", np.min(X_val), ", max: ", np.max(X_val))
    print ("Range X_test --> Min: ", np.min(X_test), ", max: ", np.max(X_test))


    # Set input and target to X_train
    train_iter = mx.io.NDArrayIter(X_train, X_train, gParameters['batch_size'],
                                   shuffle=gParameters['shuffle'])
    val_iter = mx.io.NDArrayIter(X_val, X_val, gParameters['batch_size'])
    test_iter = mx.io.NDArrayIter(X_test, X_test, gParameters['batch_size'])
    
    net = mx.sym.Variable('data')
    out = mx.sym.Variable('softmax_label')
    input_dim = X_train.shape[1]
    output_dim = input_dim

    # Initialize weights and learning rule
    initializer_weights = p1_common_mxnet.build_initializer(
        gParameters['initialization'], kerasDefaults)
    initializer_bias = p1_common_mxnet.build_initializer(
        'constant', kerasDefaults, 0.)
    init = mx.initializer.Mixed(['bias', '.*'],
                                [initializer_bias, initializer_weights])
    
    activation = gParameters['activation']

    # Define Autoencoder architecture
    layers = gParameters['dense']
    
    if layers is not None:
        if not isinstance(layers, list):
            layers = list(layers)
        # Encoder Part
        for i, l in enumerate(layers):
            net = mx.sym.FullyConnected(data=net, num_hidden=l)
            net = mx.sym.Activation(data=net, act_type=activation)
        # Decoder Part
        for i, l in reversed(list(enumerate(layers))):
            if i < len(layers) - 1:
                net = mx.sym.FullyConnected(data=net, num_hidden=l)
                net = mx.sym.Activation(data=net, act_type=activation)
                    
    net = mx.sym.FullyConnected(data=net, num_hidden=output_dim)
    #net = mx.sym.Activation(data=net, act_type=activation)
    net = mx.symbol.LinearRegressionOutput(data=net, label=out)


    # Display model
    p1_common_mxnet.plot_network(net, 'net' + ext)

    # Define context
    devices = mx.cpu()
    if gParameters['gpus']:
        devices = [mx.gpu(i) for i in gParameters['gpus']]
    

    # Build Autoencoder model
    ae = mx.mod.Module(symbol=net, context=devices)

    # Define optimizer
    optimizer = p1_common_mxnet.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    # Seed random generator for training
    mx.random.seed(seed)

    freq_log = 1
    ae.fit(train_iter,
           eval_data=val_iter,
           eval_metric=gParameters['loss'],
           optimizer=optimizer,
           num_epoch=gParameters['epochs'])
    # epoch_end_callback=mx.callback.Speedometer(gParameters['batch_size'], freq_log)

    # model save
    #save_filepath = "model_ae_" + ext
    #ae.save(save_filepath)

    # Evaluate model on test set
    X_pred = ae.predict(test_iter).asnumpy()
    #print ("Shape X_pred: ", X_pred.shape)
    
    scores = p1b1.evaluate_autoencoder(X_pred, X_test)
    print('Evaluation on test data:', scores)

    diff = X_pred - X_test
    plt.hist(diff.ravel(), bins='auto')
    plt.title("Histogram of Errors with 'auto' bins")
    plt.savefig('histogram_mx.png')
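
The closing histogram visualizes the elementwise reconstruction error. The same summary can be obtained numerically, without matplotlib, from the X_pred and X_test arrays (and the np import) already in scope above:

errors = (X_pred - X_test).ravel()
counts, edges = np.histogram(errors, bins='auto')
print('error mean: %.4g, std: %.4g, bins: %d'
      % (errors.mean(), errors.std(), len(counts)))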
Example No. 10
def main():
    # Get command-line parameters
    parser = get_p1b3_parser()
    args = parser.parse_args()
    #print('Args:', args)
    # Get parameters from configuration file
    fileParameters = p1b3.read_config_file(args.config_file)
    #print ('Params:', fileParameters)

    # Correct for arguments set by default by the neon parser
    # (i.e. fall back to the config-file value instead of the neon parser
    # default, unless the value was actually given on the command line)
    # This applies to the conflicting parameters: batch_size, epochs and rng_seed
    if not any("--batch_size" in ag or "-z" in ag for ag in sys.argv):
        args.batch_size = fileParameters['batch_size']
    if not any("--epochs" in ag or "-e" in ag for ag in sys.argv):
        args.epochs = fileParameters['epochs']
    if not any("--rng_seed" in ag or "-r" in ag for ag in sys.argv):
        args.rng_seed = fileParameters['rng_seed']

    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = p1_common.args_overwrite_config(args, fileParameters)
    print('Params:', gParameters)

    # Determine verbosity level
    loggingLevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=loggingLevel, format='')
    # Construct extension to save model
    ext = p1b3.extension_from_parameters(gParameters, '.neon')

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Build dataset loader object
    loader = p1b3.DataLoader(
        seed=seed,
        dtype=gParameters['datatype'],
        val_split=gParameters['validation_split'],
        test_cell_split=gParameters['test_cell_split'],
        cell_features=gParameters['cell_features'],
        drug_features=gParameters['drug_features'],
        feature_subsample=gParameters['feature_subsample'],
        scaling=gParameters['scaling'],
        scramble=gParameters['scramble'],
        min_logconc=gParameters['min_logconc'],
        max_logconc=gParameters['max_logconc'],
        subsample=gParameters['subsample'],
        category_cutoffs=gParameters['category_cutoffs'])

    # Re-generate the backend after consolidating parsing and file config
    gen_backend(backend=args.backend,
                rng_seed=seed,
                device_id=args.device_id,
                batch_size=gParameters['batch_size'],
                datatype=gParameters['datatype'],
                max_devices=args.max_devices,
                compat_mode=args.compat_mode)

    # Initialize weights and learning rule
    initializer_weights = p1_common_neon.build_initializer(
        gParameters['initialization'], kerasDefaults, seed)
    initializer_bias = p1_common_neon.build_initializer(
        'constant', kerasDefaults, 0.)

    activation = p1_common_neon.get_function(gParameters['activation'])()

    # Define model architecture
    layers = []
    reshape = None

    if 'dense' in gParameters:  # Build dense layers
        for layer in gParameters['dense']:
            if layer:
                layers.append(
                    Affine(nout=layer,
                           init=initializer_weights,
                           bias=initializer_bias,
                           activation=activation))
            if gParameters['drop']:
                layers.append(Dropout(keep=(1 - gParameters['drop'])))
    else:  # Build convolutional layers
        reshape = (1, loader.input_dim, 1)
        layer_list = list(range(0, len(gParameters['conv']), 3))
        for l, i in enumerate(layer_list):
            nb_filter = gParameters['conv'][i]
            filter_len = gParameters['conv'][i + 1]
            stride = gParameters['conv'][i + 2]
            # print(nb_filter, filter_len, stride)
            # fshape: (height, width, num_filters).
            layers.append(
                Conv((1, filter_len, nb_filter),
                     strides={
                         'str_h': 1,
                         'str_w': stride
                     },
                     init=initializer_weights,
                     activation=activation))
            if gParameters['pool']:
                layers.append(Pooling((1, gParameters['pool'])))

    layers.append(
        Affine(nout=1,
               init=initializer_weights,
               bias=initializer_bias,
               activation=neon.transforms.Identity()))

    # Build model
    model = Model(layers=layers)

    # Define neon data iterators
    train_samples = int(loader.n_train)
    val_samples = int(loader.n_val)

    if 'train_samples' in gParameters:
        train_samples = gParameters['train_samples']
    if 'val_samples' in gParameters:
        val_samples = gParameters['val_samples']

    train_iter = ConcatDataIter(loader,
                                ndata=train_samples,
                                lshape=reshape,
                                datatype=gParameters['datatype'])
    val_iter = ConcatDataIter(loader,
                              partition='val',
                              ndata=val_samples,
                              lshape=reshape,
                              datatype=gParameters['datatype'])

    # Define cost and optimizer
    cost = GeneralizedCost(p1_common_neon.get_function(gParameters['loss'])())
    optimizer = p1_common_neon.build_optimizer(gParameters['optimizer'],
                                               gParameters['learning_rate'],
                                               kerasDefaults)

    callbacks = Callbacks(model, eval_set=val_iter,
                          eval_freq=1)  #**args.callback_args)

    model.fit(train_iter,
              optimizer=optimizer,
              num_epochs=gParameters['epochs'],
              cost=cost,
              callbacks=callbacks)
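
The 'train_samples' / 'val_samples' overrides earlier in this example are the standard dictionary-fallback pattern; equivalently (a minimal simplification):

train_samples = gParameters.get('train_samples', int(loader.n_train))
val_samples = gParameters.get('val_samples', int(loader.n_val))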