Example #1
def run(gParameters):

    print('Params:', gParameters)

    file_train = gParameters['train_data']
    file_test = gParameters['test_data']
    url = gParameters['data_url']

    train_file = data_utils.get_file(file_train,
                                     url + file_train,
                                     cache_subdir='Pilot1')
    test_file = data_utils.get_file(file_test,
                                    url + file_test,
                                    cache_subdir='Pilot1')

    X_train, Y_train, X_test, Y_test = load_data(train_file, test_file,
                                                 gParameters)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Y_train shape:', Y_train.shape)
    print('Y_test shape:', Y_test.shape)

    x_train_len = X_train.shape[1]

    # This reshape to (N, features, 1) is critical: Conv1D expects 3-D input (batch, steps, channels)

    X_train = np.expand_dims(X_train, axis=2)
    X_test = np.expand_dims(X_test, axis=2)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    model = Sequential()

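    # gParameters['conv'] is read as a flat list of (filters, kernel_size, stride)
    # triplets, e.g. [128, 20, 1, 128, 10, 1] would define two Conv1D layers.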
    layer_list = list(range(0, len(gParameters['conv']), 3))
    for l, i in enumerate(layer_list):
        filters = gParameters['conv'][i]
        filter_len = gParameters['conv'][i + 1]
        stride = gParameters['conv'][i + 2]
        print(int(i / 3), filters, filter_len, stride)
        if gParameters['pool']:
            pool_list = gParameters['pool']
            if type(pool_list) != list:
                pool_list = list(pool_list)

        if filters <= 0 or filter_len <= 0 or stride <= 0:
            break
        if 'locally_connected' in gParameters:
            model.add(
                LocallyConnected1D(filters,
                                   filter_len,
                                   strides=stride,
                                   padding='valid',
                                   input_shape=(x_train_len, 1)))
        else:
            #input layer
            if i == 0:
                model.add(
                    Conv1D(filters=filters,
                           kernel_size=filter_len,
                           strides=stride,
                           padding='valid',
                           input_shape=(x_train_len, 1)))
            else:
                model.add(
                    Conv1D(filters=filters,
                           kernel_size=filter_len,
                           strides=stride,
                           padding='valid'))
        model.add(Activation(gParameters['activation']))
        if gParameters['pool']:
            model.add(MaxPooling1D(pool_size=pool_list[int(i / 3)]))

    model.add(Flatten())

    for layer in gParameters['dense']:
        if layer:
            model.add(Dense(layer))
            model.add(Activation(gParameters['activation']))
            # Dropout has to be disabled for TensorRT; otherwise it raises an error
            if False and gParameters['drop']:
                model.add(Dropout(gParameters['drop']))
    #model.add(Dense(gParameters['classes']))
    #model.add(Activation(gParameters['out_act']), name='activation_5')
    model.add(
        Dense(gParameters['classes'],
              activation=gParameters['out_act'],
              name='activation_5'))
    #Reference case
    #model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1)))
    #model.add(Activation('relu'))
    #model.add(MaxPooling1D(pool_size=1))
    #model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid'))
    #model.add(Activation('relu'))
    #model.add(MaxPooling1D(pool_size=10))
    #model.add(Flatten())
    #model.add(Dense(200))
    #model.add(Activation('relu'))
    #model.add(Dropout(0.1))
    #model.add(Dense(20))
    #model.add(Activation('relu'))
    #model.add(Dropout(0.1))
    #model.add(Dense(CLASSES))
    #model.add(Activation('softmax'))

    kerasDefaults = p1_common.keras_default_config()

    # Define optimizer
    optimizer = p1_common_keras.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    model.summary()
    for layer in model.layers:
        print(layer.name)

    print([x.op.name for x in model.outputs])

    model.compile(loss=gParameters['loss'],
                  optimizer=optimizer,
                  metrics=[gParameters['metrics']])

    output_dir = gParameters['save']

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # calculate trainable and non-trainable params
    gParameters.update(compute_trainable_params(model))

    # set up a bunch of callbacks to do work during model training..
    model_name = gParameters['model_name']
    path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
    # checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=10,
                                  verbose=1,
                                  mode='auto',
                                  epsilon=0.0001,
                                  cooldown=0,
                                  min_lr=0)
    candleRemoteMonitor = CandleRemoteMonitor(params=gParameters)
    timeoutMonitor = TerminateOnTimeOut(TIMEOUT)
    history = model.fit(
        X_train,
        Y_train,
        batch_size=gParameters['batch_size'],
        epochs=2,  #gParameters['epochs'],
        verbose=1,
        validation_data=(X_test, Y_test),
        callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])

    score = model.evaluate(X_test, Y_test, verbose=0)

    #Begin tensorrt code
    config = {
        # Where to save models (Tensorflow + TensorRT)
        "graphdef_file":
        "/gpfs/jlse-fs0/users/pbalapra/tensorrt/Benchmarks/Pilot1/NT3/nt3.pb",
        "frozen_model_file":
        "/gpfs/jlse-fs0/users/pbalapra/tensorrt/Benchmarks/Pilot1/NT3/nt3_frozen_model.pb",
        "snapshot_dir":
        "/gpfs/jlse-fs0/users/pbalapra/tensorrt/Benchmarks/Pilot1/NT3/snapshot",
        "engine_save_dir":
        "/gpfs/jlse-fs0/users/pbalapra/tensorrt/Benchmarks/Pilot1/NT3",

        # Needed for TensorRT
        "inference_batch_size": 1,  # inference batch size
        "input_layer":
        "conv1d_1",  # name of the input tensor in the TF computational graph
        "out_layer":
        "activation_5/Softmax",  # name of the output tensorf in the TF conputational graph
        "output_size": 2,  # number of classes in output (5)
        "precision":
        "fp32"  # desired precision (fp32, fp16) "test_image_path" : "/home/data/val/roses"
    }

    # Now, let's use the Tensorflow backend to get the TF graphdef and frozen graph
    K.set_learning_phase(0)
    sess = K.get_session()
    saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)

    # save model weights in TF checkpoint
    checkpoint_path = saver.save(sess,
                                 config['snapshot_dir'],
                                 global_step=0,
                                 latest_filename='checkpoint_state')

    # remove nodes not needed for inference from graph def
    train_graph = sess.graph
    inference_graph = tf.graph_util.remove_training_nodes(
        train_graph.as_graph_def())

    #print(len([n.name for n in tf.get_default_graph().as_graph_def().node]))

    # write the graph definition to a file.
    # You can view this file to see your network structure and
    # to determine the names of your network's input/output layers.
    graph_io.write_graph(inference_graph, '.', config['graphdef_file'])

    # specify which layer is the output layer for your graph.
    # In this case, we want to specify the softmax layer after our
    # last dense (fully connected) layer.
    out_names = config['out_layer']

    # freeze your inference graph and save it for later! (Tensorflow)
    freeze_graph.freeze_graph(config['graphdef_file'], '', False,
                              checkpoint_path, out_names, "save/restore_all",
                              "save/Const:0", config['frozen_model_file'],
                              False, "")

    if False:
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # serialize model to JSON
        model_json = model.to_json()
        with open("{}/{}.model.json".format(output_dir, model_name),
                  "w") as json_file:
            json_file.write(model_json)

        # serialize model to YAML
        model_yaml = model.to_yaml()
        with open("{}/{}.model.yaml".format(output_dir, model_name),
                  "w") as yaml_file:
            yaml_file.write(model_yaml)

        # serialize weights to HDF5
        model.save_weights("{}/{}.weights.h5".format(output_dir, model_name))
        print("Saved model to disk")

        # load json and create model
        json_file = open('{}/{}.model.json'.format(output_dir, model_name),
                         'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model_json = model_from_json(loaded_model_json)

        # load yaml and create model
        yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name),
                         'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        loaded_model_yaml = model_from_yaml(loaded_model_yaml)

        # load weights into new model
        loaded_model_json.load_weights('{}/{}.weights.h5'.format(
            output_dir, model_name))
        print("Loaded json model from disk")

        # evaluate json loaded model on test data
        loaded_model_json.compile(loss=gParameters['loss'],
                                  optimizer=gParameters['optimizer'],
                                  metrics=[gParameters['metrics']])
        score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)

        print('json Test score:', score_json[0])
        print('json Test accuracy:', score_json[1])

        print("json %s: %.2f%%" %
              (loaded_model_json.metrics_names[1], score_json[1] * 100))

        # load weights into new model
        loaded_model_yaml.load_weights('{}/{}.weights.h5'.format(
            output_dir, model_name))
        print("Loaded yaml model from disk")

        # evaluate loaded model on test data
        loaded_model_yaml.compile(loss=gParameters['loss'],
                                  optimizer=gParameters['optimizer'],
                                  metrics=[gParameters['metrics']])
        score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)

        print('yaml Test score:', score_yaml[0])
        print('yaml Test accuracy:', score_yaml[1])

        print("yaml %s: %.2f%%" %
              (loaded_model_yaml.metrics_names[1], score_yaml[1] * 100))

    return history
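
# Illustrative sketch only (not part of the benchmark source): the kind of parameter
# dictionary that run() above consumes. The key names are taken from the code; the
# values are placeholders rather than the benchmark's real defaults, and the file
# names and URL are hypothetical.
example_params = {
    'train_data': 'nt_train2.csv',
    'test_data': 'nt_test2.csv',
    'data_url': 'http://example.org/Pilot1/',
    'conv': [128, 20, 1, 128, 10, 1],  # flat (filters, kernel_size, stride) triplets
    'pool': [1, 10],
    'dense': [200, 20],
    'activation': 'relu',
    'out_act': 'softmax',
    'classes': 2,
    'drop': 0.1,
    'optimizer': 'sgd',
    'learning_rate': 0.01,
    'loss': 'categorical_crossentropy',
    'metrics': 'accuracy',
    'batch_size': 20,
    'epochs': 2,
    'save': '.',
    'model_name': 'nt3',
}
# history = run(example_params)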
Example #2
def run_mtl(features_train=[],
            truths_train=[],
            features_test=[],
            truths_test=[],
            shared_nnet_spec=[],
            individual_nnet_spec=[],
            learning_rate=0.01,
            batch_size=10,
            n_epochs=100,
            dropout=0.0,
            verbose=1,
            activation='relu',
            out_act='softmax',
            loss='categorical_crossentropy',
            optimizer='sgd',
            run_id=None,
            fold=None,
            gParameters=None):

    labels_train = []
    labels_test = []

    n_out_nodes = []

    for l in range(len(truths_train)):
        truth_train_0 = truths_train[l]
        truth_test_0 = truths_test[l]

        truth_train_0 = np.array(truth_train_0, dtype='int32')
        truth_test_0 = np.array(truth_test_0, dtype='int32')

        mv = int(np.max(truth_train_0))
        label_train_0 = np.zeros((len(truth_train_0), mv + 1))
        for i in range(len(truth_train_0)):
            label_train_0[i, truth_train_0[i]] = 1
        label_test_0 = np.zeros((len(truth_test_0), mv + 1))
        for i in range(len(truth_test_0)):
            label_test_0[i, truth_test_0[i]] = 1
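        # (The two loops above one-hot encode the labels; they are equivalent to
        # keras.utils.to_categorical(..., num_classes=mv + 1) applied to truth_train_0 and truth_test_0.)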

        labels_train.append(label_train_0)
        labels_test.append(label_test_0)

        n_out_nodes.append(mv + 1)

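    # Hard parameter sharing: every task uses the same input and 'shared_layer_*' stack
    # built below; each task l then gets its own 'indiv_layer_l_*' head and 'out_l'
    # output, and one Model per task is created over the shared trunk.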
    shared_layers = []

    # input layer
    layer = Input(shape=(len(features_train[0][0]), ), name='input')
    shared_layers.append(layer)

    # shared layers
    for k in range(len(shared_nnet_spec)):
        layer = Dense(shared_nnet_spec[k],
                      activation=activation,
                      name='shared_layer_' + str(k))(shared_layers[-1])
        shared_layers.append(layer)
        if dropout > 0:
            layer = Dropout(dropout)(shared_layers[-1])
            shared_layers.append(layer)

    # individual layers
    indiv_layers_arr = []
    models = []

    trainable_count = 0
    non_trainable_count = 0

    for l in range(len(individual_nnet_spec)):
        indiv_layers = [shared_layers[-1]]
        for k in range(len(individual_nnet_spec[l]) + 1):
            if k < len(individual_nnet_spec[l]):
                layer = Dense(individual_nnet_spec[l][k],
                              activation=activation,
                              name='indiv_layer_' + str(l) + '_' + str(k))(
                                  indiv_layers[-1])
                indiv_layers.append(layer)
                if dropout > 0:
                    layer = Dropout(dropout)(indiv_layers[-1])
                    indiv_layers.append(layer)
            else:
                layer = Dense(n_out_nodes[l],
                              activation=out_act,
                              name='out_' + str(l))(indiv_layers[-1])
                indiv_layers.append(layer)

        indiv_layers_arr.append(indiv_layers)

        model = Model(inputs=[shared_layers[0]], outputs=[indiv_layers[-1]])

        # calculate trainable/non-trainable param count for each model
        param_counts = compute_trainable_params(model)
        trainable_count += param_counts['trainable_params']
        non_trainable_count += param_counts['non_trainable_params']

        models.append(model)

    # capture total param counts
    gParameters['trainable_params'] = trainable_count
    gParameters['non_trainable_params'] = non_trainable_count
    gParameters['total_params'] = trainable_count + non_trainable_count

    kerasDefaults = p3c.keras_default_config()
    optimizer = p3ck.build_optimizer(optimizer, learning_rate, kerasDefaults)

    # DEBUG - verify
    if verbose == 1:
        for k in range(len(models)):
            model = models[k]
            print('Model:', k)
            model.summary()

    for k in range(len(models)):
        model = models[k]
        model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])

    # preserve the base run_id; per-fold/epoch/model ids are derived from it below and restored after training
    base_run_id = run_id

    # train
    for epoch in range(n_epochs):
        for k in range(len(models)):
            feature_train = features_train[k]
            label_train = labels_train[k]
            feature_test = features_test[k]
            label_test = labels_test[k]
            model = models[k]

            gParameters['run_id'] = base_run_id + ".{}.{}.{}".format(
                fold, epoch, k)
            candleRemoteMonitor = CandleRemoteMonitor(params=gParameters)
            timeoutMonitor = TerminateOnTimeOut(TIMEOUT)

            model.fit({'input': feature_train}, {'out_' + str(k): label_train},
                      epochs=1,
                      verbose=verbose,
                      callbacks=[candleRemoteMonitor, timeoutMonitor],
                      batch_size=batch_size,
                      validation_data=(feature_test, label_test))

    gParameters['run_id'] = base_run_id

    # retrieve truth-pred pair
    avg_loss = 0.0
    ret = []

    for k in range(len(models)):
        ret_k = []

        feature_test = features_test[k]
        truth_test = truths_test[k]
        label_test = labels_test[k]
        model = models[k]

        loss = model.evaluate(feature_test, label_test)
        avg_loss = avg_loss + loss[0]

        pred = model.predict(feature_test)

        ret_k.append(truth_test)
        ret_k.append(np.argmax(pred, axis=1))

        ret.append(ret_k)

    avg_loss = avg_loss / float(len(models))
    ret.append(avg_loss)

    return ret
Example #3
def run(gParameters):

    print('Params:', gParameters)

    file_train = gParameters['train_data']
    file_test = gParameters['test_data']
    url = gParameters['data_url']
    '''path = '/home/orlandomelchor/Desktop/Research 2018-2019/CANDLE/'
    tr_file = 'nt_train2.csv'
    te_file = 'nt_train2.csv'

    train_file = path + tr_file
    test_file = path + te_file
    X_train, Y_train, X_test, Y_test = load_data(train_file, test_file, gParameters)'''
    path = '../data-05-31-2018/'
    full_data_file = 'formatted_full_data.csv'
    X_train, Y_train, X_test, Y_test = load_data(path+full_data_file, gParameters)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Y_train shape:', Y_train.shape)
    print('Y_test shape:', Y_test.shape)

    x_train_len = X_train.shape[1]

    # (no Conv1D reshape here: this variant uses a dense-only model)

    model = Sequential()

    for layer in gParameters['dense']:
        if layer:
            model.add(Dense(layer, input_shape=(x_train_len,)))
            model.add(Activation(gParameters['activation']))
            if gParameters['drop']:
                model.add(Dropout(gParameters['drop']))
    model.add(Dense(gParameters['classes']))
    model.add(Activation(gParameters['out_act']))

    #Reference case
    #model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1)))
    #model.add(Activation('relu'))
    #model.add(MaxPooling1D(pool_size=1))
    #model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid'))
    #model.add(Activation('relu'))
    #model.add(MaxPooling1D(pool_size=10))
    #model.add(Flatten())
    #model.add(Dense(200))
    #model.add(Activation('relu'))
    #model.add(Dropout(0.1))
    #model.add(Dense(20))
    #model.add(Activation('relu'))
    #model.add(Dropout(0.1))
    #model.add(Dense(CLASSES))
    #model.add(Activation('softmax'))

    kerasDefaults = p1_common.keras_default_config()

    # Define optimizer
    optimizer = p1_common_keras.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    model.summary()
    model.compile(loss=gParameters['loss'],
                  optimizer=optimizer,
                  metrics=[gParameters['metrics']])

    output_dir = gParameters['save']

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # calculate trainable and non-trainable params
    gParameters.update(compute_trainable_params(model))

    # set up a bunch of callbacks to do work during model training..
    model_name = gParameters['model_name']
    path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
    # checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
    candleRemoteMonitor = CandleRemoteMonitor(params=gParameters)
    timeoutMonitor = TerminateOnTimeOut(TIMEOUT)
    history = model.fit(X_train, Y_train,
                    batch_size=gParameters['batch_size'],
                    epochs=gParameters['epochs'],
                    verbose=1,
                    validation_data=(X_test, Y_test),
                    callbacks = [csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])

    score = model.evaluate(X_test, Y_test, verbose=0)

    if True:
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # serialize model to JSON
        model_json = model.to_json()
        with open("{}/{}.model.json".format(output_dir, model_name), "w") as json_file:
            json_file.write(model_json)

        # serialize model to YAML
        model_yaml = model.to_yaml()
        with open("{}/{}.model.yaml".format(output_dir, model_name), "w") as yaml_file:
            yaml_file.write(model_yaml)

        # serialize model to HDF5
        model.save('{}/{}.model.h5'.format(output_dir, model_name))
        print("Saved model to disk")

        # load json and create model
        json_file = open('{}/{}.model.json'.format(output_dir, model_name), 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model_json = model_from_json(loaded_model_json)


        # load yaml and create model
        yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name), 'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        loaded_model_yaml = model_from_yaml(loaded_model_yaml)


        # load weights into new model
        loaded_model_json.load_weights('{}/{}.model.h5'.format(output_dir, model_name))
        print("Loaded json model from disk")

        # evaluate json loaded model on test data
        loaded_model_json.compile(loss=gParameters['loss'],
            optimizer=gParameters['optimizer'],
            metrics=[gParameters['metrics']])
        score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)

        print('json Test score:', score_json[0])
        print('json Test accuracy:', score_json[1])

        print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1]*100))

        # load weights into new model
        loaded_model_yaml.load_weights('{}/{}.model.h5'.format(output_dir, model_name))
        print("Loaded yaml model from disk")

        # evaluate loaded model on test data
        loaded_model_yaml.compile(loss=gParameters['loss'],
            optimizer=gParameters['optimizer'],
            metrics=[gParameters['metrics']])
        score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)

        print('yaml Test score:', score_yaml[0])
        print('yaml Test accuracy:', score_yaml[1])

        print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1]*100))

        with open('{}/{}_accuracy.txt'.format(output_dir, model_name), 'w') as acc_file:
            acc_file.write(str(round(score_yaml[1], 4) * 100))
    return history
Example #4
def run(params):
    args = Struct(**params)
    set_seed(args.rng_seed)
    ext = extension_from_parameters(args)
    verify_path(args.save)
    prefix = args.save + ext
    logfile = args.logfile if args.logfile else prefix + '.log'
    set_up_logger(logfile, args.verbose)
    logger.info('Params: {}'.format(params))

    loader = CombinedDataLoader(seed=args.rng_seed)
    loader.load(
        cache=args.cache,
        ncols=args.feature_subsample,
        cell_features=args.cell_features,
        drug_features=args.drug_features,
        drug_median_response_min=args.drug_median_response_min,
        drug_median_response_max=args.drug_median_response_max,
        use_landmark_genes=args.use_landmark_genes,
        use_filtered_genes=args.use_filtered_genes,
        preprocess_rnaseq=args.preprocess_rnaseq,
        single=args.single,
        train_sources=args.train_sources,
        test_sources=args.test_sources,
        embed_feature_source=not args.no_feature_source,
        encode_response_source=not args.no_response_source,
    )

    val_split = args.validation_split
    train_split = 1 - val_split

    if args.export_data:
        fname = args.export_data
        loader.partition_data(cv_folds=args.cv,
                              train_split=train_split,
                              val_split=val_split,
                              cell_types=args.cell_types,
                              by_cell=args.by_cell,
                              by_drug=args.by_drug)
        train_gen = CombinedDataGenerator(loader,
                                          batch_size=args.batch_size,
                                          shuffle=args.shuffle)
        val_gen = CombinedDataGenerator(loader,
                                        partition='val',
                                        batch_size=args.batch_size,
                                        shuffle=args.shuffle)
        x_train_list, y_train = train_gen.get_slice(size=train_gen.size,
                                                    dataframe=True,
                                                    single=args.single)
        x_val_list, y_val = val_gen.get_slice(size=val_gen.size,
                                              dataframe=True,
                                              single=args.single)
        df_train = pd.concat([y_train] + x_train_list, axis=1)
        df_val = pd.concat([y_val] + x_val_list, axis=1)
        df = pd.concat([df_train, df_val]).reset_index(drop=True)
        if args.growth_bins > 1:
            df = uno_data.discretize(df, 'Growth', bins=args.growth_bins)
        df.to_csv(fname, sep='\t', index=False, float_format="%.3g")
        return

    loader.partition_data(cv_folds=args.cv,
                          train_split=train_split,
                          val_split=val_split,
                          cell_types=args.cell_types,
                          by_cell=args.by_cell,
                          by_drug=args.by_drug)

    model = build_model(loader, args)
    logger.info('Combined model:')
    # model.summary(print_fn=logger.info)
    # plot_model(model, to_file=prefix+'.model.png', show_shapes=True)

    if args.cp:
        model_json = model.to_json()
        with open(prefix + '.model.json', 'w') as f:
            print(model_json, file=f)

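    # Warm-up: linearly ramp the optimizer's learning rate from base_lr (epoch 0) to the
    # target lr over the first 5 epochs; later epochs leave the rate unchanged.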
    def warmup_scheduler(epoch):
        lr = args.learning_rate or base_lr * args.batch_size / 100
        if epoch <= 5:
            K.set_value(model.optimizer.lr,
                        (base_lr * (5 - epoch) + lr * epoch) / 5)
        logger.debug('Epoch {}: lr={:.5g}'.format(
            epoch, K.get_value(model.optimizer.lr)))
        return K.get_value(model.optimizer.lr)

    df_pred_list = []

    cv_ext = ''
    cv = args.cv if args.cv > 1 else 1

    for fold in range(cv):
        if args.cv > 1:
            logger.info('Cross validation fold {}/{}:'.format(fold + 1, cv))
            cv_ext = '.cv{}'.format(fold + 1)

        model = build_model(loader, args, silent=True)

        optimizer = optimizers.deserialize({
            'class_name': args.optimizer,
            'config': {}
        })
        base_lr = args.base_lr or K.get_value(optimizer.lr)
        if args.learning_rate:
            K.set_value(optimizer.lr, args.learning_rate)

        model.compile(loss=args.loss, optimizer=optimizer, metrics=[mae, r2])

        # calculate trainable and non-trainable params
        params.update(candle.compute_trainable_params(model))

        candle_monitor = candle.CandleRemoteMonitor(params=params)
        timeout_monitor = candle.TerminateOnTimeOut(params['timeout'])

        reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                      factor=0.5,
                                      patience=5,
                                      min_lr=0.00001)
        warmup_lr = LearningRateScheduler(warmup_scheduler)
        checkpointer = ModelCheckpoint(prefix + cv_ext + '.weights.h5',
                                       save_best_only=True,
                                       save_weights_only=True)
        tensorboard = TensorBoard(log_dir="tb/tb{}{}".format(ext, cv_ext))
        history_logger = LoggingCallback(logger.debug)
        model_recorder = ModelRecorder()

        # callbacks = [history_logger, model_recorder]
        callbacks = [
            candle_monitor, timeout_monitor, history_logger, model_recorder
        ]
        if args.reduce_lr:
            callbacks.append(reduce_lr)
        if args.warmup_lr:
            callbacks.append(warmup_lr)
        if args.cp:
            callbacks.append(checkpointer)
        if args.tb:
            callbacks.append(tensorboard)

        train_gen = CombinedDataGenerator(loader,
                                          fold=fold,
                                          batch_size=args.batch_size,
                                          shuffle=args.shuffle)
        val_gen = CombinedDataGenerator(loader,
                                        partition='val',
                                        fold=fold,
                                        batch_size=args.batch_size,
                                        shuffle=args.shuffle)

        df_val = val_gen.get_response(copy=True)
        y_val = df_val['Growth'].values
        y_shuf = np.random.permutation(y_val)
        log_evaluation(evaluate_prediction(y_val, y_shuf),
                       description='Between random pairs in y_val:')

        candleRemoteMonitor = CandleRemoteMonitor(params=params)

        callbacks.append(candleRemoteMonitor)

        if args.no_gen:
            x_train_list, y_train = train_gen.get_slice(size=train_gen.size,
                                                        single=args.single)
            x_val_list, y_val = val_gen.get_slice(size=val_gen.size,
                                                  single=args.single)
            history = model.fit(x_train_list,
                                y_train,
                                batch_size=args.batch_size,
                                epochs=args.epochs,
                                callbacks=callbacks,
                                validation_data=(x_val_list, y_val))
        else:
            logger.info('Data points per epoch: train = %d, val = %d',
                        train_gen.size, val_gen.size)
            logger.info('Steps per epoch: train = %d, val = %d',
                        train_gen.steps, val_gen.steps)
            history = model.fit_generator(
                train_gen.flow(single=args.single),
                train_gen.steps,
                epochs=args.epochs,
                callbacks=callbacks,
                validation_data=val_gen.flow(single=args.single),
                validation_steps=val_gen.steps)

        if args.cp:
            model.load_weights(prefix + cv_ext + '.weights.h5')
        # model = model_recorder.best_model

        if args.no_gen:
            y_val_pred = model.predict(x_val_list, batch_size=args.batch_size)
        else:
            val_gen.reset()
            y_val_pred = model.predict_generator(
                val_gen.flow(single=args.single), val_gen.steps)
            y_val_pred = y_val_pred[:val_gen.size]

        y_val_pred = y_val_pred.flatten()

        scores = evaluate_prediction(y_val, y_val_pred)
        log_evaluation(scores)

        df_val = df_val.assign(PredictedGrowth=y_val_pred,
                               GrowthError=y_val_pred - y_val)
        df_pred_list.append(df_val)

        plot_history(prefix, history, 'loss')
        plot_history(prefix, history, 'r2')

    pred_fname = prefix + '.predicted.tsv'
    df_pred = pd.concat(df_pred_list)
    df_pred.sort_values(
        ['Source', 'Sample', 'Drug1', 'Drug2', 'Dose1', 'Dose2', 'Growth'],
        inplace=True)
    df_pred.to_csv(pred_fname, sep='\t', index=False, float_format='%.4g')

    if args.cv > 1:
        scores = evaluate_prediction(df_pred['Growth'],
                                     df_pred['PredictedGrowth'])
        log_evaluation(scores, description='Combining cross validation folds:')

    for test_source in loader.test_sep_sources:
        test_gen = CombinedDataGenerator(loader,
                                         partition='test',
                                         batch_size=args.batch_size,
                                         source=test_source)
        df_test = test_gen.get_response(copy=True)
        y_test = df_test['Growth'].values
        n_test = len(y_test)
        if n_test == 0:
            continue
        if args.no_gen:
            x_test_list, y_test = test_gen.get_slice(size=test_gen.size,
                                                     single=args.single)
            y_test_pred = model.predict(x_test_list,
                                        batch_size=args.batch_size)
        else:
            y_test_pred = model.predict_generator(
                test_gen.flow(single=args.single), test_gen.steps)
            y_test_pred = y_test_pred[:test_gen.size]
        y_test_pred = y_test_pred.flatten()
        scores = evaluate_prediction(y_test, y_test_pred)
        log_evaluation(scores,
                       description='Testing on data from {} ({})'.format(
                           test_source, n_test))

    if K.backend() == 'tensorflow':
        K.clear_session()

    logger.handlers = []

    return history
Example #5
def run(params):
    args = Struct(**params)
    set_seed(args.rng_seed)
    ext = extension_from_parameters(args)
    prefix = args.save + ext
    logfile = args.logfile if args.logfile else prefix + '.log'
    set_up_logger(logfile, args.verbose)
    logger.info('Params: {}'.format(params))

    loader = ComboDataLoader(seed=args.rng_seed,
                             val_split=args.validation_split,
                             cell_features=args.cell_features,
                             drug_features=args.drug_features,
                             use_landmark_genes=args.use_landmark_genes,
                             use_combo_score=args.use_combo_score,
                             cv_partition=args.cv_partition,
                             cv=args.cv)
    # test_loader(loader)
    # test_generator(loader)

    train_gen = ComboDataGenerator(loader, batch_size=args.batch_size).flow()
    val_gen = ComboDataGenerator(loader,
                                 partition='val',
                                 batch_size=args.batch_size).flow()

    train_steps = int(loader.n_train / args.batch_size)
    val_steps = int(loader.n_val / args.batch_size)

    model = build_model(loader, args, verbose=True)
    model.summary()
    # plot_model(model, to_file=prefix+'.model.png', show_shapes=True)

    if args.cp:
        model_json = model.to_json()
        with open(prefix + '.model.json', 'w') as f:
            print(model_json, file=f)

    def warmup_scheduler(epoch):
        lr = args.learning_rate or base_lr * args.batch_size / 100
        if epoch <= 5:
            K.set_value(model.optimizer.lr,
                        (base_lr * (5 - epoch) + lr * epoch) / 5)
        logger.debug('Epoch {}: lr={}'.format(epoch,
                                              K.get_value(model.optimizer.lr)))
        return K.get_value(model.optimizer.lr)

    df_pred_list = []

    cv_ext = ''
    cv = args.cv if args.cv > 1 else 1

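    # A while-loop (rather than a for-loop) so a cross-validation fold can be retrained
    # when its best val_loss exceeds args.max_val_loss (see the 'continue' below).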
    fold = 0
    while fold < cv:
        if args.cv > 1:
            logger.info('Cross validation fold {}/{}:'.format(fold + 1, cv))
            cv_ext = '.cv{}'.format(fold + 1)

        model = build_model(loader, args)

        optimizer = optimizers.deserialize({
            'class_name': args.optimizer,
            'config': {}
        })
        base_lr = args.base_lr or K.get_value(optimizer.lr)
        if args.learning_rate:
            K.set_value(optimizer.lr, args.learning_rate)

        model.compile(loss=args.loss, optimizer=optimizer, metrics=[mae, r2])

        # calculate trainable and non-trainable params
        params.update(compute_trainable_params(model))

        candle_monitor = CandleRemoteMonitor(params=params)
        timeout_monitor = TerminateOnTimeOut(params['timeout'])

        reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                      factor=0.5,
                                      patience=5,
                                      min_lr=0.00001)
        warmup_lr = LearningRateScheduler(warmup_scheduler)
        checkpointer = ModelCheckpoint(prefix + cv_ext + '.weights.h5',
                                       save_best_only=True,
                                       save_weights_only=True)
        tensorboard = TensorBoard(log_dir="tb/tb{}{}".format(ext, cv_ext))
        history_logger = LoggingCallback(logger.debug)
        model_recorder = ModelRecorder()

        # callbacks = [history_logger, model_recorder]
        callbacks = [
            candle_monitor, timeout_monitor, history_logger, model_recorder
        ]
        if args.reduce_lr:
            callbacks.append(reduce_lr)
        if args.warmup_lr:
            callbacks.append(warmup_lr)
        if args.cp:
            callbacks.append(checkpointer)
        if args.tb:
            callbacks.append(tensorboard)

        if args.gen:
            history = model.fit_generator(train_gen,
                                          train_steps,
                                          epochs=args.epochs,
                                          callbacks=callbacks,
                                          validation_data=val_gen,
                                          validation_steps=val_steps)
        else:
            if args.cv > 1:
                x_train_list, y_train, x_val_list, y_val, df_train, df_val = loader.load_data_cv(
                    fold)
            else:
                x_train_list, y_train, x_val_list, y_val, df_train, df_val = loader.load_data(
                )

            y_shuf = np.random.permutation(y_val)
            log_evaluation(evaluate_prediction(y_val, y_shuf),
                           description='Between random pairs in y_val:')
            history = model.fit(x_train_list,
                                y_train,
                                batch_size=args.batch_size,
                                shuffle=args.shuffle,
                                epochs=args.epochs,
                                callbacks=callbacks,
                                validation_data=(x_val_list, y_val))

        if args.cp:
            model.load_weights(prefix + cv_ext + '.weights.h5')

        if not args.gen:
            y_val_pred = model.predict(x_val_list,
                                       batch_size=args.batch_size).flatten()
            scores = evaluate_prediction(y_val, y_val_pred)
            if args.cv > 1 and scores[args.loss] > args.max_val_loss:
                logger.warning(
                    'Best val_loss {} is greater than {}; retrain the model...'
                    .format(scores[args.loss], args.max_val_loss))
                continue
            else:
                fold += 1
            log_evaluation(scores)
            df_val.is_copy = False
            df_val['GROWTH_PRED'] = y_val_pred
            df_val['GROWTH_ERROR'] = y_val_pred - y_val
            df_pred_list.append(df_val)

        if args.cp:
            # model.save(prefix+'.model.h5')
            model_recorder.best_model.save(prefix + '.model.h5')

            # test reloaded model prediction
            new_model = keras.models.load_model(prefix + '.model.h5')
            new_model.load_weights(prefix + cv_ext + '.weights.h5')
            new_pred = new_model.predict(x_val_list,
                                         batch_size=args.batch_size).flatten()
            # print('y_val:', y_val[:10])
            # print('old_pred:', y_val_pred[:10])
            # print('new_pred:', new_pred[:10])

        plot_history(prefix, history, 'loss')
        plot_history(prefix, history, 'r2')

        if K.backend() == 'tensorflow':
            K.clear_session()

    pred_fname = prefix + '.predicted.growth.tsv'
    if args.use_combo_score:
        pred_fname = prefix + '.predicted.score.tsv'
    df_pred = pd.concat(df_pred_list)
    df_pred.to_csv(pred_fname, sep='\t', index=False, float_format='%.4g')

    logger.handlers = []

    return history
Example #6
def run(gParameters):

    print('Params:', gParameters)

    file_train = gParameters['train_data']
    file_test = gParameters['test_data']
    url = gParameters['data_url']

    train_file = data_utils.get_file(file_train,
                                     url + file_train,
                                     cache_subdir='Pilot1')
    test_file = data_utils.get_file(file_test,
                                    url + file_test,
                                    cache_subdir='Pilot1')

    X_train, Y_train, X_test, Y_test = load_data(train_file, test_file,
                                                 gParameters)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    print('Y_train shape:', Y_train.shape)
    print('Y_test shape:', Y_test.shape)

    x_train_len = X_train.shape[1]

    # This reshape to (N, features, 1) is critical: Conv1D expects 3-D input (batch, steps, channels)

    X_train = np.expand_dims(X_train, axis=2)
    X_test = np.expand_dims(X_test, axis=2)

    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)

    model = Sequential()

    layer_list = list(range(0, len(gParameters['conv']), 3))
    for l, i in enumerate(layer_list):
        filters = gParameters['conv'][i]
        filter_len = gParameters['conv'][i + 1]
        stride = gParameters['conv'][i + 2]
        print(int(i / 3), filters, filter_len, stride)
        if gParameters['pool']:
            pool_list = gParameters['pool']
            if type(pool_list) != list:
                pool_list = list(pool_list)

        if filters <= 0 or filter_len <= 0 or stride <= 0:
            break
        if 'locally_connected' in gParameters:
            model.add(
                LocallyConnected1D(filters,
                                   filter_len,
                                   strides=stride,
                                   padding='valid',
                                   input_shape=(x_train_len, 1)))
        else:
            #input layer
            if i == 0:
                model.add(
                    Conv1D(filters=filters,
                           kernel_size=filter_len,
                           strides=stride,
                           padding='valid',
                           input_shape=(x_train_len, 1)))
            else:
                model.add(
                    Conv1D(filters=filters,
                           kernel_size=filter_len,
                           strides=stride,
                           padding='valid'))
        model.add(Activation(gParameters['activation']))
        if gParameters['pool']:
            model.add(MaxPooling1D(pool_size=pool_list[int(i / 3)]))

    model.add(Flatten())

    for layer in gParameters['dense']:
        if layer:
            model.add(Dense(layer))
            model.add(Activation(gParameters['activation']))
            if gParameters['drop']:
                model.add(Dropout(gParameters['drop']))
    model.add(Dense(gParameters['classes']))
    model.add(Activation(gParameters['out_act']))

    #Reference case
    #model.add(Conv1D(filters=128, kernel_size=20, strides=1, padding='valid', input_shape=(P, 1)))
    #model.add(Activation('relu'))
    #model.add(MaxPooling1D(pool_size=1))
    #model.add(Conv1D(filters=128, kernel_size=10, strides=1, padding='valid'))
    #model.add(Activation('relu'))
    #model.add(MaxPooling1D(pool_size=10))
    #model.add(Flatten())
    #model.add(Dense(200))
    #model.add(Activation('relu'))
    #model.add(Dropout(0.1))
    #model.add(Dense(20))
    #model.add(Activation('relu'))
    #model.add(Dropout(0.1))
    #model.add(Dense(CLASSES))
    #model.add(Activation('softmax'))

    kerasDefaults = p1_common.keras_default_config()

    # Define optimizer
    optimizer = p1_common_keras.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    model.summary()
    model.compile(loss=gParameters['loss'],
                  optimizer=optimizer,
                  metrics=[gParameters['metrics']])

    output_dir = gParameters['save']

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # calculate trainable and non-trainable params
    gParameters.update(compute_trainable_params(model))

    # set up a bunch of callbacks to do work during model training..
    model_name = gParameters['model_name']
    path = '{}/{}.autosave.model.h5'.format(output_dir, model_name)
    # checkpointer = ModelCheckpoint(filepath=path, verbose=1, save_weights_only=False, save_best_only=True)
    csv_logger = CSVLogger('{}/training.log'.format(output_dir))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=10,
                                  verbose=1,
                                  mode='auto',
                                  epsilon=0.0001,
                                  cooldown=0,
                                  min_lr=0)
    candleRemoteMonitor = CandleRemoteMonitor(params=gParameters)
    timeoutMonitor = TerminateOnTimeOut(TIMEOUT)
    history = model.fit(
        X_train,
        Y_train,
        batch_size=gParameters['batch_size'],
        epochs=gParameters['epochs'],
        verbose=1,
        validation_data=(X_test, Y_test),
        callbacks=[csv_logger, reduce_lr, candleRemoteMonitor, timeoutMonitor])

    score = model.evaluate(X_test, Y_test, verbose=0)

    if False:
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # serialize model to JSON
        model_json = model.to_json()
        with open("{}/{}.model.json".format(output_dir, model_name),
                  "w") as json_file:
            json_file.write(model_json)

        # serialize model to YAML
        model_yaml = model.to_yaml()
        with open("{}/{}.model.yaml".format(output_dir, model_name),
                  "w") as yaml_file:
            yaml_file.write(model_yaml)

        # serialize weights to HDF5
        model.save_weights("{}/{}.weights.h5".format(output_dir, model_name))
        print("Saved model to disk")

        # load json and create model
        json_file = open('{}/{}.model.json'.format(output_dir, model_name),
                         'r')
        loaded_model_json = json_file.read()
        json_file.close()
        loaded_model_json = model_from_json(loaded_model_json)

        # load yaml and create model
        yaml_file = open('{}/{}.model.yaml'.format(output_dir, model_name),
                         'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        loaded_model_yaml = model_from_yaml(loaded_model_yaml)

        # load weights into new model
        loaded_model_json.load_weights('{}/{}.weights.h5'.format(
            output_dir, model_name))
        print("Loaded json model from disk")

        # evaluate json loaded model on test data
        loaded_model_json.compile(loss=gParameters['loss'],
                                  optimizer=gParameters['optimizer'],
                                  metrics=[gParameters['metrics']])
        score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)

        print('json Test score:', score_json[0])
        print('json Test accuracy:', score_json[1])

        print("json %s: %.2f%%" %
              (loaded_model_json.metrics_names[1], score_json[1] * 100))

        # load weights into new model
        loaded_model_yaml.load_weights('{}/{}.weights.h5'.format(
            output_dir, model_name))
        print("Loaded yaml model from disk")

        # evaluate loaded model on test data
        loaded_model_yaml.compile(loss=gParameters['loss'],
                                  optimizer=gParameters['optimizer'],
                                  metrics=[gParameters['metrics']])
        score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)

        print('yaml Test score:', score_yaml[0])
        print('yaml Test accuracy:', score_yaml[1])

        print("yaml %s: %.2f%%" %
              (loaded_model_yaml.metrics_names[1], score_yaml[1] * 100))

    return history
Example #7
def run(params):
    # Construct extension to save model
    ext = p1b1.extension_from_parameters(params, '.keras')
    prefix = '{}{}'.format(params['save'], ext)
    logfile = params['logfile'] if params['logfile'] else prefix + '.log'

    verify_path(logfile)
    logger = set_up_logger(logfile, params['verbose'])

    logger.info('Params: {}'.format(params))

    # Get default parameters for initialization and optimizer functions
    keras_defaults = p1_common.keras_default_config()
    seed = params['rng_seed']
    set_seed(seed)

    # Load dataset
    x_train, y_train, x_val, y_val, x_test, y_test, x_labels, y_labels = p1b1.load_data(
        params, seed)

    start = time.time()
    # cache_file = 'data_l1000_cache.h5'
    # save_cache(cache_file, x_train, y_train, x_val, y_val, x_test, y_test, x_labels, y_labels)
    # x_train, y_train, x_val, y_val, x_test, y_test, x_labels, y_labels = load_cache(cache_file)

    logger.info("Shape x_train: {}".format(x_train.shape))
    logger.info("Shape x_val:   {}".format(x_val.shape))
    logger.info("Shape x_test:  {}".format(x_test.shape))

    logger.info("Range x_train: [{:.3g}, {:.3g}]".format(
        np.min(x_train), np.max(x_train)))
    logger.info("Range x_val:   [{:.3g}, {:.3g}]".format(
        np.min(x_val), np.max(x_val)))
    logger.info("Range x_test:  [{:.3g}, {:.3g}]".format(
        np.min(x_test), np.max(x_test)))

    logger.debug('Class labels')
    for i, label in enumerate(y_labels):
        logger.debug('  {}: {}'.format(i, label))

    # clf = build_type_classifier(x_train, y_train, x_val, y_val)

    n_classes = len(y_labels)
    cond_train = y_train
    cond_val = y_val
    cond_test = y_test

    input_dim = x_train.shape[1]
    cond_dim = cond_train.shape[1]
    latent_dim = params['latent_dim']

    activation = params['activation']
    dropout = params['drop']
    dense_layers = params['dense']
    dropout_layer = keras.layers.noise.AlphaDropout if params[
        'alpha_dropout'] else Dropout

    # Initialize weights and learning rule
    initializer_weights = p1_common_keras.build_initializer(
        params['initialization'], keras_defaults, seed)
    initializer_bias = p1_common_keras.build_initializer(
        'constant', keras_defaults, 0.)

    if dense_layers is not None:
        if type(dense_layers) != list:
            dense_layers = list(dense_layers)
    else:
        dense_layers = []

    # Encoder Part
    x_input = Input(shape=(input_dim, ))
    cond_input = Input(shape=(cond_dim, ))
    h = x_input
    if params['model'] == 'cvae':
        h = keras.layers.concatenate([x_input, cond_input])

    for i, layer in enumerate(dense_layers):
        if layer > 0:
            x = h
            h = Dense(layer,
                      activation=activation,
                      kernel_initializer=initializer_weights,
                      bias_initializer=initializer_bias)(h)
            if params['residual']:
                try:
                    h = keras.layers.add([h, x])
                except ValueError:
                    pass
            if params['batch_normalization']:
                h = BatchNormalization()(h)
            if dropout > 0:
                h = dropout_layer(dropout)(h)

    if params['model'] == 'ae':
        encoded = Dense(latent_dim,
                        activation=activation,
                        kernel_initializer=initializer_weights,
                        bias_initializer=initializer_bias)(h)
    else:
        epsilon_std = params['epsilon_std']
        z_mean = Dense(latent_dim, name='z_mean')(h)
        z_log_var = Dense(latent_dim, name='z_log_var')(h)
        encoded = z_mean

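        # VAE loss: per-feature reconstruction cross-entropy plus the analytic KL term
        # KL(N(z_mean, exp(z_log_var)) || N(0, I)) = -0.5 * sum(1 + z_log_var - z_mean^2 - exp(z_log_var)),
        # scaled by 1/input_dim so it is commensurate with the mean cross-entropy.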
        def vae_loss(x, x_decoded_mean):
            xent_loss = binary_crossentropy(x, x_decoded_mean)
            kl_loss = -0.5 * K.sum(
                1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
            return K.mean(xent_loss + kl_loss / input_dim)

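        # Reparameterization trick: sample z = z_mean + exp(z_log_var / 2) * eps with
        # eps ~ N(0, epsilon_std^2), so gradients flow through z_mean and z_log_var.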
        def sampling(params):
            z_mean_, z_log_var_ = params
            batch_size = K.shape(z_mean_)[0]
            epsilon = K.random_normal(shape=(batch_size, latent_dim),
                                      mean=0.,
                                      stddev=epsilon_std)
            return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

        z = Lambda(sampling, output_shape=(latent_dim, ))([z_mean, z_log_var])
        if params['model'] == 'cvae':
            z_cond = keras.layers.concatenate([z, cond_input])

    # Decoder Part
    decoder_input = Input(shape=(latent_dim, ))
    h = decoder_input
    if params['model'] == 'cvae':
        h = keras.layers.concatenate([decoder_input, cond_input])

    for i, layer in reversed(list(enumerate(dense_layers))):
        if layer > 0:
            x = h
            h = Dense(layer,
                      activation=activation,
                      kernel_initializer=initializer_weights,
                      bias_initializer=initializer_bias)(h)
            if params['residual']:
                try:
                    h = keras.layers.add([h, x])
                except ValueError:
                    pass
            if params['batch_normalization']:
                h = BatchNormalization()(h)
            if dropout > 0:
                h = dropout_layer(dropout)(h)

    decoded = Dense(input_dim,
                    activation='sigmoid',
                    kernel_initializer=initializer_weights,
                    bias_initializer=initializer_bias)(h)

    # Build autoencoder model
    if params['model'] == 'cvae':
        encoder = Model([x_input, cond_input], encoded)
        decoder = Model([decoder_input, cond_input], decoded)
        model = Model([x_input, cond_input], decoder([z, cond_input]))
        loss = vae_loss
        metrics = [xent, corr, mse]
    elif params['model'] == 'vae':
        encoder = Model(x_input, encoded)
        decoder = Model(decoder_input, decoded)
        model = Model(x_input, decoder(z))
        loss = vae_loss
        metrics = [xent, corr, mse]
    else:
        encoder = Model(x_input, encoded)
        decoder = Model(decoder_input, decoded)
        model = Model(x_input, decoder(encoded))
        loss = params['loss']
        metrics = [xent, corr]

    model.summary()
    decoder.summary()

    if params['cp']:
        model_json = model.to_json()
        with open(prefix + '.model.json', 'w') as f:
            print(model_json, file=f)

    # Define optimizer
    # optimizer = p1_common_keras.build_optimizer(params['optimizer'],
    #                                             params['learning_rate'],
    #                                             keras_defaults)
    optimizer = optimizers.deserialize({
        'class_name': params['optimizer'],
        'config': {}
    })
    base_lr = params['base_lr'] or K.get_value(optimizer.lr)
    if params['learning_rate']:
        K.set_value(optimizer.lr, params['learning_rate'])

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

    # calculate trainable and non-trainable params
    params.update(compute_trainable_params(model))

    def warmup_scheduler(epoch):
        lr = params['learning_rate'] or base_lr * params['batch_size'] / 100
        if epoch <= 5:
            K.set_value(model.optimizer.lr,
                        (base_lr * (5 - epoch) + lr * epoch) / 5)
        logger.debug('Epoch {}: lr={}'.format(epoch,
                                              K.get_value(model.optimizer.lr)))
        return K.get_value(model.optimizer.lr)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=5,
                                  min_lr=0.00001)
    warmup_lr = LearningRateScheduler(warmup_scheduler)
    checkpointer = ModelCheckpoint(params['save'] + ext + '.weights.h5',
                                   save_best_only=True,
                                   save_weights_only=True)
    tensorboard = TensorBoard(log_dir="tb/tb{}".format(ext))
    candle_monitor = CandleRemoteMonitor(params=params)
    timeout_monitor = TerminateOnTimeOut(params['timeout'])
    history_logger = LoggingCallback(logger.debug)

    callbacks = [candle_monitor, timeout_monitor, history_logger]
    if params['reduce_lr']:
        callbacks.append(reduce_lr)
    if params['warmup_lr']:
        callbacks.append(warmup_lr)
    if params['cp']:
        callbacks.append(checkpointer)
    if params['tb']:
        callbacks.append(tensorboard)

    x_val2 = np.copy(x_val)
    np.random.shuffle(x_val2)
    start_scores = p1b1.evaluate_autoencoder(x_val, x_val2)
    logger.info('\nBetween random pairs of validation samples: {}'.format(
        start_scores))

    if params['model'] == 'cvae':
        inputs = [x_train, cond_train]
        val_inputs = [x_val, cond_val]
        test_inputs = [x_test, cond_test]
    else:
        inputs = x_train
        val_inputs = x_val
        test_inputs = x_test

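    # Reconstruction targets are the inputs themselves for all three model types.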
    outputs = x_train
    val_outputs = x_val
    test_outputs = x_test

    history = model.fit(inputs,
                        outputs,
                        verbose=2,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        callbacks=callbacks,
                        validation_data=(val_inputs, val_outputs))

    # Saving the encoder/decoder and plotting the training history are
    # currently disabled (guarded by `if False`).
    if False and params['cp']:
        encoder.save(prefix + '.encoder.h5')
        decoder.save(prefix + '.decoder.h5')

    if False:
        plot_history(prefix, history, 'loss')
        plot_history(prefix, history, 'corr', 'streaming pearson correlation')

    # Evaluate model on test set
    x_pred = model.predict(test_inputs)
    scores = p1b1.evaluate_autoencoder(x_pred, x_test)
    logger.info('\nEvaluation on test data: {}'.format(scores))

    # Latent-space scatter plots (and the optional t-SNE projection, which
    # reuses x_test_encoded from the block above) are also disabled.
    if False:
        x_test_encoded = encoder.predict(test_inputs,
                                         batch_size=params['batch_size'])
        y_test_classes = np.argmax(y_test, axis=1)
        plot_scatter(x_test_encoded, y_test_classes, prefix + '.latent')

    if False and params['tsne']:
        tsne = TSNE(n_components=2, random_state=seed)
        x_test_encoded_tsne = tsne.fit_transform(x_test_encoded)
        plot_scatter(x_test_encoded_tsne, y_test_classes,
                     prefix + '.latent.tsne')

    logger.handlers = []

    elapsed = time.time() - start

    return history, scores, elapsed
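Note: the vae_loss, xent, corr and mse symbols used above are defined earlier in this example, outside this excerpt. For orientation only, a generic Keras-style VAE loss has roughly the following shape; the names z_mean and z_log_var and the use of binary cross-entropy for the reconstruction term are assumptions here, not the benchmark's exact definition.

from keras import backend as K
from keras.losses import binary_crossentropy

def generic_vae_loss(x, x_decoded_mean, z_mean, z_log_var, input_dim):
    # Reconstruction term: per-feature binary cross-entropy scaled by width
    reconstruction = input_dim * binary_crossentropy(x, x_decoded_mean)
    # KL divergence between the approximate posterior and a unit Gaussian prior
    kl = -0.5 * K.sum(1. + z_log_var - K.square(z_mean) - K.exp(z_log_var),
                      axis=-1)
    return K.mean(reconstruction + kl)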
Example #8
def run(GP):

    # set the seed
    if GP['seed']:
        np.random.seed(7)
    else:
        np.random.seed(np.random.randint(10000))

    # Set paths
    if not os.path.isdir(GP['home_dir']):
        print('Keras home directory not found:', GP['home_dir'])
        sys.exit(1)
    sys.path.append(GP['home_dir'])

    import p2b1_mol_AE as hf
    reload(hf)

    import keras_model_utils as KEU
    reload(KEU)
    reload(p2ck)
    reload(p2ck.optimizers)
    maps = hf.autoencoder_preprocess()

    from keras.optimizers import SGD, RMSprop, Adam
    from keras.datasets import mnist
    from keras.callbacks import LearningRateScheduler, ModelCheckpoint
    from keras import callbacks
    from keras.layers.advanced_activations import ELU
    from keras.preprocessing.image import ImageDataGenerator

    #    GP=hf.ReadConfig(opts.config_file)
    batch_size = GP['batch_size']
    learning_rate = GP['learning_rate']
    kerasDefaults = p2c.keras_default_config()

    ##### Read Data ########
    #(data_files, fields)=p2c.get_list_of_data_files(GP)

    # Read from local directory
    import helper
    (data_files, fields) = helper.get_local_files(
        '/p/gscratchr/brainusr/datasets/cancer/pilot2/3k_run16_10us.35fs-DPPC.20-DIPC.60-CHOL.20.dir/'
    )

    # Define datagenerator
    datagen = hf.ImageNoiseDataGenerator(corruption_level=GP['noise_factor'])

    # get data dimension ##
    num_samples = 0
    for f in data_files:

        # Separate different arrays from the data
        (X, nbrs, resnums) = helper.get_data_arrays(f)

        num_samples += X.shape[0]

    (X, nbrs, resnums) = helper.get_data_arrays(data_files[0])
    print(X.shape)

    molecular_hidden_layers = GP['molecular_num_hidden']
    if not molecular_hidden_layers:
        X_train = hf.get_data(X, case=GP['case'])
        input_dim = X_train.shape[1]
    else:
        # computing input dimension for outer AE
        input_dim = X.shape[1] * molecular_hidden_layers[-1]

    print('The input dimension to the State AE is ', input_dim)

    # get data dimension for molecular autoencoder
    molecular_nbrs = int(GP['molecular_nbrs'])
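    # Flattened per-molecule input: beads x per-bead features x (self + neighbours);
    # when type_bool is off, the last five per-bead fields (the type features)
    # are dropped.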
    if not GP['type_bool']:
        # only consider molecular location coordinates
        dim = np.prod([X.shape[2], X.shape[3] - 5, molecular_nbrs + 1])

        molecular_input_dim = dim
        molecular_output_dim = dim
        bead_kernel_size = X.shape[3] - 5
        mol_kernel_size = 12  # (X.shape[3]-5)*X.shape[2]
    else:
        dim = np.prod(X.shape[2:] + (molecular_nbrs + 1, ))

        molecular_input_dim = dim
        bead_kernel_size = X.shape[3]
        mol_kernel_size = 12  # X.shape[3]*X.shape[2]

    print('The input/output dimension to the Molecular AE is ',
          molecular_input_dim)

    print(
        'Data Format:\n  [Frames (%s), Molecules (%s), Beads (%s), %s (%s)]' %
        (num_samples, X.shape[1], X.shape[2], fields.keys(), X.shape[3]))

    ### Define Model, Solver and Compile ##########
    print('Define the model and compile')
    opt = p2ck.build_optimizer(GP['optimizer'], learning_rate, kerasDefaults)
    model_type = 'mlp'
    memo = '%s_%s' % (GP['base_memo'], model_type)

    ######## Define Molecular Model, Solver and Compile #########
    molecular_nonlinearity = GP['molecular_nonlinearity']

    len_molecular_hidden_layers = len(molecular_hidden_layers)
    conv_bool = GP['conv_bool']
    if conv_bool:
        print('Molecular kernel size: ', mol_kernel_size)
        molecular_model, molecular_encoder = hf.conv_dense_mol_auto(
            bead_k_size=bead_kernel_size,
            mol_k_size=mol_kernel_size,
            weights_path=None,
            input_shape=(1, molecular_input_dim, 1),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['weight_decay'],
            drop=GP['drop_prob'])
    else:
        molecular_model = hf.dense_auto(weights_path=None,
                                        input_shape=(molecular_input_dim, ),
                                        nonlinearity=molecular_nonlinearity,
                                        hidden_layers=molecular_hidden_layers,
                                        l2_reg=GP['weight_decay'])

    molecular_model.compile(
        optimizer=opt,
        loss=helper.combined_loss,
        metrics=['mean_squared_error', 'mean_absolute_error'])
    molecular_model.summary()
    ##### set up callbacks and cooling for the molecular_model ##########
    drop = 0.5
    mb_epochs = GP['molecular_epochs']
    initial_lrate = GP['learning_rate']
    epochs_drop = 1 + int(np.floor(mb_epochs / 3))

    def step_decay(epoch):
        # initial_lrate, epochs_drop and drop are captured from the enclosing
        # run() scope; the rate is multiplied by `drop` every `epochs_drop` epochs.
        lrate = initial_lrate * np.power(drop,
                                         np.floor((1 + epoch) / epochs_drop))
        return lrate

    lr_scheduler = LearningRateScheduler(step_decay)
    history = callbacks.History()
    # callbacks=[history,lr_scheduler]

    candleRemoteMonitor = CandleRemoteMonitor(params=GP)
    timeoutMonitor = TerminateOnTimeOut(TIMEOUT)
    callbacks = [history, candleRemoteMonitor, timeoutMonitor]
    loss = 0.
    # Default return values when training is skipped
    frame_loss, frame_mse = [], []

    #### Save the Model to disk
    if GP['save_path'] is not None:
        if not os.path.exists(GP['save_path']):
            os.makedirs(GP['save_path'])

        model_json = molecular_model.to_json()
        with open(GP['save_path'] + '/model.json', "w") as json_file:
            json_file.write(model_json)
        print('Saved model to disk')


    #### Train the Model
    if GP['train_bool']:
        if not str2bool(GP['cool']):
            effec_epochs = GP['epochs']
            ct = hf.Candle_Molecular_Train(
                molecular_model,
                molecular_encoder,
                data_files,
                mb_epochs,
                callbacks,
                batch_size=32,
                case=GP['case'],
                save_path=GP['save_path'],
                len_molecular_hidden_layers=len_molecular_hidden_layers,
                molecular_nbrs=molecular_nbrs,
                conv_bool=conv_bool,
                type_bool=GP['type_bool'])
            #            ct=hf.Candle_Train(datagen,model,data_files,effec_epochs,case=GP['case'])
            frame_loss, frame_mse = ct.train_ac()
        else:
            effec_epochs = GP['epochs'] // 3
            # Cooling schedule: train in three stages, dividing the learning
            # rate by 10 between stages, using the molecular autoencoder.
            ct = hf.Candle_Train(datagen,
                                 molecular_model,
                                 data_files,
                                 effec_epochs,
                                 case=GP['case'])
            loss = []
            for i in range(3):
                lr = GP['learning_rate'] / 10**i
                ct.model.optimizer.lr.set_value(lr)
                if i > 0:
                    ct.print_data = False
                    print('Cooling Learning Rate by factor of 10...')
                loss.extend(ct.train_ac())

    return frame_loss, frame_mse
Example #9
def run(gParameters):
    """
    Runs the model using the specified set of parameters

    Args:
       gParameters: a python dictionary containing the parameters (e.g. epoch)
       to run the model with.
    """
    # Normalize layer-spec parameters that may arrive as non-list values
    if 'dense' in gParameters:
        dval = gParameters['dense']
        if not isinstance(dval, list):
            # Coerce e.g. a tuple of layer sizes into a list; string-encoded
            # specs would need str2lst-style parsing instead.
            gParameters['dense'] = list(dval)
        print(gParameters['dense'])

    if 'conv' in gParameters:
        # Convolution specs are expected to already be parsed into a list
        # (see p1_common.parse_conv_list for the string-encoded form).
        print('Conv input', gParameters['conv'])
    # print('Params:', gParameters)
    # Construct extension to save model
    ext = p1b3.extension_from_parameters(gParameters, '.keras')
    logfile = gParameters['logfile'] or gParameters['save'] + ext + '.log'

    fh = logging.FileHandler(logfile)
    fh.setFormatter(
        logging.Formatter("[%(asctime)s %(process)d] %(message)s",
                          datefmt="%Y-%m-%d %H:%M:%S"))
    fh.setLevel(logging.DEBUG)

    sh = logging.StreamHandler()
    sh.setFormatter(logging.Formatter(''))
    sh.setLevel(logging.DEBUG if gParameters['verbose'] else logging.INFO)

    p1b3.logger.setLevel(logging.DEBUG)
    p1b3.logger.addHandler(fh)
    p1b3.logger.addHandler(sh)
    p1b3.logger.info('Params: {}'.format(gParameters))

    # Get default parameters for initialization and optimizer functions
    kerasDefaults = p1_common.keras_default_config()
    seed = gParameters['rng_seed']

    # Build dataset loader object
    loader = p1b3.DataLoader(
        seed=seed,
        dtype=gParameters['datatype'],
        val_split=gParameters['validation_split'],
        test_cell_split=gParameters['test_cell_split'],
        cell_features=gParameters['cell_features'],
        drug_features=gParameters['drug_features'],
        feature_subsample=gParameters['feature_subsample'],
        scaling=gParameters['scaling'],
        scramble=gParameters['scramble'],
        min_logconc=gParameters['min_logconc'],
        max_logconc=gParameters['max_logconc'],
        subsample=gParameters['subsample'],
        category_cutoffs=gParameters['category_cutoffs'])

    # Initialize weights and learning rule
    initializer_weights = p1_common_keras.build_initializer(
        gParameters['initialization'], kerasDefaults, seed)
    initializer_bias = p1_common_keras.build_initializer(
        'constant', kerasDefaults, 0.)

    activation = gParameters['activation']

    # Define model architecture
    gen_shape = None
    out_dim = 1
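    # Single regression output; the network ends with Dense(out_dim) below.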

    model = Sequential()
    if 'dense' in gParameters:  # Build dense layers
        for layer in gParameters['dense']:
            if layer:
                model.add(
                    Dense(layer,
                          input_dim=loader.input_dim,
                          kernel_initializer=initializer_weights,
                          bias_initializer=initializer_bias))
                if gParameters['batch_normalization']:
                    model.add(BatchNormalization())
                model.add(Activation(gParameters['activation']))
                if gParameters['drop']:
                    model.add(Dropout(gParameters['drop']))
    else:  # Build convolutional layers
        gen_shape = 'add_1d'
        layer_list = list(range(0, len(gParameters['conv'])))
        lc_flag = False
        if 'locally_connected' in gParameters:
            lc_flag = True

        for l, i in enumerate(layer_list):
            if i == 0:
                add_conv_layer(model,
                               gParameters['conv'][i],
                               input_dim=loader.input_dim,
                               locally_connected=lc_flag)
            else:
                add_conv_layer(model,
                               gParameters['conv'][i],
                               locally_connected=lc_flag)
            if gParameters['batch_normalization']:
                model.add(BatchNormalization())
            model.add(Activation(gParameters['activation']))
            if gParameters['pool']:
                model.add(MaxPooling1D(pool_size=gParameters['pool']))
        model.add(Flatten())

    model.add(Dense(out_dim))

    # Define optimizer
    optimizer = p1_common_keras.build_optimizer(gParameters['optimizer'],
                                                gParameters['learning_rate'],
                                                kerasDefaults)

    # Compile and display model
    model.compile(loss=gParameters['loss'], optimizer=optimizer)
    model.summary()
    p1b3.logger.debug('Model: {}'.format(model.to_json()))

    train_gen = p1b3.DataGenerator(
        loader,
        batch_size=gParameters['batch_size'],
        shape=gen_shape,
        name='train_gen',
        cell_noise_sigma=gParameters['cell_noise_sigma']).flow()
    val_gen = p1b3.DataGenerator(loader,
                                 partition='val',
                                 batch_size=gParameters['batch_size'],
                                 shape=gen_shape,
                                 name='val_gen').flow()
    val_gen2 = p1b3.DataGenerator(loader,
                                  partition='val',
                                  batch_size=gParameters['batch_size'],
                                  shape=gen_shape,
                                  name='val_gen2').flow()
    test_gen = p1b3.DataGenerator(loader,
                                  partition='test',
                                  batch_size=gParameters['batch_size'],
                                  shape=gen_shape,
                                  name='test_gen').flow()

    train_steps = int(loader.n_train / gParameters['batch_size'])
    val_steps = int(loader.n_val / gParameters['batch_size'])
    test_steps = int(loader.n_test / gParameters['batch_size'])
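    # By default the steps cover one pass over each partition; explicit
    # *_steps parameters below override these values.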

    if 'train_steps' in gParameters:
        train_steps = gParameters['train_steps']
    if 'val_steps' in gParameters:
        val_steps = gParameters['val_steps']
    if 'test_steps' in gParameters:
        test_steps = gParameters['test_steps']

    checkpointer = ModelCheckpoint(filepath=gParameters['save'] + '.model' +
                                   ext + '.h5',
                                   save_best_only=True)
    progbar = MyProgbarLogger(train_steps * gParameters['batch_size'])
    loss_history = MyLossHistory(
        progbar=progbar,
        val_gen=val_gen2,
        test_gen=test_gen,
        val_steps=val_steps,
        test_steps=test_steps,
        metric=gParameters['loss'],
        category_cutoffs=gParameters['category_cutoffs'],
        ext=ext,
        pre=gParameters['save'])

    # Seed random generator for training
    np.random.seed(seed)

    candleRemoteMonitor = CandleRemoteMonitor(params=gParameters)

    history = model.fit_generator(
        train_gen,
        train_steps,
        epochs=gParameters['epochs'],
        validation_data=val_gen,
        validation_steps=val_steps,
        verbose=0,
        callbacks=[checkpointer, loss_history, progbar, candleRemoteMonitor],
        pickle_safe=True,
        workers=gParameters['workers'])

    p1b3.logger.removeHandler(fh)
    p1b3.logger.removeHandler(sh)

    return history
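The add_conv_layer helper called in the convolutional branch above is defined elsewhere in this file. As a hypothetical sketch only, assuming each entry of gParameters['conv'] is a (filters, kernel_size, stride) triple, such a helper might look like:

from keras.layers import Conv1D, LocallyConnected1D

def add_conv_layer(model, spec, input_dim=None, locally_connected=False):
    # spec is assumed to be a (filters, kernel_size, stride) triple
    filters, kernel_size, stride = spec
    layer_cls = LocallyConnected1D if locally_connected else Conv1D
    kwargs = dict(filters=filters,
                  kernel_size=kernel_size,
                  strides=stride,
                  padding='valid')
    if input_dim is not None:
        # First layer: declare the (length, channels) input shape explicitly
        kwargs['input_shape'] = (input_dim, 1)
    model.add(layer_cls(**kwargs))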
Example #10
def run(GP):

    # set the seed
    if GP['seed']:
        np.random.seed(7)
    else:
        np.random.seed(np.random.randint(10000))

    # Set paths
    if not os.path.isdir(GP['home_dir']):
        print('Keras home directory not found:', GP['home_dir'])
        sys.exit(1)
    sys.path.append(GP['home_dir'])

    import p2b1 as hf
    reload(hf)

    import keras_model_utils as KEU
    reload(KEU)
    reload(p2ck)
    reload(p2ck.optimizers)
    maps = hf.autoencoder_preprocess()

    from keras.optimizers import SGD, RMSprop, Adam
    from keras.datasets import mnist
    from keras.callbacks import LearningRateScheduler, ModelCheckpoint
    from keras import callbacks
    from keras.layers.advanced_activations import ELU
    from keras.preprocessing.image import ImageDataGenerator

    #    GP=hf.ReadConfig(opts.config_file)
    batch_size = GP['batch_size']
    learning_rate = GP['learning_rate']
    kerasDefaults = p2c.keras_default_config()

    ##### Read Data ########
    import helper
    (data_files, fields) = p2c.get_list_of_data_files(GP)
    # Read from local directory
    #(data_files, fields) = helper.get_local_files('/p/gscratchr/brainusr/datasets/cancer/pilot2/3k_run16_10us.35fs-DPPC.20-DIPC.60-CHOL.20.dir/')
    #(data_files, fields) = helper.get_local_files('3k_run16', '/p/lscratchf/brainusr/datasets/cancer/pilot2/')

    # Define datagenerator
    datagen = hf.ImageNoiseDataGenerator(corruption_level=GP['noise_factor'])

    # get data dimension ##
    num_samples = 0
    for f in data_files:

        # Separate different arrays from the data
        (X, nbrs, resnums) = helper.get_data_arrays(f)

        num_samples += X.shape[0]

    (X, nbrs, resnums) = helper.get_data_arrays(data_files[0])
    print('\nData chunk shape: ', X.shape)

    molecular_hidden_layers = GP['molecular_num_hidden']
    if not molecular_hidden_layers:
        X_train = hf.get_data(X, case=GP['case'])
        input_dim = X_train.shape[1]
    else:
        # computing input dimension for outer AE
        input_dim = X.shape[1] * molecular_hidden_layers[-1]

    print('\nState AE input/output dimension: ', input_dim)

    # get data dimension for molecular autoencoder
    molecular_nbrs = int(GP['molecular_nbrs'])
    num_molecules = X.shape[1]
    num_beads = X.shape[2]

    if GP['nbr_type'] == 'relative':
        # relative x, y, z positions
        num_loc_features = 3
        loc_feat_vect = ['rel_x', 'rel_y', 'rel_z']
    elif GP['nbr_type'] == 'invariant':
        # relative distance and angle
        num_loc_features = 2
        loc_feat_vect = ['rel_dist', 'rel_angle']
    else:
        print('Invalid nbr_type:', GP['nbr_type'])
        sys.exit(1)

    if not GP['type_bool']:
        # only consider molecular location coordinates
        num_type_features = 0
        type_feat_vect = []
    else:
        num_type_features = 5
        type_feat_vect = list(fields.keys())[3:8]

    num_features = num_loc_features + num_type_features + num_beads
    dim = np.prod([num_beads, num_features, molecular_nbrs + 1])
    bead_kernel_size = num_features
    molecular_input_dim = dim
    mol_kernel_size = num_beads
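    # Kernel sizes are set from the data layout: bead_kernel_size equals the
    # per-bead feature width and mol_kernel_size equals the bead count per molecule.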

    feature_vector = loc_feat_vect + type_feat_vect + list(fields.keys())[8:]

    print('\nMolecular AE input/output dimension: ', molecular_input_dim)

    print(
        '\nData Format:\n[Frames (%s), Molecules (%s), Beads (%s), %s (%s)]' %
        (num_samples, num_molecules, num_beads, feature_vector, num_features))

    ### Define Model, Solver and Compile ##########
    print('\nDefine the model and compile')
    opt = p2ck.build_optimizer(GP['optimizer'], learning_rate, kerasDefaults)
    model_type = 'mlp'
    memo = '%s_%s' % (GP['base_memo'], model_type)

    ######## Define Molecular Model, Solver and Compile #########
    molecular_nonlinearity = GP['molecular_nonlinearity']

    len_molecular_hidden_layers = len(molecular_hidden_layers)
    conv_bool = GP['conv_bool']
    full_conv_bool = GP['full_conv_bool']
    if conv_bool:
        molecular_model, molecular_encoder = AE_models.conv_dense_mol_auto(
            bead_k_size=bead_kernel_size,
            mol_k_size=mol_kernel_size,
            weights_path=None,
            input_shape=(1, molecular_input_dim, 1),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['l2_reg'],
            drop=float(GP['drop_prob']))
    elif full_conv_bool:
        molecular_model, molecular_encoder = AE_models.full_conv_mol_auto(
            bead_k_size=bead_kernel_size,
            mol_k_size=mol_kernel_size,
            weights_path=None,
            input_shape=(1, molecular_input_dim, 1),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['l2_reg'],
            drop=float(GP['drop_prob']))

    else:
        molecular_model, molecular_encoder = AE_models.dense_auto(
            weights_path=None,
            input_shape=(molecular_input_dim, ),
            nonlinearity=molecular_nonlinearity,
            hidden_layers=molecular_hidden_layers,
            l2_reg=GP['l2_reg'],
            drop=float(GP['drop_prob']))

    if GP['loss'] == 'mse':
        loss_func = 'mse'
    elif GP['loss'] == 'custom':
        loss_func = helper.combined_loss
    else:
        raise ValueError('Unrecognized loss: {}'.format(GP['loss']))

    molecular_model.compile(
        optimizer=opt,
        loss=loss_func,
        metrics=['mean_squared_error', 'mean_absolute_error'])
    print('\nModel Summary: \n')
    molecular_model.summary()
    ##### set up callbacks and cooling for the molecular_model ##########
    drop = 0.5
    mb_epochs = GP['epochs']
    initial_lrate = GP['learning_rate']
    epochs_drop = 1 + int(np.floor(mb_epochs / 3))

    def step_decay(epoch):
        # initial_lrate, epochs_drop and drop are captured from the enclosing
        # run() scope; the rate is multiplied by `drop` every `epochs_drop` epochs.
        lrate = initial_lrate * np.power(drop,
                                         np.floor((1 + epoch) / epochs_drop))
        return lrate

    lr_scheduler = LearningRateScheduler(step_decay)
    history = callbacks.History()
    # callbacks=[history,lr_scheduler]

    candleRemoteMonitor = CandleRemoteMonitor(params=GP)
    timeoutMonitor = TerminateOnTimeOut(TIMEOUT)
    callbacks = [history, candleRemoteMonitor, timeoutMonitor]
    loss = 0.

    #### Save the Model to disk
    if GP['save_path'] is not None:
        save_path = GP['save_path']
        if not os.path.exists(save_path):
            os.makedirs(save_path)
    else:
        save_path = '.'

    model_json = molecular_model.to_json()
    with open(save_path + '/model.json', "w") as json_file:
        json_file.write(model_json)

    encoder_json = molecular_encoder.to_json()
    with open(save_path + '/encoder.json', "w") as json_file:
        json_file.write(encoder_json)

    print('Saved model to disk')

    #### Train the Model
    if GP['train_bool']:
        ct = hf.Candle_Molecular_Train(
            molecular_model,
            molecular_encoder,
            data_files,
            mb_epochs,
            callbacks,
            batch_size=32,
            nbr_type=GP['nbr_type'],
            save_path=GP['save_path'],
            len_molecular_hidden_layers=len_molecular_hidden_layers,
            molecular_nbrs=molecular_nbrs,
            conv_bool=conv_bool,
            full_conv_bool=full_conv_bool,
            type_bool=GP['type_bool'],
            sampling_density=GP['sampling_density'])
        frame_loss, frame_mse = ct.train_ac()
    else:
        frame_mse = []
        frame_loss = []

    return frame_loss, frame_mse