Example #1
0
def get_concise_model():
    """Fit a minimal concise model on the sequence-only example data.

    Loads the bundled example dataset (sequences trimmed to 1000 bp),
    builds a concise model with no covariates, runs a single training
    epoch, and returns the fitted model plus its output annotation.
    """
    param, X_feat, X_seq, y, id_vec = load_example_data(trim_seq_len=1000)
    model = concise_model(
        pooling_layer="sum",
        init_motifs=["TGCGAT", "TATTTAT"],
        n_splines=10,
        n_covariates=0,
        seq_length=X_seq.shape[1],
        **param,
    )

    # One epoch is enough for a smoke-test fixture; validate on the same data.
    model.fit([X_seq], y, epochs=1, validation_data=([X_seq], y))

    return {"model": model, "out_annotation": np.array(["output_1"])}
Example #2
0
def test_serialization_disk(tmpdir):
    """Round-trip a fitted concise model through Keras HDF5 save/load.

    Fits a model (sequence + covariate inputs) for one epoch, saves it to
    a temp file, reloads it, and checks the reloaded object is a Keras Model.
    """
    param, X_feat, X_seq, y, id_vec = load_example_data()
    model = concise_model(
        pooling_layer="sum",
        init_motifs=["TGCGAT", "TATTTAT"],
        n_splines=10,
        n_covariates=X_feat.shape[1],
        seq_length=X_seq.shape[1],
        **param,
    )

    model.fit(
        [X_seq, X_feat], y, epochs=1,
        validation_data=([X_seq, X_feat], y),
    )

    # Serialize to disk and load back under a fresh name.
    path = str(tmpdir.mkdir('data').join('test_keras.h5'))
    model.save(path)
    reloaded = keras.models.load_model(path)
    assert isinstance(reloaded, keras.models.Model)
Example #3
0
def get_list_input_model():
    """Build and briefly fit a two-branch Keras model taking a list of inputs.

    Two identical convolutional branches (dropout -> Conv1D -> max-pool ->
    dropout -> flatten -> dense) are concatenated and fed into a sigmoid
    output.  The model is fit for one epoch on a 500-sample subset of the
    example data and returned with its output annotation.

    Returns:
        dict with keys "model" (the fitted keras Model) and
        "out_annotation" (numpy array of output names).
    """
    import keras
    from keras.layers.merge import concatenate
    from keras.models import Sequential, Model
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.layers.convolutional import Conv1D, MaxPooling1D

    seq_len = 1000
    param, X_feat, X_seq, y, id_vec = load_example_data(trim_seq_len=seq_len)

    # set hyperparameters
    opt = keras.optimizers.Adam(lr=0.0001,
                                beta_1=0.9,
                                beta_2=0.999,
                                epsilon=1e-08,
                                decay=0.0)
    ls = 'binary_crossentropy'

    nf = 100              # conv filters per branch
    nf_fc = 200           # dense units per branch
    fl = 8                # conv kernel size
    drops = 0.1           # dropout rate
    pooling_length = 4
    cnninit = 'glorot_normal'
    winit = "glorot_uniform"
    n_output = 1
    ls_metrics = ['accuracy']

    def _make_branch():
        """Build one conv branch; both branches share identical hyperparameters."""
        branch = Sequential()
        branch.add(Dropout(drops, input_shape=(seq_len, 4)))
        branch.add(
            Conv1D(kernel_size=fl,
                   kernel_initializer=cnninit,
                   activation="relu",
                   filters=nf))
        branch.add(MaxPooling1D(pool_size=pooling_length))
        branch.add(Dropout(drops))
        branch.add(Flatten())
        branch.add(Dense(nf_fc, activation='relu', kernel_initializer=winit))
        return branch

    # Previously the two branches were built with duplicated code; the shared
    # helper keeps them guaranteed-identical.
    n1 = _make_branch()
    n2 = _make_branch()

    merged = concatenate([n1.output, n2.output])
    out = Dense(n_output, activation='sigmoid',
                kernel_initializer=winit)(merged)

    merged_model = Model(inputs=[n1.input, n2.input], outputs=out)
    merged_model.compile(optimizer=opt, loss=ls, metrics=ls_metrics)

    # this is slow - subset
    # NOTE(review): training uses only the first 500 samples, but
    # validation_data passes the FULL arrays — possibly intentional,
    # but worth confirming; subsetting validation too would be faster.
    merged_model.fit([X_seq[:500], X_seq[:500]],
                     y[:500],
                     batch_size=128,
                     epochs=1,
                     validation_data=([X_seq, X_seq], y))
    return {"model": merged_model, "out_annotation": np.array(["output_1"])}
Example #4
0
 def setup_class(cls):
     """Load a minimal example dataset and tighten its training hyperparameters.

     Uses length-1 sequences without feature standardization, then overrides
     the parameter dict (first element of the loaded tuple) for a fast run.
     """
     data = load_example_data(trim_seq_len=1, standardize_features=False)
     data[0].update({
         "n_motifs": 1,
         "motif_length": 1,
         "step_size": 0.001,
         "early_stop_patience": 3,
     })
     cls.data = data
Example #5
0
 def setup_class(cls):
     """Cache the three-task example dataset on the test class."""
     example = load_example_data(num_tasks=3)
     cls.data = example
Example #6
0
 def setup_class(cls):
     """Cache the default example dataset on the test class."""
     example = load_example_data()
     cls.data = example