import os
from tensorflow import keras
import helper
from tfomics import utils, explain, metrics

from model_zoo import cnn_deep
#------------------------------------------------------------------------------------------------

activations = ['relu', 'exp']
num_trials = 10
model_name = 'cnn-deep'
initializations = [
    'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform',
    'lecun_normal', 'lecun_uniform'
]
# save path
results_path = utils.make_directory('../../results', 'initialization')
params_path = utils.make_directory(results_path, 'model_params')
save_path = utils.make_directory(results_path, 'conv_filters')
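
# tfomics.utils.make_directory is not shown in this excerpt; a minimal sketch
# of its assumed behavior (join the two components, create the directory if
# missing, return the full path). This is an assumption, not tfomics's code:
def _make_directory_sketch(base_path, name):
    path = os.path.join(base_path, name)
    os.makedirs(path, exist_ok=True)
    return path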

#------------------------------------------------------------------------------------------------

# load dataset
data_path = '../../data/synthetic_dataset.h5'
data = helper.load_data(data_path)
x_train, y_train, x_valid, y_valid, x_test, y_test = data
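
# helper.load_data is likewise not shown here. A minimal sketch, assuming the
# HDF5 file stores each split under keys like 'x_train'/'y_train' (the key
# names and h5py usage are assumptions about the elided helper):
def _load_data_sketch(file_path):
    import h5py
    import numpy as np
    with h5py.File(file_path, 'r') as dataset:
        keys = ('x_train', 'y_train', 'x_valid', 'y_valid', 'x_test', 'y_test')
        return tuple(np.array(dataset[key]) for key in keys)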

file_path = os.path.join(results_path, 'performance_initializations.tsv')
with open(file_path, 'w') as f:
    f.write('%s\t%s\t%s\n' % ('model', 'ave roc', 'ave pr'))

    for initialization in initializations:
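        # The loop body is truncated in this excerpt. A hypothetical sketch of
        # the presumed pattern (train one model per activation/initializer and
        # trial, average the test metrics, log a TSV row); the cnn_deep.model
        # signature, compiled metrics, and training hyperparameters below are
        # placeholders/assumptions:
        for activation in activations:
            trial_roc, trial_pr = [], []
            for trial in range(num_trials):
                keras.backend.clear_session()
                model = cnn_deep.model(activation=activation,
                                       kernel_initializer=initialization)
                model.fit(x_train, y_train, epochs=100, batch_size=100,
                          validation_data=(x_valid, y_valid))
                _, auroc, aupr = model.evaluate(x_test, y_test)
                trial_roc.append(auroc)
                trial_pr.append(aupr)
            f.write('%s_%s\t%.3f\t%.3f\n'
                    % (initialization, activation,
                       sum(trial_roc) / num_trials, sum(trial_pr) / num_trials))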
motifs = [[''], arid3, cebpb, fosl1, gabpa, mafk, max1, mef2a, nfyb, sp1, srf,
          stat1, yy1]
motifnames = [
    '', 'arid3', 'cebpb', 'fosl1', 'gabpa', 'mafk', 'max', 'mef2a', 'nfyb',
    'sp1', 'srf', 'stat1', 'yy1'
]

#----------------------------------------------------------------------------------------------------

thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]

num_trials = 10
model_name = 'cnn-deep'
activations = ['relu', 'exponential']
results_path = utils.make_directory('../../results', 'task1')
size = 32

# save results to file
with open(os.path.join(results_path, 'threshold_sweep_filter_results.tsv'),
          'w') as f:
    f.write('%s\t%s\t%s\n' % ('model', 'match JASPAR', 'match ground truth'))

    results = {}
    for activation in activations:
        results[activation] = {}
        for thresh in thresholds:
            trial_match_any = []
            trial_qvalue = []
            trial_match_fraction = []
            trial_coverage = []
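            # Truncated here; presumably each trial appends its motif-match
            # statistics to the lists above, after which per-threshold
            # averages are logged. A hypothetical aggregation sketch (np is
            # assumed imported in the elided header of this script):
            results[activation][thresh] = {
                'match_fraction': np.mean(trial_match_fraction),
                'coverage': np.mean(trial_coverage),
            }
            f.write('%s_%.1f\t%.3f\t%.3f\n'
                    % (activation, thresh, np.mean(trial_match_any),
                       np.mean(trial_match_fraction)))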
import os
import numpy as np
from six.moves import cPickle
from tensorflow import keras
import helper
from tfomics import utils, metrics

#------------------------------------------------------------------------

num_trials = 10
model_names = ['cnn-dist', 'cnn-local']
activations = ['relu', 'exponential', 'sigmoid', 'tanh', 'softplus', 'linear', 'elu',
               'shift_scale_relu', 'shift_scale_tanh', 'shift_scale_sigmoid', 'exp_relu']

results_path = utils.make_directory('../results', 'task3')
params_path = utils.make_directory(results_path, 'model_params')

#------------------------------------------------------------------------

# load data
data_path = '../data/synthetic_code_dataset.h5'
data = helper.load_data(data_path)
x_train, y_train, x_valid, y_valid, x_test, y_test = data

#------------------------------------------------------------------------

with open(os.path.join(results_path, 'task3_classification_performance.tsv'), 'w') as f:
    f.write('%s\t%s\t%s\n'%('model', 'ave roc', 'ave pr'))

    results = {}
import os
from six.moves import cPickle
import matplotlib.pyplot as plt
from tensorflow import keras
import helper
from tfomics import utils, explain, metrics

#------------------------------------------------------------------------------------------------

num_trials = 10
model_names = ['cnn-deep', 'cnn-2', 'cnn-50']
activations = ['relu', 'exponential', 'sigmoid', 'tanh', 'softplus', 'linear', 'elu',
               'shift_scale_relu', 'shift_scale_tanh', 'shift_scale_sigmoid', 'exp_relu', 
               'shift_relu', 'scale_relu', 'shift_tanh', 'scale_tanh', 'shift_sigmoid', 'scale_sigmoid']

# save path
results_path = utils.make_directory('../results', 'task1')
params_path = utils.make_directory(results_path, 'model_params')
save_path = utils.make_directory(results_path, 'conv_filters')

#------------------------------------------------------------------------------------------------

# load dataset
data_path = '../data/synthetic_dataset.h5'
data = helper.load_data(data_path)
x_train, y_train, x_valid, y_valid, x_test, y_test = data

# save results to file
file_path = os.path.join(results_path, 'task1_classification_performance.tsv')
with open(file_path, 'w') as f:
    f.write('%s\t%s\t%s\n'%('model', 'ave roc', 'ave pr'))
import os
import numpy as np
from six.moves import cPickle
from tensorflow import keras
import helper
from tfomics import utils, explain

#------------------------------------------------------------------------

num_trials = 10
model_names = ['cnn-dist', 'cnn-local']
activations = ['relu', 'exponential', 'sigmoid', 'tanh', 'softplus', 'linear', 'elu']


results_path = os.path.join('../results', 'task3')
params_path = os.path.join(results_path, 'model_params')
save_path = utils.make_directory(results_path, 'scores')

#------------------------------------------------------------------------

# load data
data_path = '../data/synthetic_code_dataset.h5'
data = helper.load_data(data_path)
x_train, y_train, x_valid, y_valid, x_test, y_test = data

# load ground truth values
test_model = helper.load_synthetic_models(data_path, dataset='test')
true_index = np.where(y_test[:,0] == 1)[0]
X = x_test[true_index][:500]
X_model = test_model[true_index][:500]
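
# The attribution step is elided below this point. A generic saliency sketch
# using plain TensorFlow (not necessarily the tfomics.explain API): gradient
# of the positive-class output with respect to the input sequences.
def _saliency_sketch(model, X):
    import tensorflow as tf
    X = tf.convert_to_tensor(X, dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(X)
        preds = model(X)[:, 0]
    return tape.gradient(preds, X).numpy()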

#------------------------------------------------------------------------
import os
from tensorflow import keras
import helper
from tfomics import utils, explain, metrics
from six.moves import cPickle
from tensorflow.keras import backend as K
#from model_zoo import cnn_deep_nobn
from model_zoo import cnn_deep

#------------------------------------------------------------------------------------------------

num_trials = 1
model_name = 'cnn-deep'
activations = ['exponential', 'relu']

# save path
results_path = utils.make_directory('../../results', 'gradient_analysis')
params_path = utils.make_directory(results_path, 'model_params')
save_path = utils.make_directory(results_path, 'conv_filters')

#------------------------------------------------------------------------------------------------


# Define the gradient function: the gradient of the training loss with
# respect to the first conv layer's kernel. Note this uses the TF1-style
# Keras backend API (K.gradients, model.total_loss, model._feed_inputs),
# so it requires graph mode rather than eager execution.
def get_gradient_func(model):
    grads = K.gradients(model.total_loss, model.layers[1].kernel)
    inputs = model._feed_inputs + model._feed_targets
    func = K.function(inputs, grads)
    return func
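
# Under TF2 eager execution the backend-graph approach above fails; a
# GradientTape-based equivalent sketch (loss_fn and the batch arguments are
# assumptions, and layers[1] mirrors the original's first-conv-layer choice):
def get_kernel_gradient(model, loss_fn, x_batch, y_batch):
    import tensorflow as tf
    with tf.GradientTape() as tape:
        loss = loss_fn(y_batch, model(x_batch, training=True))
    return tape.gradient(loss, model.layers[1].kernel)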


# Define the required callback function (its definition is elided in this
# excerpt)

Gmeb1 = ['MA0615.1']  # JASPAR motif ID for Gmeb1

motifs = [[''], arid3, cebpb, fosl1, gabpa, mafk, max1, mef2a, nfyb, sp1, srf,
          stat1, yy1]
motifnames = [
    '', 'arid3', 'cebpb', 'fosl1', 'gabpa', 'mafk', 'max', 'mef2a', 'nfyb',
    'sp1', 'srf', 'stat1', 'yy1'
]

#----------------------------------------------------------------------------------------------------



num_trials = 10
model_name = 'cnn-deep'
activations = ['relu', 'exp']
sigmas = [0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1.0, 2.0, 3, 4, 5]
results_path = utils.make_directory('../../results', 'initialization_sweep')
save_path = utils.make_directory(results_path, 'conv_filters')
size = 32

# save results to file
with open(os.path.join(results_path, 'filter_results.tsv'), 'w') as f:
    f.write('%s\t%s\t%s\n'%('model', 'match JASPAR', 'match ground truth'))

    results = {}
    for activation in activations:
        results[activation] = {}
        for sigma in sigmas:
            trial_match_any = []
            trial_qvalue = []
            trial_match_fraction = []
            trial_coverage = []
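            # Trial loop truncated in this excerpt. A hypothetical sketch of
            # the presumed pattern, where each sigma sets the stddev of a
            # normal initializer for the first conv layer (the cnn_deep.model
            # signature and surrounding imports are assumptions):
            for trial in range(num_trials):
                keras.backend.clear_session()
                initializer = keras.initializers.RandomNormal(stddev=sigma)
                model = cnn_deep.model(activation=activation,
                                       kernel_initializer=initializer)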
motifs = [[''], arid3, cebpb, fosl1, gabpa, mafk, max1, mef2a, nfyb, sp1, srf,
          stat1, yy1]
motifnames = [
    '', 'arid3', 'cebpb', 'fosl1', 'gabpa', 'mafk', 'max', 'mef2a', 'nfyb',
    'sp1', 'srf', 'stat1', 'yy1'
]

#----------------------------------------------------------------------------------------------------

num_trials = 10
model_name = 'cnn-deep'
activations = ['relu', 'exponential']

scales = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 2, 3, 4, 5]

results_path = utils.make_directory('../../results', 'exp_scale_sweep')
save_path = utils.make_directory(results_path, 'conv_filters')
size = 32

# save results to file
with open(os.path.join(results_path, 'filter_results.tsv'), 'w') as f:
    f.write('%s\t%s\t%s\n' % ('model', 'match JASPAR', 'match ground truth'))

    results = {}
    for scale in scales:
        trial_match_any = []
        trial_qvalue = []
        trial_match_fraction = []
        trial_coverage = []
        for trial in range(num_trials):
            try:
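                # Body truncated in this excerpt. A hypothetical sketch: load
                # this trial's saved first-layer filters and append their
                # motif-match statistics (the file naming, pickle format, and
                # helper.match_motifs name are assumptions):
                name = '%s_scale%s_%d.pickle' % (model_name, scale, trial)
                with open(os.path.join(save_path, name), 'rb') as fp:
                    W = cPickle.load(fp)
                trial_match_fraction.append(helper.match_motifs(W, motifs))
            except FileNotFoundError:
                # skip trials whose saved filters are missing
                continue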