コード例 #1
0
def make_phos_homepage_viz():
    """Build the homepage phosphoproteomics clustergram and save it as JSON."""
    from clustergrammer import Network

    net = Network()

    net.load_file('lung_cellline_3_1_16/lung_cellline_phospho/'
                  'lung_cellline_TMT_phospho_combined_ratios.tsv')

    # quantile-normalize columns so cell lines are comparable
    net.normalize(axis='col', norm_type='qn')

    # keep only the 250 most differentially regulated PTMs (by sum)
    net.filter_N_top('row', 250, 'sum')

    # z-score rows; original values are retained alongside
    net.normalize(axis='row', norm_type='zscore', keep_orig=True)

    net.swap_nan_for_zero()

    # drop PTM rows without at least 3 values beyond the threshold
    net.filter_threshold('row', threshold=1.75, num_occur=3)

    # cosine clustering with dendrogram, similarity matrices, and category p-values
    net.make_clust(dist_type='cos',
                   views=['N_row_sum', 'N_row_var'],
                   dendro=True,
                   sim_mat=True,
                   calc_cat_pval=True)

    net.write_json_to_file('viz', 'json/homepage_phos.json', 'indent')
コード例 #2
0
ファイル: old_load_gct.py プロジェクト: MaayanLab/LINCS_GCT
def make_viz_from_df(df, filename):
    """Convert a DataFrame into a clustergrammer visualization JSON file.

    The output name is derived from the second path component of
    *filename* with its '.gct' suffix removed.
    """
    from clustergrammer import Network

    net = Network()
    net.df_to_dat(df)
    net.swap_nan_for_zero()

    # z-score columns first so their distributions are comparable
    net.normalize(axis='col', norm_type='zscore', keep_orig=True)

    # keep the 2000 rows with the largest normalized values
    net.filter_N_top('row', 2000)

    n_cols = net.dat['mat'].shape[1]

    # only cluster reasonably narrow matrices
    if n_cols < 50:
        # views = ['N_row_sum', 'N_row_var']
        net.make_clust(dist_type='cos', views=['N_row_sum'])

        out_name = 'json/' + filename.split('/')[1].replace('.gct',
                                                            '') + '.json'

        net.write_json_to_file('viz', out_name)
コード例 #3
0
def make_enr_vect_clust():
  # Build a clustered Enrichr-vector visualization from a saved
  # gene-to-enrichment POST payload and export it as JSON.
  import enrichr_functions as enr_fun 
  from clustergrammer import Network

  net = Network()

  # load the previously saved gene-list POST data
  g2e_post = net.load_json_to_dict('json/g2e_enr_vect.json')

  # enr_fun returns a new, clustered Network object
  # NOTE(review): semantics of the numeric args (0.001, 1) are defined in
  # enrichr_functions — presumably a p-value cutoff and occurrence count; confirm
  net = enr_fun.make_enr_vect_clust(g2e_post, 0.001, 1)

  net.write_json_to_file('viz','json/enr_vect_example.json')
コード例 #4
0
def make_viz_json(inst_df, name):
  """Cluster *inst_df* and write the visualization JSON to json/<name>."""
  from clustergrammer import Network

  net = Network()

  out_path = 'json/' + name

  # wrap the DataFrame in the dict layout df_to_dat expects
  net.df_to_dat({'mat': inst_df})
  net.swap_nan_for_zero()

  # cluster with default parameters and no extra filtered views
  net.make_clust(views=[])

  net.write_json_to_file('viz', out_path, 'no-indent')
コード例 #5
0
def cluster():
  # Cluster a saved vector-POST payload and export the visualization JSON.
  from clustergrammer import Network

  net = Network()

  # read the saved POST body from disk
  vect_post = net.load_json_to_dict('fake_vect_post.json')  

  # convert the vector-format payload into the network's matrix format
  net.load_vect_post_to_net(vect_post)

  net.swap_nan_for_zero()
  
  # net.N_top_views()
  net.make_clust(dist_type='cos',views=['N_row_sum','N_row_var'], dendro=True)

  net.write_json_to_file('viz','json/large_vect_post_example.json','indent')  
コード例 #6
0
def clustergrammer_load():
    """Load mat_cats.tsv, cluster it, and write the visualization JSON."""
    from clustergrammer import Network

    net = Network()

    # read the category-annotated matrix into the network
    net.pandas_load_file('mat_cats.tsv')

    # cosine distance plus row-sum / row-variance filtered views
    net.make_clust(dist_type='cos', views=['N_row_sum', 'N_row_var'])
    net.write_json_to_file('viz', 'json/mult_cats.json', 'indent')

    # show which per-row metadata was produced
    print('\n**********************')
    print(net.dat['node_info']['row'].keys())
    print('\n\n')
コード例 #7
0
def clustergrammer_load():
  # Load mat_cats.tsv, cluster it, and write the visualization JSON.
  # (2-space-indent duplicate of the function above.)
  # import network class from Network.py
  from clustergrammer import Network

  net = Network()

  net.pandas_load_file('mat_cats.tsv')  

  # cosine distance with row-sum / row-variance filtered views
  net.make_clust(dist_type='cos',views=['N_row_sum','N_row_var'])

  net.write_json_to_file('viz','json/mult_cats.json','indent')  

  # show which per-row metadata was produced
  print('\n**********************')
  print(net.dat['node_info']['row'].keys())

  print('\n\n')
コード例 #8
0
def cluster():
    """Cluster a fake vector-POST payload and save the visualization JSON."""
    from clustergrammer import Network

    net = Network()

    # read the saved POST body and convert it into network format
    net.load_vect_post_to_net(net.load_json_to_dict('fake_vect_post.json'))

    net.swap_nan_for_zero()

    # net.N_top_views()
    net.make_clust(dist_type='cos',
                   views=['N_row_sum', 'N_row_var'],
                   dendro=True)

    net.write_json_to_file('viz', 'json/large_vect_post_example.json',
                           'indent')
コード例 #9
0
def make_exp_homepage_viz():
    # Build the homepage CCLE NSCLC gene-expression clustergram JSON.

    from clustergrammer import Network
    net = Network()

    net.load_file('CCLE_gene_expression/CCLE_NSCLC_all_genes.txt')

    # threshold filter expression: keep rows with at least 4 values past 3.0
    net.filter_threshold('row', threshold=3.0, num_occur=4)

    views = ['N_row_sum', 'N_row_var']
    net.make_clust(dist_type='cos',
                   views=views,
                   dendro=True,
                   sim_mat=True,
                   calc_cat_pval=False)

    net.write_json_to_file('viz', 'json/homepage_exp.json', 'indent')
コード例 #10
0
def main():
  """Load an example TSV through a StringIO buffer, cluster it, and
  write json/mult_view.json, printing the elapsed wall-clock time.
  """
  import time
  start_time = time.time()
  import StringIO  # NOTE: Python 2 module; use io.StringIO on Python 3

  # import network class from Network.py
  from clustergrammer import Network

  net = Network()

  # choose file
  ################
  # file_buffer = open('txt/col_categories.txt')
  # use a context manager so the file handle is always closed
  # (it was previously opened and never closed)
  with open('txt/example_tsv_network.txt') as file_buffer:
    buff = StringIO.StringIO(file_buffer.read())

  net.pandas_load_tsv_to_net(buff)

  # filtered views to generate alongside the raw matrix
  views = ['filter_row_sum', 'N_row_sum']

  # distance metric
  dist_type = 'cosine'

  # linkage type
  linkage_type = 'average'

  net.make_clust(dist_type=dist_type, views=views, calc_col_cats=True,
                 linkage_type=linkage_type)

  net.write_json_to_file('viz', 'json/mult_view.json', 'no-indent')

  elapsed_time = time.time() - start_time
  print('\n\n\nelapsed time: ' + str(elapsed_time))
コード例 #11
0
ファイル: clustergrammerIPG.py プロジェクト: zph001/tools-iuc
def prepare_heatmap(matrix_input, html_file, html_dir, tools_dir, categories, distance, linkage):
    # Render the clustergrammer HTML page and produce the clustered JSON
    # (mult_view.json) that the front end loads.
    #   matrix_input: path to the input matrix file
    #   html_file / html_dir: output HTML page and its asset directory
    #   tools_dir: directory containing the "templates" folder
    #   categories: dict with optional 'row' / 'col' category lists
    #   distance, linkage: clustering parameters forwarded to net.cluster()
    # NOTE(review): os, Environment, FileSystemLoader and Network are
    # imported at module level, outside this view.
    # prepare directory and html
    os.mkdir(html_dir)

    env = Environment(loader=FileSystemLoader(tools_dir + "/templates"))
    template = env.get_template("clustergrammer.template")
    overview = template.render()
    with open(html_file, "w") as outf:
        outf.write(overview)

    json_output = html_dir + "/mult_view.json"

    net = Network()
    net.load_file(matrix_input)
    # attach optional row/col category annotations when provided
    if (categories['row']):
        net.add_cats('row', categories['row'])
    if (categories['col']):
        net.add_cats('col', categories['col'])
    net.cluster(dist_type=distance, linkage_type=linkage)
    net.write_json_to_file('viz', json_output)
コード例 #12
0
def main():
    """Load an example TSV through a StringIO buffer, cluster it, and
    write json/mult_view.json, printing the elapsed wall-clock time.
    """
    import time
    start_time = time.time()
    import StringIO  # NOTE: Python 2 module; use io.StringIO on Python 3

    # import network class from Network.py
    from clustergrammer import Network

    net = Network()

    # choose file
    ################
    # file_buffer = open('txt/col_categories.txt')
    # context manager guarantees the file handle is closed
    # (it was previously opened and never closed)
    with open('txt/example_tsv_network.txt') as file_buffer:
        buff = StringIO.StringIO(file_buffer.read())

    net.pandas_load_tsv_to_net(buff)

    # filter rows
    views = ['filter_row_sum', 'N_row_sum']

    # distance metric
    dist_type = 'cosine'

    # linkage type
    linkage_type = 'average'

    net.make_clust(dist_type=dist_type, views=views, calc_col_cats=True,
                   linkage_type=linkage_type)

    net.write_json_to_file('viz', 'json/mult_view.json', 'no-indent')

    elapsed_time = time.time() - start_time
    print('\n\n\nelapsed time: ' + str(elapsed_time))
コード例 #13
0
    def get_clustergrammer_json(self, outfile):
        """Cluster self.expression_dataframe and write the clustergrammer
        JSON to *outfile*.

        On clustering failure an empty placeholder file is created so
        downstream consumers still find something at that path.
        """
        # Create network
        net = Network()

        # Load file
        net.load_df(self.expression_dataframe)

        # Add categories; sample categories may be absent or malformed.
        # except Exception (not a bare except) so KeyboardInterrupt /
        # SystemExit still propagate.
        try:
            net.add_cats('col', self.sample_cats)
        except Exception:
            pass

        try:
            # calculate clustering using default parameters
            net.cluster()

            # save visualization JSON to file for use by front end
            net.write_json_to_file('viz', outfile)
        except Exception:
            # create an empty placeholder without shelling out;
            # os.system('touch ...') broke on paths with special characters
            open(outfile, 'a').close()
コード例 #14
0
def make_json_from_tsv(name):
  '''
  Make a clustergrammer json from the tsv file txt/<name>.txt, writing the
  result to json/<name>.json (large matrices are skipped).
  '''
  from clustergrammer import Network

  print('\n' + name)

  net = Network()

  filename = 'txt/' + name + '.txt'

  net.load_file(filename)

  net.swap_nan_for_zero()

  # zscore first to get the columns distributions to be similar
  net.normalize(axis='col', norm_type='zscore', keep_orig=True)

  # filter the rows to keep the perts with the largest normalized values
  # (the previously assigned-but-unused df = net.dat_to_df() was removed)
  net.filter_N_top('row', 1000)

  num_rows = net.dat['mat'].shape[0]
  num_cols = net.dat['mat'].shape[1]

  print('num_rows ' + str(num_rows))
  print('num_cols ' + str(num_cols))

  # only cluster if the matrix is small enough in at least one dimension
  if num_cols < 50 or num_rows < 1000:

    views = ['N_row_sum']
    net.make_clust(dist_type='cos', views=views)
    export_filename = 'json/' + name + '.json'
    net.write_json_to_file('viz', export_filename)

  else:
    print('did not cluster, too many columns ')
コード例 #15
0
    def prepare_clustergrammer_data(self,
                                    outfname='clustergrammer_data.json',
                                    G=None):
        """for a distance matrix, output a clustergrammer JSON file
        that clustergrammer-js can use

        for now it loads the clustergrammer-py module from local dev files
        TODO: once changes are pulled into clustergrammer-py, we can use the actual module (pip)

        :outfname: filename for the output json
        :G: networkx graph (use self.G_sym by default)

        """
        # BUG FIX: honor an explicitly supplied graph; previously the G
        # argument was unconditionally overwritten by self.G_sym/self.G.
        if G is None:
            G = self.G_sym or self.G
        clustergrammer_py_dev_dir = '../clustergrammer/clustergrammer-py/'
        sys.path.insert(0, clustergrammer_py_dev_dir)
        from clustergrammer import Network as ClustergrammerNetwork
        start = timer()
        # dense distance matrix with node names on both axes
        d = nx.to_numpy_matrix(G)
        df = pd.DataFrame(d, index=G.nodes(), columns=G.nodes())
        net = ClustergrammerNetwork()
        net.load_df(df)
        # 'precalculated' tells clustergrammer the matrix already holds distances
        net.cluster(dist_type='precalculated')
        logger.debug("done loading and clustering. took {}".format(
            format_timespan(timer() - start)))

        logger.debug("writing to {}".format(outfname))
        start = timer()
        net.write_json_to_file('viz', outfname)
        logger.debug("done writing file {}. took {}".format(
            outfname, format_timespan(timer() - start)))
コード例 #16
0
ファイル: old_load_gct.py プロジェクト: MaayanLab/LINCS_GCT
def make_viz_from_df(df, filename):
  # Convert a DataFrame into a clustergrammer visualization JSON file.
  # `filename` is the source path; output goes to json/<basename>.json.
  # (2-space-indent duplicate of the earlier make_viz_from_df.)
  from clustergrammer import Network

  net = Network()

  net.df_to_dat(df)
  net.swap_nan_for_zero()

  # zscore first to get the columns distributions to be similar
  net.normalize(axis='col', norm_type='zscore', keep_orig=True)

  # filter the rows to keep the perts with the largest normalizes values
  net.filter_N_top('row', 2000)

  num_coluns = net.dat['mat'].shape[1]

  # only cluster reasonably narrow matrices
  if num_coluns < 50:
    # views = ['N_row_sum', 'N_row_var']
    views = ['N_row_sum']
    net.make_clust(dist_type='cos', views=views)

    # derive the output name from the second path component of `filename`
    filename = 'json/' + filename.split('/')[1].replace('.gct','') + '.json'

    net.write_json_to_file('viz', filename)
コード例 #17
0
# Build a tab-separated heatmap matrix annotated with cell-line and tissue
# category rows, then cluster it with clustergrammer.
# NOTE(review): delta_f, tfs, ann_dict, status, genes, args and net are
# defined earlier in the original script — this is a fragment.
ids = delta_f.columns.map(lambda x: x.split('|')[0])

# use a context manager so the matrix file is closed even if a write fails
with open("%s_heatmap_matrix.txt" % args.d, 'w') as fout:
    fout.write("\t\t%s\n" % ('\t'.join(tfs)))

    # category row 1: cell line per column id (missing ids fall back to 'NA')
    cls = ["Cell Line: %s" % ann_dict.get(i, ['NA'])[0] for i in ids]
    fout.write("\t\t%s\n" % ('\t'.join(cls)))

    # category row 2: tissue per column id (missing ids fall back to 'NA')
    ts = ["Tissue: %s" % ann_dict.get(i, ['NA', 'NA'])[1] for i in ids]
    fout.write("\t\t%s\n" % ('\t'.join(ts)))

    # one data row per gene: name, input status, then the delta values
    for i in range(status.shape[0]):
        fout.write('%s\t%s\t%s\n' %
                   ("Gene: %s" % genes[i], "Input Gene: %s" % status[i],
                    '\t'.join(delta_f.iloc[i, :].map(str))))

net.load_file("%s_heatmap_matrix.txt" % args.d)
net.cluster()
net.write_json_to_file('viz', '%s_mult_view.json' % args.d)
コード例 #18
0
# Load mult_view.tsv, cluster it with defaults, and export the JSON
# that the clustergrammer front end consumes.
from clustergrammer import Network

network = Network()
network.load_file('mult_view.tsv')

# Z-score normalize the rows
#net.normalize(axis='row', norm_type='zscore', keep_orig=True)

# calculate clustering using default parameters
network.cluster()

# save visualization JSON to file for use by front end
network.write_json_to_file('viz', 'mult_view.json')

#	needs pandas and sklearn as well
#	pip install --user --upgrade clustergrammer pandas sklearn
コード例 #19
0
# NOTE(review): `net` is created earlier in the original script — fragment.
# net.enrichrgram('KEA_2015')

# optional filtering and normalization
##########################################
# net.filter_sum('row', threshold=20)
# net.normalize(axis='col', norm_type='zscore', keep_orig=True)
# net.filter_N_top('row', 250, rank_type='sum')
# net.filter_threshold('row', threshold=3.0, num_occur=4)
# net.swap_nan_for_zero()
# net.set_cat_color('col', 1, 'Category: one', 'blue')

# net.make_clust()
# net.dendro_cats('row', 5)

# cosine clustering with dendrogram and row/col similarity matrices
net.cluster(dist_type='cos',
            views=['N_row_sum', 'N_row_var'],
            dendro=True,
            sim_mat=True,
            filter_sim=0.1,
            calc_cat_pval=False,
            enrichrgram=False,
            run_clustering=True)

# write jsons for front-end visualizations
net.write_json_to_file('viz', '/../../../../../pulmon/json/mult_view.json',
                       'indent')
net.write_json_to_file('sim_row', '/pulmon/json/mult_view_sim_row.json',
                       'no-indent')
net.write_json_to_file('sim_col', '/pulmon/json/mult_view_sim_col.json',
                       'no-indent')
コード例 #20
0
    "#CC0744", "#C0B9B2", "#C2FF99", "#001E09", "#00489C", "#6F0062",
    "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66",
    "#885578", "#0089A3", "#FF8A9A", "#D157A0", "#BEC459", "#456648",
    "#0086ED", "#886F4C", "#34362D", "#B4A8BD", "#00A6AA", "#452C2C",
    "#636375", "#A3C8C9", "#FF913F", "#938A81", "#575329", "#00FECF",
    "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00",
    "#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600",
    "#D790FF", "#9B9700", "#549E79", "#FFF69F", "#201625", "#CB7E98",
    "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329", "#5B4534",
    "#FDE8DC", "#404E55", "#FAD09F", "#A4E804", "#f58231", "#324E72", "#402334"
]
for i in range(len(color_array3)):
    # assign a distinct palette color to each SC3 cluster-label category
    label = 'SC3 label: _' + str(i) + '_'
    net.set_cat_color(axis='col',
                      cat_index=1,
                      cat_name=label,
                      inst_color=color_array3[i])
#console.log(color_array[i]);

# optionally color user-provided labels, walking the palette backwards
# NOTE(review): assumes len(unique_array) stays small enough that 63 - j
# remains a valid index into color_array3 — confirm
if use_user_label == '1':
    for j in range(len(unique_array)):
        userlabel = 'User\'s label: _' + str(unique_array[j]) + '_'
        net.set_cat_color(axis='col',
                          cat_index=2,
                          cat_name=userlabel,
                          inst_color=color_array3[63 - j])
net.cluster(dist_type='cos', enrichrgram=True, run_clustering=False)
# write jsons for front-end visualizations
out = wd + 'json/' + outname + '.json'
net.write_json_to_file('viz', out, 'indent')
コード例 #21
0
# Cluster the heatmap file named on the command line and write the JSON
# next to it (same directory, .txt -> .json).
from clustergrammer import Network
import sys

fname = sys.argv[-1]
net = Network()
print("Python is fun.")
print(fname)

base_dir = '/Users/snehalpatil/Documents/GithubProjects/gsesuite-data/heatmap/'
src_path = base_dir + fname
print(src_path)

net.load_file(src_path)
net.cluster()

out_path = base_dir + fname.replace(".txt", ".json")
net.write_json_to_file('viz', out_path)
コード例 #22
0
# make network object and load file
from clustergrammer import Network
net = Network()

# input matrix and output JSON paths
in_file = "cluster.txt"
out_file = "cluster.json"

net.load_file(in_file)

# calculate clustering using default parameters
net.cluster()

# save visualization JSON to file for use by front end
# (the output-path variable was previously defined but an identical
# literal was repeated here instead of using it)
net.write_json_to_file('viz', out_file)
コード例 #23
0
    # NOTE(review): this is the body of a function whose def line (and the
    # definition of matrix_filename) is outside this view.
    print('loading file...')
    net = Network()
    # load matrix file
    net.load_file(matrix_filename)
    print('done')

    # cluster using default parameters
    print('clustering the matrix...')
    net.cluster(dist_type='jaccard', linkage_type='complete')
    #    net.cluster(run_clustering=False)
    print('done')

    # save visualization JSON to file for use by front end
    print('saving results in json file...')
    json_filename = matrix_filename + '.json'
    net.write_json_to_file('viz', json_filename)
    print('done')

    # creating the html page: slurp the JSON back in as a string so it can
    # be embedded in the page
    print('creating the html page...')
    network_data = ''
    file = open(json_filename, 'rt')
    for line in file:
        network_data += line
    file.close()
    print(len(network_data))

    load_viz_new_filename = '/home/meheurap/scripts/proteinCluster/load_viz_new.js'
    load_viz_new = ''
    file = open(load_viz_new_filename, 'rt')
    for line in file:
コード例 #24
0
# NOTE(review): `net`, `time` and `start_time` come from earlier in the
# original script — this is a fragment.
net.load_file('txt/rc_two_cats.txt')
# net.load_file('txt/example_tsv.txt')
# net.load_file('txt/col_categories.txt')
# net.load_file('txt/mat_cats.tsv')
# net.load_file('txt/mat_1mb.Txt')
# net.load_file('txt/mnist.txt')
# net.load_file('txt/sim_mat_4_cats.txt')

views = ['N_row_sum','N_row_var']

# # filtering rows and cols by sum 
# net.filter_sum('row', threshold=20)
# net.filter_sum('col', threshold=30)
  
# # keep top rows based on sum 
# net.filter_N_top('row', 10, 'sum')

# cosine clustering with dendrogram and row/col similarity matrices
net.make_clust(dist_type='cos',views=views , dendro=True,
               sim_mat=True, filter_sim=0.1)

# net.produce_view({'N_row_sum':10,'dist':'euclidean'})

# main visualization plus the two similarity-matrix JSONs
net.write_json_to_file('viz', 'json/mult_view.json', 'no-indent')
net.write_json_to_file('sim_row', 'json/mult_view_sim_row.json', 'no-indent')
net.write_json_to_file('sim_col', 'json/mult_view_sim_col.json', 'no-indent')

elapsed_time = time.time() - start_time

print('\n\nelapsed time')
print(elapsed_time)
コード例 #25
0
# NOTE(review): df, new_rows, new_cols and net come from earlier in the
# original script — this is a fragment.
# new_cols = [(x, 'Cat-1: A', 'Cat-2: B', 'Cat-3: C') for x in df.columns]
df.index = new_rows
df.columns = new_cols

net.load_df(df)

# cosine clustering with dendrogram and Enrichr annotations
net.cluster(dist_type='cos',
            views=['N_row_sum', 'N_row_var'],
            dendro=True,
            sim_mat=False,
            filter_sim=0.1,
            calc_cat_pval=False,
            enrichrgram=True)

# write jsons for front-end visualizations
net.write_json_to_file('viz', 'data/big_data/custom.json', 'no-indent')

# net.write_json_to_file('sim_row', 'json/mult_view_sim_row.json', 'no-indent')
# net.write_json_to_file('sim_col', 'json/mult_view_sim_col.json', 'no-indent')

# net.normalize(axis='row', norm_type='zscore')

# second pass re-clusters with identical parameters (would pick up the
# optional normalization above if it were uncommented)
net.cluster(dist_type='cos',
            views=['N_row_sum', 'N_row_var'],
            dendro=True,
            sim_mat=False,
            filter_sim=0.1,
            calc_cat_pval=False,
            enrichrgram=True)

# write jsons for front-end visualizations
コード例 #26
0
# NOTE(review): `net`, `time` and `start_time` come from earlier in the
# original script — this is a fragment.
inst_name = 'Tyrosine'
# net.load_file('txt/phos_ratios_all_treat_no_geld_ST.txt')
net.load_file('txt/phos_ratios_all_treat_no_geld_Tyrosine.txt')

net.swap_nan_for_zero()

# net.normalize(axis='row', norm_type='zscore', keep_orig=True)

print(net.dat.keys())

views = ['N_row_sum', 'N_row_var']

# cosine clustering with dendrogram and row/col similarity matrices
net.make_clust(dist_type='cos',
               views=views,
               dendro=True,
               sim_mat=True,
               filter_sim=0.1,
               calc_cat_pval=False)
# run_enrichr=['KEA_2015'])
# run_enrichr=['ENCODE_TF_ChIP-seq_2014'])
# run_enrichr=['GO_Biological_Process_2015'])

# main visualization plus the two similarity-matrix JSONs
net.write_json_to_file('viz', 'json/' + inst_name + '.json', 'no-indent')
net.write_json_to_file('sim_row', 'json/' + inst_name + '_sim_row.json',
                       'no-indent')
net.write_json_to_file('sim_col', 'json/' + inst_name + '_sim_col.json',
                       'no-indent')

elapsed_time = time.time() - start_time
print('\n\nelapsed time: ' + str(elapsed_time))
コード例 #27
0
'''
The clustergrammer python module can be installed using pip:
pip install clustergrammer

or by getting the code from the repo:
https://github.com/MaayanLab/clustergrammer-py
'''
import os
from clustergrammer import Network

# cluster every tsv in ./tsv and write the viz JSON to ./output
for filename in os.listdir("tsv"):
    name = filename.split(".")[0]
    net = Network()
    # load matrix tsv file
    # BUG FIX: `print name` was Python-2-only syntax; the parenthesized
    # form works on both Python 2 and 3
    print(name)
    net.load_file('tsv/' + name + '.tsv')

    # optional filtering and normalization
    ##########################################
    net.swap_nan_for_zero()

    net.make_clust(dist_type='cos',
                   views=['N_row_sum', 'N_row_var'],
                   dendro=True,
                   sim_mat=True,
                   filter_sim=0.1,
                   calc_cat_pval=False)

    # write jsons for front-end visualizations
    net.write_json_to_file('viz', 'output/' + name + '.json', 'indent')
コード例 #28
0
# Load mult_view.tsv, cluster with defaults, export the front-end JSON.
# (Duplicate of the earlier mult_view example, with extra blank lines.)
# make network object and load file
from clustergrammer import Network
net = Network()
net.load_file('mult_view.tsv')




# Z-score normalize the rows
#net.normalize(axis='row', norm_type='zscore', keep_orig=True)





# calculate clustering using default parameters
net.cluster()

# save visualization JSON to file for use by front end
net.write_json_to_file('viz', 'mult_view.json')



#	needs pandas and sklearn as well
#	pip install --user --upgrade clustergrammer pandas sklearn
コード例 #29
0
# make network object and load DataFrame, df
# usage: script.py <input_tsv> <output_json>
import sys
import pandas as pd
from clustergrammer import Network

# BUG FIX: header must be an int row index (or None); header=True is
# invalid and raises in pandas. header=0 uses the first row as columns.
df = pd.read_csv(sys.argv[1], header=0, index_col=0, sep='\t')
net = Network()
net.load_df(df)

# Z-score normalize the rows
net.normalize(axis='row', norm_type='zscore', keep_orig=True)

# filter for the top 100 columns based on their absolute value sum
net.filter_N_top('col', 100, 'sum')

# cluster using default parameters
net.cluster()

# save visualization JSON to file for use by front end
net.write_json_to_file('viz', sys.argv[2])
コード例 #30
0
# BUG FIX: `time` and `start_time` were never defined in this script, so
# the elapsed-time report at the bottom raised NameError.
import time
start_time = time.time()

from clustergrammer import Network
net = Network()

# choose tsv file
####################
inst_name = 'Tyrosine'
# net.load_file('txt/phos_ratios_all_treat_no_geld_ST.txt')
net.load_file('txt/phos_ratios_all_treat_no_geld_Tyrosine.txt')


net.swap_nan_for_zero()

# net.normalize(axis='row', norm_type='zscore', keep_orig=True)

print(net.dat.keys())

views = ['N_row_sum', 'N_row_var']

# cosine clustering with dendrogram and row/col similarity matrices
net.make_clust(dist_type='cos',views=views , dendro=True,
               sim_mat=True, filter_sim=0.1, calc_cat_pval=False)
               # run_enrichr=['KEA_2015'])
               # run_enrichr=['ENCODE_TF_ChIP-seq_2014'])
               # run_enrichr=['GO_Biological_Process_2015'])

# main visualization plus the two similarity-matrix JSONs
net.write_json_to_file('viz', 'json/'+inst_name+'.json', 'no-indent')
net.write_json_to_file('sim_row', 'json/'+inst_name+'_sim_row.json', 'no-indent')
net.write_json_to_file('sim_col', 'json/'+inst_name+'_sim_col.json', 'no-indent')

elapsed_time = time.time() - start_time
print('\n\nelapsed time: '+str(elapsed_time))
コード例 #31
0
ファイル: make_clustergrammer.py プロジェクト: pchaku/papseek
# NOTE(review): `net` is created earlier in the original script — fragment.
# net.load_file('txt/tuple_cats.txt')
# net.load_file('txt/example_tsv.txt')

# net.enrichrgram('KEA_2015')

# optional filtering and normalization
##########################################
# net.filter_sum('row', threshold=20)
# net.normalize(axis='col', norm_type='zscore', keep_orig=True)
# net.filter_N_top('row', 250, rank_type='sum')
# net.filter_threshold('row', threshold=3.0, num_occur=4)
# net.swap_nan_for_zero()
# net.set_cat_color('col', 1, 'Category: one', 'blue')

# net.make_clust()
# net.dendro_cats('row', 5)

# cosine clustering with dendrogram, similarity matrices and Enrichr annotations
net.cluster(dist_type='cos',
            views=['N_row_sum', 'N_row_var'],
            dendro=True,
            sim_mat=True,
            filter_sim=0.1,
            calc_cat_pval=False,
            enrichrgram=True)

# write jsons for front-end visualizations
#net.write_json_to_file('viz', 'json/mult_view.json', 'indent')
net.write_json_to_file('viz', 'json/pooja.json', 'indent')
net.write_json_to_file('sim_row', 'json/mult_view_sim_row.json', 'no-indent')
net.write_json_to_file('sim_col', 'json/mult_view_sim_col.json', 'no-indent')
コード例 #32
0
# Cluster the two MHC-I matrices and export a visualization JSON for each.
#from sys import argv
from clustergrammer import Network

for mat_file, json_file in (('mat.txt', 'kbio_mhci_view.json'),
                            ('mat2.txt', 'kbio_mhci_view_summary.json')):
    inst_net = Network()
    inst_net.load_file(mat_file)
    #argv[1]
    # calculate clustering using default parameters
    inst_net.cluster()

    # save visualization JSON to file for use by front end
    inst_net.write_json_to_file('viz', json_file)
コード例 #33
0
# NOTE(review): `net` is created earlier in the original script — fragment.
# net.load_file('txt/tuple_cats.txt')
# net.load_file('txt/example_tsv.txt')

# net.enrichrgram('KEA_2015')

# optional filtering and normalization
##########################################
# net.filter_sum('row', threshold=20)
# net.normalize(axis='col', norm_type='zscore', keep_orig=True)
# net.filter_N_top('row', 250, rank_type='sum')
# net.filter_threshold('row', threshold=3.0, num_occur=4)
# net.swap_nan_for_zero()
# net.set_cat_color('col', 1, 'Category: one', 'blue')

# net.make_clust()
# net.dendro_cats('row', 5)

net.cluster(dist_type='cos',
            views=['N_row_sum', 'N_row_var'],
            dendro=True,
            sim_mat=True,
            filter_sim=0.1,
            calc_cat_pval=False,
            enrichrgram=False,
            run_clustering=True)

# write jsons for front-end visualizations
# BUG FIX: all three exports previously wrote to json/out.json, so the
# similarity matrices clobbered the main visualization; give each export
# its own file.
net.write_json_to_file('viz', 'json/out.json', 'indent')
net.write_json_to_file('sim_row', 'json/out_sim_row.json', 'no-indent')
net.write_json_to_file('sim_col', 'json/out_sim_col.json', 'no-indent')
コード例 #34
0
# Time an end-to-end load / filtered-view build / JSON export run.
import time
start_time = time.time()

# import network class from Network.py
from clustergrammer import Network

net = Network()

# read the example network and build filtered views from it
net.load_tsv_to_net('txt/example_tsv.txt')

net.make_filtered_views(dist_type='cos',
                        views=['N_row_sum', 'pct_row_sum'])

net.write_json_to_file('viz', 'json/mult_view.json', 'indent')

# report wall-clock duration of the run
elapsed_time = time.time() - start_time
print('\n\n\nelapsed time')
print(elapsed_time)
コード例 #35
0
'''
Python 2.7
The clustergrammer python module can be installed using pip:
pip install clustergrammer

or by getting the code from the repo:
https://github.com/MaayanLab/clustergrammer-py
'''

from clustergrammer import Network
net = Network()

# load matrix tsv file
net.load_file('txt/heatmap_features.txt')

# fix a color for each "Feature Type" row category
net.set_cat_color('row', 1, 'Feature Type: Interactivity', 'yellow')
net.set_cat_color('row', 1, 'Feature Type: Sharing', 'blue')
net.set_cat_color('row', 1, 'Feature Type: Usability', 'orange')
net.set_cat_color('row', 1, 'Feature Type: Biology-Specific', 'red')

# cosine clustering with dendrogram, no extra filtered views
net.cluster(dist_type='cos',
            views=[],
            dendro=True,
            filter_sim=0.1,
            calc_cat_pval=False,
            enrichrgram=False)

# write jsons for front-end visualizations
net.write_json_to_file('viz', 'json/mult_view.json', 'indent')
コード例 #36
0
# NOTE(review): the commented block below appears to be the tail of an
# example input structure from earlier in the original script; `net` is
# also created earlier — this is a fragment.
#       ],
#       "bb":[
#           "p1",
#           "p2",
#           "p3",
#           "p4"
#       ],
#       "cc":[
#           "p1",
#           "p2",
#           "p4"
#       ],
#       "dd":[
#           "p2"
#       ],
#       "ee":[
#           "p4"
#       ]
#     }
#   }
# ])




# calculate clustering using default parameters
net.cluster()

# save visualization JSON to file for use by front end
net.write_json_to_file('viz', 'json/new_matrix.json')
コード例 #37
0
# Time the load / cluster / export cycle for the two-category example matrix.
import time
start_time = time.time()

from clustergrammer import Network
net = Network()

net.load_file('txt/rc_two_cats.txt')
# net.load_file('txt/tmp.txt')

# cosine clustering with dendrogram and similarity matrices
net.make_clust(dist_type='cos',
               views=['N_row_sum', 'N_row_var'],
               dendro=True,
               sim_mat=True)

# main visualization plus row/column similarity-matrix JSONs
for inst_view, inst_file in (('viz', 'json/mult_view.json'),
                             ('sim_row', 'json/mult_view_sim_row.json'),
                             ('sim_col', 'json/mult_view_sim_col.json')):
    net.write_json_to_file(inst_view, inst_file)

elapsed_time = time.time() - start_time

print('\n\nelapsed time')
print(elapsed_time)
コード例 #38
0
# Load the example TSV network, drop weak edges, cluster, and export JSON.
# import network class from Network.py
from clustergrammer import Network

# get instance of Network
net = Network()
print(net.__doc__)
print('make tsv clustergram')

# load network from tsv file
##############################
net.load_tsv_to_net('txt/example_tsv_network.txt')

# drop weak edges: threshold 0.001, which must be met at least once
net.filter_network_thresh(0.001, 1)

# cluster rows and columns with cosine distance
#############
net.cluster_row_and_col('cos')

# export data visualization to file
######################################
net.write_json_to_file('viz', 'json/default_example.json', 'indent')