Example #1
def create_btable_from_csv(self, tablename, csv_path, cctypes, postgres_coltypes, colnames):
    with self.open_db_connection(commit=True) as cur:
        ## TODO: warning: m_r and m_c have 0-indexed indices
        ##       but the db has 1-indexed keys
        t, m_r, m_c, header = du.read_data_objects(csv_path, cctypes=cctypes)
        curtime = datetime.datetime.now().ctime()
        cur.execute("INSERT INTO preddb.table_index (tablename, numsamples, uploadtime, analyzetime, t, m_r, m_c, cctypes, path) VALUES (%s, %s, %s, NULL, %s, %s, %s, %s, %s);", (tablename, 0, curtime, json.dumps(t), json.dumps(m_r), json.dumps(m_c), json.dumps(cctypes), csv_path))
Example #2
def create_btable_from_csv(self, tablename, csv_path, cctypes,
                           postgres_coltypes, colnames):
    with self.open_db_connection(commit=True) as cur:
        ## TODO: warning: m_r and m_c have 0-indexed indices
        ##       but the db has 1-indexed keys
        t, m_r, m_c, header = du.read_data_objects(csv_path,
                                                   cctypes=cctypes)
        curtime = datetime.datetime.now().ctime()
        cur.execute(
            "INSERT INTO preddb.table_index (tablename, numsamples, uploadtime, analyzetime, t, m_r, m_c, cctypes, path) VALUES (%s, %s, %s, NULL, %s, %s, %s, %s, %s);",
            (tablename, 0, curtime, json.dumps(t), json.dumps(m_r),
             json.dumps(m_c), json.dumps(cctypes), csv_path))
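
Examples #1 and #2 are the same method, shown unwrapped and wrapped: both parse a CSV into CrossCat's internal objects and cache them as JSON in a preddb.table_index row. As a minimal sketch of what gets cached, assuming a small 'T.csv' like the one in Example #4 (the m_c keys are taken from Example #3; everything else here is illustrative):

import json
import crosscat.utils.data_utils as du

t, m_r, m_c, header = du.read_data_objects('T.csv')

print(header)              # column names from the CSV header row
print(m_c['name_to_idx'])  # column name -> 0-indexed column id (note the TODO above)
print(len(t))              # number of data rows

# The INSERT above stores each object with json.dumps; reading the row back
# is a matter of json.loads on the stored strings.
m_c_restored = json.loads(json.dumps(m_c))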
Example #3
def update_datatypes(self, tablename, mappings):
    """
    mappings is a dict of column name to 'continuous', 'multinomial',
    or an int, which signifies multinomial with a specific number of outcomes.
    TODO: FIX HACKS. Currently this works by reloading all the data from csv,
    and it ignores multinomials' specific number of outcomes.
    Also, disastrous things may happen if you update a schema after creating models.
    """
    max_chainid = self.persistence_layer.get_max_chain_id(tablename)
    if max_chainid is not None:
        return 'Error: cannot update datatypes after models have already been created. Please create a new table.'

    # First, get existing cctypes, and T, M_c, and M_r.
    cctypes = self.persistence_layer.get_cctypes(tablename)
    m_c, m_r, t = self.persistence_layer.get_metadata_and_table(tablename)

    # Now, update cctypes, T, M_c, and M_r.
    for col, mapping in mappings.items():
        ## TODO: fix this hack! See method's docstring.
        if isinstance(mapping, int):
            mapping = 'multinomial'
        cctypes[m_c['name_to_idx'][col]] = mapping
    ## NOTE: csv_abs_path is not defined in this snippet; the original
    ## method resolves the table's CSV path (e.g. via the persistence
    ## layer) before re-reading the data.
    t, m_r, m_c, header = du.read_data_objects(csv_abs_path,
                                               cctypes=cctypes)

    # Now, put cctypes, T, M_c, and M_r back into the DB.
    self.persistence_layer.update_cctypes(tablename, cctypes)
    self.persistence_layer.update_metadata_and_table(tablename, m_r, m_c, t)

    colnames = [
        m_c['idx_to_name'][str(idx)]
        for idx in range(len(m_c['idx_to_name']))
    ]
    return dict(columns=colnames,
                data=[cctypes],
                message='Updated schema:\n')
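
Per the docstring, mappings is a plain dict from column name to 'continuous', 'multinomial', or an int. A minimal usage sketch; the client object, the 'people' table, and its column names are all hypothetical:

# 'client' stands in for whatever object exposes update_datatypes;
# the table and column names below are made up for illustration.
result = client.update_datatypes(
    tablename='people',
    mappings={
        'age': 'continuous',    # model as a continuous column
        'city': 'multinomial',  # model as a categorical column
        'zipcode': 5,           # ints currently collapse to 'multinomial' (see the TODO)
    })
print(result['columns'])  # column names in index order
print(result['data'][0])  # the updated cctypes, one per column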
Example #4
from crosscat.LocalEngine import LocalEngine
import crosscat.utils.data_utils as data_utils


data_filename = 'T.csv'
inference_seed = 0
num_full_transitions = 10

# read the data table into internal JSON representation
data_table, row_metadata, column_metadata, header = \
        data_utils.read_data_objects(data_filename)

# create an engine to run analysis, inference
engine = LocalEngine(seed=inference_seed)

# initialize Markov chain samples
initial_latent_state, initial_latent_state_clustering = \
        engine.initialize(column_metadata, row_metadata, data_table)

# run Markov chain transition kernels on samples
latent_state, latent_state_clustering = engine.analyze(column_metadata,
        data_table, initial_latent_state, initial_latent_state_clustering,
        n_steps=num_full_transitions)
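
The pair returned by analyze is CrossCat's fitted latent state (X_L and X_D in the library's usual notation). A follow-up sketch that draws posterior predictive samples from it; simple_predictive_sample and its argument order are my assumption about the LocalEngine API, so check them against your crosscat version:

# query ten samples of the cell at row 0, column 0; Y=[] means no
# conditioning constraints (assumed signature, verify before use)
samples = engine.simple_predictive_sample(
    column_metadata, latent_state, latent_state_clustering,
    Y=[], Q=[(0, 0)], n=10)
print(samples)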

Example #5
from crosscat.LocalEngine import LocalEngine
import crosscat.utils.data_utils as data_utils


data_filename = "T.csv"
inference_seed = 0
num_full_transitions = 10

# read the data table into internal JSON representation
data_table, row_metadata, column_metadata, header = data_utils.read_data_objects(data_filename)

# create an engine to run analysis, inference
engine = LocalEngine(seed=inference_seed)

# initialize Markov chain samples
initial_latent_state, initial_latent_state_clustering = engine.initialize(column_metadata, row_metadata, data_table)

# run Markov chain transition kernels on samples
latent_state, latent_state_clustering = engine.analyze(
    column_metadata, data_table, initial_latent_state, initial_latent_state_clustering, n_steps=num_full_transitions
)
Example #6
#!/usr/bin/env python

from crosscat.LocalEngine import LocalEngine
import crosscat.utils.data_utils as data_utils

data_filename = '/vagrant/DREAM5_network_inference_challenge/Network1/input data/net1_chip_features.tsv'
inference_seed = 0
num_full_transitions = 10
data_table, row_metadata, column_metadata, header = data_utils.read_data_objects(data_filename)
engine = LocalEngine(seed=inference_seed)
initial_latent_state, initial_latent_state_clustering = engine.initialize(column_metadata, row_metadata, data_table)
latent_state, latent_state_clustering = engine.analyze(
    column_metadata, data_table, initial_latent_state,
    initial_latent_state_clustering, n_steps=num_full_transitions)
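
Each analyze call above runs a single Markov chain from one initialization. A common pattern is to run several independent chains from different seeds and keep every latent state; this sketch uses only the LocalEngine calls already shown in these examples:

from crosscat.LocalEngine import LocalEngine
import crosscat.utils.data_utils as data_utils

data_table, row_metadata, column_metadata, header = \
        data_utils.read_data_objects('T.csv')

# run five independent chains, each from its own seed
chains = []
for seed in range(5):
    engine = LocalEngine(seed=seed)
    X_L, X_D = engine.initialize(column_metadata, row_metadata, data_table)
    X_L, X_D = engine.analyze(column_metadata, data_table, X_L, X_D,
                              n_steps=10)
    chains.append((X_L, X_D))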