def generate_schema():
    """Build the tablelike schema for this collection: three free-form
    categoric keys plus a default-selection flag."""
    sch = TablelikeSchema(COLLECTION_NAME)
    # all three are unconstrained strings (no enumerated valid values)
    for key_attr in ('species_number', 'analysis_network_name', 'edge_type_name'):
        sch.add_categoric_attribute(key_attr, valid_values=None)
    sch.add_boolean_attribute('selected_by_default')
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: per-job score
    payloads keyed by a foreign job id."""
    result = TablelikeSchema(COLLECTION_NAME)
    result.add_foreignid_attribute('job_id')
    # two json blobs flanking the numeric threshold, in the original order
    result.add_json_attribute('scores')
    result.add_numeric_attribute('minimum_score')
    result.add_json_attribute('gene_ids_to_names')
    return result
def generate_schema():
    """Build the tablelike schema for this collection: per-job user gene
    sets and their set-level scores."""
    tbl = TablelikeSchema(COLLECTION_NAME)
    tbl.add_foreignid_attribute('job_id')
    for json_field in ('user_gene_sets', 'set_level_scores'):
        tbl.add_json_attribute(json_field)
    tbl.add_numeric_attribute('minimum_score')
    return tbl
def generate_schema():
    """Build the tablelike schema for this collection: run metadata plus
    non-negative sample and OTU counts."""
    sch = TablelikeSchema(COLLECTION_NAME)
    for text_attr in ('date_of_run', 'display_name_short'):
        sch.add_categoric_attribute(text_attr, valid_values=None)
    # both counts are bounded below by zero, unbounded above
    for count_attr in ('num_samples', 'num_otus'):
        sch.add_numeric_attribute(count_attr, min_val=0, max_val=None)
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: sample keys linking
    a study sample to its tornado run."""
    sch = TablelikeSchema(COLLECTION_NAME)
    sch.add_categoric_attribute('study_key', valid_values=None)
    sch.add_foreignid_attribute('tornado_run_id')
    for sample_key in ('tornado_sample_key', 'study_sample_key'):
        sch.add_categoric_attribute(sample_key, valid_values=None)
    # the ordering of these values is the same as index_within_tornado_run
    # in the otus endpoint
    # FIXME this probably needs to be a new kind of attribute:
    # sparse_numeric_list_attribute
    #schema.add_numeric_list_attribute('otu_counts', min_val=None,
    #    max_val=None, min_num_vals=0, max_num_vals=None)
    return sch
def generate_schema():
    """
    A schema that will be given non-standard behavior in the api endpoints.
    """
    sub_schema = TablelikeSchema(COLLECTION_NAME)
    # one bounded float and one enumerated string, mirroring the main
    # hello_tablelike attributes with a '_sub' suffix
    sub_schema.add_numeric_attribute('flt_val_0_sub', min_val=-10.0, max_val=10.0)
    sub_schema.add_categoric_attribute('string_val_sub', valid_values=['x', 'y', 'z'])
    return sub_schema
def generate_schema():
    """Build the tablelike schema for this collection: species naming,
    display ordering, grouping, and a default-selection flag."""
    sch = TablelikeSchema(COLLECTION_NAME)
    for name_attr in ('species_number', 'short_latin_name', 'name'):
        sch.add_categoric_attribute(name_attr, valid_values=None)
    # unbounded integer controlling UI ordering
    sch.add_int_attribute('display_order', min_val=None, max_val=None)
    sch.add_categoric_attribute('group_name', valid_values=None)
    sch.add_boolean_attribute('selected_by_default')
    return sch
def generate_schema():
    """
    Main, global definition of the data type 'hello_tablelike'.

    From this single definition we can generate Eve configurations for
    server endpoints that read/write the data, a python client for working
    with batches of tablelike_entries that conform to this schema and talk
    to the server, and auto generated smoke tests that exercise the
    endpoints with randomly generated data.
    """
    sch = TablelikeSchema(COLLECTION_NAME)
    # one attribute of every supported kind
    sch.add_numeric_attribute('flt_val_0', min_val=0.0, max_val=None)
    sch.add_numeric_attribute('flt_val_1', min_val=-10.0, max_val=10.0)
    sch.add_categoric_attribute('string_val', valid_values=['x', 'y', 'z'])
    sch.add_categoric_list_attribute('cat_list_val', valid_values=['a', 'b', 'c'])
    sch.add_numeric_list_attribute('num_list_val')
    sch.add_foreignid_attribute('foreignid_val')
    sch.add_foreignid_list_attribute('foreignid_list_val')
    sch.add_json_attribute('json_val')
    sch.add_int_attribute('int_val')
    sch.add_int_list_attribute('int_list_val')
    # one single-attribute index and one compound index
    sch.add_index(['string_val'])
    sch.add_index(['string_val', 'int_val'])
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: display names, the
    source tornado run, a query string, and the selected sample ids."""
    sch = TablelikeSchema(COLLECTION_NAME)
    for label_attr in ('display_name_short', 'display_name_long'):
        sch.add_categoric_attribute(label_attr, valid_values=None)
    sch.add_foreignid_attribute('tornado_run_id')
    sch.add_categoric_attribute('query', valid_values=None)
    sch.add_numeric_attribute('num_samples', min_val=0, max_val=None)
    # possibly-empty, unbounded list of free-form sample id strings
    sch.add_categoric_list_attribute('sample_ids', valid_values=None,
                                     min_num_vals=0, max_num_vals=None)
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: user identity
    fields, password hash, superuser flag, and account provenance."""
    sch = TablelikeSchema(COLLECTION_NAME)
    for identity_attr in ('username', 'given_name', 'family_name',
                          'thumb_url', 'passlib_hash'):
        sch.add_categoric_attribute(identity_attr)
    sch.add_boolean_attribute('is_superuser')
    # origin is simple name for how the user was created, e.g.
    # users defined in the core nest_config have origin 'config:core'
    sch.add_categoric_attribute('origin')
    # optional: if 'origin' is an external system, external_id
    # allows you to save an identifier (as a string) to
    # cross-reference across the two systems
    sch.add_categoric_attribute('external_id')
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: several json result
    payloads keyed by a foreign job id."""
    sch = TablelikeSchema(COLLECTION_NAME)
    sch.add_foreignid_attribute('job_id')
    for payload_attr in ('top_genes', 'samples', 'genes_heatmap',
                         'samples_heatmap', 'phenotypes'):
        sch.add_json_attribute(payload_attr)
    return sch
def generate_schema():
    """Build the minimal tablelike schema for this collection: a single
    free-form 'name' attribute."""
    named_schema = TablelikeSchema(COLLECTION_NAME)
    named_schema.add_categoric_attribute('name')
    return named_schema
def generate_schema():
    """Build the tablelike schema for this collection: a job record with
    its pipeline, status, timestamps, and parameters."""
    valid_pipelines = [
        'sample_clustering',
        'gene_prioritization',
        'gene_set_characterization',
        'phenotype_prediction'
    ]
    valid_statuses = ['running', 'completed', 'failed']
    sch = TablelikeSchema(COLLECTION_NAME)
    for free_text in ('name', 'notes'):
        sch.add_categoric_attribute(free_text)
    sch.add_foreignid_attribute('project_id')
    # pipeline and status are restricted to the enumerations above
    sch.add_categoric_attribute('pipeline', valid_values=valid_pipelines)
    sch.add_categoric_attribute('status', valid_values=valid_statuses)
    # error message and timestamp fields are stored as plain strings
    for free_text in ('error', '_created', '_updated'):
        sch.add_categoric_attribute(free_text)
    sch.add_json_attribute('parameters')
    sch.add_boolean_attribute('favorite')
    return sch
def generate_schema():
    """
    Build the tablelike schema for this collection: per-node summary
    statistics within a cohort.

    Each of the three per-node quantities (num_unique_otus,
    normalized_entropy, relative_abundance) gets a mean plus histogram
    columns; num_unique_otus additionally has density-plot columns and
    relative_abundance additionally has quantiles.
    """
    num_quantile_levels = NUM_QUANTILES + 1
    sch = TablelikeSchema(COLLECTION_NAME)
    sch.add_foreignid_attribute('cohort_id')
    for name_attr in ('node_level', 'node_name'):
        sch.add_categoric_attribute(name_attr, valid_values=None)
    sch.add_numeric_attribute('node_idx', min_val=0)
    # NOTE(review): -1 presumably marks a node with no parent -- confirm
    sch.add_numeric_attribute('parent_node_idx', min_val=-1)
    sch.add_index(['cohort_id', 'node_level'])
    sch.add_index(['cohort_id', 'parent_node_idx'])

    def _add_histogram(prefix):
        # bin starts, ends, and heights plus a scalar count of zeros --
        # the same four columns are repeated for each statistic below
        for suffix in ('histo_bin_start_x', 'histo_bin_end_x',
                       'histo_bin_height_y'):
            sch.add_numeric_list_attribute(prefix + suffix,
                min_val=0.0, max_val=None,
                min_num_vals=None, max_num_vals=None)
        sch.add_numeric_attribute(prefix + 'histo_num_zeros',
            min_val=0.0, max_val=None)

    #schema.add_numeric_attribute('num_unique_otus_median',
    #    min_val=0, max_val=None)
    sch.add_numeric_attribute('num_unique_otus_mean',
        min_val=0, max_val=None)
    #schema.add_numeric_list_attribute('num_unique_otus_quantiles',
    #    min_val=None, max_val=None,
    #    min_num_vals=num_quantile_levels, max_num_vals=num_quantile_levels)
    for axis in ('x', 'y'):
        sch.add_numeric_list_attribute('num_unique_otus_density_plot_' + axis,
            min_val=0.0, max_val=None,
            min_num_vals=None, max_num_vals=None)
    _add_histogram('num_unique_otus_')

    sch.add_numeric_attribute('normalized_entropy_mean',
        min_val=0, max_val=None)
    _add_histogram('normalized_entropy_')

    #schema.add_numeric_attribute('relative_abundance_median',
    #    min_val=0, max_val=None)
    sch.add_numeric_attribute('relative_abundance_mean',
        min_val=0, max_val=None)
    sch.add_numeric_list_attribute('relative_abundance_quantiles',
        min_val=None, max_val=None,
        min_num_vals=num_quantile_levels, max_num_vals=num_quantile_levels)
    #schema.add_numeric_list_attribute('relative_abundance_density_plot_x',
    #    min_val=0.0, max_val=None,
    #    min_num_vals=num_quantile_levels, max_num_vals=num_quantile_levels)
    #schema.add_numeric_list_attribute('relative_abundance_density_plot_y',
    #    min_val=0.0, max_val=None,
    #    min_num_vals=num_quantile_levels, max_num_vals=num_quantile_levels)
    _add_histogram('relative_abundance_')
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: an uploaded file's
    project link, metadata, notes, and favorite flag."""
    sch = TablelikeSchema(COLLECTION_NAME)
    sch.add_foreignid_attribute('project_id')
    sch.add_categoric_attribute('filename')
    # TODO change filesize to an int; values are too large to store as
    # current numeric attribute (TOOL-398)
    sch.add_categoric_attribute('filesize')
    for meta_attr in ('filetype', 'uploadername', '_created', 'notes'):
        sch.add_categoric_attribute(meta_attr)
    sch.add_boolean_attribute('favorite')
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: super-collection
    membership with display ordering and a default-selection flag."""
    sch = TablelikeSchema(COLLECTION_NAME)
    for key_attr in ('super_collection', 'species_number', 'edge_type_name'):
        sch.add_categoric_attribute(key_attr, valid_values=None)
    sch.add_numeric_attribute('super_collection_display_index')
    sch.add_categoric_attribute('collection', valid_values=None)
    sch.add_boolean_attribute('collection_selected_by_default')
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: per-node comparison
    records with top FST OTU rankings and two compound lookup indexes."""
    sch = TablelikeSchema(COLLECTION_NAME)
    sch.add_foreignid_attribute('comparison_id')
    for name_attr in ('node_level', 'node_name'):
        sch.add_categoric_attribute(name_attr, valid_values=None)
    sch.add_numeric_attribute('node_idx', min_val=0)
    # NOTE(review): -1 presumably marks a node with no parent -- confirm
    sch.add_numeric_attribute('parent_node_idx', min_val=-1)
    sch.add_numeric_list_attribute('top_fst_otu_rankings_in_node')
    for index_cols in (['comparison_id', 'node_level'],
                       ['comparison_id', 'parent_node_idx']):
        sch.add_index(index_cols)
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: one record per OTU
    in a tornado run, with its name and per-level taxonomy."""
    sch = TablelikeSchema(COLLECTION_NAME)
    sch.add_foreignid_attribute('tornado_run_id')
    sch.add_numeric_attribute('index_within_tornado_run',
        min_val=0, max_val=None)
    # the 'id' of the otu as an observation in the biom_table associated
    # with the tornado run
    sch.add_categoric_attribute('tornado_observation_key', valid_values=None)
    sch.add_categoric_attribute('otu_name', valid_values=None)
    # one free-form categoric column per taxonomy level
    for taxa_level in TAXONOMY_LEVELS:
        sch.add_categoric_attribute(taxa_level, valid_values=None)
    return sch
def generate_schema():
    """Build the tablelike schema for this collection: gene-set metadata
    records, indexed by set_id."""
    sch = TablelikeSchema(COLLECTION_NAME)
    for text_attr in ('set_id', 'set_name'):
        sch.add_categoric_attribute(text_attr, valid_values=None)
    sch.add_numeric_attribute('species_id')
    sch.add_numeric_attribute('gene_count')
    for text_attr in ('collection', 'edge_type_name', 'supercollection', 'url'):
        sch.add_categoric_attribute(text_attr, valid_values=None)
    sch.add_index(['set_id'])
    return sch