def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a molecule-count blurb for the dataset.

    Fix: the original assigned ``dataset.peek`` twice (first via
    ``get_file_peek``, then overwrote it via ``data.get_file_peek``);
    a single assignment suffices and avoids reading the file twice.
    """
    if not dataset.dataset.purged:
        dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
        if dataset.metadata.number_of_molecules == 1:
            dataset.blurb = "1 molecule"
        else:
            dataset.blurb = "%s molecules" % dataset.metadata.number_of_molecules
    else:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and an alignment-count blurb for the dataset.

    Fix: the original called ``get_file_peek`` twice and assigned
    ``dataset.peek`` twice with the same value; the redundant second
    call (which re-read the file) is removed.
    """
    if not dataset.dataset.purged:
        dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
        if dataset.metadata.number_of_models == 1:
            dataset.blurb = "1 alignment"
        else:
            dataset.blurb = "%s alignments" % dataset.metadata.number_of_models
    else:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a blurb listing up to ten sequence names.

    Fix: the original assigned ``dataset.peek`` twice (first via
    ``get_file_peek``, then overwrote it via ``data.get_file_peek``);
    the redundant second file read is removed.
    """
    if not dataset.dataset.purged:
        dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
        if dataset.metadata.number_of_sequences == 1:
            dataset.blurb = "1 sequence [%s]" % dataset.metadata.name_of_sequences[0]
        else:
            # Show at most the first 10 names, with an ellipsis if truncated.
            seq_list = ', '.join(dataset.metadata.name_of_sequences[0:10])
            if len(dataset.metadata.name_of_sequences) > 10:
                seq_list += ', ...'
            dataset.blurb = "%s sequences [%s]" % (dataset.metadata.number_of_sequences, seq_list)
    else:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a face/vertex-count blurb for the dataset."""
    if dataset.dataset.purged:
        dataset.peek = 'File does not exist'
        dataset.blurb = 'File purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    meta = dataset.metadata
    dataset.blurb = "Faces: %s, Vertices: %s" % (str(meta.face), str(meta.vertex))
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text; the blurb is delegated to ``self.get_blurb``."""
    if dataset.dataset.purged:
        dataset.peek = 'File does not exist'
        dataset.blurb = 'File purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    dataset.blurb = self.get_blurb(dataset)
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a fixed "pharmacophore" blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = get_file_peek(dataset.file_name)
    dataset.blurb = "pharmacophore"
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a fixed "grids for docking" blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = get_file_peek(dataset.file_name)
    dataset.blurb = "grids for docking"
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a fixed OBO blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    dataset.blurb = "Open Biomedical Ontology (OBO)"
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a fixed "SNAP HMM model" blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name)
    dataset.blurb = "SNAP HMM model"
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a fixed JSON blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    dataset.blurb = "JavaScript Object Notation (JSON)"
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek and blurb text.

    Note: for a non-purged dataset only the peek is set here; the blurb
    is left untouched.
    """
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = data.get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a fixed "IPython Notebook" blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    dataset.blurb = "IPython Notebook"
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a fixed "ESTScan scores matrices" blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name)
    dataset.blurb = "ESTScan scores matrices"
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and an ARFF blurb with comment/attribute counts."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    meta = dataset.metadata
    # Base label plus the metadata-derived counts (same final text as before).
    blurb = "Attribute-Relation File Format (ARFF)"
    blurb += ", %s comments, %s attributes" % (meta.comment_lines, meta.columns)
    dataset.blurb = blurb
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek and blurb text."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = data.get_file_peek(dataset.file_name)
    dataset.blurb = 'splib Spectral Library Format'
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek and blurb text."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = data.get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    dataset.blurb = 'Spectral Library without index files'
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a blurb with ATOM/HETATM record counts."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    n_atoms = count_special_lines("^ATOM", dataset.file_name)
    n_hetatms = count_special_lines("^HETATM", dataset.file_name)
    dataset.peek = get_file_peek(dataset.file_name)
    dataset.blurb = "%s atoms and %s HET-atoms" % (n_atoms, n_hetatms)
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a blurb with ROOT/BRANCH record counts."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    n_roots = count_special_lines("^ROOT", dataset.file_name)
    n_branches = count_special_lines("^BRANCH", dataset.file_name)
    dataset.peek = get_file_peek(dataset.file_name)
    dataset.blurb = f"{n_roots} roots and {n_branches} branches"
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a blurb with ATOM/HETATM record counts."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    n_atoms = count_special_lines("^ATOM", dataset.file_name)
    n_hetatms = count_special_lines("^HETATM", dataset.file_name)
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    dataset.blurb = "%s atoms and %s HET-atoms" % (n_atoms, n_hetatms)
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and an ARFF blurb with comment/attribute counts."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name)
    meta = dataset.metadata
    blurb = "Attribute-Relation File Format (ARFF)"
    blurb += ", %s comments, %s attributes" % (meta.comment_lines, meta.columns)
    dataset.blurb = blurb
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek and blurb text; the blurb comes from ``self.blurb``."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = data.get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    dataset.blurb = self.blurb
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a blurb with ROOT/BRANCH record counts."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    n_roots = count_special_lines("^ROOT", dataset.file_name)
    n_branches = count_special_lines("^BRANCH", dataset.file_name)
    dataset.peek = get_file_peek(dataset.file_name)
    dataset.blurb = "%s roots and %s branches" % (n_roots, n_branches)
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a face/vertex-count blurb for the dataset."""
    if dataset.dataset.purged:
        dataset.peek = 'File does not exist'
        dataset.blurb = 'File purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    meta = dataset.metadata
    dataset.blurb = "Faces: %s, Vertices: %s" % (str(meta.face), str(meta.vertex))
def set_peek(self, dataset):
    """Set the peek text and a significant-component-count blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = get_file_peek(dataset.file_name)
    n_comp = dataset.metadata.number_comp
    if n_comp == 1:
        dataset.blurb = "1 significant component"
    else:
        dataset.blurb = f"{n_comp} significant components"
def set_peek(self, dataset, is_multi_byte=False):
    """Set peek text and a blurb with ATOM/HETATM counts and chain ids."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    n_atoms = count_special_lines("^ATOM", dataset.file_name)
    n_hetatms = count_special_lines("^HETATM", dataset.file_name)
    # Fall back to the literal 'None' when no chain ids were recorded.
    if len(dataset.metadata.chain_ids) > 0:
        chain_ids = ','.join(dataset.metadata.chain_ids)
    else:
        chain_ids = 'None'
    dataset.peek = get_file_peek(dataset.file_name)
    dataset.blurb = "%s atoms and %s HET-atoms\nchain_ids: %s" % (n_atoms, n_hetatms, str(chain_ids))
def set_peek(self, dataset):
    """Set the peek text and an alignment-count blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
        return
    n_models = dataset.metadata.number_of_models
    if n_models == 1:
        dataset.blurb = "1 alignment"
    else:
        dataset.blurb = f"{n_models} alignments"
    dataset.peek = get_file_peek(dataset.file_name)
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a significant-component-count blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = get_file_peek(dataset.file_name)
    n_comp = dataset.metadata.number_comp
    if n_comp == 1:
        dataset.blurb = "1 significant component"
    else:
        dataset.blurb = "%s significant components" % n_comp
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and an alignment-count blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disc'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    n_models = dataset.metadata.number_of_models
    if n_models == 1:
        dataset.blurb = "1 alignment"
    else:
        dataset.blurb = "%s alignments" % n_models
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text; blurb is a sequence count, or the file size if unknown."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    n_seqs = dataset.metadata.sequences
    if n_seqs:
        dataset.blurb = "%s sequences" % util.commaify(str(n_seqs))
    else:
        dataset.blurb = nice_size(dataset.get_size())
def set_peek(self, dataset):
    """Set the peek text; blurb is a sequence count, or the file size if unknown."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = get_file_peek(dataset.file_name)
    n_seqs = dataset.metadata.sequences
    if n_seqs:
        dataset.blurb = f"{util.commaify(str(n_seqs))} sequences"
    else:
        dataset.blurb = nice_size(dataset.get_size())
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text and a molecule-count blurb."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    n_mols = dataset.metadata.number_of_molecules
    if n_mols == 1:
        dataset.blurb = "1 molecule"
    else:
        dataset.blurb = "%s molecules" % n_mols
    dataset.peek = get_file_peek(dataset.file_name)
def set_peek(self, dataset):
    """Set peek text and a blurb summarizing type, format, block/header/column counts."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = get_file_peek(dataset.file_name)
    if dataset.metadata.block_count == 1:
        dataset.blurb = f"{dataset.metadata.file_type} {dataset.metadata.version_number}: Format {dataset.metadata.file_format}, 1 block, {dataset.metadata.number_of_optional_header_records} headers and {dataset.metadata.number_of_data_columns} columns"
    else:
        dataset.blurb = f"{dataset.metadata.file_type} {dataset.metadata.version_number}: Format {dataset.metadata.file_format}, {dataset.metadata.block_count} blocks, {dataset.metadata.number_of_optional_header_records} headers and {dataset.metadata.number_of_data_columns} columns"
def set_peek(self, dataset, is_multi_byte=False):
    """Set the peek text; blurb is a sequence count, or the file size if unknown."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
    n_seqs = dataset.metadata.sequences
    if n_seqs:
        dataset.blurb = "%s sequences" % util.commaify(str(n_seqs))
    else:
        dataset.blurb = nice_size(dataset.get_size())
def set_peek(self, dataset, is_multi_byte=False):
    """Set peek text and a blurb summarizing type, format, block/header/column counts."""
    if dataset.dataset.purged:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
        return
    meta = dataset.metadata
    if meta.block_count == 1:
        dataset.blurb = "{} {}: Format {}, 1 block, {} headers and {} columns".format(
            meta.file_type, meta.version_number, meta.file_format,
            meta.number_of_optional_header_records, meta.number_of_data_columns)
    else:
        dataset.blurb = "{} {}: Format {}, {} blocks, {} headers and {} columns".format(
            meta.file_type, meta.version_number, meta.file_format, meta.block_count,
            meta.number_of_optional_header_records, meta.number_of_data_columns)
    dataset.peek = get_file_peek(dataset.file_name)
def test_get_file_peek():
    # get_file_peek should return the first 5 lines of the file
    # without a trailing newline character.
    expected = 'chr22\t1000\tNM_17\nchr22\t2000\tNM_18\nchr10\t2200\tNM_10\nchr10\thap\ttest\nchr10\t1200\tNM_11'
    actual = get_file_peek('test-data/1.tabular', line_wrap=False)
    assert actual == expected
def test_get_file_peek():
    # get_file_peek should return the first 5 lines of the file
    # without a trailing newline character.
    fixture = os.path.join(galaxy_directory(), 'test-data/1.tabular')
    expected = 'chr22\t1000\tNM_17\nchr22\t2000\tNM_18\nchr10\t2200\tNM_10\nchr10\thap\ttest\nchr10\t1200\tNM_11\n'
    assert get_file_peek(fixture, line_wrap=False) == expected
def test_get_file_peek():
    # get_file_peek should return the first 5 lines of the file
    # without a trailing newline character.
    actual = get_file_peek('test-data/1.tabular', line_wrap=False)
    expected = 'chr22\t1000\tNM_17\nchr22\t2000\tNM_18\nchr10\t2200\tNM_10\nchr10\thap\ttest\nchr10\t1200\tNM_11'
    assert actual == expected