def format_mapping_file_to_js(mapping_file_data, mapping_file_headers,
                              columns):
    """Write a javascript representation of the mapping file

    Parameters
    ----------
    mapping_file_data : list of list of str
        contents of the mapping file
    mapping_file_headers : list of str
        headers of the mapping file
    columns : list of str
        valid columns to use, usually a subset of mapping_file_headers

    Returns
    -------
    str
        JavaScript representation of the mapping file
    """
    map_str = []
    mapping_file_dict = mapping_file_to_dict(mapping_file_data,
                                             mapping_file_headers)

    map_values = []
    for k, v in mapping_file_dict.items():
        if 'SampleID' in columns:
            vals = ["'%s'" % k] + ["'%s'" % v[col]
                                   for col in mapping_file_headers[1:]]
        else:
            vals = ["'%s'" % v[col] for col in mapping_file_headers[1:]]
        map_values.append("'%s': [%s]" % (k, ','.join(vals)))

    if 'SampleID' not in columns:
        mapping_file_headers = mapping_file_headers[1:]

    # format the mapping file as javascript objects
    map_str.append('var g_mappingFileHeaders = [%s];\n' %
                   ','.join(["'%s'" % col for col in mapping_file_headers]))
    map_str.append('var g_mappingFileData = { %s };\n' % ','.join(map_values))

    map_object = MetadataMap(mapping_file_dict, [])

    # make sure the comparison for SampleID is made first because otherwise
    # if the metadata map tries to check 'SampleID' it will raise an exception
    animatable_categories = [category for category in columns
                             if category != 'SampleID' and
                             map_object.isNumericCategory(category)]

    map_str.append('var g_animatableMappingFileHeaders = [%s];\n' %
                   ','.join(["'%s'" % col for col in animatable_categories]))

    return ''.join(map_str)
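# Minimal usage sketch for format_mapping_file_to_js (illustrative only; the
# sample values below are invented, and mapping_file_to_dict/MetadataMap are
# assumed to be importable from this package):
#
#     headers = ['SampleID', 'Treatment', 'DOB']
#     data = [['PC.354', 'Control', '20061218'],
#             ['PC.607', 'Fast', '20071112']]
#     js = format_mapping_file_to_js(data, headers, headers)
#     # js now holds the three 'var g_...' JavaScript declarations; 'DOB'
#     # appears in g_animatableMappingFileHeaders because every value in
#     # that column is numeric, while 'Treatment' does not.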
class MetadataMapTests(TestCase):
    """Tests for the MetadataMap class."""

    def setUp(self):
        """Create MetadataMap objects that will be used in the tests."""
        # Create a map using the overview tutorial mapping file.
        self.overview_map_str = [
            "#SampleID\tBarcodeSequence\tTreatment\tDOB\tDescription",
            "PC.354\tAGCACGAGCCTA\tControl\t20061218\t354",
            "PC.355\tAACTCGTCGATG\tControl\t20061218\t355",
            "PC.356\tACAGACCACTCA\tControl\t20061126\t356",
            "PC.481\tACCAGCGACTAG\tControl\t20070314\t481",
            "PC.593\tAGCAGCACTTGT\tControl\t20071210\t593",
            "PC.607\tAACTGTGCGTAC\tFast\t20071112\t607",
            "PC.634\tACAGAGTCGGCT\tFast\t20080116\t634",
            "PC.635\tACCGCAGAGTCA\tFast\t20080116\t635",
            "PC.636\tACGGTGAGTGTC\tFast\t20080116\t636"
        ]
        self.overview_map = MetadataMap(
            *parse_mapping_file_to_dict(self.overview_map_str))

        # Create the same overview tutorial map, but this time with some
        # comments.
        self.comment = "# Some comments about this mapping file"
        self.map_with_comments_str = self.overview_map_str[:]
        self.map_with_comments_str.insert(1, self.comment)
        self.map_with_comments = MetadataMap(
            *parse_mapping_file_to_dict(self.map_with_comments_str))

        # Create a MetadataMap object that has no metadata (i.e. no sample
        # IDs, so no metadata about samples).
        self.empty_map = MetadataMap({}, [])

        # Create a MetadataMap object that has samples (i.e. sample IDs) but
        # no associated metadata (i.e. no columns other than SampleID).
        self.no_metadata_str = [
            "#SampleID",
            "PC.354", "PC.355", "PC.356", "PC.481", "PC.593", "PC.607",
            "PC.634", "PC.635", "PC.636"
        ]
        self.no_metadata = MetadataMap(
            *parse_mapping_file_to_dict(self.no_metadata_str))

        # Create a MetadataMap object that has a category with only one value
        # throughout the entire column.
        self.single_value_str = [
            "#SampleID\tFoo",
            "PC.354\tfoo", "PC.355\tfoo", "PC.356\tfoo", "PC.481\tfoo",
            "PC.593\tfoo", "PC.607\tfoo", "PC.634\tfoo", "PC.635\tfoo",
            "PC.636\tfoo"
        ]
        self.single_value = MetadataMap(
            *parse_mapping_file_to_dict(self.single_value_str))

    def test_parseMetadataMap(self):
        """Test parsing a mapping file into a MetadataMap instance."""
        obs = MetadataMap.parseMetadataMap(self.overview_map_str)
        self.assertEqual(obs, self.overview_map)

    def test_parseMetadataMap_empty(self):
        """Test parsing empty mapping file contents."""
        self.assertRaises(QiimeParseError, MetadataMap.parseMetadataMap, [])

    def test_eq(self):
        """Test whether two MetadataMaps are equal."""
        self.assertTrue(self.empty_map == MetadataMap({}, []))
        self.assertTrue(self.overview_map == MetadataMap(
            self.overview_map._metadata, self.overview_map.Comments))

    def test_ne(self):
        """Test whether two MetadataMaps are not equal."""
        self.assertTrue(self.empty_map != MetadataMap({}, ["foo"]))
        self.assertTrue(self.overview_map != MetadataMap(
            self.overview_map._metadata, ["foo"]))
        self.assertTrue(
            self.overview_map != MetadataMap({}, self.overview_map.Comments))
        self.assertTrue(self.overview_map != self.empty_map)
        self.assertTrue(self.overview_map != self.map_with_comments)
        self.assertTrue(self.overview_map != self.no_metadata)

    def test_getSampleMetadata(self):
        """Test metadata by sample ID accessor with valid sample IDs."""
        exp = {'BarcodeSequence': 'AGCACGAGCCTA', 'Treatment': 'Control',
               'DOB': '20061218', 'Description': '354'}
        obs = self.overview_map.getSampleMetadata('PC.354')
        self.assertEqual(obs, exp)

        exp = {'BarcodeSequence': 'ACCAGCGACTAG', 'Treatment': 'Control',
               'DOB': '20070314', 'Description': '481'}
        obs = self.map_with_comments.getSampleMetadata('PC.481')
        self.assertEqual(obs, exp)

        exp = {'BarcodeSequence': 'ACGGTGAGTGTC', 'Treatment': 'Fast',
               'DOB': '20080116', 'Description': '636'}
        obs = self.map_with_comments.getSampleMetadata('PC.636')
        self.assertEqual(obs, exp)

        exp = {}
        obs = self.no_metadata.getSampleMetadata('PC.636')
        self.assertEqual(obs, exp)

    def test_getSampleMetadata_bad_sample_id(self):
        """Test metadata by sample ID accessor with invalid sample IDs."""
        # Nonexistent sample ID.
        self.assertRaises(KeyError, self.overview_map.getSampleMetadata,
                          'PC.000')
        self.assertRaises(KeyError, self.no_metadata.getSampleMetadata,
                          'PC.000')
        # Integer sample ID.
        self.assertRaises(KeyError, self.overview_map.getSampleMetadata, 42)
        # Sample ID of type None.
        self.assertRaises(KeyError, self.overview_map.getSampleMetadata, None)
        # Sample ID on empty map.
        self.assertRaises(KeyError, self.empty_map.getSampleMetadata, 's1')
        # Integer sample ID on empty map.
        self.assertRaises(KeyError, self.empty_map.getSampleMetadata, 1)
        # Sample ID of None on empty map.
        self.assertRaises(KeyError, self.empty_map.getSampleMetadata, None)

    def test_getCategoryValue(self):
        """Test category value by sample ID/category name accessor."""
        exp = "Fast"
        obs = self.overview_map.getCategoryValue('PC.634', 'Treatment')
        self.assertEqual(obs, exp)

        exp = "20070314"
        obs = self.overview_map.getCategoryValue('PC.481', 'DOB')
        self.assertEqual(obs, exp)

        exp = "ACGGTGAGTGTC"
        obs = self.map_with_comments.getCategoryValue('PC.636',
                                                      'BarcodeSequence')
        self.assertEqual(obs, exp)

    def test_getCategoryValues(self):
        """Test category value list by sample ID/category name accessor."""
        smpl_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593',
                    'PC.607', 'PC.634', 'PC.635', 'PC.636']
        exp = ['Control', 'Control', 'Control', 'Control', 'Control',
               'Fast', 'Fast', 'Fast', 'Fast']
        obs = self.overview_map.getCategoryValues(smpl_ids, 'Treatment')
        self.assertEqual(obs, exp)

    def test_isNumericCategory(self):
        """Test checking if a category is numeric."""
        obs = self.overview_map.isNumericCategory('Treatment')
        self.assertEqual(obs, False)

        obs = self.overview_map.isNumericCategory('DOB')
        self.assertEqual(obs, True)

    def test_hasUniqueCategoryValues(self):
        """Test checking if a category has unique values."""
        obs = self.overview_map.hasUniqueCategoryValues('Treatment')
        self.assertEqual(obs, False)

        obs = self.overview_map.hasUniqueCategoryValues('DOB')
        self.assertEqual(obs, False)

        obs = self.overview_map.hasUniqueCategoryValues('Description')
        self.assertEqual(obs, True)

    def test_hasSingleCategoryValue(self):
        """Test checking if a category has only a single value."""
        obs = self.overview_map.hasSingleCategoryValue('Treatment')
        self.assertEqual(obs, False)

        obs = self.single_value.hasSingleCategoryValue('Foo')
        self.assertEqual(obs, True)

    def test_getCategoryValue_bad_sample_id(self):
        """Test category value by sample ID accessor with bad sample IDs."""
        # Nonexistent sample ID.
        self.assertRaises(KeyError, self.overview_map.getCategoryValue,
                          'PC.000', 'Treatment')
        self.assertRaises(KeyError, self.no_metadata.getCategoryValue,
                          'PC.000', 'Treatment')
        # Integer sample ID.
        self.assertRaises(KeyError, self.overview_map.getCategoryValue,
                          42, 'DOB')
        # Sample ID of type None.
        self.assertRaises(KeyError, self.overview_map.getCategoryValue,
                          None, 'Treatment')
        # Sample ID on empty map.
        self.assertRaises(KeyError, self.empty_map.getCategoryValue,
                          's1', 'foo')
        # Integer sample ID on empty map.
        self.assertRaises(KeyError, self.empty_map.getCategoryValue,
                          1, 'bar')
        # Sample ID of None on empty map.
        self.assertRaises(KeyError, self.empty_map.getCategoryValue,
                          None, 'baz')

    def test_getCategoryValue_bad_category(self):
        """Test category value by sample ID accessor with bad categories."""
        # Nonexistent category.
        self.assertRaises(KeyError, self.overview_map.getCategoryValue,
                          'PC.354', 'foo')
        # Integer category.
        self.assertRaises(KeyError, self.overview_map.getCategoryValue,
                          'PC.354', 42)
        # Category of type None.
        self.assertRaises(KeyError, self.overview_map.getCategoryValue,
                          'PC.354', None)
        # Category on map with no metadata, but that has sample IDs.
        self.assertRaises(KeyError, self.no_metadata.getCategoryValue,
                          'PC.354', 'Treatment')
        # Integer category on map with no metadata.
        self.assertRaises(KeyError, self.no_metadata.getCategoryValue,
                          'PC.354', 34)
        # Category of type None on map with no metadata.
        self.assertRaises(KeyError, self.no_metadata.getCategoryValue,
                          'PC.354', None)

    def test_SampleIds(self):
        """Test sample IDs accessor."""
        exp = ["PC.354", "PC.355", "PC.356", "PC.481", "PC.593", "PC.607",
               "PC.634", "PC.635", "PC.636"]
        obs = self.overview_map.SampleIds
        self.assertEqual(obs, exp)

        obs = self.no_metadata.SampleIds
        self.assertEqual(obs, exp)

        obs = self.empty_map.SampleIds
        self.assertEqual(obs, [])

    def test_CategoryNames(self):
        """Test category names accessor."""
        exp = ["BarcodeSequence", "DOB", "Description", "Treatment"]
        obs = self.overview_map.CategoryNames
        self.assertEqual(obs, exp)

        obs = self.no_metadata.CategoryNames
        self.assertEqual(obs, [])

        obs = self.empty_map.CategoryNames
        self.assertEqual(obs, [])

    def test_filterSamples(self):
        """Test filtering out samples from metadata map."""
        exp = ['PC.356', 'PC.593']
        self.overview_map.filterSamples(['PC.593', 'PC.356'])
        obs = self.overview_map.SampleIds
        self.assertEqual(obs, exp)

        self.overview_map.filterSamples([])
        self.assertEqual(self.overview_map.SampleIds, [])

    def test_filterSamples_strict(self):
        """Test strict checking of sample presence when filtering."""
        with self.assertRaises(ValueError):
            self.overview_map.filterSamples(['PC.356', 'abc123'])

        with self.assertRaises(ValueError):
            self.empty_map.filterSamples(['foo'])

    def test_filterSamples_no_strict(self):
        """Test that missing samples do not raise an error."""
        self.overview_map.filterSamples(['PC.356', 'abc123'], strict=False)
        self.assertEqual(self.overview_map.SampleIds, ['PC.356'])

        self.empty_map.filterSamples(['foo'], strict=False)
        self.assertEqual(self.empty_map.SampleIds, [])

    def test_is_valid_git_refname(self):
        """Test correct validation of refnames"""
        # valid branch names
        self.assertTrue(is_valid_git_refname('master'))
        self.assertTrue(is_valid_git_refname('debuggatron_2000'))
        self.assertTrue(is_valid_git_refname('refname/bar'))
        self.assertTrue(is_valid_git_refname('ref.nameslu/_eggs_/spam'))
        self.assertTrue(is_valid_git_refname('valid{0}char'.format(
            unichr(40))))
        self.assertTrue(is_valid_git_refname('master@head'))
        self.assertTrue(is_valid_git_refname('bar{thing}foo'))

        # case happening with git < 1.6.6
        self.assertFalse(is_valid_git_refname(
            '--abbrev-ref\nbaa350d7b7063d585ca293fc16ef15e0765dc9ee'))

        # different invalid refnames, for a description of each group see the
        # man page of git check-ref-format
        self.assertFalse(is_valid_git_refname('bar/.spam/eggs'))
        self.assertFalse(is_valid_git_refname('bar.lock/spam/eggs'))
        self.assertFalse(is_valid_git_refname('bar.lock'))
        self.assertFalse(is_valid_git_refname('.foobar'))
        self.assertFalse(is_valid_git_refname('ref..name'))
        self.assertFalse(is_valid_git_refname(u'invalid{0}char'.format(
            unichr(177))))
        self.assertFalse(is_valid_git_refname('invalid{0}char'.format(
            unichr(39))))
        self.assertFalse(is_valid_git_refname('ref~name/bar'))
        self.assertFalse(is_valid_git_refname('refname spam'))
        self.assertFalse(is_valid_git_refname('bar/foo/eggs~spam'))
        self.assertFalse(is_valid_git_refname('bar:_spam_'))
        self.assertFalse(is_valid_git_refname('eggtastic^2'))
        self.assertFalse(is_valid_git_refname('areyourandy?'))
        self.assertFalse(is_valid_git_refname('bar/*/spam'))
        self.assertFalse(is_valid_git_refname('bar[spam]/eggs'))
        self.assertFalse(is_valid_git_refname('/barfooeggs'))
        self.assertFalse(is_valid_git_refname('barfooeggs/'))
        self.assertFalse(is_valid_git_refname('bar/foo//////eggs'))
        self.assertFalse(is_valid_git_refname('dotEnding.'))
        self.assertFalse(is_valid_git_refname('@{branch'))
        self.assertFalse(is_valid_git_refname('contains\\slash'))
        self.assertFalse(is_valid_git_refname('$newbranch'))

    def test_is_valid_git_sha1(self):
        """Test correct validation of sha1 strings"""
        # valid sha1 strings
        self.assertTrue(
            is_valid_git_sha1('65a9ba2ef4b126fb5b054ea6b89b457463db4ec6'))
        self.assertTrue(
            is_valid_git_sha1('a29a9911e41253405494c43889925a6d79ca26db'))
        self.assertTrue(
            is_valid_git_sha1('e099cd5fdea89eba929d6051fbd26cc9e7a0c961'))
        self.assertTrue(
            is_valid_git_sha1('44235d322c3386bd5ce872d9d7ea2e10d27c86cb'))
        self.assertTrue(
            is_valid_git_sha1('7d2fc23E04540EE92c742948cca9ed5bc54d08d1'))
        self.assertTrue(
            is_valid_git_sha1('fb5dc0285a8b11f199c4f3a7547a2da38138373f'))
        self.assertTrue(
            is_valid_git_sha1('0b2abAEb195ba7ebc5cfdb53213a66fbaddefdb8'))

        # invalid length
        self.assertFalse(is_valid_git_sha1('cca9ed5bc54d08d1'))
        self.assertFalse(is_valid_git_sha1(''))

        # invalid characters
        self.assertFalse(
            is_valid_git_sha1('fb5dy0f85a8b11f199c4f3a75474a2das8138373'))
        self.assertFalse(
            is_valid_git_sha1('0x5dcc816fbc1c2e8eX087d7d2ed8d2950a7c16b'))
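# The test class above follows the stdlib unittest protocol (the QIIME-era
# TestCase base class derives from unittest.TestCase), so the module can be
# run directly. This runner guard is an addition, assuming no custom test
# harness is required; the alias avoids shadowing the script-level main()
# defined later in this document.
if __name__ == '__main__':
    from unittest import main as unittest_main
    unittest_main()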
def preprocess_mapping_file(data, headers, columns, unique=False,
                            single=False, clones=0):
    """Process a mapping file to expand the data or remove fields that are
    not useful

    Parameters
    ----------
    data: list of list of str
        mapping file data
    headers: list of str
        mapping file headers
    columns: list of str
        headers to keep; if one of these headers includes two ampersands,
        this function will create a new column by merging the delimited
        columns
    unique: bool, optional
        keep columns where all values are unique. Default: False
    single: bool, optional
        keep columns where all values are the same. Default: False
    clones: int, optional
        number of times to replicate the metadata. Default: 0

    Returns
    -------
    data: list of list of str
        processed mapping file data
    headers: list of str
        processed mapping file headers
    """
    # The sample ID must always be there, else it's meaningless data
    if 'SampleID' != columns[0]:
        columns = ['SampleID'] + columns

    # process concatenated columns if needed
    merge = []
    for column in columns:
        # the list can contain None, so check "if column" before treating it
        # as a string
        if column and '&&' in column:
            merge.append(column)
    # each element needs several columns to be merged
    for new_column in merge:
        indices = [headers.index(header_name)
                   for header_name in new_column.split('&&')]
        # join all the fields of the metadata that are listed in indices
        for line in data:
            line.append(''.join([line[index] for index in indices]))
        headers.append(new_column)

    # remove all unique or single-valued columns that are not included in
    # the list of categories that should be kept, i.e. columns
    if unique or single:
        columns_to_remove = []
        metadata = MetadataMap(mapping_file_to_dict(data, headers), [])

        # the --color_by option in the script interface allows the user to
        # specify the categories to use in the generated plot; the default
        # behaviour is to color by all categories that are not unique. If
        # the user specifies a category with the --color_by option and this
        # category contains unique values, it must still be added, hence the
        # two different routes below: (1) no value is specified in the CLI
        # (the value of columns will be [None, x1, x2, x3] where x{1,2,3}
        # are categories requested in other CLI options) and (2) a value is
        # specified in the CLI.
        #
        # TL;DR
        # see https://github.com/biocore/emperor/issues/271
        if None in columns:
            columns = headers[:]
            f_unique = metadata.hasUniqueCategoryValues
            f_single = metadata.hasSingleCategoryValue
        else:
            def f_unique(x):
                return (metadata.hasUniqueCategoryValues(x) and
                        x not in columns)

            def f_single(x):
                return (metadata.hasSingleCategoryValue(x) and
                        x not in columns)

        # find columns that have values that are all unique
        if unique:
            for c in headers[1::]:
                if f_unique(c):
                    columns_to_remove.append(c)

        # remove categories where there is only one value
        if single:
            for c in headers[1::]:
                if f_single(c):
                    columns_to_remove.append(c)

        columns_to_remove = list(set(columns_to_remove))

        # remove the single or unique columns
        data, headers = keep_columns_from_mapping_file(data, headers,
                                                       columns_to_remove,
                                                       negate=True)
    else:
        # when a None is contained in columns, we imply we want to use all
        # the available categories in the mapping file, thus just overwrite
        # the value
        if None in columns:
            columns = headers[:]

    # remove anything not specified in the input
    data, headers = keep_columns_from_mapping_file(data, headers, columns)

    # sanitize the mapping file data and headers
    data, headers = sanitize_mapping_file(data, headers)

    # clones means: replicate the metadata, retagging the sample ids with a
    # suffix
    if clones:
        out_data = []
        for index in range(0, clones):
            out_data.extend([[element[0] + '_%d' % index] + element[1::]
                             for element in data])
        data = out_data

    return data, headers
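# Illustrative sketch of the '&&' merging behaviour described above (the
# sample rows are invented; mapping_file_to_dict,
# keep_columns_from_mapping_file and sanitize_mapping_file must be in scope
# for the call to work):
#
#     headers = ['SampleID', 'Treatment', 'DOB']
#     data = [['PC.354', 'Control', '20061218'],
#             ['PC.607', 'Fast', '20071112']]
#     data, headers = preprocess_mapping_file(
#         data, headers, ['SampleID', 'Treatment&&DOB'])
#     # each row gains the merged field (e.g. 'Control20061218') and only
#     # the requested columns survive; the exact header spelling afterwards
#     # depends on what sanitize_mapping_file rewrites.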
def main(args):
    # option_parser, opts, args = parse_command_line_parameters(**script_info)
    # ``args`` replaces the old option parser output; it must provide the
    # attributes used below (input_coords, map_fp and output_dir).
    input_coords = args.input_coords
    map_fp = args.map_fp
    output_dir = args.output_dir
    color_by_column_names = None        # opts.color_by
    add_unique_columns = False          # opts.add_unique_columns
    custom_axes = None                  # opts.custom_axes
    ignore_missing_samples = False      # opts.ignore_missing_samples
    missing_custom_axes_values = None   # opts.missing_custom_axes_values
    jackknifing_method = 'IQR'          # opts.ellipsoid_method
    master_pcoa = None                  # opts.master_pcoa
    taxa_fp = None                      # opts.taxa_fp
    n_taxa_to_keep = False              # opts.n_taxa_to_keep
    biplot_fp = None                    # opts.biplot_fp
    add_vectors = [None, None]          # opts.add_vectors
    verbose_output = False              # opts.verbose
    number_of_axes = 10                 # opts.number_of_axes
    compare_plots = False               # opts.compare_plots
    number_of_segments = 8              # opts.number_of_segments
    pct_variation_below_one = True      # opts.pct_variation_below_one

    # add some metadata to the output
    emperor_autograph = format_emperor_autograph(map_fp, input_coords,
                                                 'HTML')

    # verifying that the number of axes requested is greater than 3
    if number_of_axes < 3:
        print('You need to plot at least 3 axes.')

    # verifying that the number of segments is between the desired range
    if not (4 <= number_of_segments <= 14):
        print('number_of_segments should be between 4 and 14.')

    # append header names that the script didn't find in the mapping file
    # according to different criteria to the following variables
    offending_fields = []
    non_numeric_categories = []

    serial_comparison = True

    # can't do averaged pcoa plots _and_ custom axes in the same plot
    if custom_axes is not None and isdir(input_coords):
        if custom_axes.count(',') > 0:
            print('Jackknifed plots are limited to one custom axis, '
                  'currently trying to use: %s. Make sure you use only one.'
                  % custom_axes)

    # make sure the flag is not misunderstood from the command line interface
    if not isdir(input_coords) and compare_plots:
        print("Cannot use the '--compare_plots' flag unless the input path "
              "is a directory.")

    # before creating any output, check correct parsing of the main input
    # files
    try:
        mapping_data, header, comments = parse_mapping_file(
            open(map_fp, 'U'))
    except:
        sys.exit(("The metadata mapping file '%s' does not seem to be "
                  "formatted correctly, verify the formatting is QIIME "
                  "compliant by using validate_mapping_file.py") % map_fp)

    # use this set variable to make presence/absence checks faster
    lookup_header = set(header)
    mapping_ids = {row[0] for row in mapping_data}

    # dir means jackknifing or coordinate comparison type of processing
    if isdir(input_coords):
        offending_coords_fp = []
        coords_headers = []
        coords_data = []
        coords_eigenvalues = []
        coords_pct = []

        coord_fps = guess_coordinates_files(input_coords)

        # QIIME generates folders of transformed coordinates for the
        # specific purpose of connecting all coordinates to a set of origin
        # coordinates. The name of this file is suffixed as
        # _transformed_reference.txt
        trans_suf = '_transformed_reference.txt'
        transformed = [f for f in coord_fps if f.endswith(trans_suf)]

        # this could happen and we rather avoid this problem
        if len(coord_fps) == 0:
            print('Could not use any of the files in the input directory.')

        # the master pcoa must be the first in the list of coordinates;
        # however if the visualization is not a jackknifed plot this gets
        # ignored
        if master_pcoa and not compare_plots:
            if master_pcoa in coord_fps:  # remove it if duplicated
                coord_fps.remove(master_pcoa)
            coord_fps = [master_pcoa] + coord_fps  # prepend it to the list
        # passing a master file means that the comparison is not serial
        elif master_pcoa and compare_plots:
            serial_comparison = False

            # guarantee that the master is the first and is not repeated
            if master_pcoa in coord_fps:
                coord_fps.remove(master_pcoa)
                sorted_filenames = sort_comparison_filenames(coord_fps)
                coord_fps = [master_pcoa] + sorted_filenames
        elif master_pcoa is None and len(transformed):
            master_pcoa = transformed[0]
            serial_comparison = False

            # Note: the following steps are to guarantee consistency;
            # remove the master from the list and re-add it as a first
            # element. The rest of the files must be sorted alphabetically
            # so the result will be:
            # ['unifrac_transformed_reference.txt',
            #  'unifrac_transformed_q1.txt', 'unifrac_transformed_q2.txt']
            # etc
            coord_fps.remove(master_pcoa)
            coord_fps = [master_pcoa] + sort_comparison_filenames(coord_fps)

        for fp in coord_fps:
            try:
                parsed = parse_coords(open(fp, 'U'))
            except (ValueError, QiimeParseError):
                offending_coords_fp.append(fp)
                # do not add any of the data and move along
                continue
            else:
                # pack all the data correspondingly only if it was correctly
                # parsed
                coords_headers.append(parsed[0])
                coords_data.append(parsed[1])
                coords_eigenvalues.append(parsed[2])
                coords_pct.append(parsed[3])

        # in case there were files that couldn't be parsed
        if offending_coords_fp:
            errout = ', '.join(offending_coords_fp)
            sys.exit(("The following file(s): '%s' could not be parsed "
                      "properly. Make sure the input folder only contains "
                      "coordinates files.") % errout)

        # check all files contain the same sample identifiers by flattening
        # the list of available sample ids and returning the sample ids that
        # are in one of the sets of sample ids but not in the globally
        # shared ids
        _coords_headers = set(flatten(coords_headers))
        _per_file_missing = [_coords_headers - set(e)
                             for e in coords_headers]
        non_shared_ids = set(flatten(_per_file_missing))
        if non_shared_ids:
            errout = ', '.join(non_shared_ids)
            sys.exit(("The following sample identifier(s): '%s' are not "
                      "shared between all the files. The files used to make "
                      "a jackknifed PCoA plot or coordinate comparison plot "
                      "(procrustes plot) must share all the same sample "
                      "identifiers between each other.") % errout)

        # number of samples ids that are shared between coords and mapping
        # files
        sids_intersection = mapping_ids.intersection(_coords_headers)

        # sample ids that are not mapped but are in the coords
        sids_difference = _coords_headers.difference(mapping_ids)

        # used to perform different validations in the script, very similar
        # for the case where the input is not a directory
        number_intersected_sids = len(sids_intersection)
        required_number_of_sids = len(coords_headers[0])

    else:
        try:
            parsed = parse_coords(open(input_coords, 'U'))
        # this exception was noticed when there were letters in the coords
        # file; other exceptions should be caught here and the code will be
        # updated then
        except (ValueError, QiimeParseError):
            sys.exit(("The PCoA file '%s' does not seem to be a coordinates "
                      "formatted file, verify by manually inspecting the "
                      "contents.") % input_coords)
        else:
            coords_headers = parsed[0]
            coords_data = parsed[1]
            coords_eigenvalues = parsed[2]
            coords_pct = parsed[3]

        # number of samples ids that are shared between coords and mapping
        # files
        sids_intersection = mapping_ids.intersection(coords_headers)
        # sample ids that are not mapped but are in the coords
        sids_difference = set(coords_headers).difference(mapping_ids)
        number_intersected_sids = len(sids_intersection)
        required_number_of_sids = len(coords_headers)

    if taxa_fp:
        try:
            # This should really use BIOM's Table.from_tsv
            # for summarized tables the "otu_ids" are really the "lineages"
            parsed = parse_otu_table(open(taxa_fp, 'U'), count_map_f=float,
                                     remove_empty_rows=True)
        except ValueError as e:
            sys.exit("There was a problem parsing the --taxa_fp: %s"
                     % e.message)
        else:
            otu_sample_ids = parsed[0]
            lineages = parsed[1]
            otu_table = parsed[2]

        # make sure there are matching sample ids with the otu table
        if not sids_intersection.issuperset(otu_sample_ids):
            sys.exit("The sample identifiers in the OTU table must have at "
                     "least one match with the data in the mapping file and "
                     "with the coordinates file. Verify you are using input "
                     "files that belong to the same dataset.")
        if len(lineages) <= 1:
            sys.exit("Contingency tables with one or fewer rows are not "
                     "supported, please try passing a contingency table "
                     "with more than one row.")
    else:
        # empty lists indicate that there was no taxa file passed in
        otu_sample_ids, lineages, otu_table = [], [], []

    # sample ids must be shared between files
    if number_intersected_sids <= 0:
        sys.exit('None of your sample identifiers match between the mapping '
                 'file and the coordinates file. Verify you are using a '
                 'coordinates file and a mapping file that belong to the '
                 'same dataset.')

    # the intersection of the sample ids in the coords and the sample ids in
    # the mapping file must at the very least include all ids in the coords
    # file, otherwise it isn't valid; unless --ignore_missing_samples is set
    # to True
    if number_intersected_sids != required_number_of_sids:
        if ignore_missing_samples:
            # keep only the samples that are mapped in the mapping file
            coords_headers, coords_data = keep_samples_from_pcoa_data(
                coords_headers, coords_data, sids_intersection)
        else:
            message = ("The metadata mapping file has fewer sample "
                       "identifiers than the coordinates file. Verify you "
                       "are using a mapping file that contains at least all "
                       "the samples contained in the coordinates file(s). "
                       "You can force the script to ignore these samples by "
                       "passing the '--ignore_missing_samples' flag.")
            if verbose_output:
                missing_ids = ', '.join(sids_difference)
                message += (' Offending sample identifier(s): %s.'
                            % missing_ids)
            sys.exit(message)

    # ignore samples that exist in the coords but not in the mapping file,
    # note: we're using sids_intersection so if --ignore_missing_samples is
    # enabled we account for unmapped coords, else the program will exit
    # before this point
    header, mapping_data = filter_mapping_file(mapping_data, header,
                                               sids_intersection,
                                               include_repeat_cols=True)

    # catch the errors that could occur when filling the mapping file values
    if missing_custom_axes_values:
        # the fact that this uses parse_metadata_state_descriptions makes
        # the following option '-x Category:7;PH:12' work as well as the
        # script-interface-documented '-x Category:7 -x PH:12' option
        for val in missing_custom_axes_values:
            if ':' not in val:
                sys.exit("Not valid missing value for custom axes: %s"
                         % val)
        _mcav = ';'.join(missing_custom_axes_values)
        try:
            mapping_data = fill_mapping_field_from_mapping_file(
                mapping_data, header, _mcav)
        except AssertionError as e:
            print(e.message)
        except EmperorInputFilesError as e:
            print(e.message)

    # check that all the required columns exist in the metadata mapping file
    if color_by_column_names:
        color_by_column_names = color_by_column_names.split(',')

        # check for all the mapping fields
        for col in color_by_column_names:
            # for concatenated columns check each individual field
            parts = col.split('&&')
            offending_fields.extend(p for p in parts
                                    if p not in lookup_header)
    else:
        # if the user didn't specify the header names display everything
        color_by_column_names = [None]

    # extract a list of the custom axes provided and each element is numeric
    if custom_axes:
        custom_axes = custom_axes.strip().strip("'").strip('"').split(',')

        # the MetadataMap object makes some checks easier
        map_object = MetadataMap(mapping_file_to_dict(mapping_data, header),
                                 [])
        for axis in custom_axes:
            # append the field to the error queue that it belongs to
            if axis not in lookup_header:
                offending_fields.append(axis)
                break
            # make sure this value is in the mapping file
            elif axis not in color_by_column_names:
                color_by_column_names.append(axis)
        # perform only if the for loop does not call break
        else:
            # make sure all these axes are numeric
            for axis in custom_axes:
                if not map_object.isNumericCategory(axis):
                    non_numeric_categories.append(axis)

    # make multiple checks for the add_vectors option
    if add_vectors != [None, None]:
        add_vectors = add_vectors.split(',')
        # check there are at the most two categories specified for this
        # option
        if len(add_vectors) > 2:
            print("The '--add_vectors' option can accept up to two "
                  "different fields from the mapping file; currently trying "
                  "to use %d (%s)." % (len(add_vectors),
                                       ', '.join(add_vectors)))

        # make sure the field(s) exist
        for col in add_vectors:
            # concatenated fields are allowed now so check for each field
            if '&&' in col:
                for _col in col.split('&&'):
                    if _col not in lookup_header:
                        offending_fields.append(col)
                        break
                # only execute this block of code if all checked fields
                # exist
                else:
                    # make sure that if it's going to be used for vector
                    # creation it gets used for coloring and map
                    # postprocessing
                    if col not in color_by_column_names:
                        color_by_column_names.append(col)
            # if it's a column without concatenations
            elif col not in lookup_header:
                offending_fields.append(col)
                break
            else:
                # check this vector value is in the color by category
                if col not in color_by_column_names:
                    color_by_column_names.append(col)
        # perform only if the for loop does not call break
        else:
            # check that the second category is all with numeric values
            if len(add_vectors) == 2:
                map_object = MetadataMap(mapping_file_to_dict(mapping_data,
                                                              header), [])
                # if it has non-numeric values add it to the list of
                # offenders
                if not map_object.isNumericCategory(add_vectors[1]):
                    msg = add_vectors[1] + ' (used in --add_vectors)'
                    non_numeric_categories.append(msg)
            else:
                add_vectors.append(None)

    # terminate the program for the cases where a mapping field was not
    # found or when a mapping field didn't meet the criteria of being
    # numeric
    if offending_fields:
        sys.exit("Invalid field(s) '%s'; the valid field(s) are: '%s'"
                 % (', '.join(offending_fields), ', '.join(header)))
    if non_numeric_categories:
        sys.exit(("The following field(s): '%s' contain values that are not "
                  "numeric, hence not suitable for '--custom_axes' nor for "
                  "'--add_vectors'. Try the '--missing_custom_axes_values' "
                  "option to fix these values."
                  % ', '.join(non_numeric_categories)))

    # process the coordinates file first, preventing the case where the
    # custom axes are not in the coloring categories, i.e. in the --color_by
    # categories
    preprocessed_coords = preprocess_coords_file(coords_headers, coords_data,
                                                 coords_eigenvalues,
                                                 coords_pct, header,
                                                 mapping_data, custom_axes,
                                                 jackknifing_method,
                                                 compare_plots,
                                                 pct_variation_below_one)
    coords_headers = preprocessed_coords[0]
    coords_data = preprocessed_coords[1]
    coords_eigenvalues = preprocessed_coords[2]
    coords_pct = preprocessed_coords[3]
    coords_low = preprocessed_coords[4]
    coords_high = preprocessed_coords[5]
    clones = preprocessed_coords[6]

    # process the otu table after processing the coordinates to get custom
    # axes (when available) or any other change that occurred to the
    # coordinates
    preprocessed_otu_table = preprocess_otu_table(otu_sample_ids, otu_table,
                                                  lineages, coords_data,
                                                  coords_headers,
                                                  n_taxa_to_keep)
    otu_coords = preprocessed_otu_table[0]
    otu_table = preprocessed_otu_table[1]
    otu_lineages = preprocessed_otu_table[2]
    otu_prevalence = preprocessed_otu_table[3]
    lines = preprocessed_otu_table[4]

    # remove the columns in the mapping file that are not informative taking
    # into account the header names that were already authorized to be used
    # and take care of concatenating the fields for the && merged columns
    mapping_data, header = preprocess_mapping_file(mapping_data, header,
                                                   color_by_column_names,
                                                   not add_unique_columns,
                                                   clones=clones)

    # create the output directory before creating any other output
    if not isdir(output_dir):
        makedirs(output_dir)

    fp_out = open(join(output_dir, 'index.html'), 'w')
    fp_out.write(emperor_autograph + '\n')
    fp_out.write(EMPEROR_HEADER_HTML_STRING)

    # write the html file
    fp_out.write(format_mapping_file_to_js(mapping_data, header, header))

    # certain percents being explained cannot be displayed in the GUI
    try:
        fp_out.write(format_pcoa_to_js(
            coords_headers, coords_data, coords_eigenvalues, coords_pct,
            custom_axes, coords_low, coords_high,
            number_of_axes=number_of_axes,
            number_of_segments=number_of_segments))
    except EmperorLogicError as e:
        sys.exit(e.message)

    fp_out.write(format_taxa_to_js(otu_coords, otu_lineages, otu_prevalence))
    fp_out.write(format_vectors_to_js(mapping_data, header, coords_data,
                                      coords_headers, add_vectors[0],
                                      add_vectors[1]))
    fp_out.write(format_comparison_bars_to_js(coords_data, coords_headers,
                                              clones, serial_comparison))

    has_taxa = taxa_fp is not None
    has_input_coords = isdir(input_coords) and not compare_plots
    has_add_vectors = add_vectors != [None, None]
    has_clones = clones > 0
    fp_out.write(format_emperor_html_footer_string(has_taxa,
                                                   has_input_coords,
                                                   has_add_vectors,
                                                   has_clones))
    fp_out.close()
    copy_support_files(output_dir)

    # write the biplot coords in the output file if a path is passed
    if biplot_fp and taxa_fp:
        if biplot_fp.endswith('/') or isdir(biplot_fp):
            print("Do not specify a path to a new (path ending in a slash) "
                  "or existing directory for biplot_fp. The output file "
                  "will be a tab-delimited text file.")

        # make sure this file can be created
        try:
            fd = open(biplot_fp, 'w')
        except IOError:
            sys.exit("There was a problem creating the file with the "
                     "coordinates for the biplots (%s)." % biplot_fp)
        else:
            fd.writelines(lines)
            fd.close()
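# Hypothetical entry point, assuming main() is now called with an argparse
# namespace carrying the three required paths (all other options are
# hard-coded above). The flag names below are placeholders for illustration,
# not the original script's interface.
if __name__ == '__main__':
    from argparse import ArgumentParser
    _parser = ArgumentParser(description='Generate an Emperor plot')
    _parser.add_argument('-i', '--input_coords', required=True)
    _parser.add_argument('-m', '--map_fp', required=True)
    _parser.add_argument('-o', '--output_dir', required=True)
    main(_parser.parse_args())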