def test_ace():
    """Spot-check the ACE richness estimator against precomputed values."""
    cases = [
        (np.array([2, 0]), 1.0),
        (np.array([12, 0, 9]), 2.0),
        (np.array([12, 2, 8]), 3.0),
        (np.array([12, 2, 1]), 4.0),
        (np.array([12, 1, 2, 1]), 7.0),
        (np.array([12, 3, 2, 1]), 4.6),
        (np.array([12, 3, 6, 1, 10]), 5.62749672),
        # Just returns the number of OTUs when all are abundant.
        (np.array([12, 12, 13, 14]), 4.0),
        # Border case: only singletons and 10-tons, no abundant OTUs.
        ([0, 1, 1, 0, 0, 10, 10, 1, 0, 0], 9.35681818182),
    ]
    for counts, expected in cases:
        assert_almost_equal(ace(counts), expected)
def mercat_compute_alpha_beta_diversity(counts, bif):
    """Compute alpha-diversity metrics for *counts* and write them to disk.

    Results go to ``<bif>_diversity_metrics.txt``, one ``name = value``
    line per metric, in the order listed below.
    """
    metrics = {
        'shannon': skbio_alpha.shannon(counts),
        'simpson': skbio_alpha.simpson(counts),
        'simpson_e': skbio_alpha.simpson_e(counts),
        'goods_coverage': skbio_alpha.goods_coverage(counts),
        'fisher_alpha': skbio_alpha.fisher_alpha(counts),
        'dominance': skbio_alpha.dominance(counts),
        'chao1': skbio_alpha.chao1(counts),
        'chao1_ci': skbio_alpha.chao1_ci(counts),
        'ace': skbio_alpha.ace(counts),
    }
    with open(bif + "_diversity_metrics.txt", 'w') as dmptr:
        for name, value in metrics.items():
            dmptr.write(name + " = " + str(value) + "\n")
def test_ace_only_rare_singletons():
    """ace() must raise ValueError when all rare OTUs are singletons."""
    counts = [0, 0, 43, 0, 1, 0, 1, 42, 1, 43]
    with assert_raises(ValueError):
        ace(counts)
def alpha_diversity(args):
    """Print per-sample alpha-diversity metrics from a JSON-format BIOM file.

    Our counts data in the biomfile is per OTU (rows), NOT per sample
    (columns) as the metrics require, so the matrix is transposed first.

    Parameters
    ----------
    args : namespace with an ``in_file`` attribute
        ``args.in_file`` is the path to the JSON-format BIOM file.

    Prints a tab-separated table, one row per sample: observed richness,
    ACE, chao1, Shannon and Simpson. Exits the process if the file
    cannot be opened.
    """
    # Narrowed from a bare `except:` which also swallowed e.g.
    # KeyboardInterrupt; OSError covers missing/unreadable files.
    # `with` guarantees the handle is closed even if json.load raises.
    try:
        with open(args.in_file, 'r') as json_data:
            data = json.load(json_data)
    except OSError:
        print("NO FILE FOUND ERROR")
        sys.exit()

    # Build the OTU x sample matrix, then transpose to sample x OTU.
    A = np.zeros(shape=(len(data['rows']), len(data['columns'])))
    for i, counts in enumerate(data['data']):
        A[i] = counts
    Y = np.transpose(A.astype(int))  # ensure integer counts

    def _try_metric(metric, row, fallback='error'):
        # Best-effort: report 'error' for a failing metric instead of
        # aborting the whole table (preserves original behavior, but
        # no longer catches KeyboardInterrupt/SystemExit).
        try:
            return metric(row)
        except Exception:
            return fallback

    print("Dataset\tobserved richness\tACE\tchao1\tShannon\tSimpson")
    for i, row in enumerate(Y):
        ds = data['columns'][i]['id']
        row = row.tolist()
        ace_v = _try_metric(alpha.ace, row)
        chao1_v = _try_metric(alpha.chao1, row)
        osd = _try_metric(alpha.osd, row, ['error'])
        simpson_v = _try_metric(alpha.simpson, row)
        shannon_v = _try_metric(alpha.shannon, row)
        print(ds + "\t" + str(osd[0]) + "\t" + str(ace_v) + "\t" + str(
            chao1_v) + "\t" + str(shannon_v) + "\t" + str(simpson_v))
def test_ace_only_rare_singletons(self):
    """ace() must raise ValueError when all rare OTUs are singletons."""
    counts = [0, 0, 43, 0, 1, 0, 1, 42, 1, 43]
    # Callable form of assertRaises: equivalent to the context-manager use.
    self.assertRaises(ValueError, ace, counts)
def alpha_diversity(args):
    """Print per-sample alpha-diversity metrics from a JSON-format BIOM file.

    Our counts data in the biomfile is per OTU (rows), NOT per sample
    (columns) as the metrics require, so the matrix is transposed first.

    Parameters
    ----------
    args : namespace with an ``in_file`` attribute
        ``args.in_file`` is the path to the JSON-format BIOM file.

    Prints a tab-separated table, one row per sample: observed richness,
    ACE, chao1, Shannon and Simpson. Exits the process if the file
    cannot be opened.
    """
    # Narrowed from a bare `except:` which also swallowed e.g.
    # KeyboardInterrupt; OSError covers missing/unreadable files.
    # `with` guarantees the handle is closed even if json.load raises.
    try:
        with open(args.in_file, 'r') as json_data:
            data = json.load(json_data)
    except OSError:
        print("NO FILE FOUND ERROR")
        sys.exit()

    # Build the OTU x sample matrix, then transpose to sample x OTU.
    A = np.zeros(shape=(len(data['rows']), len(data['columns'])))
    for i, counts in enumerate(data['data']):
        A[i] = counts
    Y = np.transpose(A.astype(int))  # ensure integer counts

    def _try_metric(metric, row, fallback='error'):
        # Best-effort: report 'error' for a failing metric instead of
        # aborting the whole table (preserves original behavior, but
        # no longer catches KeyboardInterrupt/SystemExit).
        try:
            return metric(row)
        except Exception:
            return fallback

    print("Dataset\tobserved richness\tACE\tchao1\tShannon\tSimpson")
    for i, row in enumerate(Y):
        ds = data['columns'][i]['id']
        row = row.tolist()
        ace_v = _try_metric(alpha.ace, row)
        chao1_v = _try_metric(alpha.chao1, row)
        osd = _try_metric(alpha.osd, row, ['error'])
        simpson_v = _try_metric(alpha.simpson, row)
        shannon_v = _try_metric(alpha.shannon, row)
        print(ds + "\t" + str(osd[0]) + "\t" + str(ace_v) + "\t" + str(
            chao1_v) + "\t" + str(shannon_v) + "\t" + str(simpson_v))