def test_mean_basic():
    obs = mean([100, 150, 200])
    exp = 150
    assert_equal(obs, exp)
    obs = mean([0, 2, 0, 2, 0, 2])
    exp = 1
    assert_equal(obs, exp)
def test_mean1():
    obs = mean([0, 200])
    exp = 100
    assert_equal(obs, exp)
    obs = mean([0, -200])
    exp = -100
    assert_equal(obs, exp)
    obs = mean([0])
    exp = 0
    assert_equal(obs, exp)
def test_complex():
    # Given that complex numbers are an unordered field,
    # the arithmetic mean of complex numbers is meaningless.
    num_list = [2 + 3j, 3 + 4j, -32 - 2j]
    obs = mean(num_list)
    exp = NotImplemented
    assert obs == exp
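# A minimal sketch, not taken from any of the snippets here, of a mean()
# that would satisfy the tests expecting 0 for an empty list, NotImplemented
# for complex input, and a TypeError for non-numeric or scalar input.
# It would NOT satisfy the variant below that expects a numeric complex mean.
def mean(num_list):
    if len(num_list) == 0:
        return 0
    if any(isinstance(n, complex) for n in num_list):
        return NotImplemented
    return sum(num_list) / len(num_list)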
def test_complex():
    # Complex numbers are an unordered field, but this variant expects
    # the component-wise arithmetic mean anyway.
    num_list = [2 + 3j, 3 + 4j, -32 - 2j]
    obs = mean(num_list)
    exp = -9 + 1.6666666666666667j
    assert obs == exp
def variance(numbers):
    mean = m.mean(numbers)
    xminus_mean = []
    for i in numbers:
        x = (i - mean)**2
        xminus_mean.append(x)
    return sum(xminus_mean) / (len(xminus_mean) - 1)
def variance(x):
    # population variance: average squared deviation from the mean
    m = mean(x)
    ret = 0.0
    for i in x:
        ret += (i - m)**2
    return ret / len(x)
def variance(x):
    m = len(x)
    res = 0.0
    res_mean = mean(x)
    for i in x:
        res = res + (i - res_mean)**2
    return res / m
def test_result():
    """Tests mean result."""
    number_list = [1, 2, 3]
    expected_mean = 2
    assert mean(number_list) == expected_mean
def variance(data):
    # Step 1: find the mean of the data.
    m = mean(data)
    # Step 2: subtract the mean from each value.
    sm = get_subtracted_mean(data, m)
    # Step 3: square each difference.
    sq = square(sm)
    # Step 4: find the sum of the squares.
    sum_sq = sum(sq)
    # Step 5: divide the sum by N to get the variance.
    return float(sum_sq) / float(len(data))
import sys

def variance(x):
    result = 0.0
    m = 0.0
    if x.size == 0:
        print("error")
        sys.exit()
    for value in x:
        result += (value - mean(x))**2
        m += 1
    return result / m
def remove_outliers(image):
    mean_value = mean.mean(image)
    min_max = image.getMinMax()
    stdev = standard_deviation.standard_deviation(image)
    min_value = mean_value - stdev
    max_value = mean_value + stdev
    if min_value < min_max[0]:
        min_value = min_max[0]
    if max_value > min_max[1]:
        max_value = min_max[1]
    return normalize_values.normalize_values(image, min_value, max_value)
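# An analogous numpy-only sketch of the same clipping idea; the real
# remove_outliers() above depends on project-specific mean, standard_deviation
# and normalize_values modules that are not shown here, so this is only an
# illustration, not the original behaviour.
import numpy as np

def clip_to_one_stdev(values):
    # clamp values to [mean - stdev, mean + stdev], never exceeding the
    # observed min/max of the data
    m, s = values.mean(), values.std()
    lo = max(m - s, values.min())
    hi = min(m + s, values.max())
    return np.clip(values, lo, hi)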
def mse(y, y_hat):
    # both inputs must be non-empty numpy arrays of the same shape
    if not isinstance(y, (np.ndarray, np.generic)) or y.size == 0:
        return None
    if not isinstance(y_hat, (np.ndarray, np.generic)) or y_hat.size == 0:
        return None
    if y.shape != y_hat.shape:
        return None
    res_n = np.zeros(y.shape)
    for i in range(y.size):
        res_n[i] += (y_hat[i] - y[i])**2
    return mean(res_n)
def main(dat):
    global alpha, nu, data, objects, people, results
    objects, people, data, training_data, indices = refine_data(dat)
    results = mean.mean(dat)
    for i in training_data:
        results[i] = 1.0
    results = {i: [1 - results[i], results[i]] for i in results.keys()}
    nu0, alpha0 = init_hyper()
    nu, alpha = nu0, alpha0
    '''main loop'''
    iteration = 0
    while True:
        #plot_pis(indices, iteration)
        iteration += 1
        print '\nIteration %d' % iteration
        presults = deepcopy(results)
        '''update hyperparameters'''
        print 'updating hyperparameters'
        N_current = [sum([results[i][j] for i in results.keys()]) for j in range(settings.nlabels)]
        for i in range(len(nu)):
            nu[i] = nu0[i] + N_current[i]
        N = Ns()
        for k in people:
            for j in range(settings.nlabels):
                for l in range(settings.nscores):
                    alpha[k][j][l] = alpha0[k][j][l] + N[k][j][l]
        print '\nUpdating results'
        count = 0
        for i in objects:
            if count % 1000 == 0:
                overprint('Updating results for object %s of %s' % (add_comma(count), add_comma(len(objects))))
            count += 1
            rhoi = [rho(i, j) for j in range(settings.nlabels)]
            if sum(rhoi) == 0:
                results[i] = [0.0 for j in range(settings.nlabels)]
            else:
                for j in range(settings.nlabels):
                    results[i][j] = rhoi[j] / sum(rhoi)
        print '\n'
        if check_convergence(presults, results):
            f = open(fname, 'w')
            pickle.dump(alpha, f)
            f.close()
            return {i: results[i][1] for i in objects}
def variance(x):
    # population variance with basic error handling
    np.seterr(all='raise')
    try:
        if len(x) == 0:
            return None
        m = mean(x)
        res = 0
        for elem in x:
            res += pow(elem - m, 2)
    except Exception:
        return None
    return res / len(x)

#X = np.array([0, 15, -9, 7, 12, 3, -21])
#print(variance(X))
#print(np.var(X))
#print(variance(X/2))
#print(np.var(X/2))
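# The variance() snippets above disagree on the denominator: the first divides
# by (n - 1) (sample variance), while the others divide by n (population
# variance). A quick way to see the difference, using numpy as a reference:
import numpy as np

data = [0, 15, -9, 7, 12, 3, -21]
print(np.var(data))          # population variance (ddof=0)
print(np.var(data, ddof=1))  # sample variance (ddof=1)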
def test_ints():
    num_list = [1, 2, 3, 4, 5]
    obs = mean(num_list)
    assert obs == 3
def test_empty():
    assert mean([]) == 0
def test_not_numbers():
    values = [2, "lolcats"]
    with pytest.raises(TypeError):
        out = mean(values)
def test_floating_mean1():
    obs = mean([1, 2])
    exp = 1.5
    assert_equal(obs, exp)
def test_empty(self):
    self.assertEqual(mean.mean([]), 0)
def test_double():
    # This one will fail in Python 2
    num_list = [1, 2, 3, 4]
    obs = mean(num_list)
    exp = 2.5
    assert obs == exp
def mse(y, y_hat):
    return mean((y_hat - y)**2)
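# Usage sketch for the one-line mse() above, assuming mean() accepts a numpy
# array; the arrays here are made up for illustration.
import numpy as np

y = np.array([1.0, 2.0, 3.0])
y_hat = np.array([1.5, 2.0, 2.0])
print(mse(y, y_hat))  # mean of [0.25, 0.0, 1.0], i.e. about 0.4167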
def testComplex():
    num_List = [3 + 2j, 4, 0]
    obs = mean(num_List)
    exp = NotImplemented
    assert obs == exp
def testZero():
    num_List = [0, 0, 0, 4]
    exp = 1
    obs = mean(num_List)
    assert obs == exp
def testNeg():
    num_List = [-1, -2, 3]
    exp = 0
    obs = mean(num_List)
    assert obs == exp
def test_single_int():
    with pytest.raises(TypeError):
        mean(1)
def test_long():
    big = 100000000
    obs = mean(range(1, big))
    exp = big / 2.0
    assert obs == exp
def test_mean_neg():
    obs = mean([-1, 1])
    exp = 0
    assert_equal(obs, exp)
def test_mean_float():
    obs = mean([0, 1, 2, 3, 4, 5])
    exp = 2.5
    assert_equal(obs, exp)
def testDouble():
    num_List = [np.pi, np.pi, np.pi]
    exp = np.pi
    obs = mean(num_List)
    assert obs == exp
def test_long():
    big = 100_000_000  # Python 3.6-ism
    obs = mean(range(1, big))
    exp = big / 2.0
    assert obs == exp
def testInts():
    num_List = [1, 2, 3, 4, 5]
    exp = 3
    obs = mean(num_List)
    assert obs == exp
def test_mean_ten_tenths():
    assert_almost_equal(mean([0.1] * 10), 0.1)
def test_int():
    num_list = [1, 2, 3, 4, 5]
    assert mean(num_list) == 3
import numpy as np
from mean import mean

X = np.array([0, 15, -9, 7, 12, 3, -21])
res = mean(X)
print(res)

X = np.array([0, 15, -9, 7, 12, 3, -21])
res = mean(X**2)
print(res)
def test_zero():
    num_list = [0, 2, 4, 6]
    assert mean(num_list) == 3
def main(data):
    global alpha, nu, results, objects, people, scores, labels
    objects, people, scores, labels = data
    results = mean.mean(data)
    training_planets = get_training()
    for i in training_planets:
        results[i] = 1.0
    results = {i: [1 - results[i], results[i]] for i in results.keys()}
    nu0, alpha0 = init_hyper(alphadict)
    nu, alpha = nu0, alpha0
    '''main loop'''
    iteration = 0
    while True:
        iteration += 1
        print '\nIteration %d' % iteration
        presults = deepcopy(results)
        '''update hyperparameters'''
        print 'updating hyperparameters'
        N_current = [sum([results[i][j] for i in results.keys()]) for j in range(settings.nlabels)]
        for i in range(len(nu)):
            nu[i] = nu0[i] + N_current[i]
        alpha = deepcopy(alpha0)
        for a in xrange(len(objects)):
            if a % 1000 == 0:
                overprint('processing line %s of %s' % (add_comma(a), add_comma(len(objects))))
            i = objects[a]
            k = people[a]
            l = scores[a]
            for j in range(settings.nlabels):
                alpha[k][j][l] += results[i][j]
        print '\nUpdating results'
        results = {i: [kappa(j) for j in range(settings.nscores)] for i in results.keys()}  # could change it to a defaultdict but this is probably clearer
        for a in xrange(len(objects)):
            if a % 1000 == 0:
                overprint('processing line %s of %s' % (add_comma(a), add_comma(len(objects))))
            i = objects[a]
            k = people[a]
            l = scores[a]
            for j in range(settings.nlabels):
                results[i][j] *= pi(k, j, l)
        #normalise
        for i in results.keys():
            N = sum(results[i])
            if N != 0.0:
                for j in range(settings.nlabels):
                    results[i][j] /= N
        print '\n'
        if check_convergence(presults, results):
            print 'algorithm converged'
            '''print 'calculating confusion matrices'
            final_pi={k:[[pi(k,j,l) for l in range(settings.nscores)] for j in range(settings.nlabels)] for k in people}
            f=open('pi_results.dat','w')
            pickle.dump(final_pi,f)
            f.close()'''
            f = open(settings.dir_name + fname, 'w')
            pickle.dump(alpha, f)
            f.close()
            return {i: results[i][1] for i in set(results.keys()) - training_planets}
def test3(self):
    inpu = [1, 2, 3, 4, 5]
    expMean = 3
    assert mean.mean(inpu) == expMean  # Test 3 Fails
def test_mean_tol():
    big = 10000000000000000000000000000000.
    obs = mean([big, 1])
    exp = big / 2
    tol = 0.00000000000000000000000001
    assert_almost_equal(obs, exp, tol)
out_labels.add(new_labels)
f = open(dir_name + "/source_transits_lc_details.dat", "w")
pickle.dump([source_labels, all_transits, light_curve_details], f)
f.close()
del sources  # free up some memory
print "\n"
print "total transits found: %s" % add_comma(len(out_objects))
f = open(dir_name + "/" + out, "w")
for i in range(len(out_objects)):
    f.write("%s,%s,%d,%s\n" % (out_objects[i], out_people[i], out_scores[i], out_labels[i]))
f.close()
print "running mean algorithm on transits to find most promising ones"
results = mean.mean([out_objects, out_people, out_scores, out_labels])
f = open(dir_name + "/transit_results.dat", "w")
pickle.dump(results, f)
f.close()
sources = {}
for tran in results.keys():
    if results[tran] > 0:  # this line may need to be edited
        source_id, light_curve_id, tran_id = tran.split("_")
        x, width = all_transits[light_curve_id + "_" + tran_id]
        url, release_id = light_curve_details[light_curve_id]
        if source_id not in set(sources.keys()):
            sources[source_id] = source(source_id, source_labels[source_id])
        lc = sources[source_id].get_light_curve(light_curve_id, url, release_id)
        lc.add_box(
def test_mean_zero():
    obs = mean([0, 0, 0, 0, 0, 0, 0, 0])
    exp = 0
    assert_equal(obs, exp)
def test_all_zeroes():
    obs = mean([0, 0, 0, 0])
    exp = 0
    assert_equal(obs, exp)
def test_double():
    num_list = [1, 2, 3, 4]
    obs = mean(num_list)
    exp = 2.5
    assert obs == exp
def test_mean_1234():
    num_list = [1, 2, 3, 4, 5]
    calc_mean = mean(num_list)
    expected_mean = 3
    assert expected_mean == calc_mean
def test_zero():
    num_list = [0, 2, 4, 6]
    obs = mean(num_list)
    exp = 3
    assert obs == exp