Code Example #1
def sortSubListsAndMedian(A):
    sortedList = []
    medianList = []
    for smallList in A:
        sortedList.append(sorted(smallList))
        medianList.append(statistics.median_low(smallList))
    return sortedList, medianList
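
A quick hedged usage check of the helper above (the sample lists are made up for illustration; the original file presumably has import statistics at the top):

import statistics

sorted_lists, medians = sortSubListsAndMedian([[3, 1, 2], [9, 7, 8, 6]])
print(sorted_lists)  # [[1, 2, 3], [6, 7, 8, 9]]
print(medians)       # [2, 7]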
Code Example #2
File: home_tags.py  Project: MicrobesNG/mngweb
def update_lims_project_stats():
    today = datetime.datetime.today()
    start_date = (today - datetime.timedelta(90))
    try:
        response = limsfm_request(
            'layout/project_api',
            'get',
            {
                'RFMmax': 1,
                'RFMsF1': 'data_sent_date',
                'RFMsV1': '>={}/{}'.format(start_date.month, start_date.year),
            })
        if response.status_code == 200:
            project_stats = {}
            wait_time_string = (response.json()['data'][0]
                                ['summary_list_wait_time_weeks'])
            wait_time_list = [int(i) for i in wait_time_string.splitlines()]
            project_stats['median_wait_time_weeks'] = median_low(
                wait_time_list
            )
            return project_stats
        else:
            return {}
    except Exception as e:
        return {}
Code Example #3
File: evaluate.py  Project: ag-gipp/Giveme5W1H
def stats_helper(list):
    """
    https://docs.python.org/3/library/statistics.html#statistics.pvariance
    :param list:
    :return:
    """

    mean = statistics.mean(list)
    mode = None

    try:
        mode = statistics.mode(list)
    except statistics.StatisticsError:
        # no unique mode
        pass

    return {
        'mean': mean,
        'variance': statistics.pvariance(list, mu=mean),
        'standard_deviation': statistics.pstdev(list, mu=mean),
        'median': statistics.median(list),
        'median_low': statistics.median_low(list),
        'median_high': statistics.median_high(list),
        'median_grouped': statistics.median_grouped(list),
        'mode': mode
    }
Code Example #4
File: metrics.py  Project: linanc/HCDD-Methods
def get_overall_stats(results_dict):
    average = statistics.mean(results_dict)
    median = statistics.median(results_dict)
    median_low = statistics.median_low(results_dict)
    median_high = statistics.median_high(results_dict)
    mode = calc_mode(results_dict)
    return {'avg': average, 'med': median, 'medlo': median_low, 'medhi': median_high, 'mode': mode}
Code Example #5
File: analysis.py  Project: jga/ballotbleach
def create_rating_histogram(ballots, rating_range, chart_title, image_save_path):
    """
    Generates a histogram (bar chart) image from ballot data for the subject rating.
    """
    all_values = list()
    rating_histogram = {'0': 0}
    for value in rating_range:
        rating_histogram[str(value)] = 0
    for ballot in ballots:
        if ballot.subject_rating and str(ballot.subject_rating) in rating_histogram:
            rating_histogram[str(ballot.subject_rating)] += 1
            all_values.append(ballot.subject_rating)
        else:
            rating_histogram['0'] += 1
            all_values.append(0)
    total_submissions = 0
    for rating in rating_histogram:
        total_submissions += rating_histogram[rating]
    sorted_ratings = OrderedDict(sorted(rating_histogram.items(),
                                        key=lambda rating_data: int(rating_data[0])))
    categories = list()
    values = list()
    for rating in sorted_ratings:
        categories.append(rating)
        values.append(round(sorted_ratings[rating] / total_submissions * 100))
    chart_tick_format = '%d%%'
    categories_with_none = [x if x != '0' else "None" for x in categories]
    summary_data = {
        'n': total_submissions,
        'average': round(mean(all_values), 1),
        'median': median_low(all_values)
    }
    create_category_bar_chart(image_save_path, categories_with_none, values, summary_data,
                              chart_title, chart_tick_format)
Code Example #6
File: data.py  Project: iJebus/CITS4406-Assignment2
 def __init__(self, values):
     values = [eval(i) for i in values]
     super().__init__(values)
     self.min = min(values)
     self.max = max(values)
     self.mean = Decimal(mean(values)).quantize(Decimal('.00000'))
     self.median_low = median_low(values)
     self.median = median(values)
     self.median_high = median_high(values)
Code Example #7
def quick_sort(mlist, p, r):
    if p < r:
        u = statistics.median_low([mlist[p], mlist[r], mlist[int((p + r) / 2)]])
        y = mlist.index(u)
        mlist[y], mlist[r] = mlist[r], mlist[y]
        q = partition(mlist, p, r)
        quick_sort(mlist, p, q - 1)
        quick_sort(mlist, q + 1, r)
    return mlist
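
quick_sort above swaps a median-of-three pivot (chosen with statistics.median_low) into position r and then calls a partition helper that is not shown in this listing. A hypothetical Lomuto-style partition consistent with that call, offered only as a sketch:

def partition(mlist, p, r):
    # assumes quick_sort has already swapped the pivot into mlist[r]
    pivot = mlist[r]
    i = p - 1
    for j in range(p, r):
        if mlist[j] <= pivot:
            i += 1
            mlist[i], mlist[j] = mlist[j], mlist[i]
    # place the pivot between the smaller and larger elements and return its index
    mlist[i + 1], mlist[r] = mlist[r], mlist[i + 1]
    return i + 1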
Code Example #8
def web_dataset_reader(url):
    '''
    :param url: Input a web address.
    :return: This algorithm returns a dataset and its descriptive statistics.
    Example:
    >>> url='http://robjhyndman.com/tsdldata/ecology1/hopedale.dat'
    >>> import web_dataset_reader_program as wbd
    >>> print(wbd.web_dataset_reader(url))
    >>> .
        .
        .
    '''
    from urllib import request as ur
    import web_skip_header_program as wh
    import statistics as st
    highvalue=0
    count=0
    with ur.urlopen(url) as webpage:
        first_data=wh.webpage_skip_head(webpage)
        first_data=first_data.split()
        dataset=first_data
        for data in webpage: # This reads the remaining lines of the webpage
            data=data.decode('utf-8')
            data=data.split()
            dataset+=data # For each iteration, the data transformed into
            # list append to dataset with first_data as its first list
            # #print(dataset)
            data_float=dataset
        for i in range(len(dataset)):
            data=float(dataset[i])
            data_float[i]=float(dataset[i]) # Elements in the list 'dataset' are transformed to
            #  float for additional operations such as min, max, range, sum
            count+= 1 # This counts the number of elements in the list
            highvalue =max(highvalue,data) # Zero is assigned to highvalue for its start value.
            #  The data changes for each loop and compared with each element in the list
        lowvalue = min(data_float)
        totalvalue =sum(data_float)
        rangevalue = highvalue - lowvalue
        #print(count)
        observation=len(dataset)
        average=totalvalue/observation
        mean=st.mean(data_float)
        median=st.median(data_float)
        median_high=st.median_high(data_float)
        median_low=st.median_low(data_float)
        #mode=st.mode(data_float)# If there is more than one mode, it will return Stat Error
        stdev=st.pstdev(data_float)
        variance=st.pvariance(data_float)
    return print('The Dataset in List form is:\n',data_float,'\n',
              '=============================================\n','            DIAGNOSTIC ANALYTICS             \n',
                 '=============================================\n','The Descriptive Statistics of the Dataset are:',
                 '\nTotal:\t\t{3}\nMaximum:\t{0}\nMinimum:\t{1}\nRange:\t\t{2}\nAverage:\t{5:.2f}\nMedian:\t\t{6}'
              '\nMedianHigh:\t{7}\nMedian Low:\t{8}\nVariance:\t{10:.2f}\nStd.Dev:\t{9:.2f}\nCounts:\t\t{4}'
              .format(highvalue,lowvalue,rangevalue,totalvalue,observation,mean,median,median_high,
                      median_low,stdev,variance),'\n','=============================================')
Code Example #9
File: experiments.py  Project: abrosen/thesis
def collectStartingMedians():
    global seed
    for _ in range(100000):
        random.seed(seed)
        s.setupSimulation(numNodes=1000, numTasks=100000)
        loads = [len(x.tasks) for x in s.nodes.values()]  # this won't work once the network starts growing
        # print(sorted(loads))
        median = statistics.median_low(loads)
        with open("data/working/medians.txt", "a") as medians:
            medians.write(str(median) + "\n")
        print(median)
        seed += 1
Code Example #10
File: metrics.py  Project: linanc/HCDD-Methods
def connected_components(matrix, author_to_int):
	#convert matrix to adj_list
	v = len(matrix)
	adj_list = convert_matrix_to_adj_list(matrix, v)
	#using dfs to find connected components
	metavisited, list_of_ints = [False for _ in range(v)], []
	for vertex in range(v):
		if not metavisited[vertex]:
			ret = dfs(metavisited, vertex, adj_list)
			list_of_ints.append(ret[0])
			metavisited = ret[1]
	largest = largest_connected(list_of_ints)
	max_cc_size, max_cc, sizes = largest[0], largest[1], sorted(largest[2])

	#convert ints into authors
	inverse_author_dict = {v: k for k, v in author_to_int.items()}
	list_of_strings, max_cc_string = [], []
	for cc in list_of_ints:
		string_cc = [inverse_author_dict[i] for i in cc]
		if cc == max_cc:
			max_cc_string = string_cc
		list_of_strings.append(string_cc)

	#statistics of cc sizes
	average = statistics.mean(sizes)
	median = statistics.median(sizes)
	median_low = statistics.median_low(sizes)
	median_high = statistics.median_high(sizes)
	mode = calc_mode(sizes)
	#density per connected component
	d_per_component = density_per_component(list_of_ints, matrix, author_to_int)
	#saving info into a text file
	with open('connected_components.txt', 'w') as f:
		f.write('Connected authors: ' + '\n')
		for cc in list_of_strings:
			f.write('\n' + str(len(cc)) + '   ' + str(cc) + '\n')
			f.write('Density : ' + str(d_per_component[str(cc)]) + '\n')
		f.write('\n')
		f.write('Connected components: ' + '\n')
		for cc in list_of_ints:
			f.write(str(len(cc)) + '   ' + str(cc) + '\n')
		f.write('\n')
		f.write('Number of connected components: ' + str(len(list_of_ints)) + '\n')
		f.write('Largest component size: ' + str(max_cc_size) + '\n')
		f.write('Largest connected component: ' + str(max_cc_string) + '\n')
		f.write('Component sizes: ' + str(sizes) + '\n')
		f.write('Average component size: ' + str(average) + '\n')
		f.write('Median: ' + str(median) + '\n')
		f.write('Low median: ' + str(median_low) + '\n')
		f.write('High median: ' + str(median_high) + '\n')
		f.write('Mode: ' + str(mode))
	pickle.dump(adj_list, open("adj_list.p", "wb"))
	pickle.dump(list_of_ints, open("list_of_ints.p", "wb"))
Code Example #11
File: plotter.py  Project: abrosen/thesis
def plotLoads():
    s = Simulator()
    seed = 500
    loads = []
    for _ in range(20):
        random.seed(seed)
        s.setupSimulation(numNodes=1000,numTasks=1000000)
        loads = loads + [len(x.tasks) for x in s.nodes.values()]
        seed += 1
    n, bins, patches = plt.hist(loads, 25, normed =1 )
    plt.xlabel('Tasks Per Node')
    plt.ylabel('Probability')
    plt.axvline(statistics.median_low(loads), color='r', linestyle='--')
    plt.show()
Code Example #12
import statistics as stats  # the snippet below calls everything via the stats alias


def main():
    print(stats.mean(range(6)))
    print(stats.median(range(6)))
    print(stats.median_low(range(6)))
    print(stats.median_high(range(6)))
    print(stats.median_grouped(range(6)))
    try:
        print(stats.mode(range(6)))
    except Exception as e:
        print(e)
    print(stats.mode(list(range(6)) + [3]))
    print(stats.pstdev(list(range(6)) + [3]))
    print(stats.stdev(list(range(6)) + [3]))
    print(stats.pvariance(list(range(6)) + [3]))
    print(stats.variance(list(range(6)) + [3]))
Code Example #13
File: instructions.py  Project: dessibelle/legoleif
    def read(self):
        arr = np.array(self.image)

        nrows, ncols = arr.shape
        h_blocks = median_low(self.factors(ncols))
        v_blocks = median_low(self.factors(nrows))
        block_width = ncols // h_blocks
        block_height = nrows // v_blocks

        print("Colors\n{}".format("=" * 6))
        colors = self.image.getcolors()
        for num, color_idx in colors:
            print("{}: {} blocks".format(color_idx, num))

        heading = "{}x{} blocks".format(ncols, nrows)
        print("\n" + heading + "\n" + "=" * len(heading))
        print(self.format_portion_blocks(arr, block_width, block_height))

        for i in range(0, h_blocks):
            for j in range(0, v_blocks):
                heading = "{},{} ({}x{} blocks)".format(i + 1, j + 1, block_width, block_height)
                print("\n" + heading + "\n" + "=" * len(heading))
                p = self.get_portion(arr, i * block_width, j * block_height, block_width, block_height)
                print(self.format_portion(p))
Code Example #14
def select(k, A):

    bigList = chunks(A, 5)

    subSorted, medians = sortSubListsAndMedian(bigList)

    # This can't be done recursively. Stack overflow.
    # If you get it working that way, please create a pull-request.
    medianPivot = statistics.median_low(medians)

    smaller, equal, larger = partition(medianPivot, A)

    if k <= len(smaller):
        return select(k, smaller)

    if k <= (len(smaller) + len(equal)):
        return medianPivot

    return select(k - (len(smaller) + len(equal)), larger)
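
The comment above notes that the pivot step was not made recursive. For reference, a minimal sketch of the classic recursive median-of-medians pivot, assuming the same chunks, partition and sortSubListsAndMedian helpers that select uses (chunks and partition are not shown in this listing); the recursion depth is only logarithmic, so a stack overflow is unlikely for realistic inputs:

def select_with_recursive_pivot(k, A):
    # small lists are sorted directly; k is 1-based, as in select above
    if len(A) <= 5:
        return sorted(A)[k - 1]
    _, medians = sortSubListsAndMedian(chunks(A, 5))
    # true median of the medians, found by recursing on the much shorter medians list
    medianPivot = select_with_recursive_pivot((len(medians) + 1) // 2, medians)
    smaller, equal, larger = partition(medianPivot, A)
    if k <= len(smaller):
        return select_with_recursive_pivot(k, smaller)
    if k <= len(smaller) + len(equal):
        return medianPivot
    return select_with_recursive_pivot(k - len(smaller) - len(equal), larger)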
Code Example #15
 def __init__(self, values, stdDevs): 
     new_values = []
     for i in values:
         if i != '':
             try:
                 if "." in i:
                     new_values.append(float(i))
                 else:
                     new_values.append(int(i))
             except:
                 pass #already picked up by error checks
     values = new_values
     super().__init__(values)
     self.stDevOutliers = []
     standardDeviations = Decimal(stdDevs)
     if len(values) >= 8:
         self.pval = mstats.normaltest(array(values))[1]
     else:
         self.pval = 100
     self.min = min(values)
     self.max = max(values)
     self.mean = Decimal(mean(values)).quantize(Decimal('.00000'))
     self.median_low = median_low(values)
     self.median = median(values)
     self.median_high = median_high(values)
     self.stdev = Decimal(stdev(values)).quantize(Decimal('.00'))
     self.normDist = 'No'
     if(self.pval > 0.055):
         self.normDist = 'Yes'
     elif self.pval == 100:
         self.normDist = 'N/A'
     if self.normDist == 'Yes':
         outlier_count = 0
         for x, value in enumerate(values):
             if value < (self.mean - standardDeviations * self.stdev) or \
             value > (self.mean + standardDeviations * self.stdev):  
                 if outlier_count > max_Outliers:
                     self.stDevOutliers = ">%d outliers" % max_Outliers
                     break
                 self.stDevOutliers.append("Row: %d Value: %s" % (x, value))
                 outlier_count += 1
Code Example #16
def fill_missing_attributes( instances ):

    print( "Filling in missing data...." )
    instance_ex = instances[0]
    attr_medians = []
    for idx in range( len(instance_ex) ):
        attr_median = statistics.median_low([
                instance[idx] for instance in instances if instance[idx] != '?' ])
        attr_medians.append( attr_median )

    filled_instances = []

    for instance in instances:
        if '?' in instance:
            indices = [i for i, x in enumerate(instance) if x == "?"] # since multiple attributes could be missing
            for idx in indices:
                instance[idx] = attr_medians[ idx ]
        filled_instances.append( instance )

    print( "....complete. starting next step...")
    return filled_instances
Code Example #17
 def __init__(self, values, stdDevs):
     standardDeviations = stdDevs 
     new_values = []
     for i in values:
         if i != '':
             try:
                 new_values.append(float(i))
             except:
                 pass #already picked up in error checks
     values = new_values
     super().__init__(values)
     self.stDevOutliers = []
     if len(values) >= 8:
         self.pval = mstats.normaltest(array(values))[1]
     else:
         self.pval = 100
     if self.mode != 'N/A':
         self.mode = self.int_to_sci(self.mode)
     self.min = self.int_to_sci(min(values))
     self.max = self.int_to_sci(max(values))
     self.mean = self.int_to_sci(mean(values))
     self.median_low = self.int_to_sci(median_low(values))
     self.median = self.int_to_sci(median(values))
     self.median_high =  self.int_to_sci(median_high(values))
     self.stdev = self.int_to_sci(stdev(values))
     self.normDist = 'No'
     if(self.pval < 0.055):
         self.normDist = 'Yes'
     elif(self.pval == 100):
         self.normDist = 'N/A'
     if self.normDist == 'Yes':
         outlier_count = 0
         for x, value in enumerate(values):
             if value < (float(self.mean) - standardDeviations * float(self.stdev)) or \
             value > (float(self.mean) + standardDeviations * float(self.stdev)):               
                 if outlier_count > max_Outliers:
                     self.stDevOutliers = ">%d outliers" % max_Outliers
                     break
                 self.stDevOutliers.append("Row: %d Value: %s" % (x, value))
                 outlier_count += 1
Code Example #18
import statistics

score = [30, 40, 60, 70, 80, 90]
print(statistics.mean(score))
print(statistics.harmonic_mean(score))
print(statistics.median(score))
print(statistics.median_low(score))
print(statistics.median_high(score))

import time

print(time.time())

t = time.time()
print(time.ctime(t))

import time
now = time.localtime()
print("%d년 %d월 %d일" % (now.tm_year, now.tm_mon, now.tm_mday))
print("%d:%d;%d" % (now.tm_hour, now.tm_min, now.tm_sec))

import time

start = time.time()
for a in range(1000):
    print(a)
end = time.time()
print(end - start)

print("안녕하세요.")
time.sleep(1)
Code Example #19
percent = [
    2.606255012, 1.222935044, 1.283079391, 3.628708901, 6.856455493,
    4.911788292, 2.886928629, 0.781876504, 0.962309543, 2.265437049,
    6.816359262, 3.688853248, 3.468323978, 5.633520449, 4.530874098,
    1.984763432, 0.922213312, 3.327987169, 4.190056135, 5.493183641,
    1.864474739, 10.60545309, 2.425821973, 2.726543705, 8.740978348,
    6.174819567
]

percent.sort()
print(percent)

#these fail with a NameError because median() and friends were not imported
#print(median(percent))
#print(median_low(percent))
#print(median_high(percent))

#this succeeds
import statistics
print(statistics.median(percent))
print(statistics.median_low(percent))
print(statistics.median_high(percent))

#this also succeeds
##from statistics import *
##print(median(percent))
##print(median_low(percent))
##print(median_high(percent))
Code Example #20
File: gromacs_myscript.py  Project: burbol/scripts
i2 = 0
for slice2 in range(0,200):
    ymean = input[slice2]
    popt, pcov = curve_fit(func, x, ymean,[1000, 3, 1])
    k = func(0,*popt)
    if k > 800:
        densmean2[i2] = k
        i2 = i2 + 1

        
print('length of densmap2', len(densmean2))
plt.plot(zcoord,densmean,'k.')
import statistics
print(statistics.mean(densmean2))
print(statistics.median(densmean2))
print(statistics.median_low(densmean2))
print(statistics.median_high(densmean2))
#print(statistics.mode(densmean2))

# <codecell>

## DELETE THIS CELL, IT GIVES ONLY HOW MANY TIMES DOES EACH VALUE OF densmean APPEAR -> NOT USEFUL
from collections import Counter
c = Counter(densmean)  
#print densmean
#list(c)
c += Counter()   
#print c.most_common()       # n least common elements 
#print c[-3.2638837653955745e-17]
#sum(c.values())  
Code Example #21
    def __init__(self, pi, gpio, measurement_time=120):

        self.pi = pi
        self.gpio = gpio
        self.period = 1 / 910 * 1000000
        self.tick_high = None
        self.duty_cycle = None
        self.duty_scale = 1000
        self.list_duty_cycles = []
        self.duty_cycle_min = None
        self.duty_cycle_max = None

        #http://abyz.me.uk/rpi/pigpio/python.html#set_mode
        self.pi.set_mode(gpio=self.gpio, mode=pigpio.INPUT)

        #http://abyz.me.uk/rpi/pigpio/python.html#callback
        self.cb = self.pi.callback(user_gpio=self.gpio,
                                   edge=pigpio.EITHER_EDGE,
                                   func=self.cbf)

        print('{}{}{}'.format('Starting measurements for: ', measurement_time,
                              ' seconds.'))
        print('----------------------------------------------------------')
        time.sleep(measurement_time)

        #stop the callback before sorting the list so that no new elements get added unintentionally
        #http://abyz.me.uk/rpi/pigpio/python.html#callback
        self.cb.cancel()
        time.sleep(1)

        self.list_duty_cycles = sorted(self.list_duty_cycles)

        #some analysis of the duty cycle values
        sorted_set = list(sorted(set(self.list_duty_cycles)))
        print('{} {}'.format('Ascending sorted distinct duty cycle values:',
                             sorted_set))
        print('----------------------------------------------------------')
        differences_list = [
            sorted_set[i + 1] - sorted_set[i]
            for i in range(len(sorted_set) - 1)
        ]
        rounded_differences_list = [
            round(differences_list[i], 2)
            for i in range(len(differences_list) - 1)
        ]
        counted_sorted_list = collections.Counter(rounded_differences_list)
        print('{} {}'.format(
            'Ascending counted, sorted and rounded distinct differences between duty cycle values:',
            counted_sorted_list))
        print('----------------------------------------------------------')

        #median_high/median_low are chosen because the biggest and smallest
        #values are needed, not an average of the smallest and biggest
        #values of the selection.
        #https://docs.python.org/3/library/statistics.html#statistics.median
        print('{} {}'.format('Smallest 250 values:',
                             self.list_duty_cycles[:250]))
        self.duty_cycle_min = statistics.median_high(
            self.list_duty_cycles[:20])
        print('----------------------------------------------------------')
        print('{} {}'.format('Biggest 250 values:',
                             self.list_duty_cycles[-250:]))
        self.duty_cycle_max = statistics.median_low(
            self.list_duty_cycles[-20:])
        print('----------------------------------------------------------')

        print('duty_cycle_min:', round(self.duty_cycle_min, 2))

        print('duty_cycle_max:', round(self.duty_cycle_max, 2))
Code Example #22
File: D.py  Project: shuyzn/AtCoder_Omeikan6F
import statistics

N, K = list(map(int, input().split()))

park = []
for i in range(N):
    park_row = list(map(int, input().split()))
    park.append(park_row)

ans = 10 ** 9

if N - K == 0:
    ans_list = []
    for i in park:
        for j in i:
            ans_list.append(j)
    ans = statistics.median_low(ans_list)

else:
    for i in range(0, N-K + 1):
        for j in range(0, N-K+1):
            area = [l[i:K+i] for l in park[j:K+j]]
            flat_area = sum(area, [])
            temp = statistics.median_low(flat_area)
            if ans > temp:
                ans = temp


print(int(ans))
Code Example #23
import matplotlib.pyplot as plt
# aliases below are inferred from how they are used later in this snippet
import numpy as np
import statistics as st
import pandas as pd
from scipy import stats as sc

dane = np.loadtxt("MDR_RR_TB_burden_estimates_2020-10-29.csv", delimiter=',', skiprows=1, usecols=12)
# print(dane)

print(np.max(dane))
print(dane.max())
print(np.median(dane))

dane1 = np.loadtxt("Wzrost.csv", delimiter=',', skiprows=0)
# print(dane1)

print("odchylenie standardowe:", st.stdev(dane1))
print("odchylenie standardowe 2", st.pstdev(dane1))
print("median high:", st.median_high(dane1))
print("median low:", st.median_low(dane1))
print("pstdev:", st.pstdev(dane1))
print("stdev:", st.stdev(dane1))
print("pvariance:", st.pvariance(dane1))
print("variance:", st.variance(dane1))
print("mode:", st.mode(dane1))

print(sc.ttest_1samp(dane, 0))

df = pd.read_csv("brain_size.csv", delimiter=";", skiprows=0)
# print(df)

print(np.mean(df['VIQ']))
print(df['Gender'].value_counts())

X = np.arange(0, len(df.index))
Code Example #24
import math
from itertools import groupby
import statistics
from collections import OrderedDict
with open("google.in", "r") as infile, open("solution.txt", "w") as outfile:
    cases = int(infile.readline())
    for case in range(0, cases):
        counter = 0
        stringamount = int(infile.readline())
        string = []
        for s in range(0, stringamount):
            s = infile.readline().strip()
            string.append(["".join(grp) for num, grp in groupby(s)])
        for numb in range(0, len(string[0])):
            temp = []
            if len(string[0]) != len(string[1]):
                counter = "Fegla Won"
                break
            for num in range(0, stringamount):
                temp.append(string[num][numb])
            if temp[0][0] != temp[1][0]:
                counter = "Fegla Won"
                break
            temp2 = []
            for thing in temp:
                temp2.append(len(thing))
            median = statistics.median_low(temp2)
            for number in temp2:
                counter += abs(number - median)
        outfile.write("Case #" + str(case + 1) + ": " + str(counter) + "\n")
Code Example #25
File: settings.py  Project: benwhalley/signalbox
try:
    from statistics import median_low, mean, stdev
except ImportError:
    from simplestats import median_low, mean, stdev


SCORESHEET_FUNCTION_NAMES = "sum mean min max stdev median".split(" ")

SCORESHEET_FUNCTION_LOOKUP = {
    'min': lambda x: round(min(x), 0),
    'max': lambda x: round(max(x), 0),
    'sum': sum,
    'mean': mean,
    'stdev': stdev,
    'median': lambda x: round(median_low(x), 0),
}
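
A small hedged usage sketch of the lookup table above (the score list is made up for illustration):

scores = [2, 4, 7, 9]
for name in SCORESHEET_FUNCTION_NAMES:
    print(name, SCORESHEET_FUNCTION_LOOKUP[name](scores))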

DATE_INPUT_FORMATS = ('%d/%m/%Y', )

DATETIME_INPUT_FORMATS = ('%d/%m/%Y %H:%M', )


# SIGNALBOX
USER_PROFILE_FIELDS = [
    # this is the list of possible fields in the user profile
    # list in the order in which they should appear in the form
    # note, you can't simply add fields here - they must also be
    # defined on the UserProfile model.
    'landline',
    'mobile',
    'site',
Code Example #26
File: Intro.py  Project: aaa121/Python22
seh=3j+3+2.3j
print(seh)
print(type(seh))

my_name='Akinwsnde Atanda'
print(my_name)

saw=user.append(3)
print(saw)

def Take_home(hours, tax):
    monthly_pay=4*hours*15.10
    tax_deduct=monthly_pay*(tax/100)
    Net_pay=monthly_pay-tax_deduct
    return Net_pay
Wande=Take_home(20,16.5)
print('$',Wande)

import statistics
statistics.mean(user)

statistics.median(user)

statistics.median_low(user)

statistics.pstdev(user)



Code Example #27
File: leader.py  Project: adsharma/raft
    async def on_response_received(
        self,
        message: ResponseMessage,
        test_original_message: Optional[AppendEntriesMessage] = None,
    ):
        original_message = None
        if self._server._outstanding_index is not None:
            if message.id in self._server._outstanding_index:
                original_message = self._server._outstanding_index[message.id]
                num_entries = len(original_message.entries)
            else:
                logger.warn(f"Can't find message id: {message.id}")
                return self, None
        else:
            num_entries = 0
            if test_original_message:
                original_message = test_original_message
                num_entries = len(test_original_message.entries)

        # Was the last AppendEntries good?
        if not message.response:
            # No, so lets back up the log for this node
            if num_entries == 0:
                # Need to backup by at least 1
                num_entries = 1
                # Get the next log entry to send to the client.
                previousIndex = max(0, self._nextIndex[message.sender] - 1)
            else:
                # Get the next log entry to send to the client.
                previousIndex = original_message.prev_log_index

            logger.debug(
                f"Backing up {message.sender} by {num_entries} to {previousIndex}"
            )
            self._nextIndex[message.sender] = previousIndex
            return self, None
        else:
            if num_entries > 0 and original_message:
                if message.role == ResponseMessage.Role.FOLLOWER:
                    # The last append was good so increase their index.
                    self._matchIndex[message.sender] = (
                        original_message.prev_log_index + num_entries)
                    self._nextIndex[message.sender] = max(
                        self._nextIndex[message.sender],
                        self._matchIndex[message.sender] + 1,
                    )
                    logger.debug(f"Advanced {message.sender} by {num_entries}")
                elif message.role == ResponseMessage.Role.LEARNER:
                    self._learnerIndex[message.sender] = (
                        original_message.prev_log_index + num_entries)
                    self._nextIndex[message.sender] = max(
                        self._nextIndex[message.sender],
                        self._learnerIndex[message.sender] + 1,
                    )
                    logger.debug(
                        f"Learner: Advanced {message.sender} by {num_entries}")
                new_commit_index = statistics.median_low(
                    self._matchIndex.values())
                if (self._server._log[new_commit_index].term
                        == self._server._currentTerm
                        and new_commit_index > self._server._commitIndex):
                    self._server._commitIndex = new_commit_index
                    async with self._server._condition:
                        self._server._condition.notify_all()

        return self, None
Code Example #28
def createTedMetaFile(path, metaDataLocation, laughDataLocation):

    #the file that stores the laugh meta data
    lf = open(laughDataLocation, "w")
    #the file that stores all the meta data for the files
    wf = open(metaDataLocation, "w")
    wf.writelines("Number of files: " + str(len(os.listdir(path))) +
                  " minus broken files\n\n")

    numFiles = 0

    firstLaughs = []
    firstLaughsPercent = []
    laughCounters = [0]
    avgLaughCount = 0
    highestLaughs = 0
    linesToWrite = []
    numWithoutLaughs = 0
    metadataCollection = []

    posLengths = []
    negLengths = []

    for filename in os.listdir(path):
        fileToCheck = path + filename
        md = getMetaDataForFile(fileToCheck)
        metadataCollection.append(md)

        if md.name != "":
            numFiles += 1
            linesToWrite.append(md.toString() + "\n")

            if md.firstLaughAt != -1:
                firstLaughs.append(md.firstLaughAt)
                firstLaughsPercent.append(md.firstLaughAt / md.wordCount)
                posLengths.append(md.wordCount)
            else:
                numWithoutLaughs += 1
                negLengths.append(md.wordCount)

            if md.numLaughs > highestLaughs:
                for i in range(md.numLaughs - highestLaughs):
                    laughCounters.append(0)

                highestLaughs = md.numLaughs

            laughCounters[md.numLaughs] += 1
            avgLaughCount += md.numLaughs

    linesToWrite.sort()
    firstLaughs.sort()
    firstLaughsPercent.sort()
    negLengths.sort()
    posLengths.sort()

    lf.writelines("Number with laughs: " + str(numFiles - numWithoutLaughs) +
                  "\n")
    lf.writelines("Number without Laughs: " + str(numWithoutLaughs) + "\n\n")

    lf.writelines("Average Laugh Count: " +
                  str(avgLaughCount / (numFiles - numWithoutLaughs)) + "\n")
    lf.writelines("Average Laugh Location: " +
                  str(statistics.mean(firstLaughs)) + "\n")
    lf.writelines("Average Laugh Location by Percent: " +
                  str(statistics.mean(firstLaughsPercent)) + "\n\n")

    lf.writelines("standard deviation of first laugh location: " +
                  str(statistics.stdev(firstLaughs)) + "\n")
    lf.writelines("standard deviation of first laugh percent : " +
                  str(statistics.stdev(firstLaughsPercent)) + "\n\n")

    lf.writelines("Median Low Laugh Location: " +
                  str(statistics.median_low(firstLaughs)) + "\n")
    lf.writelines("Median Low Laugh Location by Percent: " +
                  str(statistics.median_low(firstLaughsPercent)) + "\n\n")

    lf.writelines("Laugh Counts: \n")
    for i in range(len(laughCounters)):
        lf.writelines("    " + str(i) + ": " + str(laughCounters[i]) + "\n")

    lf.writelines("\nLaugh Locations: \n")
    for fl in firstLaughs:
        lf.writelines("    " + str(fl))

    lf.writelines("\nLaugh Locations by percent down: \n")
    for fl in firstLaughsPercent:
        lf.writelines("    " + str(fl))

    lf.writelines("\nWord count for non laugh scripts\n")
    for line in negLengths:
        lf.writelines("    " + str(line))

    for line in linesToWrite:
        wf.writelines(line)
    wf.close()
    lf.close()

    lengths = posLengths + negLengths
    lengths.sort()

    return (metadataCollection, lengths)
Code Example #29
 def update_event(self, inp=-1):
     self.set_output_val(0, statistics.median_low(self.input(0)))
Code Example #30
import statistics
lista1 = [3,5,3,12,13,3,7,8]
lista2 = [3,3,3,5,7,8,12,13]
print(statistics.median(lista1))
print(lista1)
print(statistics.median_low(lista1))
print(statistics.median(lista2))
print(lista2)
print(statistics.median_low(lista2))
print(statistics.median_high(lista1))
print(lista2[(statistics.median_high(lista2))])

l2 = ["bla", "aga"]
l2[0] = 0
print(l2)
Code Example #31
# using Python statistics functions
import statistics
import csv
import array

# simple statistics operations
sample_data1 = [1, 3, 5, 7]
sample_data2 = [2, 3, 5, 4, 3, 5, 3, 2, 5, 6, 4, 3]

# TODO: Use the mean function - calculates an average value
print(statistics.mean(sample_data1))

# TODO: Use the different median functions
print(statistics.median(sample_data1))
print(statistics.median_high(sample_data1))
print(statistics.median_low(sample_data1))
print(statistics.median_grouped(sample_data1))

# TODO: The mode function indicates which data item occurs
# most frequently
print(statistics.mode(sample_data2))


# Read data from a CSV file and calculate statistics
def readData():
    with open("StockQuotes.csv") as dataFile:
        data = array.array('f', [])

        reader = csv.reader(dataFile)
        curLine = 0
        for row in reader:
Code Example #32
scipy.stats.gmean(y)
print(
    f'Geometric mean computed with scipy.stats.gmean(): {scipy.stats.gmean(y)}'
)

#Median
n = len(x)
if n % 2:
    median_ = sorted(x)[round(0.5 * (n - 1))]
else:
    x_ord, index = sorted(x), round(0.5 * n)
    median_ = 0.5 * (x_ord[index - 1] + x_ord[index])
print(f'Median calculation: {median_}')
median_2 = statistics.median(x)
print(f'Median computed with statistics.median(): {median_2}')
statistics.median_low(x[:-1])
print(
    f'Median computed with statistics.median_low: {statistics.median_low(x[:-1])}'
)
statistics.median_high(x[:-1])
print(
    f'Median computed with statistics.median_high: {statistics.median_high(x[:-1])}'
)
median_2 = np.median(y)
print(f'Median computed with np.median: {median_2}')

#Mode
u = [2, 3, 2, 8, 12]
mode_ = max((u.count(item), item) for item in set(u))[1]
print(f'Mode calculation: {mode_}')
mode_2 = statistics.mode(u)
Code Example #33
'''
Created on 2016. 12. 8.

@author: lsh
'''

import statistics as sta

data = [1, 2, 3, 4, 5, 6]

print(sta.mean(data))
print(sta.median(data))
print(sta.median_low(data))
print(sta.median_high(data))

print(sta.pvariance(data))
print(sta.pstdev(data))

print(sta.variance(data))
print(sta.stdev(data))
Code Example #34
import statistics as st 
x = [3,1.5,4.5,6.75,2.25,5.75,2.25]

print("_________Question1__________")

print(st.mean(x))
print(st.harmonic_mean(x))
print(st.median(x))
print(st.median_low(x))
print(st.median_high(x))
print(st.median_grouped(x))
print(st.mode(x))
print(st.pstdev(x))
print(st.pvariance(x))
print(st.stdev(x))
print(st.variance(x))


print("_________Question2__________")
import random

print(random.random())
print(random.randrange(10))
print(random.choice(["Ali","Khalid","Hussam"]))
print(random.sample(range(1000),10))
print(random.choice("Orange Academy"))

y = [1,5,8,9,2,4]
random.shuffle(y)
print(y)
Code Example #35
import statistics

#Data
data = ([5,10,15,20,25,30,35])
print("data =",data)

#AVERAGES AND MEASURES OF CENTRAL LOCATION:
#Arithmetic mean of data:
print("data mean =", statistics.mean(data))

#Middle value (median) of data:
print("data median =",statistics.median(data))

#Low median: always a member of the data set. When the number of data points is odd, the middle value is returned. When it is even,
#the smaller of the two middle values is returned:
print("data low median =",statistics.median_low(data))
#Example with a dataset with even number of data points:
print("even data set = ", (1,3,5,7))
print("even data set low median =",statistics.median_low([1,3,5,7]))

#High median: when the number of points in the data set is odd, this function returns the middle value. When the number of data points
#is even, it returns the higher of the two middle values.
print("data high median =",statistics.median_high(data))
#Example with a dataset with even number of data points:
print("even data set high median =",statistics.median_high([1,3,5,7]))

#MEASURES OF SPREAD:
#Sample standard deviation
print("data standard deviation =", statistics.stdev(data))

#Variance
Code Example #36
File: datalib_xtract.py  Project: zackw/tbbscraper
 def median_low(self):
     return stats.median_low(self.values())
Code Example #37
import statistics
from sys import stdin
message = stdin.readline().strip()
N = len(message)
rc = []

for i in range(1, N + 1):
    if N % i == 0:
        rc.append(i)

N_rc = len(rc)

if N_rc % 2 == 1:
    r = statistics.median(rc)
    c = statistics.median(rc)
else:
    r = statistics.median_low(rc)
    c = statistics.median_high(rc)

matrix = [[0 for _ in range(r)] for _ in range(c)]
for i in range(c):
    for j in range(r):
        matrix[i][j] = message[(i * r) + j]

for i in range(r):
    for j in range(c):
        print(matrix[j][i], end="")
Code Example #38
m2 = {}
l1 = []
l2 = []
while n <= end:
    c += 1

    h = intHash32(n)
    #Extract left most 12 bits
    x1 = (h >> 20) & 0xfff
    m1[x1] = 1
    z1 = m - len(m1)
    #Linear counting formula
    u1 = int(m * math.log(float(m) / float(z1)))
    e1 = abs(100*float(u1 - c)/float(c))
    l1.append(e1)
    print("%d %d %d %f" % (n, c, u1, e1))

    #Extract right most 12 bits
    x2 = h & 0xfff
    m2[x2] = 1
    z2 = m - len(m2)
    u2 = int(m * math.log(float(m) / float(z2)))
    e2 = abs(100*float(u2 - c)/float(c))
    l2.append(e2)
    print("%d %d %d %f" % (n, c, u2, e2))

    n += 1

print("Left 12 bits error: min=%f max=%f avg=%f median=%f median_low=%f median_high=%f" % (min(l1), max(l1), stat.mean(l1), stat.median(l1), stat.median_low(l1), stat.median_high(l1)))
print("Right 12 bits error: min=%f max=%f avg=%f median=%f median_low=%f median_high=%f" % (min(l2), max(l2), stat.mean(l2), stat.median(l2), stat.median_low(l2), stat.median_high(l2)))
Code Example #39
import statistics

data = [14, 3, 11, 133, 4]

result = statistics.median_low(data)

print(result)
Code Example #40
print("Arithmetischer Mittelwert: ", statistics.mean(probe1))
# Mitterlwert = mean = Summe der Werte geteilt durch ihre Anzahl
print("Harmonischer Mittelwert: ", statistics.harmonic_mean(probe1))
# harmonischer Mitterlwert = mean = Summe der Werte geteilt durch ihre Kehrwerte
print()

# Median
# Mitte der Zahlenmenge
print("Median: ", statistics.median(probe1))
probe2 = [5, 2, 4, 17, 3]
print("Median: ", statistics.median(probe2))
print()

# Unterer Median
# Mitte der Zahlenmenge, low wertet nach unteren Wert ab
print("Unterer Median: ", statistics.median_low(probe1))
print("Unterer Median: ", statistics.median_low(probe2))
print()

# Oberer Median
# Mitte der Zahlenmenge, high wertet nach oberen Wert auf
print("Oberer Median: ", statistics.median_high(probe1))
print("Oberer Median: ", statistics.median_high(probe2))
print()

# Tupel, Werte eines Dictionary
probe3 = (5, 2, 4, 17)
print("aus Tupel: ", statistics.mean(probe3))
probe4 = {'D': 5, 'NL': 2, 'CH': 4, 'F': 17}
print("aus Dictionary: ", statistics.mean(probe4.values()))
Code Example #41
File: ptable.py  Project: ajprax/ptable
def ptable(headers,
           *rows,
           max_width=200,
           str=str,
           str_by_type={},
           justification=()):
    """
    Make an easily readable table.

    :param headers: iterable of header values
    :param rows: iterables of column contents (must all be the same length as headers)
    :param max_width: maximum number of columns for the table (including margins and separators)
    :param str: function to convert items to strings
    :param str_by_type: dictionary mapping types to custom str functions to be used for those types
                        used in preference over default str except for headers which always use default str
    :return: a string containing the table
    """
    headers = [str(h).split("\n") for h in headers]
    rows = [[(str_by_type.get(type(c)) or str)(c).split("\n") for c in row]
            for row in rows]
    assert len(set(len(row) for row in rows) | {len(headers)}
               ) == 1, "headers and rows must have same number of columns"
    # 2 chars padding on the left, 3 between each column, 2 on the right
    available_width = max_width - 2 - (len(headers) - 1) * 3 - 2
    assert available_width >= len(
        headers
    ), "must provide enough width for at least one character per column"
    # use the max and median widths of each column to see if any need to be squeezed to fit the overall max_width
    col_width_maxes = []
    col_width_medians = []
    # zip(*rows) transposes to columns
    # zip_longest in case there are no rows
    for header, col in zip_longest(headers, zip(*rows), fillvalue=()):
        widths = [max(len(line) for line in r) for r in col]
        widths.append(max(len(line) for line in header))
        col_width_maxes.append(max(widths))
        col_width_medians.append(median_low(widths))
    col_widths = col_width_maxes

    if sum(col_width_maxes) > available_width:
        # reduce the column with the greatest difference between max and median width by one repeatedly until it fits
        diffs = {
            i: d
            for i, d in enumerate(
                mx - md for mx, md in zip(col_width_maxes, col_width_medians))
        }
        to_chop = defaultdict(int)
        while sum(col_width_maxes) - sum(to_chop.values()) > available_width:
            i, _ = max(diffs.items(), key=itemgetter(1))
            diffs[i] -= 1
            to_chop[i] += 1
        for i, tc in to_chop.items():
            col_widths[i] -= tc
        headers = [_squeeze(h, col_widths[i]) for i, h in enumerate(headers)]
        rows = [[_squeeze(c, col_widths[i]) for i, c in enumerate(row)]
                for row in rows]
        # recalculate the max width after the squeeze and use the lesser of that and the current width to avoid
        # whitespace at the end of wrapped lines
        for i, (header, col) in enumerate(
                zip_longest(headers, zip(*rows), fillvalue=())):
            widths = [max(len(line) for line in r) for r in col]
            widths.append(max(len(line) for line in header))
            col_widths[i] = min(col_widths[i], max(widths))

    out = ""
    header_height = max(len(h) for h in headers)
    for header in headers:
        header.extend([""] * (header_height - len(header)))
    for line in zip(*headers):
        out += "| {} |\n".format(" | ".join(
            just(col_line, col_widths[i]) for i, (just, col_line) in enumerate(
                zip_longest(justification, line, fillvalue=ljust))))
    out += "|"
    for just, cw in zip_longest(justification, col_widths, fillvalue=ljust):
        # markdown justification markers
        l = ":" if just in (ljust, cjust) else " "
        r = ":" if just in (cjust, rjust) else " "
        out += "{}{}{}|".format(l, "-" * cw, r)
    out += "\n"
    for row in rows:
        row_height = max(len(c) for c in row)
        for col in row:
            col.extend([""] * (row_height - len(col)))
        for line in zip(*row):
            out += "| {} |\n".format(" | ".join(
                just(col_line, col_widths[i])
                for i, (just, col_line) in enumerate(
                    zip_longest(justification, line, fillvalue=ljust))))
    return out
Code Example #42
File: statisticsDemo.py  Project: vvotman/pythonstudy
__author__ = 'luowen'

""" the statisic demo """

import statistics

list1 = [1, 2, 3, 4, 5, 6]

midNum = statistics.mean(list1)
print(midNum)  # get the average data of list1

medianNum = statistics.median(list1)
print(medianNum)  # get median data of list1

medianNumLow = statistics.median_low(list1)
print(medianNumLow)  # get the low median of list1

medianNumHigh = statistics.median_high(list1)
print(medianNumHigh)  # get the high median of list1

medianNumGroup = statistics.median_grouped(list1, 10)
print(medianNumGroup)  # get detail information from https://docs.python.org/3/library/statistics.html

Code Example #43
import statistics as st  # alias inferred from the st.median_low call below


def solve(nums):
    x = st.median_low(nums)
    return sum(abs(x - n) for n in nums)
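
A brief hedged check of solve (the input list is made up): the low median minimizes the total absolute distance, so this reports the cost of moving every value onto it.

print(solve([1, 2, 9, 10]))  # pivot st.median_low(...) == 2, so 1 + 0 + 7 + 8 = 16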
Code Example #44
File: experiments.py  Project: abrosen/thesis
def runTrials(
    strategy,
    homogeneity,
    workMeasurement,
    networkSize,
    jobSize,
    churn,
    adaptationRate,
    maxSybil,
    sybilThreshold,
    numSuccessors,
):
    global seed
    """
    if seed < 13545:
        seed += variables.trials
        return
    """
    times = []
    idealTimes = []
    medianLoads = []
    stdDevs = []
    workPerTickList = []
    hardestWorkers = []

    inputs = "{:<15} {:<15} {:<15} {:5d} {:8d} {:8.6f} {: 4d} {: 4d} {: 4.2f} {: 4d} {:6d}".format(
        strategy,
        homogeneity,
        workMeasurement,
        networkSize,
        jobSize,
        churn,
        adaptationRate,
        maxSybil,
        sybilThreshold,
        numSuccessors,
        seed,
    )

    with open("data/working/results" + start + ".txt", "a") as f:
        f.write(inputs)
        f.write("\n")
    print(inputs)

    for _ in range(variables.trials):

        random.seed(seed)

        s.setupSimulation(
            strategy=strategy,
            homogeneity=homogeneity,
            workMeasurement=workMeasurement,
            numNodes=networkSize,
            numTasks=jobSize,
            churnRate=churn,
            adaptationRate=adaptationRate,
            maxSybil=maxSybil,
            sybilThreshold=sybilThreshold,
            numSuccessors=numSuccessors,
        )

        loads = [len(x.tasks) for x in s.nodes.values()]  # this won't work once the network starts growing
        # print(sorted(loads))
        medianNumStartingTasks = statistics.median_low(loads)
        medianLoads.append(medianNumStartingTasks)
        stdDevOfLoad = statistics.pstdev(loads)
        stdDevs.append(stdDevOfLoad)
        # variance
        # variance over time
        idealTime = s.perfectTime
        idealTimes.append(idealTime)

        """
        x = s.nodeIDs
        y = [len(s.nodes[q].tasks) for q in s.nodeIDs]
        plt.plot(x,y, 'ro')
        plt.show()
        """
        numTicks, hardestWorker = s.simulate()
        times.append(numTicks)
        hardestWorkers.append(hardestWorker)

        slownessFactor = numTicks / idealTime
        averageWorkPerTick = jobSize / numTicks
        workPerTickList.append(averageWorkPerTick)

        results = "{:8d} {:8.3f} {:7.3f} {:6d} {:10.3f} {:10.3f} {:8d}".format(
            numTicks, idealTime, slownessFactor, medianNumStartingTasks, stdDevOfLoad, averageWorkPerTick, hardestWorker
        )

        with open("data/working/results" + start + ".txt", "a") as f:
            f.write(results)
            f.write("\n")
        print(results)

        seed += 1
    avgTicks = sum(times) / len(times)
    avgIdealTime = sum(idealTimes) / len(idealTimes)
    avgSlowness = avgTicks / avgIdealTime

    avgMedianLoad = sum(medianLoads) / len(medianLoads)
    # stdOfMedians = statistics.pstdev(medianLoads)
    avgStdDev = sum(stdDevs) / len(stdDevs)

    avgAvgWorkPerTick = sum(workPerTickList) / len(workPerTickList)
    avgHardestWork = sum(hardestWorkers) / len(hardestWorkers)

    outputs = "{:10.2f} {:8.3f} {:7.3f} {:10.3f} {:10.3f} {:10.2f} {:8.2f}".format(
        avgTicks, avgIdealTime, avgSlowness, avgMedianLoad, avgStdDev, avgAvgWorkPerTick, avgHardestWork
    )

    with open("data/working/averages" + start + ".txt", "a") as averages:
        averages.write(inputs + " " + outputs + "\n")
Code Example #45
                                       category,
                                       evaluation=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=1,
                                          num_workers=0,
                                          shuffle=False,
                                          drop_last=False,
                                          pin_memory=True)

pointsList_train = []
pointsList_test = []
for ind, data in enumerate(train_loader):
    conditioned_input = data['points_tgt']
    print("object id:", ind + 1, "sample points:", conditioned_input.shape[1])
    pointsList_train.append(conditioned_input.shape[1])
print(stat.median_low(pointsList_train))

num_bins = 50

fig, ax = plt.subplots()

for ind, data in enumerate(test_loader):
    conditioned_input = data['points_tgt']
    print("object id:", ind + 1, "sample points:", conditioned_input.shape[1])
    pointsList_test.append(conditioned_input.shape[1])

# the histogram of the data
n, bins, patches = ax.hist([pointsList_train, pointsList_test],
                           num_bins,
                           histtype='bar',
                           label=['train', 'test'],
Code Example #46
print(len(x),len(y))

peak = 49.9378768165 + 1399.2004138
#print(statistics.mean(y))
#print(statistics.stdev(y))
for val in y:
	if val >peak:
		z.append(val)

print(len(z))

import matplotlib.pyplot as plt
import numpy as np
spread = z
flier_high = [statistics.median_high(z)]
flier_low = [statistics.median_low(z)]

print(flier_high, flier_low)

data = np.concatenate((spread, flier_high, flier_low), 0)

plt.boxplot(data,1)
plt.figure()
plt.show()
'''

for i in range(1,len(x)):
	x[i-1] = float(x[i])-float(x[i-1])

ll = []
mm = []
Code Example #47
File: ch8-1.py  Project: nobu5563/hangman
import statistics

data = [14, 3, 11, 133, 4]
print(statistics.median_low(data))
Code Example #48
# question1
import statistics
import cubed

list = [1, 22, 79, 12, 6, 40, 208]

print(statistics.median_low(list))

#question2

result = cubed.func(input("Enter a number: "))

print(result)
Code Example #49
# Lets see about Statistics module in python

# statistics module provides the functions to mathematical statistics of numeric data.

# statistics.median_low( ) is used to return the low middle value from the list.

# NOTE: When there are two middle values, the lower of the two is returned.

# Here is an example program for statistics.median_low( ):

import statistics

List = [1, 3, 6, 5, 10, 15, 20, 30, 25, 32]

LowMedian = statistics.median_low(List)

print('\nList value is :\n', List)

print('\nLow Median value of the List is :', LowMedian)
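
To illustrate the NOTE above about two middle values, a small hedged addition contrasting the even and odd cases:

print(statistics.median([1, 3, 5, 7]))      # 4.0 -> mean of the two middle values
print(statistics.median_low([1, 3, 5, 7]))  # 3   -> lower of the two middle values
print(statistics.median_low([1, 3, 5]))     # 3   -> odd count, the single middle value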
Code Example #50
def createTedMetaFile(path, laughDataLocation):

    #the file that stores the laugh meta data
    lf = open(laughDataLocation, "w")

    numFiles = 0

    firstLaughs = []
    firstLaughsPercent = []
    laughCounters = [0]
    avgLaughCount = 0
    highestLaughs = 0
    linesToWrite = []
    numWithoutLaughs = 0
    metadataCollection = []
    totalParas = 0
    totalWords = 0

    posLengths = []
    negLengths = []

    for filename in os.listdir(path):
        fileToCheck = path+filename
        md = getMetaDataForFile(fileToCheck)
        metadataCollection.append(md)

        if md.name != "":
            numFiles += 1
            linesToWrite.append(md.toString() + "\n")

            if md.firstLaughAt != -1:
                firstLaughs.append(md.firstLaughAt)
                firstLaughsPercent.append(md.firstLaughAt/md.wordCount)
                posLengths.append(md.wordCount)
            else:
                numWithoutLaughs += 1
                negLengths.append(md.wordCount)

            if md.numLaughs > highestLaughs:
                for i in range(md.numLaughs - highestLaughs):
                    laughCounters.append(0)

                highestLaughs = md.numLaughs

            laughCounters[md.numLaughs] += 1
            avgLaughCount += md.numLaughs
            totalParas += md.paraCount
            totalWords += md.wordCount

    linesToWrite.sort()
    firstLaughs.sort()
    firstLaughsPercent.sort()
    negLengths.sort()
    posLengths.sort()

    lf.writelines("Number of files with laughs: " + str(numFiles - numWithoutLaughs) + "\n")
    lf.writelines("Number of files without Laughs: " + str(numWithoutLaughs) + "\n\n")

    lf.writelines("Average Paragraphs: " + str(totalParas/numFiles) + "\n")
    lf.writelines("Average Words: " + str(totalWords/numFiles) + "\n\n")

    lf.writelines("Total Paragraphs: " + str(totalParas) + "\n")
    lf.writelines("Total Words: " + str(totalWords) + "\n\n")

    lf.writelines("Total Laugh Count: " + str(avgLaughCount) + "\n")
    lf.writelines("Average Laugh Count (only for files with laughter): " + str(avgLaughCount/(numFiles - numWithoutLaughs)) + "\n")
    lf.writelines("Average Laugh Location: " + str(statistics.mean(firstLaughs)) + "\n")
    lf.writelines("Average Laugh Location by Percent: " + str(statistics.mean(firstLaughsPercent)) + "\n\n")

    lf.writelines("standard deviation of first laugh location: " + str(statistics.stdev(firstLaughs)) + "\n")
    lf.writelines("standard deviation of first laugh percent : " + str(statistics.stdev(firstLaughsPercent)) + "\n\n")

    lf.writelines("Median Low Laugh Location: " + str(statistics.median_low(firstLaughs)) + "\n")
    lf.writelines("Median Low Laugh Location by Percent: " + str(statistics.median_low(firstLaughsPercent)) + "\n\n")

    lf.writelines("Laugh Counts: \n")
    for i in range(len(laughCounters)):
        lf.writelines("    " + str(i) + ": " + str(laughCounters[i]) + "\n")

    lf.writelines("\nLaugh Locations: \n")
    for fl in firstLaughs:
        lf.writelines("    " + str(fl))

    lf.writelines("\nLaugh Locations by percent down: \n")
    for fl in firstLaughsPercent:
        lf.writelines("    " + str(fl))

    lf.writelines("\nWord count for non laugh scripts\n")
    for line in negLengths:
        lf.writelines("    " + str(line))

    lf.close()

    lengths = posLengths + negLengths
    lengths.sort()

    return (metadataCollection, lengths)
Code Example #51
# Input : arr[] = {13.5, 14.5, 14.8, 15.2, 16.1}
# Output : 14.7707
# =============================================================================

#harmonic mean
print("Harmonic Mean")
print(statistics.harmonic_mean([2.5, 3, 10]))
print("Median")
#median
print(statistics.median([1, 3, 4, 5]))

#median high
print(statistics.median_high([1, 3, 5, 7]))

#median high
print(statistics.median_low([1, 3, 5, 7]))

print("Statistics Grouped Median...")
print(statistics.median_grouped([52, 52, 53, 54]))

#mode
print(statistics.mode(["red", "blue", "blue", "red", "green", "red", "red"]))
print(statistics.mode([1, 1, 2, 2, 2]))

data = [4, 5, 7, 1, 3, 6, 7]
print(statistics.stdev(data))
print(statistics.variance(data))

import numpy as np
#std
a = np.array([[0.10, 0.14, 0.18], [0.01, 0.112, 0.018]])