Example No. 1
def query_data(data):
    data.sort(key=lambda p: p.price)  # sort purchases by price (operator.attrgetter('price') would also work)

    high_purchase = data[-1]
    print('the highest price is ${:,}'.format(int(high_purchase.price)))

    low_purchase = data[0]
    print('the lowest price is ${:,}'.format(int(low_purchase.price)))

    # average price of house?
    prices = [
        p.price
        for p in data
    ]
    avg_price = statistics.mean(prices)
    print('The average home price is ${:,}'.format(int(avg_price)))


    two_bed_homes = (
        p
        for p in data
        if announce(p, '2-bedrooms, found {}'.format(p.beds)) and p.beds == 2
    )

    homes = []
    for h in two_bed_homes:
        if len(homes) > 5:
            break
        homes.append(h)

    avg_price = statistics.mean((announce(p.price, 'price') for p in homes))
    avg_baths = statistics.mean((p.baths for p in homes))
    avg_sqft = statistics.mean((p.sq__ft for p in homes))
    print('Average 2-bedroom home is ${:,}, baths={}, sq ft={:,}'
          .format(int(avg_price), round(avg_baths,1), round(avg_sqft, 1)))
Example No. 2
def average_distribution(distributions):
    return TerrainDistribution(
        mean([x.normalized_weights()[0] for x in distributions]),
        mean([x.normalized_weights()[1] for x in distributions]),
        mean([x.normalized_weights()[2] for x in distributions]),
        mean([x.normalized_weights()[3] for x in distributions])
    )
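A slightly tighter equivalent would call normalized_weights() only once per distribution and average column-wise. A minimal sketch, assuming normalized_weights() returns exactly the four weights used above and that TerrainDistribution accepts them positionally:

from statistics import mean

def average_distribution(distributions):
    # one normalized_weights() call per distribution
    weight_rows = [d.normalized_weights() for d in distributions]
    # zip(*rows) transposes the rows into four columns, one per weight slot
    return TerrainDistribution(*(mean(col) for col in zip(*weight_rows)))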
Example No. 3
def main():
    dbcon = {}
    conf = ConfigParser.ConfigParser()
    conf.read('/etc/eservices/iostat_hdd.cfg')


    dbcon.update({'dbhost': conf.get("mysql", "host")})
    dbcon.update({'dbuser': conf.get("mysql", "user")})
    dbcon.update({'dbpasswd': conf.get("mysql", "passwd")})
    dbcon.update({'dbbase': conf.get("mysql", "base")})
    dbcon.update({'hdd': conf.get("hdd", "hdd")})
    dbcon.update({'service_id': conf.get("main", "service_id")})

    io = []
    for i in dbcon['hdd'].split(','):
        print "Check disk:", i
        io.append(IoWait(i))
        print "in main", io
    print "io", st.mean(map(float, io))
    print "Int", int(st.mean(map(float, io)))
    try:
        print "in update"
        con = mdb.connect(dbcon['dbhost'], dbcon['dbuser'], dbcon['dbpasswd'], dbcon['dbbase'], charset='utf8')
        cur = con.cursor()
        cur.execute("UPDATE ma_internet_v2.nginx_cur_load SET io_load = %s WHERE external_service_id = %s",
                   (int(st.mean(map(float, io))), dbcon['service_id'],))
        con.commit()

    except mdb.Error, e:
        logging.error("Error %d: %s", e.args[0], e.args[1])
        print "Error %d: %s" % (e.args[0], e.args[1])
Example No. 4
def show_result2():
    fd_list = db.session.query(Price_History).all()

    # Some simple statistics for sample questions
    GPDALC = []
    GPWALC = []
    MSWALC = []
    MSDALC = []
    for el in fd_list:
        if(el.SCHOOL=='GP'):
            GPWALC.append(el.WALC)
            GPDALC.append(el.DALC)
        elif(el.SCHOOL=='MS'):
            MSWALC.append(el.WALC)
            MSDALC.append(el.DALC)
        else:
            print("School error")

    mean_GPWALC = statistics.mean(GPWALC)
    mean_GPDALC = statistics.mean(GPDALC)

    mean_MSWALC = statistics.mean(MSWALC)
    mean_MSDALC = statistics.mean(MSDALC)

    # Prepare data for google charts
    data = [['GP School Workday Alcohol Consumption', mean_GPDALC], ['GP School Weekend  Alcohol Consumption', mean_GPWALC],
            ['MS School Workday Alcohol Consumption', mean_MSDALC], ['MS School Weekend Alcohol Consumption', mean_MSWALC]]
    return render_template('result2.html', data=data)
Example No. 5
	def process_result(self, t_frame, r_frame):
		print(t_frame, r_frame) 
		
		try:
			stat_const = float(2.776)

			res2 = [] # frame transmission 
			res3 = [] # throughput 
			for i in range(int(self.T[0])):
				# frame transmission
				res2.append(t_frame[i]/r_frame[i])
				res3.append(self.F * r_frame[i] / self.R)

			# print(res2, res3)

			avg_res2 = statistics.mean(res2)
			sd2 = statistics.stdev(res2)
			dif2 = sd2/math.sqrt(int(self.T[0]))*stat_const
			upper_bound2 = avg_res2 + dif2 
			lower_bound2 = avg_res2 - dif2 

			avg_res3 = statistics.mean(res3)
			sd3 = statistics.stdev(res3)
			dif3 = sd3/math.sqrt(int(self.T[0]))*stat_const
			upper_bound3 = avg_res3 + dif3
			lower_bound3 = avg_res3 - dif3 

		except ZeroDivisionError: 
			return float("inf"), float("inf"), float("inf"), 0, 0, 0

		return avg_res2, lower_bound2, upper_bound2, avg_res3, lower_bound3, upper_bound3 
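The hard-coded 2.776 above is the two-sided 95% t critical value for 4 degrees of freedom (i.e. 5 samples). A minimal helper for the same half-width computation, assuming the caller supplies the sample list and a matching critical value:

import math
import statistics

def confidence_interval(samples, t_critical):
    # mean +/- t * s / sqrt(n), with s the sample standard deviation
    avg = statistics.mean(samples)
    half_width = t_critical * statistics.stdev(samples) / math.sqrt(len(samples))
    return avg - half_width, avg + half_width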
Example No. 6
    def bootstrap_test(self, nsamples=100, noise=0.2):
        """Returns mean and std. dev. of successful recognitions."""
        boot = {}
        for vec, pat in zip(self.pattern_vectors, self.patterns):
            boot[pat] = {"closest": [], "iterations": [], "full_matches": [], "accuracy": []}
            for sample in range(nsamples):
                recalled, noisy, iterations = self.recall_noisy(vec, noise=noise)
                self.show_pattern(noisy, "{}_{}_noisy_{}".format(
                    noise, pat, sample))
                self.show_pattern(recalled,      "{}_{}_recalled_{}".format(
                    noise, pat, sample))

                # equal to any patterns?
                matches = {}
                full_match = None
                for vec2, pat2 in zip(self.pattern_vectors, self.patterns):
                    matches[pat2] = list(vec2[0] == recalled[0]).count(True)
                    if matches[pat2] == vec2.size:
                        full_match = pat2

                boot[pat]["iterations"].append(iterations)
                boot[pat]["full_matches"].append(full_match)
                boot[pat]["closest"].append(pat == max(matches, key=matches.get))
                boot[pat]["accuracy"].append(matches[pat] / vec.size)
            boot[pat]["iterations"] = (mean(boot[pat]["iterations"]), stdev(boot[pat]["iterations"]))
            boot[pat]["accuracy"] = (mean(boot[pat]["accuracy"]), stdev(boot[pat]["accuracy"]))

            # count the non-None full matches; "closest" holds booleans, so count the True hits
            count_matches = lambda l: sum(1 for f in l if f is not None)

            boot[pat]["full_matches"] = count_matches(boot[pat]["full_matches"])
            boot[pat]["closest"] = boot[pat]["closest"].count(True)
        return boot
Example No. 7
    def sample(self, borrowers, threshold, n_iterations=1000, eps=0.0001, target=None):
        """

        :param borrowers: list of borrower (and information about them)
        :type borrowers: list[Borrower]

        :param threshold: big losses threshold
        :type threshold: float

        :param n_iterations: number of simulations
        :type n_iterations: int

        :return:
        """
        weights_matrix, independent_weight, losses, vitality = self.get_parameters(borrowers)
        res = []
        iteration = 0
        for iteration in range(n_iterations):
            res.append(self.one_loss(weights_matrix, independent_weight, losses, vitality, threshold))
            if iteration > 100 and target is not None and abs(target - mean(res)) < eps:
                break
            elif iteration > 100 and (max(res) - min(res)) / (iteration ** 0.5) < eps:
                break
        print("TwoStepSampler break after {} iterations".format(iteration))

        return mean(res)
Example No. 8
    def good_stdev(self, current_offer):
        if self.counter < 5:
            return False

        # array of utilities the opponent would get for their offer
        past_utils = [self.utility_for(x) for x in self.opponent_offers]
        old_stdev = statistics.stdev(past_utils)
        old_mean = statistics.mean(past_utils)

        if past_utils[-1] < self.penalty:
            return False

        new_utils = []
        # filter outliers (2 standard deviations above or below)
        for u in past_utils:
            if old_mean - 2*old_stdev < u < old_mean + 2*old_stdev:
               new_utils.append(u)

        if len(new_utils) < 2:
            return False

        # if the utility we get for the offer is greater than the mean + 1 std dev, then return True
        offer_utility = self.utility_for(current_offer)
        new_stdev = statistics.stdev(new_utils)
        new_mean = statistics.mean(new_utils)
        return offer_utility > new_mean + new_stdev
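A minimal standalone sketch of the same two-standard-deviation filter used above, assuming a plain list of utility values with at least two elements:

import statistics

def filter_outliers(values, k=2):
    # keep only values within k standard deviations of the mean
    mu = statistics.mean(values)
    sd = statistics.stdev(values)
    return [v for v in values if mu - k * sd < v < mu + k * sd]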
Example No. 9
def get_parts_closeness(part1, part2) -> float:
    part1_distances = part1.distances
    part2_distances = part2.distances
    mean1 = statistics.mean(part1_distances)
    mean2 = statistics.mean(part2_distances)
    difference = abs(mean1 - mean2)
    return difference
Example No. 10
def reducer(key, list_of_values):

    acc = []
    gyro = []
    motypes = []

    for el in list_of_values :
        magn = 0
        try :
            magn = magnitude(el[5], el[6], el[7])
        except:
            pass
        try:
            motypes.append(el[2])
        except:
            pass

        if el[1] == "Acceleration":
            acc.append(magn)
        if el[1] == "Gyroscope" or el[1] == "Gyro":
            gyro.append(magn)

    words_to_count = (word for word in motypes)
    c = Counter(words_to_count)
    motype = c.most_common()[0][0]

    if len(acc) > 1 and len(gyro) > 1:
        mr.emit({
            'date':key,
            'activity':motype,
            'acc':mean(acc),
            'gyro':mean(gyro)}
        )
Example No. 11
def mean_dev(training_set):
    '''
    Calculates and returns the mean and standard deviation to the classes yes and no of a given training set
    '''
    class_yes = []
    class_no = []
    mean_yes = {}
    mean_no = {}
    dev_yes = {}
    dev_no = {}
    for key in training_set[0]:
        for i in range(len(training_set)):
            if training_set[i]['DiabetesClass'] == 'yes':
                class_yes.append(training_set[i][key])
            else:
                class_no.append(training_set[i][key])
        if not key == 'DiabetesClass':
            mean_yes[key] = statistics.mean(class_yes)
            mean_no[key] = statistics.mean(class_no)
            dev_yes[key] = statistics.stdev(class_yes)
            dev_no[key] = statistics.stdev(class_no)
        else:
            prob_yes = float(len(class_yes) / len(training_set))
            prob_no = float(len(class_no) / len(training_set))
        class_yes = []
        class_no = []
    return mean_yes, mean_no, dev_yes, dev_no, prob_yes, prob_no
def main(total_rolls=20000):
    rolls_list = rolls(total_rolls, 1, 6)
    sliced_sum20 = sliced_sums(20, rolls_list)
    sums20 = sums(sliced_sum20, -20)
    roll_count20 = lens(sliced_sum20)
    sliced_sum10k = sliced_sums(10000, rolls_list)
    sums10k = sums(sliced_sum10k, -10000)
    roll_count10k = lens(sliced_sum10k)
    paired_sums = [(20, sums20), (10000, sums10k)]
    paired_rolls = [(20, roll_count20), (10000, roll_count10k)]

    answers("Mean of the sum - {0} when M is {0}:",
            paired_sums, lambda s: statistics.mean(s))
    answers("Mean of the number of rolls when M is {0}:",
            paired_rolls, lambda s: statistics.mean(s))
    answers("Standard deviation of the sum - {0} when M is {0}:",
            paired_sums, lambda s: statistics.stdev(s))
    answers("Standard deviation of the number of rolls when M is {0}:",
            paired_rolls, lambda s: statistics.stdev(s))
    answers("\nView of the rolls summing to {0}\n" +
            format("Count", ">7") + " " + format("Sum", ">7") + " Rolls\n",
            [(20, sliced_sum20), (10000, sliced_sum10k)],
            lambda ss: ''.join(
                format(len(s[1]), ">7") + " " + format(s[0], ">7") + " " +
                format(s[1]) + "\n" for s in ss)
            , sep=''
            )
def calculate_latencies(version_dates):
    linux_latencies = latency(version_dates['linux'], OrderedDict(avo.os_to_kernel))
    set_latex_value('linuxMeanUpdateLatency', ufloat(statistics.mean(linux_latencies.values()),statistics.stdev(linux_latencies.values())))
    openssl_latencies = latency(version_dates['openssl'], OrderedDict(avo.os_to_project['openssl']))
    set_latex_value('opensslMeanUpdateLatency', ufloat(statistics.mean(openssl_latencies.values()),statistics.stdev(openssl_latencies.values())))
    bouncycastle_latencies = latency(version_dates['bouncycastle'], OrderedDict(avo.os_to_project['bouncycastle']))
    set_latex_value('bouncycastleMeanUpdateLatency',ufloat(statistics.mean(bouncycastle_latencies.values()),statistics.stdev(bouncycastle_latencies.values())))
Example No. 14
    def survey(filtered=NO_FILTER):
        from statistics import mean

        # for lex_user in Lexicon.lex_users:
        #     for lex in Lexicon.lex[lex_user]:
        #         if lex.headword in Lexicon.common_vocabulary:
        #             lex["stats"][lex.headword].update(lex.timestamp, lex.start_time)
        stats_by_lex = [
            list(zip(*[(logic_entry["stats"][headword].count, logic_entry["stats"][headword].latency,
                        mean(logic_entry["stats"][headword].interval) if logic_entry["stats"][headword].interval else 0,
                        mean(logic_entry["stats"][headword].permanence) if logic_entry["stats"][
                            headword].permanence else 0)
                       for logic_entry in Logicon.logic.values() if filtered.filtered(logic_entry["logic"][0])]))
            # for idiom_entry in Idiomaton.idiom.values() if idiom_entry["logic"][0].clazz == "E"]))
            for headword in Logicon.common_logics]
        print("Logicon.logic.", [(l, Logicon.logic[l]) for l in Logicon.logic])
        print("stats_by_lex", stats_by_lex)
        stat_props = [
            (count, latency, interval,
             permanence)
            for count, latency, interval, permanence in stats_by_lex] if any(stats_by_lex) else \
            [(0, 2.5e3, 0, 0), (1, 1.5e3, 1, 1)]
        ticks = [Logicon.logicon[burst]["sequence"] for burst in Logicon.common_logics]
        return list(zip(*stat_props)), \
            ["%s da Lógica" % stat for stat in "Contagem Latência Intervalo Permanência".split()], \
            ticks, filtered.claz, "Índices das lógicas EICA"
def main(graph, nbk, delta_max, mu, max_eval, iter, move_operator, tabuSize, logsPath):
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    fh = logging.FileHandler(logsPath + "/tabusearch.log")
    fh.setLevel(logging.INFO)
    frmt = logging.Formatter('%(message)s')
    fh.setFormatter(frmt)
    log.addHandler(fh)

    all_num_evaluations = []
    all_best_score = []
    all_time = []
    log.info("-------RUNNING TABU SEARCH-------")
    for i in range(iter):
        start = timeit.default_timer()
        num_evaluations, best_score, best = test_file_tabusearch(graph, nbk, delta_max, mu, max_eval, move_operator, tabuSize)
        stop = timeit.default_timer()
        log.debug('time : %f' % (stop - start))
        all_num_evaluations.append(num_evaluations)
        all_best_score.append(best_score)
        all_time.append(stop - start)
    log.info("nbS = %d; nbK = %d; delta_max = %d; mu = %r; move_operator= %s; tabu_maxsize = %d" % (graph.get_nbVertices(), nbk, delta_max, mu, move_operator.__name__, tabuSize))
    log.info("for %d iteration with %d max_evaluations each, "
             "\n best score found is %d,"
             "\n total time in sec : %r"
             "\n mean time in sec : %r,"
             "\n mean best_score : %r, EcT : %r"
             "\n mean num_eval : %r"
             % (iter,
                max_eval,
                min(score for score in all_best_score),
                sum(all_time),
                statistics.mean(all_time),
                statistics.mean(all_best_score), statistics.stdev(all_best_score),
                statistics.mean(all_num_evaluations)))
Example No. 16
    def test_alignment_display_speeds(self):
        """
        Tests alignment display page
        """
        # User visits home page, submits a protein alignment and renders it
        files = [
            'spa_protein_alignment.fasta',
            'spa1_protein_alignment.fasta',
            'ser_thr_kinase_family.fasta'
        ]
        for f in files:
            q_display = []
            a_display = []
            for i in range(3):
                self.client = requests.Session()
                self.client.get(self.live_server_url)
                csrftoken = self.client.cookies['csrftoken']
                alignment_string = file_to_string(f)
                start = time.time()
                r = self.client.post(self.live_server_url,
                                     data={'csrfmiddlewaretoken': csrftoken, 'seq_type': 'Protein',
                                           'align_input': alignment_string})
                roundtrip1 = time.time() - start
                q_display.append(roundtrip1)

                render_form = html.parse(StringIO(r.text)).getroot().cssselect('form[id="render"]')
                start = time.time()
                r = self.client.get(self.live_server_url + render_form[0].attrib.get('action'))
                roundtrip2 = time.time() - start
                a_display.append(roundtrip2)

                self.client.close()
            self.assertTrue(mean(q_display) <= 2, 'query sequences display of %s took: %s' % (f, mean(q_display)))
            self.assertTrue(mean(a_display) <= 2, 'alignment display of %s took: %s' % (f, mean(a_display)))
Example No. 17
def threshold(imageArray):
    balanceAr=[]
    newAr = imageArray
    
    # average each pixel's RGB values
    for evryRow in imageArray:
        for evryPix in evryRow:
            avgNum = mean(evryPix[:3])
            balanceAr.append(avgNum)
        
    # average all per-pixel averages
    balance = mean(balanceAr)
    for evryRow in newAr:
        for evryPix in evryRow:
            # brighter pixels are made white
            if mean(evryPix[:3]) > balance:
                evryPix[0] = 255
                evryPix[1] = 255
                evryPix[2] = 255
            # darker pixels are made black
            else:
                evryPix[0] = 0
                evryPix[1] = 0
                evryPix[2] = 0
    return newAr
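For large images the per-pixel Python loops above are slow. A minimal NumPy sketch of the same thresholding, assuming imageArray is a (rows, cols, channels) array with RGB in the first three channels:

import numpy as np

def threshold_np(image_array):
    arr = np.asarray(image_array, dtype=float)
    pixel_means = arr[..., :3].mean(axis=2)   # per-pixel RGB average
    balance = pixel_means.mean()              # average of all pixel averages
    out = np.array(image_array)               # copy so the input stays intact
    out[..., :3] = np.where(pixel_means[..., None] > balance, 255, 0)
    return out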
Example No. 18
def uniform_input():
    global current_delays, previous_delays
    mu1 = statistics.mean(previous_delays)
    mu2 = statistics.mean(current_delays)
    two_sigma1 = 2*statistics.stdev(previous_delays)
    print ("unif check: " + str(mu1-two_sigma1) + " <= " + str(mu2) + " <= " + str(mu1+two_sigma1) + " answer is: " + str(mu1-two_sigma1 <= mu2 <= mu1+two_sigma1))
    return mu1-two_sigma1 <= mu2 <= mu1+two_sigma1
Example No. 19
def printLEMranks(results_file,LEM_file,fname="LEMranks.txt",use_pldLap=False,plotresults=False,title="11D malaria 40 hr 90 TF, scaling factor 0.05"):
    # use_pldLap = False or 0 means use sqrt loss / root
    if use_pldLap:
        source,target,type_reg,lem_score = fileparsers.parseLEMfile(-1,LEM_file)
    else:
        source,target,type_reg,sqrtlossdroot_score = fileparsers.parseLEMfile_sqrtlossdroot(2,LEM_file)
    totaledges = float(len(source))

    results = json.load(open(results_file,'r'))

    try:
        network_spec_str = results["Network"]
        LEMranks = getLEMranks(network_spec_str,totaledges,source,target,type_reg)
        print "Mean: {}".format(statistics.mean(LEMranks))
        print "Median: {}".format(statistics.median(LEMranks))
        print "% stable FCs: {}".format(float(results["StableFCParameterCount"])/float(results["ParameterCount"]))
        print "% pattern matches: {}".format(float(results["StableFCMatchesParameterCount"])/float(results["ParameterCount"]))
    except:
        stats=[]
        for R in results:
            network_spec_str = R["Network"]
            LEMranks = getLEMranks(network_spec_str,totaledges,source,target,type_reg)
            stats.append( (statistics.mean(LEMranks),statistics.median(LEMranks),float(R["StableFCMatchesParameterCount"])/float(R["ParameterCount"])) )
        with open(fname,'w') as sf:
            for s in stats:
                sf.write('/'.join([str(t) for t in s])+"\n")
        if plotresults:
            plotLEMranks(stats,title,use_pldLap)
Example No. 20
def run_simulation(init_duration, init_stake, samples, player):
    """ Run simulation, print the result to stdout

    """
    wheel = create_wheel()
    table = Table(wheel)
    game = RouletteGame(wheel, table)
    simulator = Simulator(game, player,
                          init_duration=init_duration, samples=samples,
                          init_stake=init_stake)
    simulator.gather()
    durations = simulator.durations
    maxima = simulator.maxima
    print(player)
    print()
    print("Durations")
    print("  min :", min(durations))
    print("  max :", max(durations))
    print("  mean: %.2f" % statistics.mean(durations))
    print("  dev : %.2f" % statistics.stdev(durations))
    print("Maxima")
    print("  min :", min(maxima))
    print("  max :", max(maxima))
    print("  mean: %.2f" % statistics.mean(maxima))
    print("  dev : %.2f" % statistics.stdev(maxima))
Example No. 21
def monitor(q, lamda):  
    
    print("\n--------------------------------")
    print("    Monitoring Current Queue      ")
    print("--------------------------------\n")
    
    serv_times = [event.serv_time for event in list(q)]
    mean = stats.mean(serv_times)
    ts.append(mean)
    print("mean Ts = ", mean)
    
    if (len(serv_times) > 1):
        std_dev = stats.stdev(serv_times)
        print("std dev = ", std_dev) 
    
    wait_times = [event.wait_time for event in list(q)]
    avg_wait_time = stats.mean(wait_times)
    tws.append(avg_wait_time)
    w = lamda * avg_wait_time
    avg_ws.append(w)
    print("w = ", w)    
    
    total_times = [event.wait_time + event.serv_time for event in list(q)]
    avg_time_in_system = stats.mean(total_times)
    avg_q = lamda * avg_time_in_system  # avoid shadowing the queue parameter q
    avg_qs.append(avg_q)
    print("q = ", avg_q)
    
    print("--------------------------------\n")
Example No. 22
def show_result():
    fd_list = db.session.query(Formdata).all()

    # Some simple statistics for sample questions
    satisfaction = []
    q1 = []
    q2 = []
    for el in fd_list:
        satisfaction.append(int(el.satisfaction))
        q1.append(int(el.q1))
        q2.append(int(el.q2))

    if len(satisfaction) > 0:
        mean_satisfaction = statistics.mean(satisfaction)
    else:
        mean_satisfaction = 0

    if len(q1) > 0:
        mean_q1 = statistics.mean(q1)
    else:
        mean_q1 = 0

    if len(q2) > 0:
        mean_q2 = statistics.mean(q2)
    else:
        mean_q2 = 0

    # Prepare data for google charts
    data = [['Satisfaction', mean_satisfaction], ['Python skill', mean_q1], ['Flask skill', mean_q2]]

    return render_template('result.html', data=data)
Example No. 23
    def nutritionfacts(self):

        # print keys
        svgdata = ""
        frame_x = self.width * self.bins + 100 - 90
        frame_y = (self.graphheight + 700) // 2 + 25 - self.graphheight
        for i, s in enumerate([l for l in self.points if l[2]]):
            mu = "μ = —"
            sigma = "σ = —"
            if len(s[0]) != 0:
                xmean = stat.mean([t[0] for t in s[0]])
                xsigma = stat.pstdev([t[0] for t in s[0]], xmean)

                ymean = stat.mean([t[1] for t in s[0]])
                ysigma = stat.pstdev([t[1] for t in s[0]], ymean)

                mu = "μ = (" + str(round(xmean, 4)) + ", " + str(round(ymean, 4)) + ")"
                sigma = "σ = (" + str(round(xsigma, 4)) + ", " + str(round(ysigma, 4)) + ")"

            line_y = frame_y + i * 65
            svgdata += circle(frame_x - 4, line_y + 3, 2, s[1])
            svgdata += circle(frame_x + 4, line_y + 4, 2, s[1])
            svgdata += circle(frame_x - 1, line_y + 10, 2, s[1])

            svgdata += text(frame_x + 20, line_y + 10, s[2], align=-1, color=s[1], font="Neue Frutiger 65")
            svgdata += text(frame_x + 28, line_y + 25, "n = " + str(len(s[0])), align=-1, color=s[1])
            svgdata += text(frame_x + 28, line_y + 40, mu, align=-1, color=s[1])

            svgdata += text(frame_x + 28, line_y + 55, sigma, align=-1, color=s[1])
        self._frostbyte(svgdata)
Example No. 24
 def features_present1(self, othertmpft):
     a=FeatureFinder()
     a.train(othertmpft)
     j=a.scan_data(othertmpft)
     features=list()
     dre=self.dict_process(othertmpft)
     sendback=list()
     final_list=list()
     del j[0]
     del j[len(j)-1]
     for i in j:
         #print(i.location)
         if i.location<2:
             final_list.append(Feature(i.location, statistics.mean(dre[i.location-1:i.location+3])))
         else:
             final_list.append(Feature(i.location, statistics.mean(dre[i.location-2:i.location+2])))
     for i in self.outline:
        if type(i)==Feature:features.append(i)
     for i in features:
         if len(final_list)>0:l=min(final_list, key=lambda x: abs(i.loc-x.loc))
         else:return [0]*len(self.outline)
         dis=len(othertmpft)-abs(i.loc-l.loc)
         penalize_by=dis/len(othertmpft)
         #print(penalize_by)
         sendback.append(statistics.mean([penalize_by, self.t(abs(i.loc-l.loc))]))
   #  print(sendback)
     #print("I am features1")
     return self.find_outliar(sendback)            
Example No. 25
 def scan(self):
     dre=list()
     final=list()
     dre=self.dict_process(self.data)
     pol=[]
     oo=list()
     for d in self.listy:
         r=self.__process(d)
         # both non-empty branches do the same thing: append the mean of the segment
         if len(r[1])>0:pol.append(statistics.mean(r[1]))
       #  print(pol)
     for i in range(len(pol)):
         final.append(Slope(self.download[i].location, pol[i]))
        ## print(final)
     del self.download[0]
     del self.download[-1]
     last=1
     for i in range(len(self.download)):
         try:
             final.insert(i+last, Feature(self.download[i].location, statistics.mean(dre[self.download[i].location-2:self.download[i].location+2])))
         except statistics.StatisticsError:
             #del  final[i-1]
             pass
         last+=1
       #  print(final)
     self.outline=final
def csv_dict_reader(file_obj):
    """
    Read a CSV file using csv.DictReader
    """
    reader = csv.DictReader(file_obj, delimiter=',')
    num_likes = []
    num_comments = []
    num_shares = []
    for line in reader:
        p = int(line["num_likes"])
        q = int(line["first_page_comment"])
        r = int(line["comments_beyond_pageone"])
        num_likes.append(p)
        num_comments.append(q)
        num_shares.append(r)
    mean_num_likes = statistics.mean(num_likes)
    stdev_num_likes = statistics.stdev(num_likes)
    mean_num_comments = statistics.mean(num_comments)
    stdev_num_comments = statistics.stdev(num_comments)
    mean_num_shares = statistics.mean(num_shares)
    stdev_num_shares = statistics.stdev(num_shares)
    covariance_likes = stdev_num_likes / mean_num_likes
    covariance_comments = stdev_num_comments / mean_num_comments
    covariance_shares = stdev_num_shares / mean_num_shares
    w = csv.writer(open("svm_dataset.csv","a"),delimiter=',',quoting=csv.QUOTE_ALL)
    
    w.writerow([mean_num_likes,stdev_num_likes,covariance_likes,mean_num_comments,stdev_num_comments,covariance_comments,mean_num_shares,stdev_num_shares,covariance_shares])
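The "covariance_*" columns written above are really coefficients of variation (standard deviation divided by the mean), not covariances. A minimal helper making that explicit, assuming non-zero means:

import statistics

def coefficient_of_variation(values):
    # relative spread: sample standard deviation over the mean
    return statistics.stdev(values) / statistics.mean(values)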
Example No. 27
def insertNormalizedModelInDB(idUser, idString, keystroke, isTest = False):
	insertNormalizedRecord = replaceIfIsTest("INSERT INTO `mdl_user#isTest_keystroke_normalized`(`id_user`, `id_string`) VALUES (%s, %s)", isTest);
	updateNormalizedRecord = replaceIfIsTest("UPDATE `mdl_user#isTest_keystroke_normalized` ", isTest);
	
		
	executeSqlInDB(insertNormalizedRecord, (idUser, idString));
	
	keyDimensionsExtractor = KeystrokeDimensionsExtractor(keystroke);
	
	#extracting dimensions
	timePressed = keyDimensionsExtractor.getTimePressed();
	# getting the average and standard deviation
	timePressedAverage = statistics.mean(timePressed);
	timePressedstandardDeviation = statistics.pstdev(timePressed);
	
	latencies = keyDimensionsExtractor.getLatencies();
	latenciesAverage = statistics.mean(latencies);
	latenciesStandardDeviation = statistics.pstdev(latencies);
	
	dbModel = {
		'id_user': idUser,
		'id_string': idString,
		'press_average': timePressedAverage,
		'latency_avarage': latenciesAverage,
		'press_standard_deviation': timePressedstandardDeviation,
		'latency_standard_deviation': latenciesStandardDeviation,
	}
	
	# update the record inserted above
	updateNormalizedRecord = updateNormalizedRecord + (" SET `press_average`= %(press_average)s,`latency_avarage`= %(latency_avarage)s, `press_standard_deviation`= %(press_standard_deviation)s,`latency_standard_deviation`= %(latency_standard_deviation)s " 
		" WHERE `id_user`= %(id_user)s AND `id_string`= %(id_string)s");
	executeSqlInDB(updateNormalizedRecord, dbModel);
Example No. 28
def get_stats_window(depth_iterator, length, window_size):
    """Calculate min/max/mean and min/max windowed mean.

    Assumes the depth_iterator will fill in all the implicit zero
    entries which ``samtools depth`` may omit!

    Assumes window_size < number of values in iterator!
    """
    window = deque()
    total_cov = 0
    min_cov = None
    max_cov = 0.0

    assert 1 <= window_size <= length

    prev_pos = 0
    while len(window) < window_size:
        try:
            ref, pos, depth = next(depth_iterator)
        except NoCoverage:
            return 0, 0, 0.0, 0.0, 0.0
        except StopIteration:
            outstr = "Not enough depth values to fill %i window" % window_size
            logger.info(outstr)
            raise ValueError("%s" % outstr)
        prev_pos += 1
        assert pos == prev_pos, "Discontinuity in cov vals for %s position %i" % (ref,
                                                                                  pos)
        total_cov += depth
        if min_cov is None:
            min_cov = depth
        else:
            min_cov = min(min_cov, depth)
        max_cov = max(max_cov, depth)
        window.append(depth)

    assert len(window) == window_size
    min_win = max_win = mean(window)
    for ref, pos, depth in depth_iterator:
        prev_pos += 1
        assert pos == prev_pos, "Discontinuity in cov val for %s position %i" % (ref,
                                                                                 pos)
        total_cov += depth
        min_cov = min(min_cov, depth)
        max_cov = max(max_cov, depth)
        window.popleft()
        window.append(depth)
        assert len(window) == window_size
        win_depth = mean(window)
        min_win = min(min_win, win_depth)
        max_win = max(max_win, win_depth)

    mean_cov = total_cov / float(length)

    assert prev_pos == length, "Missing final coverage?"
    assert len(window) == window_size
    assert min_cov <= mean_cov <= max_cov
    assert min_cov <= min_win <= max_win <= max_cov

    return min_cov, max_cov, mean_cov, min_win, max_win
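A minimal usage sketch, assuming the iterator yields (reference, position, depth) tuples for every position from 1 through length:

depths = [("chr1", pos + 1, d) for pos, d in enumerate([0, 3, 5, 5, 2, 0, 1, 4])]
min_cov, max_cov, mean_cov, min_win, max_win = get_stats_window(iter(depths), 8, 3)
print(min_cov, max_cov, mean_cov, min_win, max_win)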
Example No. 29
def show_result3():
    fd_list = db.session.query(Price_History).all()

    # Some simple statistics for sample questions
    MDALC = []
    MWALC = []
    FWALC = []
    FDALC = []
    for el in fd_list:
        if(el.SEX=='M'):
            MWALC.append(el.WALC)
            MDALC.append(el.DALC)
        elif(el.SEX=='F'):
            FWALC.append(el.WALC)
            FDALC.append(el.DALC)
        else:
            print("Sex error")

    mean_MWALC = statistics.mean(MWALC)
    mean_MDALC = statistics.mean(MDALC)

    mean_FWALC = statistics.mean(FWALC)
    mean_FDALC = statistics.mean(FDALC)

    # Prepare data for google charts
    data = [['Female Workday Alcohol Consumption', mean_FDALC], ['Female Weekend  Alcohol Consumption', mean_FWALC],
            ['Male Workday Alcohol Consumption', mean_MDALC], ['Male Weekend Alcohol Consumption', mean_MWALC]]
    return render_template('result3.html', data=data)
def analyzeData(purchases, times):
    std = statistics.stdev(purchases, statistics.mean(purchases))
    data = []
    # assumed accumulators (they are not defined elsewhere in this snippet)
    outliers = []
    trimmedData = []
    trimmedTimes = []

    for i in range(len(purchases)):
        if purchases[i] > (statistics.mean(purchases) + std * 1.5):
            outliers.append(purchases[i])
        else:
            trimmedData.append(purchases[i])
            trimmedTimes.append(times[i])

    for i in range(max(trimmedTimes)):
        data.append(0)

    flag = 0

    for w in trimmedTimes:
        data[w - 1] += trimmedData[flag]
        flag = flag + 1

    for w in range(max(trimmedTimes) - 1):

        data[w + 1] += data[w]

    for i in outliers:
        for w in range(len(data)):
            data[w] += i

    return data
Example No. 31
def get_mean_stdev(array):
    return statistics.mean(array), statistics.stdev(array)
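A minimal usage sketch of the helper above on a plain list of numbers:

values = [2, 4, 4, 4, 5, 5, 7, 9]
avg, sd = get_mean_stdev(values)
print(avg, sd)  # 5.0 and the sample standard deviation (about 2.14)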
Example No. 32
            time += world.waiting_times[num_of_transitions]
            num_of_transitions += 1
            #print(time)

        total_population = sum([group.size for group in world.groups])
        total_cooperators = sum([group.num_of_coops for group in world.groups])
        run_series.append(total_cooperators / total_population)
    matrix_of_runs.append(run_series)
    print(run)
#pdb.set_trace()
avg_of_runs = []
for i in range(GRANULARITY):
    across_runs = []
    for run in range(RUNS):
        across_runs.append(matrix_of_runs[run][i])
    avg_of_runs.append(mean(across_runs))

avg_of_runs_array = np.array(avg_of_runs)

ax.plot(x, avg_of_runs_array, label='eta = 0')

matrix_of_runs = []
for run in range(RUNS):
    run_series = []
    for i in range(GRANULARITY):
        migration = 0.05 * i / 50
        world = World(initial_groups, B=10, eta=0.006, mu=migration)

        time = 0
        num_of_transitions = 0
        while time < TIME:
Example No. 33
def operation_data(uuid, operation):
    list_operations = ['max', 'min', 'mean']
    if operation not in list_operations:
        return jsonify({"status": "failed", "message": "invalid operation", "data": []})

    logger.info(f"Retrieving data associated with UUID '{uuid}'")

    try:
        data = data_store.get(uuid)

    except KeyError:
        logger.warning(f"cannot retrieve data associated with UUID '{uuid}'")
        return jsonify({"status": "failed", "message": "data cannot be retrieved", "data": []})

    if operation == 'max':
        return jsonify({"status": "success", "message": "operation successful", "max": max(data)})
    elif operation == 'min':
        return jsonify({"status": "success", "message": "operation successful", "min": min(data)})
    elif operation == 'mean':
        return jsonify({"status": "success", "message": "operation successful", "mean": mean(data)})
Example No. 34
 def moyenne(self):
     print(f"{self.prenom} a eu les notes suivantes : {self.notes}")
     self.moyenneuser=mean(self.notes)
     print(f"{self.prenom} a une moyenne de {self.moyenneuser}")
    for line in f:
        values = [float(el) for el in line.split("]")[0].strip("[").split(",")]
        n = len(values)
        instance = line.split(",")[n].split(".")[0].split("/")[-1]
        controller = line.split(",")[n+1].split("/")[-1].split("_gen_")[0]
        scores.append([values, instance, controller])



inst_contr_dict = dict()
test_instances = []
for el in scores:
    if el[1] not in test_instances:
        test_instances.append(el[1])
    #inst_contr_dict[(el[1],el[2])] = (mean(el[0]) - RS_res) / (BK - RS_res)
    inst_contr_dict[(el[1],el[2])] = mean(el[0])




def xor(a, b):
    return (a and not b) or (not a and b)



def order(x):
    if "0a" in x:
        return x.split("_")[0] + "_A_" + x.split("_")[1]
    elif "sko" in x:
        return x.split("_")[0] + "_SKO_" + x.split("_")[1]
    elif "0b" in x:
Example No. 36
import statistics as stat
em1 = {'name': 'John', 'salary': 60000}
em2 = {'name': 'Mark', 'salary': 70000}
em3 = {'name': 'Dan', 'salary': 50000}
emps = [em1, em2, em3]
print(emps)

# total salary?
salary = []  # create a list
for s in emps:  # append each salary to the list
    salary.append(s['salary'])
print(sum(salary))
print(max(salary))
print(min(salary))
print(stat.mean(salary))
Example No. 37
from subprocess import Popen, PIPE
import sys
import statistics


def get_wall_time(prog, t, n):
    p = Popen(["/usr/bin/time", "-p", prog,
               str(t), str(n)],
              stdout=PIPE,
              stderr=PIPE)
    o, e = p.communicate()
    line = str(e).split('\\n')[0]
    val = line.split(' ')[-1]
    return float(val)


if __name__ == "__main__":
    prog = sys.argv[2]
    runs = int(sys.argv[1])
    times = []
    threads = int(sys.argv[3])
    n = int(sys.argv[4])
    for i in range(0, runs):
        time = get_wall_time(prog, threads, n)
        times.append(time)
    for i in range(0, runs - 1):
        print(times[i], end=",")
    print(times[runs - 1])
    print("{:.2f},{:.2f}".format(statistics.mean(times),
                                 statistics.stdev(times)))
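A minimal usage sketch of get_wall_time on its own, assuming /usr/bin/time is available and using /bin/true (which ignores its two extra arguments) as a stand-in program:

elapsed = get_wall_time("/bin/true", 4, 1000)
print("wall time: {:.2f}s".format(elapsed))  # close to 0.00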
Example No. 38
File: p5.py  Project: chin/FINAL
                deck = deck[1:]
    cards_rem.extend(hand)
    return len(hand)
    # n +=1


rem = []
while n < N:
    x = sim()
    rem.append(x)
    #print(x)
    n += 1

rem.sort()

print( "Sample mean of remaining cards: ", mean(rem) )
print( "Sample standard deviation of remaining cards: ", stdev(rem) )
    
#(mu, sigma) = norm.fit(rem)

n, bins, patches = plt.hist(rem, bins='auto', normed=True)
#y = mlab.normpdf(bins, mean(rem), stdev(rem)**2)
avg = mean(rem)
var= stdev(rem)**2
pdf_x = np.linspace(np.min(rem), np.max(rem), 100)
pdf_y = 1.0/np.sqrt(2*np.pi*var)*np.exp(-0.5*(pdf_x-avg)**2/var)

plt.plot(pdf_x, pdf_y, 'k--', linewidth=2)

plt.title('Histogram of cards remaining')
plt.xlabel('No. Cards Remaining')
Example No. 39
# Calculate cumulative load
Ellsworth_cumload = np.cumsum(Ellsworth_load)
DBasin_cumload = np.cumsum(DBasin_load)
Wetland_cumload = np.cumsum(Wetland_load)
Channel_cumload = np.cumsum(Channel_load)

#----------------------------------------------------------------------#

# Print final load released
print("Ellsworth:", Ellsworth_cumload[-1])
print("Doyle Basin:", DBasin_cumload[-1])
print("Wetland:", Wetland_cumload[-1])
print("Channel to Outfall:", Channel_cumload[-1])

print("Ellsworth Avg", st.mean(Ellsworth_conc))
print("Doyle Basin Avg", st.mean(DBasin_conc))
print("Wetland Avg", st.mean(Wetland_conc))
print("Channel Avg", st.mean(Channel_conc))

#----------------------------------------------------------------------#
# Plot Result
fig, ax = plt.subplots(4, 4)
ax[0,0].plot(Ellsworth_inflow_m, color='#6CC6D1', linewidth=2)
ax[0,0].set_xticks([])
ax[0,0].set_yticks([0,3,6,9])
ax[0,0].set_yticklabels(["0","3","6","9"])
ax[0,0].set_ylim(0,10)
ax[0,0].set_xlim(0,86400)
ax[0,0].set_ylabel("Inflow (m³/s)")
Example No. 40
# =======IF=======

from statistics import mean  # mean() imported from the statistics module
n1 = float(input('Digite sua primeira nota: '))
n2 = float(input('Digite sua segunda nota: '))
n3 = float(input('Digite sua terceira nota: '))

m = mean([n1, n2, n3])
print('Sua média é: {:.2f}' .format(m))

if m >= 7:  # if the average is at least 7 (don't forget the colon)
    # must be indented, since it belongs to the if block
    print('Congrats, aprovado!')
else:  # otherwise (average below 7)
    # must be indented, since it belongs to the else block
    print('Burro pra caralho!')

nome = str(input('Qual o seu nome? '))
if nome == 'Paulo':  # if the name equals Paulo
    print('Que nome bonito!')  # SIMPLE CONDITIONAL: no else branch needed
# always runs, since it is outside the if block
print('Tenha um bom dia, {}.' .format(nome))

nome = str(input('Qual o seu nome? '))
if nome == 'Paulo':  # if the name equals Paulo
    print('Que nome bonito!')
else:  # otherwise, the name is not Paulo
    print('Seu nome é bem normal')  # COMPOUND CONDITIONAL (if/else)
# outside the if/else, so it always runs
print('Tenha um bom dia, {}.' .format(nome))
Example No. 41
def print_stats(l):
    c = Counter(l)
    print(c)
    print('mean', mean(l))
    print('variance', pvariance(l))
    print('cycle_len', findCycle(l))
Example No. 42
# Stream column for top 5 songs only
top5_streams = [2993988783, 1829621841, 1460802540, 1386258295, 1311243745]
def average(values):
    total = 0.
    for s in values:
        total += s
    return total/len(values)
    
total_average = average(top5_streams)

## 2. Introduction to Modules ##

top5_streams = [2993988783, 1829621841, 1460802540, 1386258295, 1311243745]
import statistics

average = statistics.mean(top5_streams)

## 3. Loading our data using the CSV module ##

import csv
f = open("top100.csv","r")
csvreader = csv.reader(f)
music = list(csvreader)

print(music)

## 4. Understanding the namespace ##

import statistics
print(dir())
print(dir(statistics))
Example No. 43
# read the file
with open('turk.csv') as csvfile:
    turk = csv.reader(csvfile)
    count = 0
    for row in turk:
        if count == 0:
            count = 1
            continue
        babel_jaccard.append(float(row[3]))
        babel_edit.append(int(row[4]))
        babel_answer.append(int(row[10]))
        mrpc_answer.append(int(row[11]))

# hist/mean/median for rows 1-69 of babel_answer
print("mean of rows 1-69 of babel_answer is: ",
      str(statistics.mean(babel_answer[0:69])))
print("median of rows 1-69 of babel_answer is: ",
      str(statistics.median(babel_answer[0:69])))
hist1 = plt.hist(babel_answer[0:69], [0, 1, 2, 3, 4, 5, 6])
table1 = {}
table1['babel answer'] = [0, 1, 2, 3, 4, 5]
table1['count'] = hist1[0].tolist()
df1 = pd.DataFrame(table1, columns=['babel answer', 'count'])
df1.to_csv('/Users/ellachang/Desktop/first_babel.csv',
           index=False,
           header=True)

# hist/mean/median for rows 70-170 of babel_answer
print("mean of rows 70-170 of babel_answer is: ",
      str(statistics.mean(babel_answer[69:170])))
print("median of rows 70-170 of babel_answer is: ",
Example No. 44
import pandas as pd
import statistics
import csv

df = pd.read_csv("data.csv")
height_list = df["Height(Inches)"].to_list()
weight_list = df["Weight(Pounds)"].to_list()

height_mean = statistics.mean(height_list)
weight_mean = statistics.mean(weight_list)

height_median = statistics.median(height_list)
weight_median = statistics.median(weight_list)

height_mode = statistics.mode(height_list)
weight_mode = statistics.mode(weight_list)

print("Mean,Median and Mode of height is :", height_mean, height_median,
      height_mode)
print("Mean,Median and Mode of weight is :", weight_mean, weight_median,
      weight_mode)

height_std_deviation = statistics.stdev(height_list)
print("Standard deviation of this data is ", height_std_deviation)
weight_std_deviation = statistics.stdev(weight_list)
print("Standard deviation of this data is ", weight_std_deviation)

height_first_std_deviation_start, height_first_std_deviation_end = height_mean - height_std_deviation, height_mean + height_std_deviation
height_second_std_deviation_start, height_second_std_deviation_end = height_mean - (
    2 * height_std_deviation), height_mean + (2 * height_std_deviation)
height_third_std_deviation_start, height_third_std_deviation_end = height_mean - (
Example No. 45
    def average_reviewer_score(self):
        reviewer_ratings = review_models.ReviewerRating.objects.filter(assignment__reviewer=self)
        ratings = [reviewer_rating.rating for reviewer_rating in reviewer_ratings]

        return statistics.mean(ratings) if ratings else 0
Example No. 46
        dates.append(row[1])

    #loop through earnings list
    for i in range(len(earnings)):
        
        #increment total earnings
        total_earnings += int(earnings[i])
        
        #if not first record, calculate earnings delta and append to list
        if i != 0:
            earnings_delta.append(int(earnings[i]) - int(earnings[i-1]))

#calculate max, min, and avg changes
max_change = round(max(earnings_delta),0)
min_change = round(min(earnings_delta),0)
avg_change = round(mean(earnings_delta),2)

#Insert null value into earnings list to match date indexes
earnings_delta.insert(0,0)

#create summary dictionary based on earnings delta and dates
summary_dict = dict(zip(earnings_delta, dates))

#print summary
print("Financial Analysis")
print("------------------------------------------")
print("Total Months: " + str(total_months))
print("Total: " + str(round(total_earnings,0)))
print("Average Change: " + "$" + str(avg_change))
print("Greatest Increase in Profits: " + summary_dict[max_change] + " ($" + str(max_change) + ")")
print("Greatest Decrease in Profits: " + summary_dict[min_change] + " ($" + str(min_change) +")")
Example No. 47
import statistics

import matplotlib
import matplotlib.pyplot as plt
import requests

matplotlib.use('Qt5Agg')
matplotlib.rcParams['backend.qt5'] = 'PySide2'

# matplotlib.use('Cairo')

data = requests.get("http://127.0.0.1:5000/goc").json()[3:]

total_population = [d["Total population"] for d in data]

avg_pop = statistics.mean(total_population)

provinces = []
in_migration = []
out_migration = []
total_population = []
rate = []

for d in data:
    provinces.append(d["Province"])
    in_migration.append(d["In-migration"])
    out_migration.append(d["Out-migration"])
    total_population.append(d["Total population"])
    rate.append(d["Rate of net migration (‰)"])

figure, ax1 = plt.subplots()
Example No. 48
 def centroid(coordinates):
     # Returns the mean of a list of coordinates like [(lat, lon)].
     # Source: https://stackoverflow.com/a/23021198
     latitudes, longitudes = zip(*coordinates)  # unzip the (lat, lon) pairs
     return (statistics.mean(latitudes), statistics.mean(longitudes))
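A minimal usage sketch, assuming centroid is exposed as a plain function and given (lat, lon) tuples as documented:

points = [(48.85, 2.35), (51.51, -0.13), (40.71, -74.01)]
print(centroid(points))  # arithmetic mean of the latitudes and of the longitudes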
Example No. 49
 def mean(self, col):
     """Returns mean in column"""
     data = self.__bin_read(key=self.__index_to_col(col))
     return int(statistics.mean(list(map(int, data))))
Example No. 50
fout_dir = sys.argv[2]

fout = open(fout_dir + finput.split('/')[-1].split('.')[0] + ".bed", 'w')

with open(finput, 'r') as fp:
    for line in fp:
        line_temp = line.strip().split('\t')

        meths = line_temp[3].split('..')
        fout.write('\t'.join(line_temp[0:3]) + '\t')
        tmp_indi_M = []
        for meth in meths:
            M = str(
                round(
                    mean([
                        float(x.split('/')[0]) / float(x.split('/')[1])
                        for x in meth.split(',') if int(x.split('/')[1]) > 0
                    ]), 3))
            indi_M = [
                str(round(float(x.split('/')[0]) / float(x.split('/')[1]), 3))
                if int(x.split('/')[1]) > 0 else M for x in meth.split(',')
            ]
            tmp_indi_M.append(','.join(indi_M))
        fout.write('..'.join(tmp_indi_M) + '\n')

fout.close()
'''
AN05483_Control_NeuN	X1525_Control_NeuN	X1527_Control_NeuN	X1531_Control_NeuN	X1536_Control_NeuN	X1538_Control_NeuN	X1539_Control_NeuN	X3590_Control_NeuN	X3602_Control_NeuN	X4615_Control_NeuN	ABBY_ND	ANJA_ND	BELEKA_ND	CALLIE_ND	DUNCAN_ND	LULU_ND	LYK_ND	MELISSA_ND	OSSABAW_ND	Roger_ND	YN04-200_ND	YN08-380_ND	YN09-122_ND	YN09-173_ND	YN09-179_ND	YN09-72_ND	YN11-64_ND	YN11-77_ND	YN12-654_ND	YN14-248_ND	AN03398_Control_Olig2	AN15240_Control_Olig2	AN16799_Control_Olig2	X1527_Control_Olig2	X1532_Control_Olig2	X1538_Control_Olig2	X1539_Control_Olig2	X1541_Control_Olig2	X3602_Control_Olig2	X4615_Control_Olig2	ABBY_OD	ANJA_OD	BELEKA_OD	BJORN_OD	CALLIE_OD	DUNCAN_OD	LULU_OD	MELISSA_OD	OSSABAW_OD	Roger_OD	YN04-200_OD	YN08-380_OD	YN09-122_OD	YN09-173_OD	YN09-72_OD	YN11-300_OD	YN11-77_O
Example No. 51
def pullMaddenRatings(conn,
                    season,
                    url = 'https://ratings-api.ea.com/v2/entities/m22-ratings?filter=iteration:launch-ratings&sort=overall_rating:DESC,firstName:ASC&limit=1000&offset=2000',
                    timestamp = 'NULL'):
    teams = getTeamId(conn)
    players = getPlayerId(season,conn)
    sql = InsertTable("madden_ratings.playerRatings")
    
    r = requests.get(url)
    data = r.json()
    ## pull all depth charts divs
    
    for playerEntry in data['docs']:
        teamName = playerEntry['team']
        playerName = playerEntry['fullNameForSearch']
        position = playerEntry['position']
        teamId = teams.teamId(teamName,conn)

    

        playerId = players.playerId([
            remove_non_ascii(playerName),
            str(teamId),
            position,
            str(season)],
            conn
            )
        routeRunning = s.mean([
                int(playerEntry['mediumRouteRunning_rating']),
                int(playerEntry['deepRouteRunning_rating']),
                int(playerEntry['shortRouteRunning_rating'])
            ])
        throwAccuracy = s.mean([
                int(playerEntry['throwAccuracyShort_rating']),
                int(playerEntry['throwAccuracyDeep_rating']),
                int(playerEntry['throwAccuracyMid_rating'])
            ])
        print(playerId,'-',playerName)
        sql.appendRow([
            ['NULL',''],## id
            [remove_non_ascii(playerName).replace("'", "\\'"),'string'],## playerName (escape single quotes for SQL)
            [str(playerId),''],##playerId
            [str(season),''],##season
            [teamName,'string'],## team
            [teamId,''],##teamId,
            [position,'string'],#position
            [playerEntry['height'],''],#height
            [playerEntry['weight'],''],##weight
            [playerEntry['overall_rating'],''],##overall
            [playerEntry['speed_rating'],''],##speed
            [playerEntry['acceleration_rating'],''],##acceleration
            [playerEntry['strength_rating'],''],##strength
            [playerEntry['agility_rating'],''],##agility
            [playerEntry['awareness_rating'],''],##awareness
            [playerEntry['throwPower_rating'],''],##throw_power
            [str(throwAccuracy),''],##throw_accuracy
            [playerEntry['kickPower_rating'],''],##kick_power
            [playerEntry['kickAccuracy_rating'],''],##kick_accuracy
            [playerEntry['passBlock_rating'],''],##pass_block
            [playerEntry['runBlock_rating'],''],##run_block
            [playerEntry['catching_rating'],''],##catch
            [playerEntry['carrying_rating'],''],##carrying
            [playerEntry['bCVision_rating'],''],##bc_vision
            [playerEntry['injury_rating'],''],##injury
            [playerEntry['toughness_rating'],''],##toughness
            [playerEntry['stamina_rating'],''],##stamina
            [str(routeRunning),''],##route_running
            [playerEntry['age'],''],##age
            [playerEntry['yearsPro'],''],##years pro (experience)
            ])

    return [sql.returnStatement()]
Example No. 52
            yolo_target_errors_list.extend(yolo_target_errors)

    outliers_unet = findOutliers(unet_target_errors_list, 2)
    outliers_yolo = findOutliers(yolo_target_errors_list, 2)
    outliers_unet_ratio = len(outliers_unet) / len(unet_target_errors_list)
    outliers_yolo_ratio = len(outliers_yolo) / len(yolo_target_errors_list)

    print('\n======== Summary ==========')
    print(f'Using device: {device}')
    print(f'Dataset - {opt.dataset_name}: {image_folder}')
    print(f'Total samples: {len(image_files)}')
    print(f'Model Weights: ')
    print(f'\t- unet: {unet_path}')
    print(f'\t- yolo: {yolo_path}')
    print('outliers_unet:')
    print(outliers_unet)
    print('outliers_yolo:')
    print(outliers_yolo)
    print(f'Average prediction speed/time (s): ')
    print('\t- unet: {:.4f}'.format(statistics.mean(unet_infer_time_list)))
    print('\t- yolo: {:.4f}'.format(statistics.mean(yolo_infer_time_list)))
    print(f'Average centroid prediction error: ')
    print('\t- unet: {:.5f}'.format(statistics.mean(unet_target_errors_list)))
    print('\t- yolo: {:.5f}'.format(statistics.mean(yolo_target_errors_list)))
    print('Outliers ratio of centroid prediction errors: ')
    print('\t- unet: {}/{} = {:.5f}'.format(len(outliers_unet),
                                            len(unet_target_errors_list),
                                            outliers_unet_ratio))
    print('\t- yolo: {}/{} = {:.5f}'.format(len(outliers_yolo),
                                            len(yolo_target_errors_list),
                                            outliers_yolo_ratio))
            bet9, earnings9, gains9 = winlimitim(i, bet9, earnings9, gains9,
                                                 j9)
            if bet9 > betlimit:
                bet9, gains9 = 1, 0
            data9.append(earnings9)

            bet0, earnings0, gains0 = winlimitim(i, bet0, earnings0, gains0,
                                                 j0)
            if bet0 > betlimit:
                bet0, gains0 = 1, 0
            data0.append(earnings0)

            if i != 0:
                datam.append(i)

        a = 100 * mean(datam)

    ends1.append(data1[-1])
    mins1.append(min(data1))
    maxs1.append(max(data1))
    ends2.append(data2[-1])
    mins2.append(min(data2))
    maxs2.append(max(data2))
    ends3.append(data3[-1])
    mins3.append(min(data3))
    maxs3.append(max(data3))
    ends4.append(data4[-1])
    mins4.append(min(data4))
    maxs4.append(max(data4))
    ends5.append(data5[-1])
    mins5.append(min(data5))
    # Print episode number
    print('Episode ' + str(i_episode) + ' out of ' + str(num_episodes))

    # Perform a step of optimization
    optimize_model()

    # Update the target network, copying all weights and biases in DQN
    if i_episode % TARGET_UPDATE == 0:
        target_net.load_state_dict(policy_net.state_dict())

print('Complete')

# Print statistics on how the games went
wins = sum([comp_scores[i] < user_scores[i] for i in range(len(comp_scores))])
user_average = statistics.mean(user_scores)
standard_deviation = statistics.stdev(user_scores)
vert = np.array(user_scores).reshape((-1, 1))
horiz = np.array(range(len(user_scores))).reshape((-1, 1))
model = LinearRegression().fit(horiz, vert)
print('intercept:', model.intercept_)
print('slope:', model.coef_)
print("AI won " + str(wins) + " games out of " + str(len(user_scores)))
print("AI average was " + str(user_average))
print("AI stdev was " + str(standard_deviation))

print(user_scores)

# Save Model
torch.save(policy_net.state_dict(), 'state_dict_final.pyt')
Example No. 55
from glob import glob
from PIL import Image
from statistics import mean
import os

dir = "./train"

folders = glob(os.path.join(dir, "*"))

height, width = [], []

for folder in folders:
    images = glob(os.path.join(folder, "*"))
    for image in images:
        img = Image.open(image)
        width.append(img.size[0])
        height.append(img.size[1])

    print(folder.split("/")[-1] + " done")

print("average height is : " + str(mean(height)))
print("average width is : " + str(mean(width)))
Example No. 56
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 14:31:45 2019

@author: agnib
"""

from statistics import mean

frenz = [
    "Rohit", "Rahul", "Rohan", "Rini", "Ronit", "Rakesh", "Roshan", "Rupert"
]

height = [1.56, 2.0, 1.65, 1.87, 1.43, 1.98, 1.66]

maxheight = max(height)
maxheightindex = height.index(maxheight)
name = frenz[maxheightindex]
print("Max Height : ", name, maxheight)

minheight = min(height)
minheightindex = height.index(minheight)
name2 = frenz[minheightindex]
print("Min Height : ", name2, minheight)

avg = mean(height)
print("Average Height : ", avg)
Example No. 57
def coefficient_of_determination(ys_orig, ys_line):
    y_mean_line = [mean(ys_orig) for y in ys_orig]
    squared_error_reg = squared_error(ys_orig,ys_line)
    squared_error_y_mean = squared_error(ys_orig,y_mean_line)
    return 1 - squared_error_reg /squared_error_y_mean
def checkCicleRot(xs,ys):
    slope = ((mean(xs) * mean(ys)) - (mean(xs * ys))) / ((mean(xs) ** 2) - mean(xs**2))

    return slope
Example No. 59
def best_fit_slope_and_intercept(xs,ys):
    m = ((mean(xs)*mean(ys) - (mean(xs*ys)))/((mean(xs)*mean(xs))-(mean(xs*xs))))
    b = mean(ys)- m*mean(xs)
    return m,b
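Because the formula multiplies the sequences elementwise (xs*ys, xs*xs), xs and ys need to be NumPy arrays rather than plain lists; a minimal usage sketch under that assumption:

import numpy as np

xs = np.array([1, 2, 3, 4, 5], dtype=float)
ys = np.array([2, 4, 5, 4, 6], dtype=float)
m, b = best_fit_slope_and_intercept(xs, ys)
print(m, b)  # slope 0.8, intercept 1.8 for these points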
Example No. 60
def zeroCentralisation(features):
    # meanValue = sum(features) / len(features)
    meanValue = mean(features)
    centeredFeatures = [feat - meanValue for feat in features]
    return centeredFeatures
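A minimal usage sketch of zeroCentralisation on a plain list of numeric features:

features = [2.0, 4.0, 6.0]
print(zeroCentralisation(features))  # [-2.0, 0.0, 2.0]: the mean is shifted to zero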