Example #1
def grow_plants(play_area, num, plant_type, bias):
    """
    :param play_area:
    :param num:
    :param plant_type:
    :param bias:
    :return: a list of spawned plants of given type
    """
    normal_distribution = NormalDist(0.5, 0.15)

    plant_db = {}

    dist_rolls = normal_distribution.samples(num)

    if bias == 'edges':
        dist_rolls = inverse_probabilities(dist_rolls)

    for i in range(num):
        location = get_location_in_circle(dist_rolls[i], play_area)
        new_p = Decoration(location, plant_type)
        plant_db[location] = new_p

    clean_up_db = {}

    for p in plant_db.values():
        p_loc = p.rect.left, p.rect.top
        if p_loc not in clean_up_db:
            clean_up_db[p_loc] = p

    return clean_up_db
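The snippet leans on a get_location_in_circle helper that is not shown. A minimal sketch of what it might look like, assuming play_area is a (centre_x, centre_y, radius) triple (the real structure is not given above):

from math import cos, sin, tau
import random

def get_location_in_circle(roll, play_area):
    # treat each roll as a fraction of the radius and pick a random angle,
    # so NormalDist(0.5, 0.15) clusters plants around the mid-radius
    cx, cy, radius = play_area  # hypothetical (centre_x, centre_y, radius)
    r = min(max(roll, 0.0), 1.0) * radius  # clamp the roll to [0, 1]
    angle = random.uniform(0, tau)
    return round(cx + r * cos(angle)), round(cy + r * sin(angle))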
Example #2
def _accrued_value_in_league(stat_values: List[float], is_reverse: bool,
                             std_dev) -> List[float]:
    """
    Given a list of accrued stat values, calculates the worth of each of those values.

    Worth is assigned as follows: best value gets n, worst value gets 1. From there, adjust
    the worth of each value based on distance to neighbors.
    :param std_dev: standard deviation assumed for every stat value
    :param stat_values: the accrued stat values to be ranked
    :param is_reverse: if True, lower stat values are better
    :return: list of worths, parallel to stat_values
    """
    result = [1] * len(stat_values)
    # pylint: disable=consider-using-enumerate
    for first in range(0, len(stat_values)):
        for second in range(first + 1, len(stat_values)):
            mean_1 = stat_values[first]
            mean_2 = stat_values[second]
            variance = 2 * pow(std_dev, 2)
            first_minus_second = NormalDist(mean_1 - mean_2, sqrt(variance))
            p_second_greater = first_minus_second.cdf(0)
            if is_reverse:
                result[second] += 1 - p_second_greater
                result[first] += p_second_greater
            else:
                result[second] += p_second_greater
                result[first] += 1 - p_second_greater
    return result
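A quick check of the pairwise ranking logic, assuming the function and its imports are in scope; the stat line below is made up:

from math import sqrt
from statistics import NormalDist
from typing import List

# Three made-up stat values, higher is better, assumed std dev of 5:
print(_accrued_value_in_league([100.0, 90.0, 89.0], False, 5.0))
# ~[2.86, 1.63, 1.50]: the clear leader collects almost a full point per
# rival, while the two close values split their pairwise point evenly.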
Example #3
def bootStrapDrivers(bootStrapData, phi=0.5):
    """
    Determines driver-nodes through the boostrap distribution
    It consists as an iterative procedure that takes the max value in the data, then
    a normal distribution is fit an tested with overlap for the other nodes in the data at :alpha: level
    Parameters
    ----------
    bootStrapData: np.array
    2d matrix consisting of size samples x nodes
    phi : float
    Overlap between distribution.
    """
    trials, nodes = bootStrapData.shape[:2]
    # create bootstrap distribution
    driver = bootStrapData.mean(0).argmax()
    # add noise to prevent p(x) = 0
    bootStrapData += np.random.rand(*bootStrapData.shape) * 1e-16
    driverDist = NormalDist().from_samples(bootStrapData[..., driver])

    drivers = {driver: (driverDist.mean, driverDist.variance)}
    # other nodes to consider
    options = np.delete(np.arange(nodes), driver)
    for node in options:
        # fit distribution
        otherDist = NormalDist().from_samples(bootStrapData[..., node])
        # compute overlap
        overlap = driverDist.overlap(otherDist)
        if overlap > phi:
            drivers[node] = (otherDist.mean, otherDist.variance)
    return drivers
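A usage sketch with synthetic bootstrap data; the node means are made up so that node 2 is the clear driver and node 0 overlaps with it:

import numpy as np
from statistics import NormalDist

rng = np.random.default_rng(0)
# 200 bootstrap samples for 4 nodes; node 2 dominates, node 0 overlaps it
data = np.stack([rng.normal(m, 0.5, 200) for m in (1.9, 0.2, 2.0, -1.0)],
                axis=1)
print(bootStrapDrivers(data, phi=0.5))  # expect keys 2 (the driver) and 0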
Example #4
def computeFeedbackPeriodic(resp_probe_side, change_info, response_key):

	left_probe_contingency = [[0, 0], [0, 0]]
	right_probe_contingency = [[0, 0], [0, 0]]

	for i in range(len(response_key)):
		if resp_probe_side[i] == -1:  # probe left
			if change_info[i] == -1 or change_info[i] == 2:  # change left or both
				if response_key[i] == 1:  # response yes, left probe hit
					left_probe_contingency[0][0] += 1
				elif response_key[i] == 0:  # response no, left probe miss
					left_probe_contingency[0][1] += 1

			if change_info[i] == 1 or change_info[i] == 0:  # change right or no change
				if response_key[i] == 1:  # response yes, left probe false alarm
					left_probe_contingency[1][0] += 1
				elif response_key[i] == 0:  # response no, left probe correct rejection
					left_probe_contingency[1][1] += 1

		elif resp_probe_side[i] == 1:  # probe right
			if change_info[i] == 1 or change_info[i] == 2:  # change right or both
				if response_key[i] == 1:  # response yes, right probe hit
					right_probe_contingency[0][0] += 1
				elif response_key[i] == 0:  # response no, right probe miss
					right_probe_contingency[0][1] += 1
			if change_info[i] == -1 or change_info[i] == 0:  # change left or no change
				if response_key[i] == 1:  # response yes, right probe false alarm
					right_probe_contingency[1][0] += 1
				elif response_key[i] == 0:  # response no, right probe correct rejection
					right_probe_contingency[1][1] += 1

	# replace empty cells so the rates below stay finite
	to_replace = 0.5
	for i in range(len(left_probe_contingency)):
		for j in range(len(left_probe_contingency[0])):
			if left_probe_contingency[i][j] == 0:
				left_probe_contingency[i][j] = to_replace
			if right_probe_contingency[i][j] == 0:
				right_probe_contingency[i][j] = to_replace

	lp_hit_rate = left_probe_contingency[0][0] / (left_probe_contingency[0][0] + left_probe_contingency[0][1])
	lp_false_alarm_rate = left_probe_contingency[1][0] / (left_probe_contingency[1][0] + left_probe_contingency[1][1])
	rp_hit_rate = right_probe_contingency[0][0] / (right_probe_contingency[0][0] + right_probe_contingency[0][1])
	rp_false_alarm_rate = right_probe_contingency[1][0] / (right_probe_contingency[1][0] + right_probe_contingency[1][1])

	a = NormalDist().inv_cdf(lp_false_alarm_rate)
	b = NormalDist().inv_cdf(lp_hit_rate)

	lp_d = b - a
	lp_c = -a
	lp_bcc = lp_c - (lp_d / 2)

	a = NormalDist().inv_cdf(rp_false_alarm_rate)
	b = NormalDist().inv_cdf(rp_hit_rate)

	rp_d = b - a
	rp_c = -a
	rp_bcc = rp_c - (rp_d / 2)

	return lp_d, lp_c, lp_bcc, rp_d, rp_c, rp_bcc
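A sanity check of the signal-detection formulas above: lp_bcc algebraically reduces to the textbook bias measure -(z(hit) + z(fa)) / 2, so symmetric performance should give a d' near 2 and a bias near 0 (the rates below are made up):

from statistics import NormalDist

z = NormalDist().inv_cdf
hit, fa = 0.84, 0.16
print(z(hit) - z(fa))         # d' ~ 1.99
print(-(z(hit) + z(fa)) / 2)  # bias-corrected criterion ~ 0.0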
Example #5
def calcV(otype, d1, d2, S, div, T, E, r):
    # Black-Scholes-Merton value with continuous dividend yield div:
    # otype 0 prices a call, otype 1 a put
    if otype == 0:
        return (S * exp(-div * T) * NormalDist().cdf(d1)) - (
            E * exp(-r * T) * NormalDist().cdf(d2))
    elif otype == 1:
        return (E * exp(-r * T) * NormalDist().cdf(-d2)) - (
            S * exp(-div * T) * NormalDist().cdf(-d1))
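calcV expects d1 and d2 to be precomputed. A minimal pricing sketch under the usual Black-Scholes definitions of d1 and d2; the contract numbers are hypothetical:

from math import exp, log, sqrt
from statistics import NormalDist

# Hypothetical contract: spot 100, strike 95, 5% rate, no dividend,
# one year to expiry, 20% volatility.
S, E, r, div, T, vol = 100.0, 95.0, 0.05, 0.0, 1.0, 0.2
d1 = (log(S / E) + (r - div + vol ** 2 / 2) * T) / (vol * sqrt(T))
d2 = d1 - vol * sqrt(T)
print(calcV(0, d1, d2, S, div, T, E, r))  # call value, about 13.35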
Example #6
	def gaussian_probability(self):
		x = self.pressure
		y = self.flowrate
		muprex = Reading.objects.all().aggregate(Avg('pressure')).get('pressure__avg')
		mufloy = Reading.objects.all().aggregate(Avg('flowrate')).get('flowrate__avg')
		stdprex = Reading.objects.all().aggregate(StdDev('pressure')).get('pressure__stddev')
		stdfloy = Reading.objects.all().aggregate(StdDev('flowrate')).get('flowrate__stddev')
		probx = NormalDist(muprex, stdprex).pdf(x)
		proby = NormalDist(mufloy, stdfloy).pdf(y)
		gauss = probx*proby
		return gauss
Example #7
	def detect_leak(self):
		a = self.pressure
		b = self.flowrate
		mupre = Reading.objects.all().aggregate(Avg('pressure')).get('pressure__avg')
		muflo = Reading.objects.all().aggregate(Avg('flowrate')).get('flowrate__avg')
		stdpre = Reading.objects.all().aggregate(StdDev('pressure')).get('pressure__stddev')
		stdflo = Reading.objects.all().aggregate(StdDev('flowrate')).get('flowrate__stddev')
		proba = NormalDist(mupre, stdpre).pdf(a)
		probb = NormalDist(muflo, stdflo).pdf(b)
		gaussnorm = proba*probb
		if gaussnorm > 0.02:
			return 'No Leakage'
		else:
			return 'Leakage'
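Examples #6 and #7 apply the same idea: model each sensor feature with an independent Gaussian fitted to historical readings, multiply the densities, and flag a reading when the product falls below a threshold. A minimal sketch without the Django ORM; only the 0.02 cut-off comes from the snippet above, the means and standard deviations are hypothetical:

from statistics import NormalDist

pressure_dist = NormalDist(mu=4.2, sigma=0.3)
flowrate_dist = NormalDist(mu=11.0, sigma=1.5)

def is_leak(pressure, flowrate, epsilon=0.02):
    # product of per-feature densities, thresholded at epsilon
    return pressure_dist.pdf(pressure) * flowrate_dist.pdf(flowrate) < epsilon

print(is_leak(4.1, 11.3))  # False: both features near their means
print(is_leak(2.5, 16.0))  # True: both features far out in the tails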
Example #8
def exercise_2():
    # Draw from a binomial dist. and compute the KLD to several normal dists.
    # -------------------------------------------------------------------------
    nr_points = 1000
    p = 0.5
    n = 100

    X = np.random.binomial(n, p, nr_points)
    plt.hist(X)
    plt.show()

    normal_distributions = [
        lambda x: NormalDist(mu=n * p, sigma=np.sqrt(n * p * (1 - p))).pdf(x),
        lambda x: NormalDist(mu=n * p - 10, sigma=np.sqrt(n * p *
                                                          (1 - p))).pdf(x),
        lambda x: NormalDist(mu=n * p, sigma=2 * np.sqrt(n * p *
                                                         (1 - p))).pdf(x),
        lambda x: NormalDist(mu=n * p + 20,
                             sigma=0.5 * np.sqrt(n * p * (1 - p))).pdf(x)
    ]

    for normal in normal_distributions:
        plt.plot(range(n), [normal(x) for x in range(n)])
    plt.show()

    print('KL-Divergence optimal:\t{}'.format(
        kld_value(n * p, np.sqrt(n * p * (1 - p)), n, p)))
    print('KL-Divergence shift:\t{}'.format(
        kld_value(n * p - 10, np.sqrt(n * p * (1 - p)), n, p)))
    print('KL-Divergence scale increase:\t{}'.format(
        kld_value(n * p, 2 * np.sqrt(n * p * (1 - p)), n, p)))
    print('KL-Divergence right shift, scale decrease:\t{}'.format(
        kld_value(n * p + 20, 0.5 * np.sqrt(n * p * (1 - p)), n, p)))

    # Identify areas with high KLD given n and p.
    # -------------------------------------------------------------------------

    num_samples = 100
    xx, yy = np.mgrid[0.01:0.99:.01, 10:500:5]
    grid = np.c_[xx.ravel(), yy.ravel()]
    contour = [
        kld_value_approx(n=grid[i, 1], p=grid[i, 0], num_samples=num_samples)
        for i in tqdm(range(len(grid)), leave=False)
    ]
    contour = np.asarray(contour).reshape(xx.shape)
    plt.contourf(xx, yy, contour)
    plt.show()
Example #9
    def __init__(self, arms=10, reward_mean=0, reward_std=1, sigma=1):
        self.arms = arms
        self.reward_mean = reward_mean
        self.reward_std = reward_std
        self.sigma = sigma

        self.actual_reward_values = NormalDist(mu=reward_mean,
                                               sigma=reward_std).samples(arms)
        self.reward_distributions = \
            [NormalDist(mu=actual_reward_value, sigma=sigma) for actual_reward_value in self.actual_reward_values]

        self.action_space = [x for x in range(self.arms)]
        self.optimal_action = max(
            zip(self.actual_reward_values,
                range(len(self.actual_reward_values))))[1]
Example #10
def oddsNum(dc, dice=3, die=6):
    """return odds of succeeding a dice check

    Args:
        dc (int): Dice check to pass/fail
        dice (int, optional): Number of dice. Defaults to 3.
        die (int, optional): Number of sides per die. Defaults to 6.

    Returns:
        str: Percent chance of success
    """
    # calculate mean, standard deviation, and then odds

    # calculate mean. dice*die is max, dice is min, dice*(die+1) is max+min
    mean = (dice * (die + 1)) / 2

    # calculate standard deviation via discrete uniform variance formula:
    # (n^2 - 1) / 12
    dievariance = (die ** 2 - 1) / 12
    dicevariance = dievariance * dice
    stdev = dicevariance ** 0.5

    # calculate odds
    odds = NormalDist(mu=mean, sigma=stdev).cdf(dc)
    percentSuccess = 100 - int(round(odds, 2) * 100)

    return f"{percentSuccess}%"
Example #11
def logLik_from_mahalanobis(stim, mu_x, cov, k=None):
    """calculate the log likelihood of the current image given the presumed
    'system state' mu_x and covariance matrix cov based on the mahalanobis
    distance between image feature vector and vector representing the system
    state
    """
    if k is None:
        k = 0

    stim = np.array(stim)
    mu_x = np.array(mu_x)

    if mu_x.shape == (1, ) or mu_x.shape == ():  # if 1D
        if cov > 0:
            # cov is a variance in the 1-D case, so sigma is its square root
            z = NormalDist(mu=mu_x, sigma=np.sqrt(cov)).pdf(stim)
        else:
            z = 0

        if z == 0:
            log_p = np.log(1e-10)
        else:
            log_p = np.log(z)
    else:
        try:
            inv_cov = np.linalg.inv(cov)
        except np.linalg.LinAlgError as err:
            if 'Singular matrix' in str(err):
                inv_cov = np.linalg.pinv(cov)
            else:
                raise
        s_minus_mu = stim - mu_x
        log_p = k - np.dot(np.dot(s_minus_mu.T, inv_cov), s_minus_mu) / 2

    return log_p
Example #12
def _norminv_function():
    try:
        from statistics import NormalDist
        return NormalDist(mu=0, sigma=1.0).inv_cdf
    except ImportError:
        from scipy.stats import norm
        return norm.ppf
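Whichever backend is available, the returned callable behaves as a standard-normal quantile function:

norminv = _norminv_function()
print(norminv(0.975))  # ~1.96, the familiar two-sided 95% critical value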
Example #13
 def _gset_tail_settings(self):
     """Compute settings relevant to tail selection
     """
     tails_to_anal = []
     if self.anal_right:
         tails_to_anal.append(Tail.right)
     if self.anal_left:
         tails_to_anal.append(Tail.left)
     self.tails_to_anal = tuple(tails_to_anal)
     if self.tails_to_anal:
         self.analyze_tails = True
         nd = NormalDist()
         self.alpha_qntl = nd.inv_cdf(1 - len(self.tails_to_anal) / 2 *
                                      self.alpha_signif)
     else:
         self.analyze_tails = False
         self._enforce_null_tail_analysis_opts()
Example #14
def sax_transform(paa, alphabet_size):
    """ Generate character regions using inverse cumulative density function then return string representation """

    regions = [
        NormalDist().inv_cdf((i * 1) / alphabet_size)
        for i in range(1, alphabet_size)
    ]
    return paa_to_string(paa, regions, get_alphabet(alphabet_size))
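The regions are just equal-probability quantiles of the standard normal, so the SAX breakpoints can be inspected directly (paa_to_string and get_alphabet are helpers not shown above):

from statistics import NormalDist

# Breakpoints for a 4-letter alphabet: three cuts that split the standard
# normal into four equal-probability regions.
print([round(NormalDist().inv_cdf(i / 4), 4) for i in range(1, 4)])
# [-0.6745, 0.0, 0.6745]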
Example #15
def get_control(x_):
    seed = x_[0]["close"]
    generated = [{"datetime": r["datetime"], "close": r["close"]} for r in x_]
    try:
        (mean_, stdev_) = dist(x_, 0, len(x_) - 1)
    except (StatisticsError, ValueError, ZeroDivisionError):
        mean_, stdev_ = (0, 1)

    dist_ = NormalDist(mean_, stdev_)
    samples = dist_.samples(len(x_))

    for i in range(1, len(samples)):
        seed *= round(exp(samples[i]), 2)
        generated[i]["close"] = seed

    control = rsi(generated)

    return (control, generated)
Example #16
def one_one(low, high, n, p, f, it, fit):

    dec = round(mt.pow(1.5, -0.25), 15)  # step-size decrease factor (1/5 success rule)
    np.set_printoptions(precision=p)

    #initialization
    values = np.random.uniform(low, high, n)
    sigma = 0.2
    print("Init population")
    print(values, sigma, '\n')

    #first aptitude
    ap = f(values)
    print("Aptitude of the population")
    print(values, sigma, ap, '\n')

    ii = 1
    it += 1

    while ii < it:

        print("*****Iteration ", ii, "*****")
        print(values, sigma, ap)
        al_g = np.zeros((n, ))
        N = NormalDist(0, sigma)
        for i in range(0, n):
            al = rdm.uniform(0, 1)
            al_g[i] = N.inv_cdf(al)
            print_al(i + 1, al, al_g[i])

        new_values = np.copy(values)
        for i in range(0, n):
            new_values[i] += al_g[i]

        if f(new_values) <= f(values):
            values = np.copy(new_values)
            sigma *= 1.5
        else:
            sigma *= dec
        sigma = round(sigma, 30)

        print(values, sigma, f(values), '\n')
        ii += 1
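A usage sketch that minimises a simple sphere function with the (1+1) strategy above. The snippet calls a print_al helper it never defines, so the sketch stubs a hypothetical version; the objective and bounds are made up, and `fit` is accepted but unused by one_one:

import math as mt
import random as rdm
import numpy as np
from statistics import NormalDist

def print_al(i, al, al_g):
    # stub for the helper the snippet calls but does not define
    print("  gene", i, ": u =", round(al, 3), "-> step =", round(al_g, 3))

# Minimise the sphere function for 10 iterations with 3 genes.
one_one(low=-5, high=5, n=3, p=4, f=lambda v: float(np.sum(v ** 2)),
        it=10, fit=None)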
Example #17
def kld_value_approx(n: int, p: float, num_samples: int) -> float:
    # PMF Functions are not vectorized --> expect high runtime
    x = np.random.binomial(n, p, num_samples)
    log_binom = np.log([binom.pmf(x[i], n, p) for i in range(len(x))])
    log_normal = np.log([
        NormalDist(mu=n * p, sigma=np.sqrt(n * p * (1 - p))).pdf(x[i])
        for i in range(len(x))
    ])
    mean = np.mean(log_binom - log_normal)
    return mean if mean > 0 else 0
Example #18
    def logNorm(self, step):
        # keep a sliding window of at most maxNumb recent values
        if len(self.data) >= self.maxNumb:
            self.data = self.data[:self.maxNumb - 1]
        self.data.insert(0, float(step))

        # z-score of the newest value against the window distribution
        dist = NormalDist.from_samples(self.data)
        return (step - dist.mean) / dist.stdev
Example #19
def backward_compat_normcdf_function():
    try:
        from statistics import NormalDist
        return NormalDist(mu=0, sigma=1.0).cdf
    except ImportError:
        try:
            from scipy.stats import norm
            return norm.cdf
        except ImportError:
            raise Exception(
                'You need to install scipy or a version of Python with statistics.NormalDist'
            )
Example #20
def percent_overlap(mean1=None, sdev1=None, mean2=None, sdev2=None,
                    mean3=None, sdev3=None):
    """
    A function to estimate the percentage of overlap between multiple
    normally distributed data sets.
    Parameters:
        mean1 (float, required): The mean of the first data set.
        mean2 (float, required): The mean of the second data set.
        mean3 (float, required): The mean of the third data set.
        sdev1 (float, required): The standard deviation of the first
            data set.
        sdev2 (float, required): The standard deviation of the second
            data set.
        sdev3 (float, required): The standard deviation of the third
            data set.
    Returns:
        str: Message with the overlap of the 1st data set with itself
            (always 100%, the baseline).
        str: Message with the percentage overlap between the 1st and
            2nd data sets.
        str: Message with the percentage overlap between the 1st and
            3rd data sets.
    """
    overlap_11_perc = ('The likelihood of a wild genotype null effect is '
                       '{0:1.2%}'.format(NormalDist(mu=mean1, sigma=sdev1)
                                         .overlap(NormalDist(mu=mean1, sigma=sdev1))))
    overlap_12_perc = ('The likelihood of a single SNP genotype null effect '
                       'is {0:1.2%}'.format(NormalDist(mu=mean1, sigma=sdev1)
                                            .overlap(NormalDist(mu=mean2, sigma=sdev2))))
    overlap_13_perc = ('The likelihood of a double SNP genotype null effect '
                       'is {0:1.2%}'.format(NormalDist(mu=mean1, sigma=sdev1)
                                            .overlap(NormalDist(mu=mean3, sigma=sdev3))))
    return overlap_11_perc, overlap_12_perc, overlap_13_perc
Example #21
def calc_overlap(mu1, sigma1, mu2, sigma2, unique, ratio=1):
    """
    Calculate the overlap between two normal distributions, defined by the given statistics.

    :param mu1: mean of distribution 1
    :param sigma1: std dev of distribution 1
    :param mu2: mean of distribution 2
    :param sigma2: std dev of distribution 2
    :param unique: boolean, if bin represents a non-overlapping, unique segment
    :param ratio: ratio between first and second distribution
    :return: overlap between two distributions as value -> [0, 1]
    """
    # check if bin is non-overlapping
    if unique:
        return 0

    mu1 = (mu1 - 1) * ratio + 1
    sigma1 *= ratio

    # check if distributions are equal
    if mu1 == mu2 and sigma1 == sigma2:
        return 1

    # run builtin method in statistics.NormalDist
    if 'statistics' in sys.modules:
        return NormalDist(mu1, sigma1).overlap(NormalDist(mu2, sigma2))

    # calculate intersection(s) of the two distributions
    x_intersect = calc_pdf_intersect(mu1, sigma1, mu2, sigma2)

    if len(x_intersect) == 1:  # sigma1 == sigma2
        area = NormalDist(mu1, sigma1).cdf(x_intersect[0])
        if area < 0.5:
            return area * 2  # doubled -> pdf1_area = pdf2_area when sigma1 = sigma2
        else:  # take other side of cdf
            return (1 - area) * 2

    # calculate overlap cdf
    mid_section1 = NormalDist(mu1, sigma1).cdf(max(x_intersect)) - \
                   NormalDist(mu1, sigma1).cdf(min(x_intersect))
    mid_section2 = NormalDist(mu2, sigma2).cdf(max(x_intersect)) - \
                   NormalDist(mu2, sigma2).cdf(min(x_intersect))

    # compute sum of overlap sections based on which middle section is larger
    if mid_section1 < mid_section2:
        sum_overlap = 1 + mid_section1 - mid_section2
    else:
        sum_overlap = 1 + mid_section2 - mid_section1

    return sum_overlap
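A quick check of the helper against the standard-library result; two unit-variance distributions one standard deviation apart overlap by about 62%:

from statistics import NormalDist

print(calc_overlap(0.0, 1.0, 1.0, 1.0, unique=False))  # ~0.6171, same as
# NormalDist(0, 1).overlap(NormalDist(1, 1))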
Example #22
def normal_distribution_flower_spawning_strategy(play_area, flower_num):
    """
    Returns a normal-distribution generated list of flowers
    :param play_area:
    :return: list of spawned flowers
    """
    normal_distribution = NormalDist(0.5, 0.15)
    flower_database = {}

    dist_rolls = normal_distribution.samples(flower_num)

    for i in range(flower_num):
        location = get_location_in_circle(dist_rolls[i], play_area)
        new_f = Flower(location)
        flower_database[location] = new_f

    clean_up_table = {}

    for f in flower_database.values():
        f_loc = f.rect.left, f.rect.top
        if f_loc not in clean_up_table:
            clean_up_table[f_loc] = f

    return clean_up_table
Example #23
def _normal_quantile_func(q):
    """
    Compute the quantile function of the standard normal distribution.

    This wrapper exists because we are dropping scipy as a mandatory dependency,
    and statistics.NormalDist was only added to the standard library in 3.8.

    """
    try:
        from statistics import NormalDist
        qf = np.vectorize(NormalDist().inv_cdf)
    except ImportError:
        try:
            from scipy.stats import norm
            qf = norm.ppf
        except ImportError:
            msg = (
                "Standard normal quantile functions require either Python>=3.8 or scipy"
            )
            raise RuntimeError(msg)
    return qf(q)
Example #24
def ztable_reverse(percent: float) -> float:
    return NormalDist().inv_cdf(percent)
Example #25
def NormCdf(x):
    return NormalDist().cdf(x)
Example #26
                                     x, y, theta):
                pix_color_inside.append(img.getpixel((x, y)))
    ellipse = {
        'center_x': center_x,
        'center_y': center_y,
        'theta': theta,
        'colors': pix_color_inside,
        'radius_x': MEAN_ELLIPSE_PARAMS['radius_x'],
        'radius_y': MEAN_ELLIPSE_PARAMS['radius_y'],
        'label': 1
    }
    return ellipse


BLUE_CELLS_COLOR_DIST = {
    'blue': NormalDist(mu=162.8042662974839, sigma=14.133422501229884),
    'red': NormalDist(mu=147.89261081749171, sigma=29.41359934013295),
    'green': NormalDist(mu=133.23393004166803, sigma=30.903213662200027),
}
BROWN_CELLS_COLOR_DIST = {
    'blue': NormalDist(mu=88.10424552490524, sigma=24.622359440726957),
    'red': NormalDist(mu=157.64154611480924, sigma=25.734934966067815),
    'green': NormalDist(mu=117.84177901266865, sigma=29.850845608312493),
}
b1_min, b1_max, r1_min, r1_max, g1_min, g1_max = [84, 211, 40, 242, 33, 234]


def check_ellipse_colors(ellipse, color):
    r, g, b = zip(*ellipse['colors'])
    mean_r = mean(r)
    mean_g = mean(g)
Example #27
 def z(p):
     return -NormalDist().inv_cdf(p)
Example #28
def summarize(results):
    #print(dumps(results, indent = 2))
    precision = 3
    std = NormalDist()
    alpha = 0.01
    bad_records = 0
    summary = {"mae": [], "mfe": [], "pnl": [], "duration": []}

    operations = ["mae", "mfe", "pnl"]

    for record in results:
        fns = get_summary_ops(record["type"])

        for i in range(len(operations)):
            try:
                summary[operations[i]].append(fns[i](record))
            except (ZeroDivisionError, KeyError):
                bad_records += 1
                continue

            summary["duration"].append(record["end_index"] -
                                       record["start_index"])

    #pyplot.hist(summary["mae"], bins = 100, range = (-10, 10))
    #pyplot.show()

    summary["mae"].sort()
    summary["mfe"].sort()
    summary["pnl"].sort()
    summary["duration"].sort()

    summary["mae"] = trim_outliers(summary["mae"])
    summary["mfe"] = trim_outliers(summary["mfe"])
    summary["pnl"] = trim_outliers(summary["pnl"])
    summary["duration"] = trim_outliers(summary["duration"])

    res = {
        "test": {
            "group": results[0]["group"],
            "type": results[0]["type"],
            "enter": results[0]["enter"],
            "exit": results[0]["exit"],
            "samples": len(results)
        },
        "mae": {
            "mean": round(mean(summary["mae"]), precision),
            "stdev": round(stdev(summary["mae"]), precision)
        },
        "mfe": {
            "mean": round(mean(summary["mfe"]), precision),
            "stdev": round(stdev(summary["mfe"]), precision)
        },
        "mfe/mae": {
            "mean":
            round(mean(summary["mfe"]) / mean(summary["mae"]), precision)
        },
        "pnl": {
            "mean": round(mean(summary["pnl"]), precision),
            "stdev": round(stdev(summary["pnl"]), precision)
        },
        "duration": {
            "mean": round(mean(summary["duration"]), precision),
            "stdev": round(stdev(summary["duration"]), precision)
        },
        "excluded_records": bad_records
    }

    for statistic in ["mae", "mfe", "pnl"]:
        # two-sided normal critical value for the (1 - alpha) confidence level
        E = std.inv_cdf(1 - alpha / 2) * res[statistic]["stdev"] / sqrt(
            res["test"]["samples"])
        mu = res[statistic]["mean"]
        res[statistic]["interval"] = {
            "lower": round(mu - E, 2 * precision),
            "upper": round(mu + E, 2 * precision),
            "alpha": 1 - alpha
        }

    return res
Example #29
def confidence_interval(data, confidence=0.95):
    dist = NormalDist.from_samples(data)
    z = NormalDist().inv_cdf((1 + confidence) / 2.)
    h = dist.stdev * z / ((len(data) - 1)**.5)
    return dist.mean, round((2 * h) / dist.mean, 4)
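A usage sketch on synthetic data: for many draws from N(50, 5) the sample mean should sit near 50 and the relative 95% interval width should be small:

import random
from statistics import NormalDist

random.seed(1)
data = [random.gauss(50, 5) for _ in range(10_000)]
print(confidence_interval(data))  # ~(50.0, 0.004): mean and relative width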
Example #30
        'theta': theta,
        'colors': None,
        'radius_x': radius_x,
        'radius_y': radius_y,
        'label': 1
    }
    for x in x_range:
        for y in y_range:
            if check_ellipse_rotated(ellipse, x, y):
                pix_color_inside.append(img.getpixel((x, y)))
    ellipse['colors'] = pix_color_inside
    return ellipse


BLUE_CELLS_COLOR_DIST = {
    'hue': NormalDist(mu=184.98805646036917, sigma=13.690580569546032),
    'sat': NormalDist(mu=59.25407166123779, sigma=19.295759302753112),
    'vol': NormalDist(mu=163.071661237785, sigma=11.81532113975374)
}
BROWN_CELLS_COLOR_DIST_LEFT = {
    'hue': NormalDist(mu=18.25, sigma=8.4),
    'sat': NormalDist(mu=127.03, sigma=37.06),
    'vol': NormalDist(mu=162.7, sigma=30.06)
}
BROWN_CELLS_COLOR_DIST_RIGHT = {
    'hue': NormalDist(mu=238.5, sigma=14.5),
    'sat': NormalDist(mu=103.5, sigma=27.2),
    'vol': NormalDist(mu=108.6, sigma=26.2)
}
hue_limit_brown_l = BROWN_CELLS_COLOR_DIST_LEFT['hue'].pdf(
    BROWN_CELLS_COLOR_DIST_LEFT['hue']._mu -