Example No. 1
 def uniform_type_1(a, b, x1, x2):
     γ = b
     f = 1 / (γ - a)
     mean = uniform.mean(a, b - a)
     var = uniform.var(a, b - a)
     p = uniform.cdf(x2, a, b - a) - uniform.cdf(x1, a, b - a)
     return γ, mean, var, p, f, a, b
Example No. 2
    def test_ns_with_invcdf(self):
        """
        Check that results are consistent with uniform sampling
        using inverse transform sampling.
        """
        ix = (-2, 2)
        iy = (1e-30, 2)
        xx = np.linspace(ix[0], ix[1], 1000)
        xy = np.linspace(iy[0], iy[1], 2000)

        cdfx = uniform.cdf(xx, ix[0], ix[1] - ix[0])
        cdfy = uniform.cdf(xy, iy[0], iy[1] - iy[0])
        icdfx = InvCDF('x', xx, cdfx)
        icdfy = InvCDF('y', xy, cdfy)
        ns = NestedSampling(seed=42)
        lh = partial(lighthouse, data=self.D)
        rs = ns.explore(vars=[icdfx, icdfy], initial_samples=100,
                        maximum_steps=1000,
                        likelihood=lh, tolZ=1.e-4, tolH=1e30)
        ep = rs.getexpt()
        ev = rs.getZ()
        h = rs.getH()
        var = rs.getvar()
        m = rs.getmax()
        self.assertAlmostEqual(ep[0], 1.233940, 6)
        self.assertAlmostEqual(ep[1], 0.981719, 6)
        self.assertAlmostEqual(np.sqrt(var[0]), 0.169695, 6)
        self.assertAlmostEqual(np.sqrt(var[1]), 0.183914, 6)
        self.assertAlmostEqual(m[0], 1.247247, 6)
        self.assertAlmostEqual(m[1], 0.908454, 6)
        self.assertAlmostEqual(m[2], -156.4192, 4)
        self.assertAlmostEqual(ev[0], -160.1016, 4)
        self.assertAlmostEqual(ev[1], 0.161775, 6)
        self.assertAlmostEqual(h, 2.617102, 6)
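The InvCDF and NestedSampling classes above are project-specific, but the test's premise, that sampling through a tabulated inverse CDF reproduces uniform sampling, can be sketched with NumPy alone. A minimal sketch under that assumption (the grid and names are illustrative, not from the original project):

import numpy as np
from scipy.stats import uniform

a, b = -2.0, 2.0
xx = np.linspace(a, b, 1000)
cdfx = uniform.cdf(xx, a, b - a)      # tabulated CDF on the grid

u = np.random.default_rng(42).random(10_000)
samples = np.interp(u, cdfx, xx)      # invert the CDF by interpolation

# For U(a, b) the exact inverse CDF is a + (b - a) * u, so the two agree:
assert np.allclose(samples, a + (b - a) * u)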
Example No. 3
 def uniform_type_2(a, b, x1, x2):
     γ = (a * b - 1) / a
     f = a
     a = γ
     mean = uniform.mean(a, b - a)
     var = uniform.var(a, b - a)
     p = uniform.cdf(x2, a, b - a) - uniform.cdf(x1, a, b - a)
     return γ, mean, var, p, f, a, b
Example No. 4
 def uniform_type_4(a, b, x1, x2):
     γ = 1 / a
     f = a
     a = (a * b - 1) / (2 * a)
     b = a + γ
     mean = uniform.mean(a, b - a)
     var = uniform.var(a, b - a)
     p = uniform.cdf(x2, a, b - a) - uniform.cdf(x1, a, b - a)
     return γ, mean, var, p, f, a, b
Example No. 5
    def cdf(self, x: Tuple[float]):
        """Find the CDF for a certain x value.

        Args:
            x (Tuple[float]): The (x, y) pair for which the CDF is needed.

        Returns:
            float: The CDF value at point x.
        """
        return uniform.cdf(x[0], loc=self.x_lower_bound, scale=self.x_upper_bound - self.x_lower_bound) \
               * uniform.cdf(x[1], loc=self.y_lower_bound, scale=self.y_upper_bound - self.y_lower_bound)
Example No. 6
    def compute_cdf(self, x_grid):
        """
        Returns numpy array of uniform CDF values at the points contained
        in x_grid.
        """

        return scipy_uniform.cdf(x_grid, self._minimum_value, self._range_size)
Example No. 7
def Uniformd():
    import numpy as np
    from matplotlib import pyplot as plt
    from scipy.stats import uniform 
    print("The amount of time, in minutes, that a person must wait for a bus is uniformly distributed between zero and 15 minutes, inclusive.")
    print("What is the probability that a person waits fewer than 12.5 minutes?")
    print("On the average, how long must a person wait? Find the mean, μ, and the standard deviation, σ. also plot a graph")
    def uniform1(x, a, b):
        # density over x, plus the distribution's mean and standard deviation
        y = [1 / (b - a) if a <= val <= b else 0 for val in x]
        return x, y, (a + b) / 2, (b - a) / np.sqrt(12)

    x = np.arange(-10, 100) # define range of x
    for ls in [(0, 15)]:
        a, b = ls[0], ls[1]
        x, y, u, s = uniform1(x, a, b)
        plt.plot(x, y, label=r'$\mu=%.2f,\ \sigma=%.2f$' % (u, s))
    
    plt.legend()
    plt.show()
    # P(X < 12.5) = F(12.5) - F(0) = 12.5/15
    res = uniform.cdf(12.5, loc=a, scale=b - a) - uniform.cdf(0, loc=a, scale=b - a)
    print(res)
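For reference, the questions printed by Uniformd can be answered directly from scipy.stats.uniform; a quick check of the expected values:

from scipy.stats import uniform

a, b = 0, 15
print(uniform.cdf(12.5, loc=a, scale=b - a))  # P(X < 12.5) = 12.5/15 ≈ 0.8333
print(uniform.mean(loc=a, scale=b - a))       # μ = (a + b)/2 = 7.5
print(uniform.std(loc=a, scale=b - a))        # σ = (b - a)/√12 ≈ 4.3301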
Example No. 8
def punif(q, minimum=0,maximum=1):
    """
    Calculates the cumulative of the uniform distribution
    """
    from scipy.stats import uniform
    result=uniform.cdf(x=q,loc=minimum,scale=maximum-minimum)
    return result
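punif mirrors R's function of the same name. A short usage check (the values follow from F(q) = (q - minimum) / (maximum - minimum)):

print(punif(0.5))          # 0.5 on the default U(0, 1)
print(punif(12.5, 0, 15))  # ≈ 0.8333 on U(0, 15)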
Example No. 9
def main (M):
    k_array=np.array([0.5,1.5])
    species=np.linspace(0,M-1, M)
    for i in range (np.size(k_array)):
        k=k_array[i]
        m=math.ceil((M-1)/(3-k)) # maximum number of generous species
        print('maximum number of generous species is %d' % m)
        prob_f1 = np.zeros([m + 1])  # p(f1)
        prob_f1[0] = 1  # initial condition
        q_fav = np.zeros([M])  # favorability, i.e. probability to be selfish
        q_cond = np.zeros([m + 1])  # conditional probability to be selfish given f1
        for j in range(M-2):
            for f1 in range(m+1):
                #calculate conditional probability to be selfish
                Tf=((M-1)/(3-k)-f1)/(M-j-1)
                sd=(1/(12*(M-j-1)))**0.5 
                q_cond[f1]=1-norm.cdf(Tf,0.5,sd)# conditional prob to be selfish
                q_fav[j]+=prob_f1[f1]*q_cond[f1]
            prob_new = np.zeros([m + 1])
            for f1 in range(m + 1):
                # calculate p(f1) for the next species
                if f1 == 0:
                    prob_new[f1] = prob_f1[f1] * q_cond[f1]
                else:
                    prob_new[f1] = prob_f1[f1 - 1] * (1 - q_cond[f1 - 1]) + prob_f1[f1] * q_cond[f1]
            prob_f1 = prob_new  # update
            
        #second slowest (M-1)species
        for f1 in range(m+1):
            Tf=((M-1)/(3-k)-f1)  
            q_cond[f1]=1-uniform.cdf(Tf,0,1)                
            q_fav[M-2]+=prob_f1[f1]*q_cond[f1]
            #print(q_fav[M-2])
        prob_new = np.zeros([m + 1])
        for f1 in range(m + 1):
            # calculate p(f1) for the next species
            if f1 == 0:
                prob_new[f1] = prob_f1[f1] * q_cond[f1]
            else:
                prob_new[f1] = prob_f1[f1 - 1] * (1 - q_cond[f1 - 1]) + prob_f1[f1] * q_cond[f1]
        prob_f1 = prob_new  # update
        #slowest M species
        q_fav[M-1]=prob_f1[m]
        # plot the results
        #print(q_fav)
        if i==0:
            plt.plot(species, q_fav[::-1], color="c",marker="o",markersize=10)
        else:
            plt.plot(species, q_fav[::-1], color="b",marker="D",markersize=10)
    plt.xticks([0,M-1],["slowest","fastest"],fontsize=12)
    plt.xlabel("evolutionary rate",fontsize=12)
    plt.ylim(0.0,1)
    plt.ylabel("favorablity",fontsize=12)
    fname=str('favorability_M%d'%(M))
    plt.savefig(fname+'.pdf')
    plt.show()
         
            
            
        
Example No. 10
    def compute_CDF(self, x_grid):
        '''
        Returns numpy array of uniform CDF values at the points contained
        in x_grid
        '''

        return scipyuniform.cdf(x_grid, self._minimum_value, self._range_size)
Example No. 11
def first(data, a, b):
    N = len(data)
    fig = pyplot.figure()
    ax = fig.gca()
    ax.set_yticks(arange(0, 1.1, 0.1))
    pyplot.grid(True)
    x = list([i for i in arange(a, b, 0.01)])
    dn = -100
    prev_dn = -100
    for i in range(len(x) - 1):
        y = get_emp_func(data, x[i])
        pyplot.plot([x[i], x[i + 1]], [y, y], color='blue', linewidth=0.5)
    for i in range(len(x) - 1):
        y = (x[i] - a) / (b - a)
        pyplot.plot([x[i], x[i + 1]], [y, y], color='red', linewidth=0.5)
    pyplot.show()

    ans = [0 for i in range(9)]
    ans[0], ans[1], ans[2] = a, b, N
    for i in range(N):
        # y = get_emp_func(data, data[i]) + 1 / N
        # y2 = get_emp_func(data, data[i])
        y = (i + 1) / N
        y2 = i / N
        Fx = ((data[i] - a) / (b - a))
        dn = max(dn, max(fabs(y - Fx), fabs(y2 - Fx)))
        if dn != prev_dn:
            ans[5] = data[i]
            ans[6] = Fx
            ans[7], ans[8] = y, y2
            prev_dn = dn
    ans[3], ans[4] = dn, dn * sqrt(N)
    print(ans)
    print(kstest(data, lambda param: uniform.cdf(param, loc=a, scale=b - a)))
    print()
Example No. 12
    def run():
        N = 10000  # number of experiments
        x_Values = sorted(genRandos(0, 11, N))  # generate and sort random numbers
        pdf_list = pdf(x_Values, N)  #list returned
        cdf_list = cdf(x_Values, N)

        fig, axs = plt.subplots(2)  #created 2 graphs
        fig.suptitle("Part A")
        plt.setp(axs, xticks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])  #x marks
        axs[0].plot(list(x_Values),
                    pdf_list)  #sorted values with appended pdf list
        axs[0].set_title("Probability Distribution Function")
        axs[1].plot(list(x_Values),
                    cdf_list)  #sorted values with appended cdf list
        axs[1].set_title("Cumulative Distribution Function")

        #Using stats.uniform for pdf and cdf
        x = np.linspace(0, 11, 10000)  #spacing for graph points
        fig, axs = plt.subplots(2)
        fig.suptitle("Part B")
        plt.setp(axs, xticks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
        axs[0].plot(x, uniform.pdf(x, 1, 9))  #pre built function for pdf
        axs[0].set_title("Probability Distribution Function")
        axs[1].plot(x, uniform.cdf(x, 1, 9))  #prebuilt function for cdf
        axs[1].set_title("Cumulative Distribution Function")
        plt.show()
Example No. 13
    def evaluate(self, group_name):
        fig = plt.figure()
        Utils.create_folder_if_not_exist(self.output_folder)
        save_path = os.path.join(self.output_folder, self.name)
        Utils.create_folder_if_not_exist(save_path)

        for validation_source in self.method_roc.keys():
            method_roc = self.method_roc[validation_source]
            methods = []
            for method_name, roc in method_roc.items():
                pred = roc['pred']
                pred = np.nan_to_num(pred, copy=True, nan=0.0, posinf=1.0, neginf=0.0)
                fpr, tpr, _ = roc_curve(roc['y'], pred)
                score = roc_auc_score(roc['y'], pred)
                tpr = tpr - uniform.cdf(fpr)
                plt.plot(fpr, tpr)
                plt.xlabel('False Positive Rate', fontsize=16)
                plt.ylabel('True Positive Rate', fontsize=16)
                methods.append(f"{method_name} AUC {score:.4f}")
            name = f"{group_name}_{validation_source}"
            path_file = f"{name}.png"
            path_method = os.path.join(save_path, path_file)
            plt.legend(methods)
            plt.title(f"{name}")
            plt.savefig(path_method)
            plt.clf()
            fig.clf()
        self.method_roc = {}
        self.logger.info("done")
Example No. 14
 def tirage_uniform(self, x, v_ordered, i):
     v_ordered = np.append(v_ordered, np.inf)
     if v_ordered[i] <= x <= v_ordered[i + 1]:
         return uniform.cdf(
             (x - v_ordered[i]) / (v_ordered[i + 1] - v_ordered[i]))
     else:
         return 0
Example No. 15
def get_p_in(upper_known, lower_known, attacker, full_width):
    """
    Normalizes the upper and lower bounds and computes how likely the attacker is
    to hit the goal, given the distance between the attacker and the last known
    extent of the goal.

    input:
    upper_known: np array or pandas series, holding the presented upper part of the goal (can span several trials)
    lower_known: np array or pandas series, holding the lower presented part of the goal (can span several trials)
    attacker: np array or pandas series, holding the (estimated) end point of the attacker (must have as many entries as
    the two other vectors)
    full_width: the span that is covered by the full goal

    returns:
    p_in: the probability that the attacker is inside the goal (value between 0 and 1)

    """

    # part 1: normalize the values to be aligned with the mean of the known goal, and to have positive values
    mean_goal = (upper_known + lower_known) / 2
    upper_norm = abs(upper_known - mean_goal)
    lower_norm = abs(lower_known - mean_goal)
    attacker_norm = abs(attacker - mean_goal)

    # part 2: compute the covered and the unknown size of the goal
    covered_goal = abs(upper_known - lower_known)
    free_width = full_width - covered_goal

    # part 3: compute the probability that the attacker hits inside the goal
    p_in = 1 - uniform.cdf(attacker_norm, loc=upper_norm, scale=free_width)

    return p_in
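A small numeric sanity check of get_p_in (illustrative numbers, not from the original source): with a known goal span of [-2, 2], a full width of 10, and an attacker landing at 3, the free width is 6 and the attacker sits one unit into it, so p_in should be 1 - 1/6 ≈ 0.8333.

import numpy as np

upper = np.array([2.0])
lower = np.array([-2.0])
attacker = np.array([3.0])
print(get_p_in(upper, lower, attacker, full_width=10.0))  # [0.8333...]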
Example No. 16
    def Uniform_View(self, range_v):
        """

        Parameters
        ----------
        range_v :
            

        Returns
        -------

        
        """

        NSim, _ = self.MPrior.shape
        # get NSim
        NViews = self.P_mat.shape[0]
        # get NViews

        P_ort = Utility.nullspace(self.P_mat)[1].T  # orthogonal complement of P_mat
        P_bar = r_[self.P_mat, P_ort]  # compute P_bar

        V = self.MPrior @ P_bar.T
        # transform input

        W = np.sort(V[:, :NViews], axis=0)  # for empirical copula
        Cin = np.array(V[:, :NViews].argsort(axis=0),
                       dtype=np.float64) + 1  # for empirical copula
        Grid = np.arange(NSim) + 1.0

        C = zeros(Cin.shape)
        for k in range(NViews):
            f = interpolate.interp1d(Cin[:, k], Grid)
            C[:, k] = f(Grid) / (NSim + 1)

        F = zeros((NSim, NViews))
        F_hat = zeros((NSim, NViews))
        F_tilda = zeros((NSim, NViews))
        V_tilda = zeros((NSim, NViews))

        for k in range(NViews):  # determine the posterior marginal per view
            F[:, k] = Grid.T / (NSim + 1)
            # Uniform view
            F_hat[:, k] = uniform.cdf(W[:, k], range_v[k, 0], range_v[k, 1])
            F_tilda[:, k] = (1 - self.Conf[k]) * F[:, k] + self.Conf[k] * F_hat[:, k]
            # weighted distribution
            # joint posterior
            f = interpolate.interp1d(F_tilda[:, k],
                                     W[:, k],
                                     fill_value='extrapolate')
            V_tilda[:, k] = f(C[:, k])

        V_tilda = c_[V_tilda, V[:, NViews:]]
        # joint posterior distribution
        MPost = V_tilda @ inv(P_bar.T)
        # new distribution incl. views
        return MPost
Example No. 17
 def cdf(self, x, log=False):
     """ Get the value of the cumulative distribution function for a x """
     if self.log:
         x = np.log(x)
     if log:
         return uniform.logcdf(x, self._min, self.scale)
     else:
         return uniform.cdf(x, self._min, self.scale)
Example No. 18
 def plot_cdf_test(test):
     x, y = Utils.ecdf(test)
     x = np.append(x, [1.0])
     y = np.append(y, [1.0])
     y = y - uniform.cdf(x)
     plt.plot(x, y)
     plt.xlabel('rank', fontsize=16)
     plt.ylabel('cdf(r)-r', fontsize=16)
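Utils.ecdf is project code; assuming it returns the sorted sample together with the empirical CDF heights, a minimal stand-in would be:

import numpy as np

def ecdf(values):
    # sorted sample values and their empirical CDF heights in (0, 1]
    x = np.sort(values)
    y = np.arange(1, len(x) + 1) / len(x)
    return x, y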
Example No. 19
    def _Cough_Chen(self):
        """
            Distribution fitted from Chen.

        :return:
        """

        # Small droplets: < 10 micron.
        nparticles_small = 230
        k = 3.75
        D_small = numpy.arange(0, 20, 0.1)
        Fcdf_small = gamma.cdf(D_small, k)

        Dsmall_avg = (D_small[:-1] + D_small[1:]) / 2.
        F_small = numpy.diff(Fcdf_small)

        Volume_small = (4.0 / 3.0) * numpy.pi * (
            Dsmall_avg * 1e-6)**3 * F_small * nparticles_small
        vol_small_ml = (Volume_small.sum() * m**3).asUnit(ml)

        # Medium droplets: 10 micron < x < 225 micron.
        # Up to 100 micron they evaporate in the air.
        nparticles_medium = 210
        params = dict(a=0.2, b=1, loc=53, scale=200)
        D_medium = numpy.arange(10, 100, 1)
        Fcdf_medium = beta.cdf(D_medium, **params)
        Dmedium_avg = (D_medium[:-1] + D_medium[1:]) / 2.
        F_medium = numpy.diff(Fcdf_medium)

        Volume_small = (4.0 / 3.0) * numpy.pi * (
            Dmedium_avg * 1e-6)**3 * F_medium * nparticles_medium
        vol_medium_ml = (Volume_small.sum() * m**3).asUnit(ml)

        evaporatingDropletsVolume = vol_small_ml + vol_medium_ml

        # ===== Non-evaporating droplets =====
        D_medium = numpy.arange(100, 225, 1)
        Fcdf_medium = beta.cdf(D_medium, **params)
        Dmedium_avg = (D_medium[:-1] + D_medium[1:]) / 2.
        F_medium = numpy.diff(Fcdf_medium)

        Volume_small = (4.0 / 3.0) * numpy.pi * (
            Dmedium_avg * 1e-6)**3 * F_medium * nparticles_medium
        vol_medium_ml = (Volume_small.sum() * m**3).asUnit(ml)

        nparticles_large = 20
        D_large = numpy.arange(225, 800, 1)
        Fcdf_large = uniform.cdf(D_large, loc=225, scale=800 - 225)
        Dlarge_avg = (D_large[:-1] + D_large[1:]) / 2.
        F_large = numpy.diff(Fcdf_large)

        Volume_small = (4.0 / 3.0) * numpy.pi * (
            Dlarge_avg * 1e-6)**3 * F_large * nparticles_large
        vol_large_ml = (Volume_small.sum() * m**3).asUnit(ml)

        nonEvaporatingDropletsVolume = vol_large_ml + vol_medium_ml

        return evaporatingDropletsVolume, nonEvaporatingDropletsVolume
Example No. 20
    def cdf(self, x: float):
        """Find the CDF for a certain x value.

        Args:
            x (float): The value for which the CDF is needed.

        Returns:
            float: The CDF value at point x.
        """
        return uniform.cdf(x,
                           loc=self.lower_bound,
                           scale=self.upper_bound - self.lower_bound)
Example No. 21
def uniform_distribution(select_size,
                         loc=-m.sqrt(3),
                         scale=2 * m.sqrt(3),
                         asked=rvs,
                         x=0):
    if asked == rvs:
        return uniform.rvs(size=select_size, loc=loc, scale=scale)
    elif asked == pdf:
        return uniform.pdf(x, loc=loc, scale=scale)
    elif asked == cdf:
        return uniform.cdf(x, loc=loc, scale=scale)
Example No. 22
def compute_uniform_cdf(min_val, max_val, saving_folder):

    # range of values for which the CDF is computed (the upper limit of 50 is arbitrary and should be high enough)
    x_range = np.linspace(0, 50, 1000)

    # Probability density and cumulative distribution functions
    pdf = uniform.pdf(x_range, loc=min_val, scale=(max_val - min_val))
    cdf = uniform.cdf(x_range, loc=min_val, scale=(max_val - min_val))

    # Storing cdf, pdf and x_range vectors together
    proba_mat = np.vstack([x_range, cdf, pdf]).T

    return proba_mat
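Usage sketch: each row of the returned matrix is (x, F(x), f(x)), so reading off a probability amounts to finding the nearest grid point (the argument values below are illustrative):

import numpy as np

proba_mat = compute_uniform_cdf(min_val=5.0, max_val=20.0, saving_folder=None)
x_range, cdf, pdf = proba_mat.T

idx = np.searchsorted(x_range, 12.5)  # first grid point at or above 12.5
print(cdf[idx])                       # ≈ (12.5 - 5) / 15 = 0.5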
Example No. 23
def distribution_function(x, mu, sigma, distribution):
    if distribution == Distribution.NORMAL:
        return norm.cdf(x, mu, sigma)
    elif distribution == Distribution.CAUCHY:
        return cauchy.cdf(x, mu, sigma)
    elif distribution == Distribution.LAPLACE:
        return laplace.cdf(x, mu, sigma)
    elif distribution == Distribution.POISSON:
        return poisson.cdf(x, mu, sigma)
    elif distribution == Distribution.UNIFORM:
        return uniform.cdf(x, mu, sigma)
    else:
        return None
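The Distribution enum is not shown in the snippet; a minimal reconstruction sufficient to drive the dispatch might look like this (an assumption, not the original definition):

from enum import Enum, auto

class Distribution(Enum):
    NORMAL = auto()
    CAUCHY = auto()
    LAPLACE = auto()
    POISSON = auto()
    UNIFORM = auto()

# e.g. the standard normal CDF at zero:
# distribution_function(0.0, 0, 1, Distribution.NORMAL) -> 0.5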
Example No. 24
def update_uniform_data(attrname, old, new):
    xmin, xmax = slider_uni_xminmax.value
    a, b = slider_uni_ab.value
    scale = b - a

    x = np.linspace(xmin, xmax, d_N)
    y_uni_pdf = uniform.pdf(x, loc=a, scale=scale)
    y_uni_cdf = uniform.cdf(x, loc=a, scale=scale)

    data_uniform = {
        'x': x,
        'y_pdf': y_uni_pdf,
        'y_cdf': y_uni_cdf
    }
    source_uniform.data = data_uniform
Example No. 25
    def univariate_invert_normalization(self, uni_gaussian_data, trans_params):
        """
        Inverts the marginal normalization
        See the companion, univariate_make_normal.py, for more details
        """
        if self.base == "gauss":
            uni_uniform_data = norm.cdf(uni_gaussian_data)
        elif self.base == "uniform":
            uni_uniform_data = uniform.cdf(uni_gaussian_data)
        else:
            raise ValueError(f"Unrecognized base dist.: {base}.")

        uni_data = self.univariate_invert_uniformization(
            uni_uniform_data, trans_params)
        return uni_data
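The inversion relies on the CDF/PPF round trip; a two-line check of the property this method depends on, for the Gaussian base:

import numpy as np
from scipy.stats import norm

z = np.linspace(-3, 3, 7)
u = norm.cdf(z)                     # forward: Gaussian margin -> uniform margin
assert np.allclose(norm.ppf(u), z)  # the inverse recovers the original values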
Example No. 26
 def analytical(self):
     # analytical stress/strain relationship for an infinite number of filaments,
     # computed as the actual strain multiplied by the survival probability and
     # integrated over the random slack
     y_analytical = []
     thetas = linspace(self.theta_loc, self.theta_scale, 1000)
     CDF = uniform.cdf(thetas, loc = self.theta_loc, scale = self.theta_scale)
     for eps in self.e:
          integ_term = (eps - thetas) * Heaviside(eps - thetas) * \
              (1 - weibull_min.cdf((eps - thetas), self.xi_shape, scale=self.xi_scale))
          start = time.perf_counter()
          y_analytical.append(trapz(integ_term, CDF))
          print(time.perf_counter() - start)
     self.peaks[2] = max(y_analytical)
     
     return self.e, array(y_analytical)*self.E
Example No. 27
def simulate_MDP():

    policy = pickle.load(open("./Policies 097 002 001 mean/33.0-17.0-infinite.p", "rb"))
    # policy = np.zeros((len(pi.policy), 3))
    # policy[np.arange(len(pi.policy)), np.array(pi.policy)] = 1
    # policy = pi.policy
    policy_unravelled = np.array(policy).reshape((x_max, y_max, z_max))

    uniform_cdf = [uniform.cdf(x, gateway_distance_min_q, max(gateway_distance_max_q - gateway_distance_min_q, 0.001))
                   for x in range(z_max)]
    harvesting_rates = getEnergyHarvestingRates()

    # 8333.3333
    battery = 0
    distance = 0
    packet = 0

    tot_rewards = []
    max_time = int(100 / deltaT)
    for _ in range(100):
        reward = 0

        for it in range(max_time):
            action = policy_unravelled[battery, packet, distance] # pi.policy[get_state(battery, packet, distance)]

            if np.random.random() <= uniform_cdf[distance]:
                distance = 0
            else:
                distance = (distance + 1) % z_max

            if action == 0:
                battery = int(np.clip(battery + harvesting_rates[battery], 0, energy_quantum_storable_max))
            elif action == 1:
                battery = int(np.clip(battery + harvesting_rates[battery] - ram_consumption, 0, energy_quantum_storable_max))
            else:
                battery = int(np.clip(battery + harvesting_rates[battery] - transmit_consumption[packet], 0, energy_quantum_storable_max))

            reward += (packet_length[packet] * packets_priorities[packet]) * (action == 2)

            if action != 1:
                packet = np.random.choice(3, p=gen_prob)

        tot_rewards.append(reward)

    print(np.mean(tot_rewards))
    exit(-1)
    print("ok")
Example No. 28
    def cdf(self, x):
        '''
        Evaluate the cumulative distribution function (cdf) of the uniform
        random variable at points x.

        Parameters
        ----------
        x : list or numpy.ndarray
            The points at which the cdf is to be evaluated.

        Returns
        -------
        numpy.ndarray
            The evaluations of the cdf.

        '''
        return uniform.cdf(x, self.inf, self.sup - self.inf)
Example No. 29
def uniform_stats(a=1, b=10, n=1000000):
    x = np.linspace(a-1, b+1, n)
    pdf_y = uni.pdf(x, a, b-a)
    cdf_y = uni.cdf(x, a, b-a)

    # Plot PDF and CDF horizontally
    plt.subplot(1,2,1)
    plt.title("Uniform Probability Density Function (scipy) For (" + str(a) + ", " + str(b) + ") Over n=" + str(n) + " values")
    plt.plot(x, pdf_y, 'r-')
    plt.xlabel("Interval Values")
    plt.ylabel("f(x)")

    plt.subplot(1,2,2)
    plt.title("Cumulative Distribution Function (scipy) For (" + str(a) + ", " + str(b) + ") Over n=" + str(n) + " values")
    plt.plot(x, cdf_y, 'b-')
    plt.xlabel("Interval Values")
    plt.ylabel("F(x)")
    plt.show()
Example No. 30
def compute_asst_loglik(prec_df, param_df, i, precdist):
    theta = param_df.loc[i, 'theta']
    loc = param_df.loc[i, 'loc']
    scale = param_df.loc[i, 'scale']

    if precdist == 'uniform':
        cdf_val = uniform.cdf(prec_df['midpoint'], loc, scale)
    elif precdist == 'norm':
        cdf_val = norm.cdf(prec_df['midpoint'], loc, scale)
    elif precdist == 'laplace':
        cdf_val = laplace.cdf(prec_df['midpoint'], loc, scale)
    elif precdist == 'mixnorm':
        cdf_val = 0.5 * norm.cdf(prec_df['midpoint'], loc, scale) + \
            0.5 * norm.cdf(prec_df['midpoint'], -loc, scale)
    else:
        raise ValueError('Cannot find cdf given precinct distribution, see '
                         'compute_asst_loglik().')

    return binom.logpmf(prec_df['num_votes_0'], prec_df['tot_votes'],
                        cdf_val) + np.log(theta)
Example No. 31
def PDF_CDF():
    N = 10000
    x_Values = sorted(genRandos(0, 11, N))
    pdf_list = pdf(x_Values, N)
    cdf_list = cdf(x_Values, N)

    fig, axs = plt.subplots(2)
    fig.suptitle("Part A")
    plt.setp(axs, xticks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    axs[0].plot(list(x_Values), pdf_list)  #sorted values with appended list
    axs[0].set_title("Probability Distribution Function")
    axs[1].plot(list(x_Values), cdf_list)
    axs[1].set_title("Cumulative Distribution Function")

    #Using stats.uniform for pdf and cdf
    x = np.linspace(0, 11, 10000)
    fig, axs = plt.subplots(2)
    fig.suptitle("Part B")
    plt.setp(axs, xticks=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    axs[0].plot(x, uniform.pdf(x, 1, 9))
    axs[0].set_title("Probability Distribution Function")
    axs[1].plot(x, uniform.cdf(x, 1, 9))
    axs[1].set_title("Cumulative Distribution Function")
    plt.show()
Example No. 32
def SA_SOBOL(driver):
    # Uses the Sobol' method for SA.
    # Input:
    # inpt : no. of input factors
    # N: number of Sobel samples
    #
    # Output:
    # SI[] : sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------

    methd = 'SOBOL'
    method = '7'

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    # ----------------------  Model  ---------------------------
    value = asarray(LHS.LHS(2*inpt, nSOBOL))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:,j] = norm.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
            value[:,j+inpt] = norm.ppf(uniform.cdf(value[:,j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:,j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
            value[:,j+inpt] = lognorm.ppf(uniform.cdf(value[:, j+inpt], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:,j] = beta.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
            value[:,j+inpt] = beta.ppf(uniform.cdf(value[:, j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:,j] = uniform.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
            value[:,j+inpt] = uniform.ppf(uniform.cdf(value[:,j+inpt], 0, 1), stvars[j].param[0], stvars[j].param[1])

    values = []
    XMA = value[0:nSOBOL, 0:inpt]
    XMB = value[0:nSOBOL, inpt:2 * inpt]
    YXMA = zeros((nSOBOL, otpt))
    YXMB = zeros((nSOBOL, otpt))
    if krig == 1:
        load("dmodel")
        YXMA = predictor(XMA, dmodel)
        YXMB = predictor(XMB, dmodel)
    else:
        values.extend(list(XMA))
        values.extend(list(XMB))

    YXMC = zeros((inpt, nSOBOL, otpt))
    for i in range(inpt):
        XMC = deepcopy(XMB)
        XMC[:, i] = deepcopy(XMA[:, i])
        if krig == 1:
            YXMC[i] = predictor(XMC, dmodel)
        else:
            values.extend(list(XMC))

    if krig != 1:
        out = iter(run_list(driver, values))
        for i in range(nSOBOL):
            YXMA[i] = next(out)
        for i in range(nSOBOL):
            YXMB[i] = next(out)
        for i in range(inpt):
            for j in range(nSOBOL):
                YXMC[i, j] = next(out)

    f0 = mean(YXMA,0)
    if otpt==1:
        V = cov(YXMA,None,0,1)
    else:  #multiple outputs
        V = diag(cov(YXMA,None,0,1))
    Vi = zeros((otpt, inpt))
    Vci = zeros((otpt, inpt))
    for i in range(inpt):
        for p in range(otpt):
            Vi[p,i] = 1.0/nSOBOL*sum(YXMA[:,p]*YXMC[i,:,p])-f0[p]**2
            Vci[p,i]= 1.0/nSOBOL*sum(YXMB[:,p]*YXMC[i,:,p])-f0[p]**2

    Si = zeros((otpt,inpt))
    Sti = zeros((otpt,inpt))
    for j in range(inpt):
        Si[:, j] = Vi[:, j] / V
        Sti[:, j] = 1 - Vci[:, j] / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(XMA, YXMA, otpt, nSOBOL)

# ----------------------  Analyze  ---------------------------

    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
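Note that for LHS samples already in (0, 1), uniform.cdf(u, 0, 1) is the identity, so each branch above reduces to applying the target distribution's ppf directly to the sample column. A condensed check of that identity:

import numpy as np
from scipy.stats import norm, uniform

u = np.random.default_rng(0).random(5)       # stand-in for one LHS column
assert np.allclose(uniform.cdf(u, 0, 1), u)  # identity on (0, 1)
assert np.allclose(norm.ppf(uniform.cdf(u, 0, 1), 10, 2),
                   norm.ppf(u, 10, 2))       # same transformed values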
Example No. 33
def SA_FAST(driver):
    
    # First-order indices for a given model computed with the Fourier Amplitude Sensitivity Test (FAST).
    # R. I. Cukier, C. M. Fortuin, Kurt E. Shuler, A. G. Petschek and J. H. Schaibly.
    # Study of the sensitivity of coupled reaction systems to uncertainties in rate coefficients.
    # I-III: Theory/Applications/Analysis. The Journal of Chemical Physics.
    #
    # Input:
    # inpt : no. of input factors
    #
    # Output:
    # SI[] : sensitivity indices
    # Other used variables/constants:
    # OM[] : frequencies of parameters
    # S[] : search curve
    # X[] : coordinates of sample points
    # Y[] : output of model
    # OMAX : maximum frequency
    # N : number of sample points
    # AC[],BC[]: Fourier coefficients
    # V : total variance
    # VI : partial variances

    # ----------------------  Setup  ---------------------------
    methd = 'FAST'
    method = '9'
    
    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars
    
    # ----------------------  Model  ---------------------------
    
    MI = 4  # maximum number of Fourier coefficients that may be retained in
    # calculating the partial variances without interference between the assigned frequencies
    # Frequency assignment to input factors.
    OM = SETFREQ(inpt)
    # Computation of the maximum frequency
    # OMAX and the no. of sample points N.
    OMAX = int(OM[inpt-1])
    N = 2 * MI * OMAX + 1
    # Setting the relation between the scalar variable S and the coordinates
    # {X(1),X(2),...X(inpt)} of each sample point.
    S = pi / 2.0 * (2 * arange(1,N+1) - N-1) / N
    ANGLE = matrix(OM).T * matrix(S)
    X = 0.5 + arcsin(sin(ANGLE.T)) / pi
    # Transform distributions from standard uniform to general.

    for j in range(inpt):    
        if stvars[j].dist == 'NORM':
            X[:,j] = norm.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])        
        elif stvars[j].dist == 'LNORM':        
            X[:,j] = lognorm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':        
            X[:,j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':        
            X[:,j] = uniform.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    # Do the N model evaluations.
    Y = zeros((N, otpt))        
    if krig == 1:            
        load("dmodel")            
        Y = predictor(X, dmodel)            
    else:
        values = []            
        for p in range(N):
#            print 'Running simulation on test',p+1,'of',N
#            Y[p] = run_model(driver, array(X[p])[0])
            values.append(array(X[p])[0])
        Y = run_list(driver, values)

    # Computation of Fourier coefficients.
    AC = zeros((N, otpt))  # initially zero
    BC = zeros((N, otpt))  # initially zero
    q = (N - 1) // 2  # integer midpoint index (integer division so it can be used as an index)
    for j in range(2,N+1,2):    # j is even
        AC[j-1] = 1.0 / N * matrix(Y[q] + matrix(cos(pi * j * arange(1,q+1) / N)) * matrix(Y[q + arange(1,q+1)] + Y[q - arange(1,q+1)]))
    for j in range(1,N+1,2):    # j is odd
        BC[j-1] = 1.0 / N * matrix(sin(pi * j * arange(1,q+1) / N)) * matrix(Y[q + arange(1,q+1)] - Y[q - arange(1,q+1)])

    # Computation of the general variance V in the frequency domain.
    V = 2 * (matrix(AC).T * matrix(AC) + matrix(BC).T * matrix(BC))
    # Computation of the partial variances and sensitivity indices.
    # Si=zeros(inpt,otpt);
    Si = zeros((otpt,otpt,inpt))
    for i in range(inpt):    
        Vi = zeros((otpt, otpt))    
        for j in range(1,MI+1): 
            idx = j * OM[i]-1     
            Vi = Vi + AC[idx].T * AC[idx] + BC[idx].T * BC[idx]
        Vi = 2. * Vi
        Si[:, :, i] = Vi / V

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------
    
    Sti = []# appears right after the call to this method in the original PCC_Computation.m
    
#    if plotf == 1:    
#        piecharts(inpt, otpt, Si, Sti, method, output)
    if simple == 1:
        Si_t = zeros((inpt,otpt))
        for p in range(inpt):        
            Si_t[p] = diag(Si[:, :, p])
        Si = Si_t.T

    Results = {'FirstOrderSensitivity': Si}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
Example No. 34
def UP_MCS(driver):
    # Uses the MCS method for UP

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    #*****************RANDOM DRAWS FROM INPUT DISTRIBUTIONS********************
    value = asarray(LHS.LHS(inpt, nMCS))

    for j in range(inpt):
        if stvars[j].dist == 'NORM':
            value[:,j] = norm.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
        elif stvars[j].dist == 'LNORM':
            value[:,j] = lognorm.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
        elif stvars[j].dist == 'BETA':
            value[:,j] = beta.ppf(uniform.cdf(value[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
        elif stvars[j].dist == 'UNIF':
            value[:,j] = uniform.ppf(uniform.cdf(value[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

    # ----------------------  Model  ---------------------------

    out = zeros((nMCS, otpt))
    if krig == 1:
        load("dmodel")
        out = predictor(value, dmodel)
    else:
#        for i in range(nMCS):
#            print 'Running simulation',i+1,'of',nMCS,'with inputs',value[i]
#            out[i] = run_model(driver, value[i])
        out = run_list(driver, value)

    limstate = asarray(limstate)
    limstate1 = asarray(kron(limstate[:, 0], ones(nMCS))).reshape(otpt,nMCS).transpose()
    limstate2 = asarray(kron(limstate[:, 1], ones(nMCS))).reshape(otpt,nMCS).transpose()
    B = logical_and(greater_equal(out,limstate1),less_equal(out,limstate2))
    PCC = sum(B,0) / nMCS
    B_t = B[sum(B,1) == otpt]
    if otpt > 1 and 0 not in PCC[0:otpt]:
        PCC = append(PCC,len(B_t) / nMCS)

    #Moments
    CovarianceMatrix = matrix(cov(out,None,0))#.transpose()
    Moments = {'Mean': mean(out,0), 'Variance': diag(CovarianceMatrix), 'Skewness': skew(out), 'Kurtosis': kurtosis(out,fisher=False)}

    # combine the display of the correlation matrix with setting a var that will be needed below
    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())

    # ----------------------  Analyze  ---------------------------

    if any(Moments['Variance']==0):
        print("Warning: One or more outputs do not vary over the given parameter variation.")

    C_Y = [0]*otpt
    for k in range(0,otpt):
        if Moments['Variance'][k]!=0:
            C_Y[k] = estimate_complexity.with_samples(out[:,k],nMCS)

    sigma_mat=matrix(sqrt(diag(CovarianceMatrix)))
    seterr(invalid='ignore')    #ignore problems with divide-by-zero, just give us 'nan' as usual
    CorrelationMatrix= CovarianceMatrix/multiply(sigma_mat,sigma_mat.transpose())

    Distribution = {'Complexity': C_Y}

    CorrelationMatrix=where(isnan(CorrelationMatrix), None, CorrelationMatrix)
			
    Results = {'Moments': Moments, 'CorrelationMatrix': CorrelationMatrix,
    'CovarianceMatrix': CovarianceMatrix, 'Distribution': Distribution, 'PCC': PCC}

    return Results
Example No. 35
def SA_EFAST(driver):

    #[SI,STI] = EFAST(K,WANTEDN)
    # First order and total effect indices for a given model computed with
    # Extended Fourier Amplitude Sensitivity Test (EFAST).
    # Andrea Saltelli, Stefano Tarantola and Karen Chan. 1999
    # A quantitative model-independent method for global sensitivity analysis of model output.
    # Technometrics 41:39-56
    #
    # Input:
    # inpt : no. of input factors
    # WANTEDN : wanted no. of sample points
    #
    # Output:
    # SI[] : first order sensitivity indices
    # STI[] : total effect sensitivity indices
    # Other used variables/constants:
    # OM[] : vector of inpt frequencies
    # OMI : frequency for the group of interest
    # OMCI[] : set of freq. used for the compl. group
    # X[] : parameter combination rank matrix
    # AC[],BC[]: fourier coefficients
    # FI[] : random phase shift
    # V : total output variance (for each curve)
    # VI : partial var. of par. i (for each curve)
    # VCI : part. var. of the compl. set of par...
    # AV : total variance in the time domain
    # AVI : partial variance of par. i
    # AVCI : part. var. of the compl. set of par.
    # Y[] : model output
    # N : no. of runs on each curve

    # ----------------------  Setup  ---------------------------
    methd = 'EFAST'
    method = '10'

    mu      = [inp.get_I_mu() for inp in driver.inputs]
    I_sigma = [inp.get_I_sigma() for inp in driver.inputs]
    inpt    = len(driver.inputs)
    input   = driver.inputNames
    krig    = driver.krig
    limstate= driver.limstate
    lrflag  = driver.lrflag
    n_meta  = driver.n_meta
    nEFAST  = driver.nEFAST
    nSOBOL  = driver.nSOBOL
    nMCS    = driver.nMCS
    nodes   = driver.nodes
    order   = driver.order
    otpt    = len(driver.outputNames)
    output  = driver.outputNames
    p       = driver.p
    plotf   = 0
    r       = driver.r
    simple  = driver.simple
    stvars  = driver.stvars

    # ----------------------  Model  ---------------------------

    NR = 1  # no. of search curves
    MI = 4  # maximum number of Fourier coefficients that may be retained in calculating
    # the partial variances without interference between the assigned frequencies
    # Computation of the frequency for the group of interest OMi and the no. of sample points N.
    OMi = int(floor((nEFAST / NR - 1) / (2 * MI) / inpt))
    N = 2 * MI * OMi + 1
    total_sims = N*NR*inpt
    sim = 0
    if (N * NR < 65):
        logging.error('sample size must be >= 65 per factor.')
        raise ValueError('sample size must be >= 65 per factor.')

    # Algorithm for selecting the set of frequencies. OMci(i), i=1:inpt-1, contains
    # the set of frequencies to be used by the complementary group.
    OMci = SETFREQ(N - 1, OMi / 2 / MI)
    # Loop over the inpt input factors.
    Si = zeros((otpt,otpt,inpt))
    Sti = zeros((otpt,otpt,inpt))
    for i in range(inpt):
        # Initialize AV,AVi,AVci to zero.
        AV = 0
        AVi = 0
        AVci = 0
        # Loop over the NR search curves.
        for L in range(NR):
            # Setting the vector of frequencies OM for the inpt factors.
            cj = 1
            OM = zeros(inpt)
            for j in range(inpt):
                if (j == i):
                    # For the factor of interest.
                    OM[i] = OMi
                else:
                    # For the complementary group.
                    OM[j] = OMci[cj]
                    cj = cj + 1
            # Setting the relation between the scalar variable S and the coordinates
            # {X(1),X(2),...X(inpt)} of each sample point.
            FI = zeros(inpt)
            for j in range(inpt):
                FI[j] = random.random() * 2 * pi        # random phase shift
            S_VEC = pi * (2 * arange(1,N+1) - N - 1) / N
            OM_VEC = OM[range(inpt)]
            FI_MAT = transpose(array([FI]*N))
            ANGLE = matrix(OM_VEC).T*matrix(S_VEC) + matrix(FI_MAT)
            X = 0.5 + arcsin(sin(ANGLE.T)) / pi
            # Transform distributions from standard uniform to general.

            for j in range(inpt):
                if stvars[j].dist == 'NORM':
                    X[:,j] = norm.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])
                elif stvars[j].dist == 'LNORM':
                    X[:,j] = lognorm.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[1], 0, exp(stvars[j].param[0]))
                elif stvars[j].dist == 'BETA':
                    X[:,j] = beta.ppf(uniform.cdf(X[:, j], 0, 1), stvars[j].param[0], stvars[j].param[1], stvars[j].param[2], stvars[j].param[3] - stvars[j].param[2])
                elif stvars[j].dist == 'UNIF':
                    X[:,j] = uniform.ppf(uniform.cdf(X[:,j], 0, 1), stvars[j].param[0], stvars[j].param[1])

            # Do the N model evaluations.
            Y = zeros((N, otpt))
            if krig == 1:
                load("dmodel")
                Y = predictor(X, dmodel)
            else:
                values = []
                for p in range(N):
#                    sim += 1
#                    print 'Running simulation on test',sim,'of',total_sims
#                    Y[p] = run_model(driver, array(X[p])[0])
                    values.append(array(X[p])[0])
                Y = run_list(driver, values)

            # Subtract the average value.
            Y = Y - kron(mean(Y,0), ones((N, 1)))

            # Fourier coeff. at [1:OMi/2].
            NQ = int(N / 2)-1
            N0 = NQ + 1
            COMPL = 0
            Y_VECP = Y[N0+1:] + Y[NQ::-1]
            Y_VECM = Y[N0+1:] - Y[NQ::-1]
#            AC = zeros((int(ceil(OMi / 2)), otpt))
#            BC = zeros((int(ceil(OMi / 2)), otpt))
            AC = zeros((OMi * MI, otpt))
            BC = zeros((OMi * MI, otpt))
            for j in range(int(ceil(OMi / 2))+1):
                ANGLE = (j+1) * 2 * arange(1,NQ+2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j] = (Y[N0] +matrix(C_VEC)*matrix(Y_VECP)) / N
                BC[j] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j]).T * matrix(AC[j]) + matrix(BC[j]).T * matrix(BC[j])
            # Computation of V_{(ci)}.
            Vci = 2 * COMPL
            AVci = AVci + Vci
            # Fourier coeff. at [P*OMi, for P=1:MI].
            COMPL = 0
# Do these need to be recomputed at all?
#            Y_VECP = Y[N0 + range(NQ)] + Y[N0 - range(NQ)]
#            Y_VECM = Y[N0 + range(NQ)] - Y[N0 - range(NQ)]
            for j in range(OMi, OMi * MI + 1, OMi):
                ANGLE = j * 2 * arange(1,NQ+2) * pi / N
                C_VEC = cos(ANGLE)
                S_VEC = sin(ANGLE)
                AC[j-1] = (Y[N0] + matrix(C_VEC)*matrix(Y_VECP)) / N
                BC[j-1] = matrix(S_VEC) * matrix(Y_VECM) / N
                COMPL = COMPL + matrix(AC[j-1]).T * matrix(AC[j-1]) + matrix(BC[j-1]).T * matrix(BC[j-1])
            # Computation of V_i.
            Vi = 2 * COMPL
            AVi = AVi + Vi
            # Computation of the total variance in the time domain.
            AV = AV +  matrix(Y).T * matrix(Y) / N
        # Computation of sensitivity indices.
        AV = AV / NR
        AVi = AVi / NR
        AVci = AVci / NR
        Si[:, :, i] = AVi / AV
        Sti[:, :, i] = 1 - AVci / AV

    if lrflag == 1:
        SRC, stat = SRC_regress.SRC_regress(X, Y, otpt, N)

    # ----------------------  Analyze  ---------------------------

#    if plotf == 1:
#        piecharts(inpt, otpt, Si, Sti, methd, output)
    if simple == 1:
        Si_t = zeros((inpt,otpt))
        Sti_t = zeros((inpt,otpt))
        for p in range(inpt):
            Si_t[p] = diag(Si[:, :, p])
            Sti_t[p] = diag(Sti[:, :, p])
        Si = Si_t.T
        Sti = Sti_t.T
    Results = {'FirstOrderSensitivity': Si, 'TotalEffectSensitivity': Sti}
    if lrflag == 1:
        Results.update({'SRC': SRC, 'R^2': stat})
    return Results
Example No. 36
 def cdf(self, dist, v):
     return uniform.cdf(v, *self._get_params(dist))