Example #1
def listener():
    global data
    global veh, fea
    data = DataCollect.DataCollect(2, 1)
    p1 = np.matrix('1.01; 0.1')
    p2 = np.matrix('2.9; 1.9')
    cov1 = np.matrix('0.5 0; 0 0.5')
    cov2 = np.matrix('0.5 0; 0 0.5')
    init_veh_distr = [
        Distribution.Distribution(p1, cov1),
        Distribution.Distribution(p2, cov2)
    ]
    p3 = np.matrix('0.5; 0.5')
    cov3 = np.matrix('5 0; 0 5')
    init_feat_distr = [Distribution.Distribution(p3, cov3)]

    data.init(2, 1, init_veh_distr, init_feat_distr)
    var_pos1 = 200
    var_pos2 = 200
    var_init = [[var_pos1, var_pos1], [var_pos2, var_pos2]]
    veh = algorithm.init_veh(2, 1, init_veh_distr, var_init)
    fea = algorithm.init_feat(1, 2, init_feat_distr)
    #print "Feature in listner" + str(fea)
    rospy.init_node('listener', anonymous=True)
    # rospy.Subscriber("position2", Pos, callback, queue_size=100)
    # spin() simply keeps python from exiting until this node is stopped
    #print "inside listener"
    pos = message_filters.Subscriber('position', Pos)
    pos2 = message_filters.Subscriber('position2', Pos)
    ts = message_filters.ApproximateTimeSynchronizer([pos, pos2], 2, 1)
    ts.registerCallback(callback)
    rospy.spin()
Example #2
 def __init__(self, id, init_pos_distr):
     self.id = id
     self.init_pos_distr = init_pos_distr
     self.pos_belief = init_pos_distr  # initial belief of the position, represented as a pdf
     self.pos_belief_iterative = Distribution.Distribution(None, None)
     self.m_gf_covinv = None
     self.m_gf_covinvmu = None
     self.m_fg = None
     self.consensus = Distribution.Distribution(None, None)
Example #3
 def __init__(self, id, is_experienced: bool):
     """
     Chef is the person who makes ice-cream for customers
     Each chef requires info about his ID, experience level, salary based on the experience
     and the required time to process an ice-cream based on the ice-cream size
     :param id: chef id to identify a chef, automatically assigned when a chef is added
     :param is_experienced: whether the chef has any experience (True or False)
     """
     Employee.__init__(self, id, is_experienced)
     if is_experienced:
         self._salary = 17  # $17/hr
         self._prep_time = Distribution.NormalDist(60, 5, 30, 90).random()
     else:
         self._salary = 14  # $14/hr
         self._prep_time = Distribution.NormalDist(120, 5, 70, 180).random()
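The four arguments to Distribution.NormalDist read like (mean, std, lower bound, upper bound), i.e. a normal draw kept inside a plausible range; the library's actual implementation is not shown in these examples. A minimal sketch, assuming a resample-until-in-range rule (hypothetical helper, not the source API):

import random

def bounded_normal(mean, std, low, high):
    # Guess at what Distribution.NormalDist(mean, std, low, high).random() might do:
    # draw from N(mean, std) and resample until the value falls inside [low, high].
    while True:
        x = random.gauss(mean, std)
        if low <= x <= high:
            return x

# e.g. an experienced chef's prep time: roughly 60 s with std 5 s, bounded to [30, 90]
prep_time = bounded_normal(60, 5, 30, 90)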
Example #4
    def __init__(self,
                 consumer_id=0,
                 type_search='onebyone',
                 search_costs=0,
                 switch_costs=0,
                 distributions=Distribution.Uniform(0, 1),
                 valuations=np.array([])):
        """
        Assumptions:
            - The level of information can be deduced from the valuations, i.e.
              if a valuation is missing, the consumer does not know the product
            - Valuations are never equal
        
        Difficulty:
            - We know the shops she visited, but not her valuations (useful?)
            - Modeling what the consumer does not know
        """

        self.consumer_id = consumer_id
        self.type_search = type_search
        self.search_costs = search_costs
        self.switch_costs = switch_costs
        self.distributions = distributions

        self.valuations = valuations
        # Note: the computed value replaces (shadows) the bound method of the same name.
        self.maximal_valuation = self.maximal_valuation()
        self.favourite = self.get_favorite()
Example #5
def init_belief(v, gnss_meas_distr, data,
                T_s):  # called at every time step of the ICP iteration
    v.update_time_step(T_s)
    pred_msg = v.pred_msg()  #mean and covariance from the prediction
    meas_msg = gnss_meas_distr[v.id]
    data.save_pred_veh(v.id, pred_msg)

    h_gnss = np.matrix('1 0 0 0; 0 1 0 0')

    rho_gnss = meas_msg.get_mean()
    r_gnss = meas_msg.get_cov()
    x_pred = pred_msg.get_mean()
    p_pred = pred_msg.get_cov()

    k = np.dot(
        p_pred,
        np.dot(
            np.transpose(h_gnss),
            np.linalg.inv(
                r_gnss +
                np.dot(np.dot(h_gnss, p_pred), np.transpose(h_gnss)))))
    x_update = x_pred + np.dot(k, (rho_gnss - np.dot(h_gnss, x_pred)))
    p_update = p_pred - np.dot(np.dot(k, h_gnss), p_pred)

    # x_update[0] = meas_msg.get_mean()[0]
    # x_update[1] = meas_msg.get_mean()[1]
    # mean and covariance of the updated belief
    new_belief = Distribution.Distribution(x_update, p_update)
    # print 'update'
    # print new_belief.get_mean()
    # print new_belief.get_cov()
    v.update_updt_pos_belief(new_belief)
    data.save_updt_veh(v.id, v.updt_pos_belief)
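Example #5 is a standard Kalman measurement update in which h_gnss selects the two position components of the state. A minimal self-contained sketch of the same algebra with plain NumPy arrays (toy numbers, illustrative names):

import numpy as np

# Toy state [x, y, vx, vy]; the GNSS observation matrix picks out x and y.
H = np.array([[1., 0., 0., 0.],
              [0., 1., 0., 0.]])
x_pred = np.array([[1.0], [0.5], [0.1], [0.0]])   # predicted mean
P_pred = np.diag([4., 4., 2., 2.])                # predicted covariance
z = np.array([[1.2], [0.4]])                      # GNSS position measurement
R = np.diag([0.5, 0.5])                           # measurement noise covariance

# Kalman gain, then the usual mean and covariance update.
S = R + H @ P_pred @ H.T
K = P_pred @ H.T @ np.linalg.inv(S)
x_upd = x_pred + K @ (z - H @ x_pred)
P_upd = P_pred - K @ H @ P_pred
print(x_upd.ravel(), np.diag(P_upd))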
Example #6
    def feature(self, n, f, i):

        mu = self.meas_fea[f][n][:, 0]
        cov = self.var_fea[f][n] + self.var_feature_sensor
        out = Distribution.Distribution(mu, cov)
        #print "mean Feature" + str(mu)
        return out

    def vehicle(self, n, i):

        mu = self.meas_veh[n][:, 0].reshape(2, 1)
        cov = self.var_veh[n] + self.var_uwb_sensor
        out = Distribution.Distribution(mu, cov)
        #print "mean Vehicle" + str(mu)
        return out
Example #8
def update_veh_belief(veh, data):
    for v in veh:
        temp_covinv = None
        temp_covinvmu = None

        for f_id in v.visible_feat:
            if temp_covinv is None:
                temp_covinv = v.m_gx_covinv[f_id]
                temp_covinvmu = v.m_gx_covinvmu[f_id]
            else:
                temp_covinv = temp_covinv + v.m_gx_covinv[f_id]
                temp_covinvmu = temp_covinvmu + v.m_gx_covinvmu[f_id]

        belief_cov = v.updt_pos_belief.get_cov()
        belief_mu = v.updt_pos_belief.get_mean()
        if temp_covinvmu is None and temp_covinv is None:
            temp_cov = belief_cov
            temp_mu = belief_mu
        else:
            temp_cov = np.linalg.inv(temp_covinv + np.linalg.inv(belief_cov))
            temp_mu = np.dot(
                temp_cov,
                (temp_covinvmu + np.dot(np.linalg.inv(belief_cov), belief_mu)))
        # print 'vehicle belief covariance'
        # print temp_mu
        # print temp_cov
        v.pos_belief = Distribution.Distribution(temp_mu, temp_cov)
        data.save_veh(v.id, v.pos_belief)
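Example #8 (and the later m_xg_calc / consensus functions) fuses Gaussian messages in information form: inverse covariances add and precision-weighted means add, so the fused covariance is inv(inv(C1) + inv(C2)) and the fused mean is the precision-weighted average. Presumably Distribution.Distribution.pdf_product does the same for Gaussian messages, though that code is not shown here. A small stand-alone sketch with illustrative values:

import numpy as np

def fuse_gaussians(mu1, cov1, mu2, cov2):
    # Product of two Gaussian densities (up to normalisation), i.e. information-form fusion.
    info1, info2 = np.linalg.inv(cov1), np.linalg.inv(cov2)
    cov = np.linalg.inv(info1 + info2)
    mu = cov @ (info1 @ mu1 + info2 @ mu2)
    return mu, cov

mu_a, cov_a = np.array([[1.0], [2.0]]), np.diag([0.5, 0.5])
mu_b, cov_b = np.array([[1.4], [1.8]]), np.diag([1.0, 1.0])
mu_f, cov_f = fuse_gaussians(mu_a, cov_a, mu_b, cov_b)
print(mu_f.ravel(), np.diag(cov_f))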
Example #9
    def get_estimator(self,
                      ts: np.ndarray,
                      idx_params: np.ndarray,
                      init_guess=None):

        x0 = init_guess
        constr = self.model.get_constraints()
        bounds = self.model.get_variable_bounds()

        d = dist.Normal()  # We should be able to specify other distributions
        d.add_model(self.model)

        res = minimize(d.neg_log_likelihood,
                       x0,
                       constraints=constr,
                       bounds=bounds,
                       args=(ts, idx_params),
                       method='SLSQP')

        estimators_var = np.linalg.inv(
            d.hessian_log_likelihood(res.x, ts, idx_params))
        estimators_var *= -1.0
        estimators_var = np.squeeze(estimators_var)
        estimators_var = np.diag(estimators_var)

        return [res.x[idx_params[i]] for i in range(len(idx_params))], [
            estimators_var[idx_params[i]] for i in range(len(idx_params))
        ], res
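Example #9 is a maximum-likelihood fit: scipy.optimize.minimize drives the negative log-likelihood to its minimum with SLSQP, and the inverse of the observed information (the negated Hessian of the log-likelihood) gives the estimator variances. The dist.Normal / model classes are specific to the source; a self-contained sketch of the same pattern for a plain normal sample:

import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0)
sample = rng.normal(loc=2.0, scale=0.5, size=500)

def neg_log_likelihood(params, x):
    mu, sigma = params
    if sigma <= 0:
        return np.inf
    return 0.5 * np.sum(((x - mu) / sigma) ** 2) + x.size * np.log(sigma)

res = minimize(neg_log_likelihood, x0=[0.0, 1.0], args=(sample,),
               method='SLSQP', bounds=[(None, None), (1e-6, None)])
mu_hat, sigma_hat = res.x

# For the normal case the observed information is known in closed form, so the
# estimator variances can be read off directly instead of inverting a numerical
# Hessian as in the example above.
var_mu = sigma_hat ** 2 / sample.size
var_sigma = sigma_hat ** 2 / (2 * sample.size)
print(res.x, var_mu, var_sigma)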
Example #10
 def feature(self, n, f, t):
     mu = self.meas_fea[f][n][:, t]
     cov = self.var_fea[f][n]
     if mu[0] != mu[0]:  # NaN check: NaN != NaN
         return None
     else:
         out = Distribution.Distribution(mu, cov)
         return out
Example #11
 def feature(self, n, f, t):
     mu = self.meas_fea[f][n][:, t]
     cov = self.var_fea[f][n]  # + np.matrix([[1000000, 0], [0, 1000000]])
     if mu[0] != mu[0]:  # NaN check: NaN != NaN
         return None
     else:
         out = Distribution.Distribution(mu, cov)
         return out
Example #12
def consensus_fnc(feat, veh):
    for f in feat:
        consensus = Distribution.Distribution(None, None)
        for v in veh:
            if consensus.get_cov() is None:
                consensus = f.m_gf[v.id]
            else:
                consensus = Distribution.Distribution.pdf_product(
                    consensus, f.m_gf[v.id])
    return consensus  # note: only the consensus computed for the last feature in feat is returned
Example #13
def m_xg_calc(veh):
    # for v in veh:
    #     temp_covinv = None
    #     temp_covinvmu = None
    #     pos_belief_prev = v.updt_pos_belief
    #     for f_id in v.visible_feat:
    #         for f_id2 in v.visible_feat:
    #             if f_id != f_id2:
    #                 if temp_covinv is None:
    #                     temp_covinv = v.m_gx_covinv[f_id2]
    #                     temp_covinvmu = v.m_gx_covinvmu[f_id2]
    #                 else:
    #                     temp_covinv += v.m_gx_covinv[f_id2]
    #                     temp_covinvmu += v.m_gx_covinvmu[f_id2]
    #
    #         belief_cov = pos_belief_prev.get_cov()
    #         belief_mu = pos_belief_prev.get_mean()
    #
    #         if temp_covinv is None and temp_covinvmu is None:
    #             temp_cov = belief_cov
    #             temp_mu = belief_mu
    #         else:
    #             temp_cov = np.linalg.inv(temp_covinv + np.linalg.inv(belief_cov))
    #             temp_mu = temp_cov*(temp_covinvmu + np.linalg.inv(belief_cov)*belief_mu)
    #
    #         v.m_xg[f_id] = Distribution.Distribution(temp_mu, temp_cov)

    for v in veh:
        pos_belief_prev = v.updt_pos_belief
        for f_id in v.visible_feat:
            temp_covinv = None
            temp_covinvmu = None
            for f_id2 in v.visible_feat:
                if f_id != f_id2:
                    if temp_covinv is None:
                        temp_covinv = v.m_gx_covinv[f_id2]
                        temp_covinvmu = v.m_gx_covinvmu[f_id2]
                    else:
                        temp_covinv = temp_covinv + v.m_gx_covinv[f_id2]
                        temp_covinvmu = temp_covinvmu + v.m_gx_covinvmu[f_id2]

            belief_cov = pos_belief_prev.get_cov()
            belief_mu = pos_belief_prev.get_mean()

            if temp_covinv is None and temp_covinvmu is None:
                temp_cov = belief_cov
                temp_mu = belief_mu
            else:
                temp_cov = np.linalg.inv(temp_covinv +
                                         np.linalg.inv(belief_cov))
                temp_mu = np.dot(
                    temp_cov, (temp_covinvmu +
                               np.dot(np.linalg.inv(belief_cov), belief_mu)))

            v.m_xg[f_id] = Distribution.Distribution(temp_mu, temp_cov)
Example #14
    def feature(self, n, f, i):

        mu = self.meas_fea[f][n][:, 0]
        cov = self.var_fea[f][n] + self.var_feature_sensor
        if mu[0] == 0.0 and mu[1] == 0.0:
            # print 'Feat meas returns None'
            return None
        else:
            out = Distribution.Distribution(mu, cov)
            # print "mean Feature" + str(mu)
            return out
Example #15
def update_veh_belief(veh, data):
    for v in veh:
        prod = Distribution.Distribution(None, None)
        for m in v.m_gx:
            if prod.get_cov() is None:
                prod = m
            else:
                prod = Distribution.Distribution.pdf_product(prod, m)

        v.pos_belief = Distribution.Distribution.pdf_product(
            prod, v.pred_pos_belief)
        data.save_veh(v.id, v.pos_belief)
Example #16
 def __init__(self, cust_id: int, arrival_time: int):
     """
     Customer visits the shop to buy ice-cream.
     A customer requires below information:
     - customer id to identify the customer (cust_id)
     - arrival time (arrival_time)
     - customer order containing the amount of ice-cream in each size he wants to buy (cust_order: dict)
     - amount of time to make the order (order_time)
     - amount of time to think about what to order (thinking_time)
     :param cust_id: customer id
     :param arrival_time: the second the customer arrives after the shop opens
     """
     self._cust_id = cust_id
     self._cust_order = {
         'S': Distribution.GaussianDiscrete(1, 1, 1, 5).random(),
         'M': Distribution.GaussianDiscrete(1, 1, 0, 5).random(),
         'L': Distribution.GaussianDiscrete(2, 1, 0, 5).random()
     }
     self._arrival_time = arrival_time
     self._order_time = Distribution.NormalDist(120, 3, 60, 300).random()
     self._thinking_time = random.uniform(0, 120)
Example #17
def update_feat_belief(veh, data):
    Q = np.matrix([[0.1, 0], [0, 0.1]])
    for v in veh:
        for f_id in v.visible_feat:
            pos_belief_prev = v.feat[f_id].pos_belief
            pos_belief_new = Distribution.Distribution(
                pos_belief_prev.get_mean(),
                pos_belief_prev.get_cov() + Q)
            v.feat[f_id].pos_belief = Distribution.Distribution.pdf_product(
                pos_belief_new, v.feat[f_id].consensus)
        for f2 in v.feat:
            data.save_feat(v.id, f2.id, v.feat[f2.id].pos_belief)
Example #18
def m_xg_calc(feat, veh):
    for v in veh:
        product = Distribution.Distribution(None, None)
        pos_belief_prev = v.pred_pos_belief
        for f in feat:
            for f2 in feat:
                if product.get_mean() is None:
                    product = v.m_gx[f2.id]
                else:
                    if f.id != f2.id:
                        product = Distribution.Distribution.pdf_product(
                            product, v.m_gx[f2.id])
            v.m_xg[f.id] = Distribution.Distribution.pdf_product(
                product, pos_belief_prev)
Example #19
def m_fg_calc(feat, veh):
    for f in feat:
        product = Distribution.Distribution(None, None)
        pos_belief_prev = f.pos_belief
        for v in veh:
            for v2 in veh:
                if product.get_mean() is None:
                    product = f.m_gf[v2.id]
                else:
                    if v.id != v2.id:
                        product = Distribution.Distribution.pdf_product(
                            product, f.m_gf[v2.id])
            f.m_fg[v.id] = Distribution.Distribution.pdf_product(
                product, pos_belief_prev)
Example #20
 def __init__(self, id: int, is_experienced: bool):
     """
     Cashier is the person who takes order from customers.
     Each cashier requires data about his ID, experience level, salary based on the experience
     and the required time to process an order
     :param id: cashier id to identify a cashier, automatically assigned when a cashier is added
     :param is_experienced: whether the cashier has any experience (True or False)
     """
     Employee.__init__(self, id, is_experienced)
     if is_experienced:
         self._salary = 12  # $12/hr
         self._process_time = 0
     else:
         self._salary = 10  # $10/hr
         self._process_time = Distribution.NormalDist(5, 1, 2, 15).random()
Example #21
def m_fg_calc(veh):

    # for v in veh:
    #     temp_covinv = [None for i in range(len(veh[0].feat))]
    #     temp_covinvmu = [None for i in range(len(veh[0].feat))]
    #     for v2 in veh:
    #         if v.id != v2.id:
    #             for f_id2 in v2.visible_feat:
    #                 if temp_covinv[f_id2] is None:
    #                     temp_covinv[f_id2] = v2.feat[f_id2].m_gf_covinv
    #                     temp_covinvmu[f_id2] = v2.feat[f_id2].m_gf_covinvmu
    #                 else:
    #                     temp_covinv[f_id2] += v2.feat[f_id2].m_gf_covinv
    #                     temp_covinvmu[f_id2] += v2.feat[f_id2].m_gf_covinvmu
    #     for f_id in v.visible_feat:
    #         pos_belief_prev = v.feat[f_id].pos_belief
    #         belief_cov = pos_belief_prev.get_cov()
    #         belief_mu = pos_belief_prev.get_mean()
    #         if temp_covinv[f_id] is None and temp_covinvmu[f_id] is None:
    #             temp_cov = belief_cov
    #             temp_mu = belief_mu
    #         else:
    #             temp_cov = np.linalg.inv(temp_covinv[f_id] + np.linalg.inv(belief_cov))
    #             temp_mu = np.dot(temp_cov, (temp_covinvmu[f_id] + np.dot(np.linalg.inv(belief_cov),belief_mu)))
    #         v.feat[f_id].m_fg = Distribution.Distribution(temp_mu, temp_cov)

    # Consensus-based calculation is done below instead of the commented-out block above.

    for v in veh:
        for f_id in v.visible_feat:
            pos_belief_prev = v.feat[f_id].pos_belief
            belief_cov = pos_belief_prev.get_cov()
            belief_mu = pos_belief_prev.get_mean()
            temp_cov = np.linalg.inv(
                np.linalg.inv(v.feat[f_id].consensus.get_cov()) +
                np.linalg.inv(belief_cov) - v.feat[f_id].m_gf_covinv)
            # print 'consensus' + str(v.feat[f_id].consensus.get_mean())
            # print 'consensusCov' + str(v.feat[f_id].consensus.get_cov())

            # print belief_cov
            # print v.feat[f_id].m_gf_covinvmu
            temp_mu = temp_cov * (
                np.linalg.inv(v.feat[f_id].consensus.get_cov()) *
                v.feat[f_id].consensus.get_mean() + np.linalg.inv(belief_cov) *
                belief_mu - v.feat[f_id].m_gf_covinvmu)
            v.feat[f_id].m_fg = Distribution.Distribution(temp_mu, temp_cov)
Example #22
def init_belief(v, gnss_meas_distr,
                data):  # called at every time step of the ICP iteration
    pred_msg = v.pred_msg()  #mean and covariance from the prediction
    meas_msg = gnss_meas_distr[v.id]
    data.save_pred_veh(v.id, pred_msg)

    h_gnss = np.matrix('1 0 0 0; 0 1 0 0')

    rho_gnss = meas_msg.get_mean()
    r_gnss = meas_msg.get_cov()
    x_pred = pred_msg.get_mean()
    p_pred = pred_msg.get_cov()

    k = p_pred * np.transpose(h_gnss) * np.linalg.inv(
        r_gnss + h_gnss * p_pred * np.transpose(h_gnss))
    x_update = x_pred + k * (rho_gnss - h_gnss * x_pred)
    p_update = p_pred - k * h_gnss * p_pred
    # mean and covariance of the updated belief
    new_belief = Distribution.Distribution(x_update, p_update)
    v.update_updt_pos_belief(new_belief)
    data.save_updt_veh(v.id, v.updt_pos_belief)
Example #23
def consensus_fnc(veh):
    temp_covinv = [None for i in range(len(veh[0].feat))]
    temp_covinvmu = [None for i in range(len(veh[0].feat))]
    for v in veh:
        for f_id in v.visible_feat:
            if temp_covinv[f_id] is None:
                temp_covinv[f_id] = v.feat[f_id].m_gf_covinv
                temp_covinvmu[f_id] = v.feat[f_id].m_gf_covinvmu
            else:
                temp_covinv[
                    f_id] = temp_covinv[f_id] + v.feat[f_id].m_gf_covinv
                temp_covinvmu[
                    f_id] = temp_covinvmu[f_id] + v.feat[f_id].m_gf_covinvmu
    for v in veh:
        for f_id2 in v.visible_feat:
            consensus_cov = np.linalg.inv(temp_covinv[f_id2])
            consensus_mu = np.dot(consensus_cov, temp_covinvmu[f_id2])
            # print consensus_cov
            # print consensus_mu
            v.feat[f_id2].consensus = Distribution.Distribution(
                consensus_mu, consensus_cov)
Example #24
elif emg == 1:
    # unplanned islanding
    #emgdisp(self,Pdiesel,P_ES,start_ds,Pess,Type)
    Pdiesel = 0.5
    Pess = 1
    start_ds = 1
    Uplan = Unplan1.Unplan1()
    Uplan.emgdisp(Pdiesel, PES, start_ds, Pess, Type)
    Pcurt = Uplan.PCwd
    PSLd = Uplan.PSLd

# Call distribution of Ppv and Pwd
#Ppv=0.5
#Pwd=0.7
#Pcurt=0.4
dist2 = Distribution.distribution()
dist2.dist(Ppv, Pwd, Pcurt)
Pwdreft = dist2.Pwdref
Ppvreft = dist2.Ppvref

u = np.array(np.zeros(1))
v = np.array(np.zeros(1))

#Call WandQ of wind
WandQ1 = WandQ.WandQ()
WandQ1.shed(u, v, np.array([Pwdf]), np.array([Pwdv]), Pwd - Pwdreft)
Pwdfref = WandQ1.P11_new
Pwdvref = WandQ1.P21_new

# Call WandQ of PV
WandQ2 = WandQ.WandQ()
Example #25
    rangeNmp = 5

    T_s = 0.1
    n_v = 2
    n_f = 3
    p1 = np.matrix('1; 0; 0; 0')
    p2 = np.matrix('3; 2; 0; 0')
    cov1 = np.matrix('4 0 0 0; '
                     '0 4 0 0; '
                     '0 0 2 0; '
                     '0 0 0 2')
    cov2 = np.matrix('4 0 0 0; '
                     '0 4 0 0; '
                     '0 0 2 0; '
                     '0 0 0 2')
    init_veh_distr = [Distribution.Distribution(p1, cov1), Distribution.Distribution(p2, cov2)]

    p3 = np.matrix('0.5; 0.5')
    cov3 = np.matrix('5 0; 0 5')

    p4 = np.matrix('0.5; 0.5')
    cov4 = np.matrix('5 0; 0 5')

    p5 = np.matrix('0.5; 0.5')
    cov5 = np.matrix('5 0; 0 5')

    p6 = np.matrix('0.5; 0.5')
    cov6 = np.matrix('5 0; 0 5')

    p7 = np.matrix('0.5; 0.5')
    cov7 = np.matrix('5 0; 0 5')
Example #26
def normal_probability_between(lo, hi, mu=0, sigma=1):
    return Distribution.normal_cdf(hi, mu, sigma) - Distribution.normal_cdf(lo, mu, sigma)
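Examples #26, #31 and #34 are thin wrappers around the normal CDF. Assuming Distribution.normal_cdf is the usual Gaussian CDF, it can be written with the standard-library error function, which makes the "between" probability easy to sanity-check:

import math

def normal_cdf(x, mu=0, sigma=1):
    # Phi(x) = 0.5 * (1 + erf((x - mu) / (sigma * sqrt(2))))
    return 0.5 * (1 + math.erf((x - mu) / (sigma * math.sqrt(2))))

def normal_probability_between(lo, hi, mu=0, sigma=1):
    return normal_cdf(hi, mu, sigma) - normal_cdf(lo, mu, sigma)

# About 68.3% of a standard normal's mass lies within one sigma of the mean.
print(normal_probability_between(-1, 1))  # ~0.6827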
Example #27
    def vehicle(self, n, t):

        mu = self.meas_veh[n][:, t].reshape(2, 1)
        cov = self.var_veh[n]
        out = Distribution.Distribution(mu, cov)
        return out
Example #28
def main():
    # ----------------------------------------------------------------------------------------
    # Attributes of the mesh
    # The value dp is the result of the equation w(dp) * h(dp) = num_nodes;
    # it has to be changed manually in order to keep the same number of nodes.
    # ----------------------------------------------------------------------------------------

    width = 2
    height = 4
    num_nodes = 200
    dp = 5
    radius = 1.0
    D = 1
    T1 = 100

    # ----------------------------------------------------------------------------------------
    # Create Domain Regular
    # ----------------------------------------------------------------------------------------

    d1 = dm.Domain(width, height)
    d1.createSquare(dp=dp)
    xd1 = d1.nodes_x()
    yd1 = d1.nodes_y()
    dst = dt.Distribution(domain=d1, dp=dp)

    # ----------------------------------------------------------------------------------------
    # show boundary of Domain
    # ----------------------------------------------------------------------------------------

    fig, ax = plt.subplots(nrows=1, ncols=1)
    plt.plot(d1.nodes_x()[0][0], d1.nodes_y()[0][0])
    plt.plot(d1.nodes_x()[1][0], d1.nodes_y()[1][0])
    plt.plot(d1.nodes_x()[2][0], d1.nodes_y()[2][0])
    plt.plot(d1.nodes_x()[3][0], d1.nodes_y()[3][0])

    # ----------------------------------------------------------------------------------------
    # Make the knots of Domain: ngd (gaussian distribution) or rdp (regular)
    # ----------------------------------------------------------------------------------------

    dst.calcDist(shape='ngd',
                 nodes=num_nodes,
                 width=width,
                 height=height,
                 bx=d1.nodes_x(),
                 by=d1.nodes_y(),
                 dp=dp)
    #dst.calcDist(shape='rdp', width=width, height=height, bx=xd1, by=yd1, dp=dp, nodes=num_nodes)

    # ----------------------------------------------------------------------------------------
    # Kernel selection
    # ----------------------------------------------------------------------------------------

    kernel = Multiquadric2D(1 / np.sqrt(dst.nodes()))

    # ----------------------------------------------------------------------------------------
    # Gramm matrix allocation
    # ----------------------------------------------------------------------------------------

    matrix = GrammMatrix(dst)
    matrix.fillMatrixLaplace2D(kernel, D)

    # ----------------------------------------------------------------------------------------
    # Dirichlet boundary condition
    # ----------------------------------------------------------------------------------------

    matrix.setDirichletRegular(T1, 3)
    # print(dst.NI(), dst.NB(), test[dst.NI():], len(test[dst.NI():]), len(test[0:dst.NI()]))

    # ----------------------------------------------------------------------------------------
    # Gram matrix solution
    # ----------------------------------------------------------------------------------------

    solv = Solver(matrix, 'linalg')
    solv.solve()
    solv.evaluate(kernel)

    # ----------------------------------------------------------------------------------------
    # Solution storage(optional)
    # ----------------------------------------------------------------------------------------

    zx = solv.interpolate(kernel)
    u = solv.getSol()
    lam = solv.lam()

    # ----------------------------------------------------------------------------------------
    # Solution and point cloud plotting
    # ----------------------------------------------------------------------------------------
    title = 'Heat diffusion in a two-dimensional domain'
    xlabel = 'Lx [m]'
    ylabel = 'Ly [m]'
    barlabel = 'Temperature °C'
    plot = plotter(solv, kernel)
    # plot.regularMesh2D (title='Spatial created grid', xlabel=xlabel, ylabel=ylabel)
    plot.surface3D(title=title,
                   xlabel=xlabel,
                   ylabel=ylabel,
                   barlabel=barlabel)
    plot.levelplot(title=title,
                   xlabel=xlabel,
                   ylabel=ylabel,
                   barlabel=barlabel)

    plt.spy(matrix.getMatrix(), markersize=1.0)
    plt.show()

    # ----------------------------------------------------------------------------------------
    # Select the search method and time of execution
    # ----------------------------------------------------------------------------------------
    nn = nb.Neighbor(method='bf', x=dst.a(), y=dst.b(), r=radius)
    neighborhood = nn.nearest_neighbors()
    nn = nb.Neighbor(method='bt', x=dst.a(), y=dst.b(), r=radius)
    neighborhood = nn.nearest_neighbors()
    nn = nb.Neighbor(method='ball', x=dst.a(), y=dst.b(), r=radius)
    neighborhood = nn.nearest_neighbors()
    #print (neighborhood)
    #print (nn.location())
    start_time = time.time()
    painter(neighborhood)
    print("Painter Time in NN method:")
    print("--- %s seconds ---" % (time.time() - start_time))
    print("Data Domain")
    print('_' * 20)
    print("number points: ", len(dst.a()))
    plt.scatter(dst.a(), dst.b())
    plt.grid()
    plt.axis([-2, width + 2, -1, height + 1])
    warnings.filterwarnings("ignore")
    #ax.set_axis_bgcolor("lightslategray")
    plt.show()

    # ----------------------------------------------------------------------------------------
    # Gramm matrix allocation with NN
    # ----------------------------------------------------------------------------------------

    matrixNN = GrammMatrix(dst)
    matrixNN.fillMatrixLapace2D_CSupported(kernel, D, nn.location())

    # ----------------------------------------------------------------------------------------
    # Dirichlet boundary condition
    # ----------------------------------------------------------------------------------------

    matrixNN.setDirichletRegular(T1, 3)
    # print(dst.NI(), dst.NB(), test[dst.NI():], len(test[dst.NI():]), len(test[0:dst.NI()]))

    # ----------------------------------------------------------------------------------------
    # Gram matrix solution with NN
    # ----------------------------------------------------------------------------------------

    solvnn = Solver(matrixNN, 'linalg')
    solvnn.solve()
    solvnn.evaluate(kernel)
    # ----------------------------------------------------------------------------------------
    # Solution storage(optional)
    # ----------------------------------------------------------------------------------------

    zx = solvnn.interpolate(kernel)
    u = solvnn.getSol()
    lam = solvnn.lam()

    # ----------------------------------------------------------------------------------------
    # Solution and point cloud plotting
    # ----------------------------------------------------------------------------------------
    title = 'Heat diffusion in a two-dimensional domain'
    xlabel = 'Lx [m]'
    ylabel = 'Ly [m]'
    barlabel = 'Temperature °C'
    plot = plotter(solvnn, kernel)
    # plot.regularMesh2D (title='Spatial created grid', xlabel=xlabel, ylabel=ylabel)
    plot.surface3D(title=title,
                   xlabel=xlabel,
                   ylabel=ylabel,
                   barlabel=barlabel)
    plot.levelplot(title=title,
                   xlabel=xlabel,
                   ylabel=ylabel,
                   barlabel=barlabel)

    plt.spy(matrixNN.getMatrix(), markersize=1.0)
    plt.show()

    print(matrixNN.N(), matrixNN.NI())
Example #29
    # cov1 = np.matrix('25 0 0 0; '
    #                  '0 25 0 0; '
    #                  '0 0 25 0; '
    #                  '0 0 0 25')
    # cov2 = np.matrix('25 0 0 0; '
    #                  '0 25 0 0; '
    #                  '0 0 25 0; '
    #                  '0 0 0 25')

    init_veh_distr1mean = np.reshape(
        np.random.multivariate_normal(np.array(p1).flatten(), cov1, 1), [4, 1])
    init_veh_distr2mean = np.reshape(
        np.random.multivariate_normal(np.array(p2).flatten(), cov2, 1), [4, 1])

    init_veh_distr = [
        Distribution.Distribution(init_veh_distr1mean, cov1),
        Distribution.Distribution(init_veh_distr2mean, cov2)
    ]

    p3 = np.matrix('1; 1')
    cov3 = np.matrix('100 0; 0 100')

    p4 = np.matrix('1; 1')
    cov4 = np.matrix('5 0; 0 5')

    p5 = np.matrix('2; 2')
    cov5 = np.matrix('5 0; 0 5')

    p6 = np.matrix('0.5; 0.5')
    cov6 = np.matrix('5 0; 0 5')
Example #30
n_v = 2
n_f = 3
N_mp = 2
p1 = np.matrix('1; 0; 0; 0')
p2 = np.matrix('3; 2; 0; 0')

cov1 = np.matrix('36 0 0 0; ' '0 36 0 0; ' '0 0 16 0; ' '0 0 0 16')
cov2 = np.matrix('36 0 0 0; ' '0 36 0 0; ' '0 0 16 0; ' '0 0 0 16')

# init_veh_distr1mean = np.reshape(np.random.multivariate_normal(np.array(p1).flatten(), cov1, 1), [4, 1])
# init_veh_distr2mean = np.reshape(np.random.multivariate_normal(np.array(p2).flatten(), cov2, 1), [4, 1])
#
# init_veh_distr = [Distribution.Distribution(init_veh_distr1mean, cov1),
#                   Distribution.Distribution(init_veh_distr2mean, cov2)]
init_veh_distr = [
    Distribution.Distribution(p1, cov1),
    Distribution.Distribution(p2, cov2)
]
p3 = np.matrix('0.5; 0.5')
cov3 = np.matrix('5 0; 0 5')

p4 = np.matrix('0.5; 0.5')
cov4 = np.matrix('5 0; 0 5')

p5 = np.matrix('0.5; 0.5')
cov5 = np.matrix('5 0; 0 5')

p6 = np.matrix('0.5; 0.5')
cov6 = np.matrix('5 0; 0 5')

p7 = np.matrix('0.5; 0.5')
Example #31
def normal_lower_bound(probability, mu=0, sigma=1):
    return Distribution.inverse_normal_cdf(1 - probability, mu, sigma)
Example #32
# 2/ Initialise MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# ------------------------------------------------------------------------
# 3/ Prepare and set parameter
# filenamein  = 'CaseU_C2_AxiTransBump.hdf'
filenameout = 'Test_T3.hdf'

path = ['/SuperName/', '/SuperName/SuperGirl/']
NbVtx = 1000

# > Prepare
distribVtx = numpy.empty((size + 1), order='F', dtype='int32')
sVtx, rVtx = DIST.computeStepAndReminder(NbVtx, size)

# > Compute Distribution
DIST.computeDistribution(distribVtx, sVtx, rVtx)

# > Compute NbEntry
NbE = distribVtx[rank + 1] - distribVtx[rank]

# > Create numpy array
CoordX = numpy.ones(NbE, order='F', dtype=numpy.float64) * rank + 1
CoordY = numpy.ones(NbE, order='F', dtype=numpy.int32) * rank

# ------------------------------------------------------------------------
# 4/ Define filter
DataSpaceMMRY = [[0], [1], [NbE], [1]]
DataSpaceFILE = [[distribVtx[rank]], [1], [NbE], [1]]
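The MPI snippet above splits NbVtx vertices over the ranks with a step-and-remainder scheme: presumably every rank receives NbVtx // size entries and the first NbVtx % size ranks one extra, and distribVtx stores the cumulative offsets so that rank r owns the slice distribVtx[r]:distribVtx[r+1]. The DIST helpers are specific to the source; a plain-Python sketch of that arithmetic:

import numpy as np

def compute_distribution(nb_vtx, size):
    # Cumulative block distribution: distrib[r]..distrib[r+1] is the slice owned by rank r.
    step, reminder = divmod(nb_vtx, size)
    counts = [step + (1 if r < reminder else 0) for r in range(size)]
    distrib = np.zeros(size + 1, dtype='int32')
    distrib[1:] = np.cumsum(counts)
    return distrib

distrib = compute_distribution(1000, 3)
print(distrib)                              # [   0  334  667 1000]
rank = 1
nb_e = distrib[rank + 1] - distrib[rank]    # entries owned by this rank
print(nb_e)                                 # 333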
Example #33
				oldr2, oldc2 = r2,c2
				while (int(oldr2),int(oldc2)) == (int(r2),int(c2)):
					r2 = r2-gy
					c2 = c2-gx

visited = copy.deepcopy(circles)

for r in range(1,img.height-1):
	for c in range(1,img.width-1):
		circles.set_pixel(r,c,-1)
		visited.set_pixel(r,c,0)

for r in range(1,img.height-1):
	for c in range(1,img.width-1):
		if centers.get_pixel(r,c) < center_threshold:
			probs.set_pixel(r,c,min(int(centers.get_pixel(r,c)*Distribution.stddev(Distribution.remove_outliers(radii[r][c]))),255))
		else:
			probs.set_pixel(r,c,255)

final_centers = []
for r in range(1,img.height-1):
	for c in range(1,img.width-1):
		if probs.get_pixel(r,c) < prob_threshold:
			if visited.get_pixel(r,c)!=0:
				continue
			queue = []
			queue.append((r,c))
			rtot, ctot = 0,0
			num = 0
			while len(queue) > 0:
				r2,c2 = queue.pop(0)
Example #34
def normal_probability_above(lo, mu=0, sigma=1):
    return 1 - Distribution.normal_cdf(lo, mu, sigma)