Example #1
import numpy as np
from numpy.random import multivariate_normal, rand
from scipy.integrate import odeint
# gaussian_trig is a helper from the surrounding codebase that augments the
# angle dimensions of the state with their sin/cos features.

def rollout(start, policy, plant, cost, H):
    """
    Generate a state trajectory using an ODE solver.
    """
    odei = plant.odei
    poli = plant.poli
    dyno = plant.dyno
    angi = plant.angi

    nX = len(odei)
    nU = len(policy.max_u)
    nA = len(angi)

    state = start
    x = np.zeros([H + 1, nX + 2 * nA])
    x[0, odei] = multivariate_normal(start, plant.noise)

    u = np.zeros([H, nU])
    y = np.zeros([H, nX])
    L = np.zeros(H)
    latent = np.zeros([H + 1, nX + nU])

    for i in range(H):
        s = x[i, odei]
        a, _, _ = gaussian_trig(s, 0 * np.eye(nX), angi)
        s = np.hstack([s, a])
        x[i, -2 * nA:] = s[-2 * nA:]

        if hasattr(policy, "fcn"):
            u[i, :], _, _ = policy.fcn(s[poli], 0 * np.eye(len(poli)))
        else:
            u[i, :] = policy.max_u * (2 * rand(nU) - 1)
        latent[i, :] = np.hstack([state, u[i, :]])

        # Integrate the true dynamics over one control interval, then add
        # observation noise to the sampled next state.
        dynamics = plant.dynamics
        dt = plant.dt
        next_states = odeint(dynamics, state[odei], [0, dt], args=(u[i, :], ))
        state = next_states[-1, :]
        x[i + 1, odei] = multivariate_normal(state[odei], plant.noise)

        if hasattr(cost, "fcn"):
            L[i] = cost.fcn(state[dyno], 0 * np.eye(len(dyno)))

    y = x[1:H + 1, :nX]
    x = np.hstack([x[:H, :], u[:H, :]])
    latent[H, :nX] = state

    return x, y, L, latent
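
A minimal way to exercise rollout with stand-in objects, assuming the imports above are in scope. The SimpleNamespace plant/policy, the toy dynamics and the gaussian_trig stub below are illustrative assumptions, not part of the original codebase:

from types import SimpleNamespace
import numpy as np

def gaussian_trig(m, v, angi):
    # Stub of the project helper: return sin/cos features of the angle dims.
    return np.hstack([np.sin(m[angi]), np.cos(m[angi])]), None, None

plant = SimpleNamespace(odei=[0, 1], poli=[0, 1], dyno=[0, 1], angi=[1],
                        dt=0.1, noise=1e-4 * np.eye(2),
                        dynamics=lambda x, t, u: np.array([x[1], u[0] - x[0]]))
policy = SimpleNamespace(max_u=[1.0])   # no .fcn attribute -> random actions
cost = SimpleNamespace()                # no .fcn attribute -> zero loss

x, y, L, latent = rollout(np.zeros(2), policy, plant, cost, H=5)
print(x.shape, y.shape, latent.shape)   # (5, 5), (5, 2), (6, 3)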
Example #2
def draw_z2_z1s(chsi, rho, M, r):
    ''' Draw from f(z^{l+1} | z^{l}, s, Theta) 
    chsi (list of nd-arrays): The chsi parameters for all paths starting at each layer
    rho (list of ndarrays): The rho parameters (covariance matrices) for
                                    all paths starting at each layer
    M (list of int): The number of Monte Carlo draws at each layer
    r (list of int): The dimension of each layer
    ---------------------------------------------------------------------------
    returns (list of nd-arrays): z^{l+1} | z^{l}, s, Theta for all (l,s)
    '''

    L = len(chsi)
    S = [chsi[l].shape[0] for l in range(L)]

    z2_z1s = []
    for l in range(L):
        z2_z1s_l = np.zeros((M[l + 1], M[l], S[l], r[l + 1]))
        for s in range(S[l]):
            # One joint draw of M[l+1] samples: the covariance stacks M[l]
            # copies of chsi[l][s] block-diagonally (n_axis is assumed to be
            # an alias of np.newaxis).
            z2_z1s_kl = multivariate_normal(size = M[l + 1], \
                    mean = rho[l][:,s].flatten(order = 'C'), \
                    cov = block_diag(*np.repeat(chsi[l][s][n_axis], M[l], axis = 0)))

            z2_z1s_l[:, :, s] = z2_z1s_kl.reshape(M[l + 1],
                                                  M[l],
                                                  r[l + 1],
                                                  order='C')

        z2_z1s_l = t(z2_z1s_l, (1, 0, 2, 3))
        z2_z1s.append(z2_z1s_l)

    return z2_z1s
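
A toy call with a single layer (shapes made up; block_diag comes from scipy.linalg, multivariate_normal from numpy.random, and t / n_axis are assumed to be the project aliases for np.transpose / np.newaxis):

import numpy as np
from numpy.random import multivariate_normal
from scipy.linalg import block_diag

t, n_axis = np.transpose, np.newaxis         # aliases used inside draw_z2_z1s

M, r = [4, 5], [2, 3]                        # M[l] draws per layer, r[l] latent dims
chsi = [np.stack([np.eye(r[1])] * 2)]        # one layer, S = 2 paths: (S, r[1], r[1])
rho = [np.zeros((M[0], 2, r[1], 1))]         # conditional means: (M[0], S, r[1], 1)

out = draw_z2_z1s(chsi, rho, M, r)
print(out[0].shape)                          # (M[0], M[1], S, r[1]) = (4, 5, 2, 3)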
Example #3
def draw_z_s(mu_s, sigma_s, eta, M):
    ''' Draw from f(z^{l} | s) for all s in Omega and return the centered and
    non-centered draws
    mu_s (list of nd-arrays): The means of the Gaussians starting at each layer
    sigma_s (list of nd-arrays): The covariance matrices of the Gaussians 
                                                        starting at each layer
    eta (list of nb_layers elements of shape (K_l x r_{l-1}, 1)): mu parameters
                                                        for each layer
    M (list of int): The number of Monte Carlo draws at each layer
    -------------------------------------------------------------------------
    returns (list of ndarrays): z^{l} | s for all s in Omega and all l in L
    '''

    L = len(mu_s) - 1
    r = [mu_s[l].shape[1] for l in range(L + 1)]
    S = [mu_s[l].shape[0] for l in range(L + 1)]

    z_s = []
    zc_s = []  # z centered (denoted c) for all l

    for l in range(L + 1):
        zl_s = multivariate_normal(size = (M[l], 1), \
            mean = mu_s[l].flatten(order = 'C'), cov = block_diag(*sigma_s[l]))

        zl_s = zl_s.reshape(M[l], S[l], r[l], order='C')
        z_s.append(t(zl_s, (0, 2, 1)))

        if l < L:  # The last layer is already centered
            eta_ = np.repeat(t(eta[l], (2, 0, 1)), S[l + 1], axis=1)
            zc_s.append(zl_s - eta_)

    return z_s, zc_s
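
A toy call with two layers, k = [2, 3] components per layer and hence S = [6, 3] paths; all shapes are made up, and the aliases are the same assumptions as in the sketch under Example #2:

import numpy as np
from numpy.random import multivariate_normal
from scipy.linalg import block_diag
t = np.transpose                                  # alias used inside draw_z_s

M = [4, 3]                                        # MC draws per layer
mu_s = [np.zeros((6, 2)), np.zeros((3, 1))]       # (S[l], r[l]) means
sigma_s = [np.stack([np.eye(2)] * 6), np.stack([np.eye(1)] * 3)]
eta = [np.zeros((2, 2, 1))]                       # (k[0], r[0], 1) layer means

z_s, zc_s = draw_z_s(mu_s, sigma_s, eta, M)
print(z_s[0].shape, z_s[1].shape, zc_s[0].shape)  # (4, 2, 6) (3, 1, 3) (4, 6, 2)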
Example #4
def draw_z2_z1s(chsi, rho, M, r):
    ''' Draw from f(z^{l+1} | z^{l}, s, Theta) 
    chsi (list of nd-arrays): The chsi parameters for all paths starting at each layer
    rho (list of ndarrays): The rho parameters (covariance matrices) for
                                    all paths starting at each layer
    M (dict): The number of Monte Carlo draws at each layer
    r (dict): The dimension of each layer
    ---------------------------------------------------------------------------
    returns (list of nd-arrays): z^{l+1} | z^{l}, s, Theta for all (l,s)
    '''

    L = len(chsi)
    S = [chsi[l].shape[0] for l in range(L)]

    z2_z1s = []
    for l in range(L):
        z2_z1s_l = np.zeros((M[l], M[l + 1], S[l], r[l + 1]))
        for s in range(S[l]):
            for m in range(M[l]):
                z2_z1s_l[m,:, s] = multivariate_normal(size = M[l + 1], \
                    mean = rho[l][m, s].flatten(order = 'C'), \
                    cov = chsi[l][s])

        z2_z1s.append(z2_z1s_l)

    return z2_z1s
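
This variant produces the same (M[l], M[l+1], S, r[l+1]) blocks as Example #2, but draws each path's covariance separately in an explicit loop instead of a single block-diagonal draw. A toy call with made-up shapes, assuming the same numpy.random import as above:

import numpy as np
from numpy.random import multivariate_normal

M, r = [4, 5], [2, 3]
chsi = [np.stack([np.eye(r[1])] * 2)]          # (S, r[1], r[1])
rho = [np.zeros((M[0], 2, r[1], 1))]           # (M[0], S, r[1], 1)
print(draw_z2_z1s(chsi, rho, M, r)[0].shape)   # (4, 5, 2, 3)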
Example #5
 def reset(self):
     #pos = random.uniform(self.world_box[1], self.world_box[0])
     pos = random.multivariate_normal(np.zeros(2), np.eye(2)*4)
     ang = math.atan2(-pos[1], -pos[0]) + random.uniform(-pi/4, pi/4)
     ang %= 2*pi
     self.x = np.append(pos, [ang, 0., 0.])
     self.P = np.eye(5) * (0.0001**2)
     self.L = np.linalg.cholesky(self.P)
     ind_tril = np.tril_indices(self.L.shape[0])
     self.counter = 0
     self.state = np.append(self.x, self.L[ind_tril])
     #print("pretag:", self.state)
     return self.state
Example #6
    def _update_W(Ys, A, Ps, etasq):
        # N, sigmasq_W and npr (numpy.random) are taken from the enclosing scope.
        # Collect covariates
        Xs = []
        for Y, P in zip(Ys, Ps):
            Xs.append(np.dot(Y, P.T))
        X = np.vstack(Xs)

        W = np.zeros((N, N))
        for n in range(N):
            if np.sum(A[n]) == 0:
                continue

            # Gaussian conditional for row n: Jn is the posterior precision
            # (data term plus a sigmasq_W * I ridge), hn the linear term;
            # draw the active entries of W[n] from N(Jn^-1 hn, Jn^-1).
            xn = X[1:, n]
            Xpn = X[:-1][:, A[n]]
            Jn = np.dot(Xpn.T, Xpn) / etasq + sigmasq_W * np.eye(A[n].sum())
            Sign = np.linalg.inv(Jn)
            hn = np.dot(Xpn.T, xn) / etasq
            W[n, A[n]] = npr.multivariate_normal(np.dot(Sign, hn), Sign)
        return W
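
A toy sketch of calling _update_W, assuming it is defined where N, sigmasq_W and npr are visible; the globals and data below are made up, just to show the shapes:

import numpy as np
import numpy.random as npr

N, sigmasq_W = 3, 1.0                 # globals assumed by _update_W
A = np.ones((N, N), dtype=bool)       # resample every entry of W
Ys = [npr.randn(50, N)]               # one observation sequence
Ps = [np.eye(N)]                      # identity projection

W = _update_W(Ys, A, Ps, etasq=0.1)
print(W.shape)                        # (3, 3)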
Example #7
from utils import samples, draw_ellipse, plot_GMM, calculate_ELBO, HiddenPrints, cols

import pickle

import numpy as np
from numpy.random import multivariate_normal
from numpy.linalg import inv, cholesky

# playing with samples

# Define GMMs for generation
centres = [np.array([0.,3.]), np.array([2.,0.])]
covs = [np.eye(2), np.array([[0.6,0.4],
                             [0.4,0.6]])]

K = 2

N_total = 1000  # total number of datapoints wanted
X1 = multivariate_normal(mean=centres[0],
                         cov=covs[0],
                         size=int(N_total/2))
X2 = multivariate_normal(mean=centres[1],
                         cov=covs[1],
                         size=int(N_total/2))
X = np.concatenate((X1,X2))

a = np.ones(2)*(10**0.5)  # a large alpha keeps the mixing weights pi roughly equal
b = np.ones(2)*(1000**0.5)  # a large beta makes the Gaussian from which mu is drawn tight around m
V = [inv(cholesky(covs[k]))/(1000**0.5) for k in range(K)]
m = centres
u = np.ones(2)*(1000) - 2

alpha, beta, nu = a**2, b**2, abs(u)+2
W = [np.dot(V[k].T,V[k]) for k in range(K)]
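
One effect of scaling V by 1/sqrt(1000) while nu = 1000 is that the Wishart prior mean E[Lambda_k] = nu_k * W_k equals the generating precision inv(covs[k]); a quick sanity check on the arrays defined above:

for k in range(K):
    assert np.allclose(nu[k] * W[k], inv(covs[k]))   # prior mean precision == true precision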
Example #8
def MIAMI(y, n_clusters, r, k, init, var_distrib, nj, authorized_ranges,\
          target_nb_pseudo_obs = 500, it = 50, \
          eps = 1E-05, maxstep = 100, seed = None, perform_selec = True,\
              dm = [], max_patience = 1): # dm: Hack to remove
    ''' Generates pseudo-observations from a trained M1DGMM
    
    y (numobs x p ndarray): The observations containing mixed variables
    n_clusters (int): The number of clusters to look for in the data
    r (list): The dimension of latent variables through the first 2 layers
    k (list): The number of components of the latent Gaussian mixture layers
    init (dict): The initialisation parameters for the algorithm
    var_distrib (p 1darray): An array containing the types of the variables in y 
    nj (p 1darray): For binary/count data: The maximum values that the variable can take. 
                    For ordinal data: the number of different existing categories for each variable
    authorized_ranges (ndarray): The ranges in which the generated observations have to lie
    target_nb_pseudo_obs (int): The number of pseudo-observations to generate         
    it (int): The maximum number of MCEM iterations of the algorithm
    eps (float): If the likelihood increases by less than eps then the algorithm stops
    maxstep (int): The maximum number of optimisation steps for each variable
    seed (int): The random state seed to set (Only for numpy generated data for the moment)
    perform_selec (Bool): Whether to perform architecture selection or not
    dm (np array): The distance matrix of the observations. If not given M1DGMM computes it
    ------------------------------------------------------------------------------------------------
    returns (dict): The predicted classes, the likelihood through the EM steps
                    and a continuous representation of the data
    '''

    out = M1DGMM(y, 'auto', r, k, init, var_distrib, nj, it,\
             eps, maxstep, seed, perform_selec = perform_selec,\
                 dm = dm, max_patience = max_patience)

    # Unpacking the model from the M1DGMM output
    #best_z = out['best_z']
    k = out['best_k']
    r = out['best_r']
    w_s = out['best_w_s']
    lambda_bin = out['lambda_bin']
    lambda_ord = out['lambda_ord']
    lambda_categ = out['lambda_categ']
    lambda_cont = out['lambda_cont']
    mu_s = out['mu']
    sigma_s = out['sigma']

    nj_bin = nj[pd.Series(var_distrib).isin(['bernoulli',
                                             'binomial'])].astype(int)
    nj_ord = nj[var_distrib == 'ordinal'].astype(int)
    nj_categ = nj[var_distrib == 'categorical'].astype(int)

    y_std = y[:, var_distrib == 'continuous'].std(axis=0, keepdims=True)

    M0 = 100  # The number of z to draw
    S0 = np.prod(k)
    MM = 30  # The number of y to draw for each z

    #=======================================================
    # Data augmentation part
    #=======================================================

    # Create pseudo-observations iteratively:
    nb_pseudo_obs = 0

    y_new_all = []
    w_snorm = np.array(w_s) / np.sum(w_s)

    total_nb_obs_generated = 0
    while nb_pseudo_obs <= target_nb_pseudo_obs:

        #===================================================
        # Generate a batch of latent variables
        #===================================================

        # Draw some z^{(1)} | Theta using z^{(1)} | s, Theta
        z = np.zeros((M0, r[0]))

        z0_s = multivariate_normal(size = (M0, 1), \
            mean = mu_s[0].flatten(order = 'C'), cov = block_diag(*sigma_s[0]))
        z0_s = z0_s.reshape(M0, S0, r[0], order='C')

        comp_chosen = np.random.choice(S0, M0, p=w_snorm)
        for m in range(M0):  # Dirty loop for the moment (a vectorised alternative is sketched after this example)
            z[m] = z0_s[m, comp_chosen[m]]

        #===================================================
        # Generate a batch of pseudo-observations
        #===================================================

        y_bin_new = []
        y_categ_new = []
        y_ord_new = []
        y_cont_new = []

        for mm in range(MM):
            y_bin_new.append(draw_new_bin(lambda_bin, z, nj_bin))
            y_categ_new.append(draw_new_categ(lambda_categ, z, nj_categ))
            y_ord_new.append(draw_new_ord(lambda_ord, z, nj_ord))
            y_cont_new.append(draw_new_cont(lambda_cont, z))

        # Stack the quantities
        y_bin_new = np.vstack(y_bin_new)
        y_categ_new = np.vstack(y_categ_new)
        y_ord_new = np.vstack(y_ord_new)
        y_cont_new = np.vstack(y_cont_new)

        # "Destandardize" the continuous data
        y_cont_new = y_cont_new * y_std

        # Put them in the right order and append them to y
        type_counter = {'count': 0, 'ordinal': 0,\
                        'categorical': 0, 'continuous': 0}

        y_new = np.full((M0 * MM, y.shape[1]), np.nan)

        # Quite dirty:
        for j, var in enumerate(var_distrib):
            if (var == 'bernoulli') or (var == 'binomial'):
                y_new[:, j] = y_bin_new[:, type_counter['count']]
                type_counter['count'] = type_counter['count'] + 1
            elif var == 'ordinal':
                y_new[:, j] = y_ord_new[:, type_counter[var]]
                type_counter[var] = type_counter[var] + 1
            elif var == 'categorical':
                y_new[:, j] = y_categ_new[:, type_counter[var]]
                type_counter[var] = type_counter[var] + 1
            elif var == 'continuous':
                y_new[:, j] = y_cont_new[:, type_counter[var]]
                type_counter[var] = type_counter[var] + 1
            else:
                raise ValueError(var, 'Type not implemented')

        #===================================================
        # Acceptance rule
        #===================================================

        # Check that each variable is in the good range
        y_new_exp = np.expand_dims(y_new, 1)

        total_nb_obs_generated += len(y_new)

        mask = np.logical_and(y_new_exp >= authorized_ranges[0][np.newaxis],\
                       y_new_exp <= authorized_ranges[1][np.newaxis])

        # Keep an observation only if it lies in at least one of the authorized range sets
        mask = np.any(mask.mean(2) == 1, axis=1)

        y_new = y_new[mask]
        y_new_all.append(y_new)
        nb_pseudo_obs = len(np.concatenate(y_new_all))

    # Keep target_nb_pseudo_obs pseudo-observations
    y_new_all = np.concatenate(y_new_all)
    y_new_all = y_new_all[:target_nb_pseudo_obs]

    y_all = np.vstack([y, y_new_all])
    share_kept_pseudo_obs = len(y_new_all) / total_nb_obs_generated

    out['y_all'] = y_all
    out['share_kept_pseudo_obs'] = share_kept_pseudo_obs

    return (out)
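
The per-draw component selection above ("Dirty loop for the moment") can also be done with numpy fancy indexing; a minimal standalone sketch with made-up shapes:

import numpy as np

M0, S0, r0 = 100, 4, 2
z0_s = np.random.randn(M0, S0, r0)            # stand-in for the reshaped mixture draws
w_snorm = np.full(S0, 1.0 / S0)               # normalised component weights

comp_chosen = np.random.choice(S0, M0, p=w_snorm)
z = z0_s[np.arange(M0), comp_chosen]          # shape (M0, r0), same result as the explicit loop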
Example #9
def MIAMI(y, n_clusters, r, k, init, var_distrib, nj, authorized_ranges,\
          target_nb_pseudo_obs = 500, it = 50, \
          eps = 1E-05, maxstep = 100, seed = None, perform_selec = True,\
              dm = [], max_patience = 1): # dm: Hack to remove
    ''' Complete the missing values using a trained M1DGMM
    
    y (numobs x p ndarray): The observations containing mixed variables
    n_clusters (int): The number of clusters to look for in the data
    r (list): The dimension of latent variables through the first 2 layers
    k (list): The number of components of the latent Gaussian mixture layers
    init (dict): The initialisation parameters for the algorithm
    var_distrib (p 1darray): An array containing the types of the variables in y 
    nj (p 1darray): For binary/count data: The maximum values that the variable can take. 
                    For ordinal data: the number of different existing categories for each variable
    nan_mask (ndarray): A mask array equal to True when the observation value is missing False otherwise
    target_nb_pseudo_obs (int): The number of pseudo-observations to generate         
    it (int): The maximum number of MCEM iterations of the algorithm
    eps (float): If the likelihood increases by less than eps then the algorithm stops
    maxstep (int): The maximum number of optimisation steps for each variable
    seed (int): The random state seed to set (Only for numpy generated data for the moment)
    perform_selec (Bool): Whether to perform architecture selection or not
    dm (np array): The distance matrix of the observations. If not given M1DGMM computes it
    n_neighbors (int): The number of neighbors to use for NA imputation
    ------------------------------------------------------------------------------------------------
    returns (dict): The predicted classes, the likelihood through the EM steps
                    and a continuous representation of the data
    '''

    # !!! Hack
    cols = y.columns
    # Formatting
    if not isinstance(y, np.ndarray): y = np.asarray(y)

    assert len(k) < 2  # Not implemented for deeper MDGMM for the moment


    out = M1DGMM(y, n_clusters, r, k, init, var_distrib, nj, it,\
             eps, maxstep, seed, perform_selec = perform_selec,\
                 dm = dm, max_patience = max_patience, use_silhouette = True)

    # Compute the associations
    vars_contributions(pd.DataFrame(y, columns = cols), out['Ez.y'], assoc_thr = 0.0, \
                           title = 'Contribution of the variables to the latent dimensions',\
                           storage_path = None)

    # Unpacking the model from the M1DGMM output
    p = y.shape[1]
    k = out['best_k']
    r = out['best_r']
    mu = out['mu'][0]
    sigma = out['sigma'][0]
    w = out['best_w_s']
    #eta = out['eta'][0]

    #Ez_y = out['Ez.y']

    lambda_bin = np.array(out['lambda_bin'])
    lambda_ord = out['lambda_ord']
    lambda_categ = out['lambda_categ']
    lambda_cont = np.array(out['lambda_cont'])

    nj_bin = nj[pd.Series(var_distrib).isin(['bernoulli',
                                             'binomial'])].astype(int)
    nj_ord = nj[var_distrib == 'ordinal'].astype(int)
    nj_categ = nj[var_distrib == 'categorical'].astype(int)

    y_std = y[:,var_distrib == 'continuous'].astype(float).std(axis = 0,\
                                                                    keepdims = True)

    nb_points = 200

    # Constraints block
    '''
    is_constrained = np.isfinite(authorized_ranges).any(1)[0]
    is_min_constrained = np.isfinite(authorized_ranges[0])[0]
    is_max_constrained = np.isfinite(authorized_ranges[1])[0]

    is_continuous = (var_distrib == 'continuous') | (var_distrib == 'binomial')
    min_unconstrained_cont = is_continuous & ~is_min_constrained
    max_unconstrained_cont = is_continuous & ~is_max_constrained
    
    authorized_ranges[0] = np.where(min_unconstrained_cont, np.min(y, 0), authorized_ranges[0])
    authorized_ranges[1] = np.where(max_unconstrained_cont, np.max(y, 0), authorized_ranges[1])
    '''

    #from scipy.stats import norm
    '''
    #==============================================
    # Constraints determination
    #==============================================
    
    # Force to stay in the support for binomial and continuous variables

    #authorized_ranges = np.expand_dims(np.stack([[-np.inf,np.inf] for var in var_distrib]).T, 1)
    #authorized_ranges[:, 0, 8] = [0, 0]  # Of more than 60 years old
    #authorized_ranges[:, 0, 0] = [-np.inf, np.inf]  # Of more than 60 years old

    # Look for the constrained variables
    #authorized_ranges[:,:,0] = np.array([[-np.inf],[np.inf]])
    is_constrained = np.isfinite(authorized_ranges).any(1)[0]
    
    #bbox = np.dstack([Ez_y.min(0),Ez_y.max(0)])
    #bbox * np.array([0.6, 1.4])
    
    proba_min = 1E-3
    proba = proba_min
      
    epsilon = 1E-12
    best_A = []
    best_b = []
    
    is_solution = True
    while is_solution:
        b = []#np.array([])
        A = []#np.array([[]]).reshape((0, r[0]))
        
        bbox = np.array([[-10, 10]] * r[0]) # !!! To fix
        
        alpha = 1 - proba
        q = norm.ppf(1 - alpha / 2)  
        
        #=========================================
        # Store the constraints for each datatype
        #=========================================

        for j in range(p):
            if is_constrained[j]:
                bounds_j = authorized_ranges[:,:,j]
                # The index of the variable among the variables of the same type
                idx_among_type = (var_distrib[:j] == var_distrib[j]).sum()
                
                if var_distrib[j] == 'continuous':
                    # Lower bound
                    lb_j = bounds_j[0] / y_std[0, idx_among_type] - lambda_cont[idx_among_type, 0] + q
                    A.append(- lambda_cont[idx_among_type,1:])
                    b.append(- lb_j)
                    
                    # Upper bound                                
                    ub_j = bounds_j[1] / y_std[0, idx_among_type] - lambda_cont[idx_among_type, 0] - q
                    A.append(lambda_cont[idx_among_type,1:])
                    b.append(ub_j)
                
                elif var_distrib[j] == 'binomial':
                    idx_among_type = ((var_distrib[:j] == 'bernoulli') | (var_distrib[:j] == 'binomial')).sum()
    
                    # Lower bound
                    lb_j = bounds_j[0]
                    lb_j = logit(lb_j / nj_bin[idx_among_type]) - lambda_bin[idx_among_type,0]
                    A.append(- lambda_bin[idx_among_type,1:])
                    b.append(- lb_j)
                    
                    # Upper bound
                    ub_j = bounds_j[1]
                    ub_j = logit(ub_j / nj_bin[idx_among_type]) - lambda_bin[idx_among_type,0]
                    
                    A.append(lambda_bin[idx_among_type, 1:])
                    b.append(ub_j)
                    
                elif var_distrib[j] == 'bernoulli':
                    idx_among_type = ((var_distrib[:j] == 'bernoulli') | (var_distrib[:j] == 'binomial')).sum()
                    assert bounds_j[0] == bounds_j[1] # !!! To improve
                    
                    # Lower bound
                    lb_j = proba if bounds_j[0] == 1 else  0 + epsilon
                    lb_j = logit(lb_j / nj_bin[idx_among_type]) - lambda_bin[idx_among_type,0]
                    A.append(- lambda_bin[idx_among_type,1:])
                    b.append(- lb_j)
                    
                    # Upper bound
                    ub_j = 1 - epsilon if bounds_j[0] == 1 else 1 - proba
                    ub_j = logit(ub_j / nj_bin[idx_among_type]) - lambda_bin[idx_among_type,0]
                    A.append(lambda_bin[idx_among_type, 1:])
                    b.append(ub_j)
                    
                elif var_distrib[j] ==  'categorical':
                    continue
                    assert bounds_j[0] == bounds_j[1] # !!! To improve
                    modality_idx = int(bounds_j[0][0])        
                    
                    # Define the probability to draw the modality of interest to proba
                    pi = np.full(nj_categ[idx_among_type],\
                                 (1 - proba) / (nj_categ[idx_among_type] - 1))
                       
                    # For the inversion of the softmax a constant C = 0 is taken:
                    pi[modality_idx] = proba
                    lb_j = np.log(pi) - lambda_categ[idx_among_type][:, 0] 
    
                    # -1 Mask
                    mask = np.ones((nj_categ[idx_among_type], 1))
                    mask[modality_idx] = -1
                    A.append(lambda_categ[idx_among_type][:, 1:] * mask)
                    b.append(lb_j * mask[:,0])
    
                    
                elif var_distrib[j] == 'ordinal':
                    assert bounds_j[0] == bounds_j[1] # !!! To improve
                    modality_idx = int(bounds_j[0][0])  
                    
                    RuntimeError('Not implemented for the moment')
                        
        #=========================================
        # Try if the solution is feasible
        #=========================================
        try:

            points, interior_point, hs = solve_convex_set(np.reshape(A, (-1, r[0]),\
                                    order = 'C'), np.hstack(b), bbox)
        
            # If yes store the new constraints
            best_A = deepcopy(A)
            best_b = deepcopy(b)
            
            proba = np.min([1.05 * proba, 0.8])
            if proba >= 0.8:
                is_solution = False
        
        except QhullError:
            is_solution = False
                    
            
    best_A = np.reshape(best_A, (-1, r[0]), order = 'C')
    best_b = np.hstack(best_b)
    points, interior_point, hs = solve_convex_set(best_A, best_b, bbox)
    polygon = Polygon(points)    
    '''
    #=======================================================
    # Data augmentation part
    #=======================================================

    # Create pseudo-observations iteratively:
    nb_pseudo_obs = 0

    y_new_all = []
    zz = []

    total_nb_obs_generated = 0
    while nb_pseudo_obs <= target_nb_pseudo_obs:

        #===================================================
        # Generate a batch of latent variables (try)
        #===================================================
        '''
        # Simulate points in the polygon
        pts = generate_random(nb_points, polygon)
        pts = np.array([np.array([p.x, p.y]) for p in pts])
        
        # Compute their density and resample them
        pts_density = fz(pts, mu, sigma, w)
        pts_density = pts_density / pts_density.sum(keepdims = True) # Normalized the pdfs
        
        idx = np.random.choice(np.arange(nb_points), size = target_nb_pseudo_obs,\
                               p = pts_density, replace=True)
        z = pts[idx]
        '''
        #===================================================
        # Generate a batch of latent variables
        #===================================================

        # Draw some z^{(1)} | Theta using z^{(1)} | s, Theta
        z = np.zeros((nb_points, r[0]))

        z0_s = multivariate_normal(size = (nb_points, 1), \
            mean = mu.flatten(order = 'C'), cov = block_diag(*sigma))
        z0_s = z0_s.reshape(nb_points, k[0], r[0], order='C')

        comp_chosen = np.random.choice(k[0], nb_points, p=w / w.sum())
        for m in range(nb_points):  # Dirty loop for the moment
            z[m] = z0_s[m, comp_chosen[m]]

        #===================================================
        # Draw pseudo-observations
        #===================================================

        y_bin_new = []
        y_categ_new = []
        y_ord_new = []
        y_cont_new = []

        y_bin_new.append(draw_new_bin(lambda_bin, z, nj_bin))
        y_categ_new.append(draw_new_categ(lambda_categ, z, nj_categ))
        y_ord_new.append(draw_new_ord(lambda_ord, z, nj_ord))
        y_cont_new.append(draw_new_cont(lambda_cont, z))

        # Stack the quantities
        y_bin_new = np.vstack(y_bin_new)
        y_categ_new = np.vstack(y_categ_new)
        y_ord_new = np.vstack(y_ord_new)
        y_cont_new = np.vstack(y_cont_new)

        # "Destandardize" the continuous data
        y_cont_new = y_cont_new * y_std

        # Put them in the right order and append them to y
        type_counter = {'count': 0, 'ordinal': 0,\
                        'categorical': 0, 'continuous': 0}

        y_new = np.full((nb_points, y.shape[1]), np.nan)

        # Quite dirty:
        for j, var in enumerate(var_distrib):
            if (var == 'bernoulli') or (var == 'binomial'):
                y_new[:, j] = y_bin_new[:, type_counter['count']]
                type_counter['count'] = type_counter['count'] + 1
            elif var == 'ordinal':
                y_new[:, j] = y_ord_new[:, type_counter[var]]
                type_counter[var] = type_counter[var] + 1
            elif var == 'categorical':
                y_new[:, j] = y_categ_new[:, type_counter[var]]
                type_counter[var] = type_counter[var] + 1
            elif var == 'continuous':
                y_new[:, j] = y_cont_new[:, type_counter[var]]
                type_counter[var] = type_counter[var] + 1
            else:
                raise ValueError(var, 'Type not implemented')

        #===================================================
        # Acceptance rule
        #===================================================

        # Check that each variable is in the good range
        y_new_exp = np.expand_dims(y_new, 1)

        total_nb_obs_generated += len(y_new)

        mask = np.logical_and(y_new_exp >= authorized_ranges[0][np.newaxis],\
                       y_new_exp <= authorized_ranges[1][np.newaxis])

        # Keep an observation only if it lies in at least one of the authorized range sets (see the standalone sketch after this example)
        mask = np.any(mask.mean(2) == 1, axis=1)

        y_new = y_new[mask]
        y_new_all.append(y_new)
        nb_pseudo_obs = len(np.concatenate(y_new_all))

        zz.append(z[mask])
        #print(nb_pseudo_obs)

    # Keep target_nb_pseudo_obs pseudo-observations
    y_new_all = np.concatenate(y_new_all)
    y_new_all = y_new_all[:target_nb_pseudo_obs]

    #y_all = np.vstack([y, y_new_all])
    share_kept_pseudo_obs = len(y_new_all) / total_nb_obs_generated

    out['zz'] = zz
    out['y_all'] = y_new_all
    out['share_kept_pseudo_obs'] = share_kept_pseudo_obs

    return (out)
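
The acceptance rule keeps a pseudo-observation only when all of its variables fall inside at least one authorized range set; a standalone sketch of that mask logic with toy numbers (2 variables, a single range set):

import numpy as np

authorized_ranges = np.array([[[0.0, 0.0]],   # lower bounds, overall shape (2, n_range_sets, p)
                              [[1.0, 5.0]]])  # upper bounds
y_new = np.array([[0.5, 2.0],                 # inside the ranges -> kept
                  [2.0, 2.0]])                # first variable out of range -> dropped

y_new_exp = np.expand_dims(y_new, 1)
mask = np.logical_and(y_new_exp >= authorized_ranges[0][np.newaxis],
                      y_new_exp <= authorized_ranges[1][np.newaxis])
mask = np.any(mask.mean(2) == 1, axis=1)      # all variables inside at least one range set
print(y_new[mask])                            # [[0.5 2. ]]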
Example #10
# odei, angi, poli, dyno, dyni, difi, mu0, S0, nc and H are assumed to be
# defined earlier in the script; randn and log come from numpy.random / numpy.
plant.odei = odei
plant.angi = angi
plant.poli = poli
plant.dyno = dyno
plant.dyni = dyni
plant.difi = difi

m, s, c = gaussian_trig(mu0, S0, angi)
m = np.hstack([mu0, m])
c = np.dot(S0, c)
s = np.vstack([np.hstack([S0, c]), np.hstack([c.T, s])])

policy = GPModel()
policy.max_u = [10]
policy.p = {
    'inputs': multivariate_normal(m[poli], s[np.ix_(poli, poli)], nc),
    'targets': 0.1 * randn(nc, len(policy.max_u)),
    'hyp': log([1, 1, 1, 0.7, 0.7, 1, 0.01])
}

Loss.fcn = loss_cp
cost = Loss()
cost.p = 0.5
cost.gamma = 1
cost.width = [0.25]
cost.angle = plant.angi
cost.target = np.array([0, 0, 0, np.pi])

start = multivariate_normal(mu0, S0)
x, y, L, latent = rollout(start, policy, plant, cost, H)
policy.fcn = lambda m, s: concat(congp, gaussian_sin, policy, m, s)
Example #11
def sample(kern, nsamples):
    samples = npr.multivariate_normal(np.zeros([kern.shape[0]]), kern,
                                      nsamples)
    return samples
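
A toy use of sample: build a kernel matrix over a 1-D grid and draw functions from the corresponding GP prior. The squared-exponential kernel and the jitter term are assumptions for the sketch, not part of the snippet:

import numpy as np
import numpy.random as npr

xs = np.linspace(0.0, 5.0, 50)
kern = np.exp(-0.5 * (xs[:, None] - xs[None, :]) ** 2)   # squared-exponential kernel
kern += 1e-8 * np.eye(len(xs))                           # jitter to keep it positive definite
draws = sample(kern, nsamples=10)
print(draws.shape)                                       # (10, 50)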
Example #12
def sample_mu(m, beta, lam):
    return multivariate_normal(m, inv(beta * lam))
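
sample_mu draws from N(m, (beta * lam)^-1), i.e. lam is treated as a precision matrix scaled by beta; a toy call, assuming multivariate_normal comes from numpy.random and inv from numpy.linalg:

import numpy as np
from numpy.random import multivariate_normal
from numpy.linalg import inv

m, lam, beta = np.zeros(2), 4.0 * np.eye(2), 10.0   # larger beta -> draw concentrates around m
mu = sample_mu(m, beta, lam)
print(mu.shape)                                     # (2,)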
Example #13
 def sample(self, var_param, n_samples):
     z_0 = npr.multivariate_normal(mean=[0] * self.input_dim,
                                   cov=np.identity(self.input_dim),
                                   size=n_samples)
     z_k, _ = self.forward(var_param, z_0)
     return z_k
Example #14
 def disturbance(z):
     mean = np.zeros(n)
     covr = np.diag([0, 0, 0.001, 0.001])
     return npr.multivariate_normal(mean, covr)