Code example #1
 def reset(self):
     # start from a random position inside the world box (1 unit in from the walls)
     pos = random.uniform(self.world_box[1]+1, self.world_box[0]-1)
     #pos = random.multivariate_normal(np.zeros(2), np.eye(2)*1.25)
     # heading points roughly toward the origin, with up to pi/8 of noise
     ang = math.atan2(-pos[1], -pos[0]) + random.uniform(-pi/8, pi/8)
     ang %= 2*pi
     self.x = np.append(pos, [ang, 0., 0.])
     self.P = np.eye(5) * (0.0001**2)
     self.L = np.linalg.cholesky(self.P)
     ind_tril = np.tril_indices(self.L.shape[0])
     self.counter = 0
     # state = mean followed by the lower-triangular entries of the Cholesky factor of P
     self.state = np.append(self.x, self.L[ind_tril])
     #state = self.state#self._get_state()
     return self.state
Code example #2
 def data_set_RBF(dimensions, mu_correct):
     # a = normal(scale=0.2, size = size)
     values = uniform(-10, 10, dimensions)
     b = []
     for element in values:
         b.append(
             np.exp(-np.linalg.norm(element)**2 / (2 * mu_correct[0]**2)))
     b = np.array(b)  #+ normal(0, 0.25)
     return b, values
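
A short usage sketch, assuming `data_set_RBF` above is in scope and `uniform` is `numpy.random.uniform` (the argument values are made up for illustration):

import numpy as np
from numpy.random import uniform   # the snippet's `uniform`

targets, inputs = data_set_RBF(dimensions=50, mu_correct=[2.0])
# each target is a Gaussian RBF of its input: exp(-x**2 / (2 * 2.0**2))
print(inputs.shape, targets.shape)   # (50,) (50,)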
Code example #3
def sample_latent_sb(a, b, n_samples):
    # a, b have K-1 columns (log-parameters of the Kumaraswamy distribution);
    # `truncation_level` (= K) is assumed to be defined at module level
    # this function samples from the Kumaraswamy distribution

    # sample from the uniform distribution; every row corresponds to a datapoint x_i
    u = npr.uniform(0, 1, (n_samples, truncation_level-1))
    a = np.exp(a)
    b = np.exp(b)

    return (1-u**(1/a))**(1/b)
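
The function returns only the Kumaraswamy draws, i.e. the stick-breaking fractions v_k; converting them into mixture weights pi_k = v_k * prod_{j<k}(1 - v_j) is not shown in this snippet. A minimal sketch of that step, assuming the (n_samples, K-1) output shape above (the helper name is our own, not from the source):

import numpy as np

def stick_breaking_weights(v):
    # v: (n_samples, K-1) stick-breaking fractions in (0, 1)
    # returns (n_samples, K) weights summing to 1 along the last axis
    remaining = np.concatenate([np.ones((v.shape[0], 1)), np.cumprod(1 - v, axis=1)], axis=1)
    sticks = np.concatenate([v, np.ones((v.shape[0], 1))], axis=1)  # the last stick takes the remainder
    return sticks * remaining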
Code example #4
    def objective(flattened_combined_params):

        combined_params = unflat_params(flattened_combined_params)
        data_idx = batch
        gen_params, rec_params = combined_params

        # We binarize the data: each pixel is set to 1 with probability equal to its grey-level
        on = train_images[data_idx, :] > npr.uniform(size=train_images[data_idx, :].shape)
        images = train_images[data_idx, :] * 0.0
        images[on] = 1.0

        return vae_lower_bound(gen_params, rec_params, images)
Code example #5
File: firefly.py  Project: svd3/DDPG
 def reset(self):
     #pos = random.uniform(self.world_box[1], self.world_box[0])
     pos = random.multivariate_normal(np.zeros(2), np.eye(2)*4)
     ang = math.atan2(-pos[1], -pos[0]) + random.uniform(-pi/4, pi/4)
     ang %= 2*pi
     self.x = np.append(pos, [ang, 0., 0.])
     self.P = np.eye(5) * (0.0001**2)
     self.L = np.linalg.cholesky(self.P)
     ind_tril = np.tril_indices(self.L.shape[0])
     self.counter = 0
     self.state = np.append(self.x, self.L[ind_tril])
     #print("pretag:", self.state)
     return self.state
Code example #6
    def __init__(self):
        self.counter = 0
        self.targetUpdatefreq = 100 # Not being used

        self.max_action = 0.01
        self.world_box = np.array([[5.0, 5.0], [-5.0, -5.0]])
        #self.min_position = np.array([-5.0, -5.0])
        self.xlow = np.append(self.world_box[1], [0., -1., -1.])
        self.xhigh = np.append(self.world_box[0], [2*pi, 1., 1.])
        self.low_state = np.append(self.xlow, -10*np.ones(15))
        self.high_state = np.append(self.xhigh, 10*np.ones(15))

        self.action_space = spaces.Box(-np.ones(2), np.ones(2))
        self.observation_space = spaces.Box(self.low_state, self.high_state)

        self.viewer = None
        #self.state = self.observation_space.sample()

        self.noise = np.array([0.01]*5 + [0.2]*2) #std
        dt = 0.1
        self.Q = np.eye(5) * (0.01**2)
        self.R = np.eye(2) * (0.2**2)
        self.P = np.eye(5) * (0.0001**2)
        self.Id = np.eye(5)

        # initialize state: random position, heading roughly toward the origin
        pos = random.uniform(self.world_box[1], self.world_box[0])
        ang = math.atan2(pos[1], pos[0]) - pi + random.uniform(-pi/8, pi/8)
        ang %= 2*pi
        self.x = np.append(pos, [ang, 0., 0.])
        self.L = np.linalg.cholesky(self.P)

        self.goal_position = np.array([0., 0.])

        self.A = jacobian(self.dynamics)
        self.H = jacobian(self.obs)
        self.seed()
        self.reset()
Code example #7
def sample_latent_sb(a,b, n_samples):
    # this function samples from the Kumaraswamy distribution

    # sample from uniform distribution
    u=npr.uniform(0, 1, (n_samples, truncation_level)) # every row corresponds to a datapoint x_i
    a=np.exp(a)
    b=np.exp(b)
#    print("max(1/b) : ", max(1/b))
#    print("max(1/a) : ", max(1/a))

#    print("min(1/b) : ", min(1 / b))
#    print("min(1/a) : ", min(1 / a))
    uu=[(1-u[:,i:(i+1)]**(1/b))**(1/a) for i in range(truncation_level)]
    return np.concatenate(uu, axis=1)
Code example #8
def concrete_s(p=0.5, n=1, t=0.1):
    """
    Sample from the concrete distribution
    p - Probability of ''success''
    n - Number of samples
    t - ''Temperature'' of the distribution, controls the level of
        smoothing basically
    """
    if p >= 1:
        return np.ones(n)
    u = npr.uniform(size=n)
    u2 = np.log(p) - np.log(1 - p) + np.log(u) - np.log(1 - u)
    sample = sigmoid(u2 / t)

    return sample
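
A hedged usage sketch; it assumes `sigmoid` resolves to something like `scipy.special.expit`:

import numpy as np
import numpy.random as npr
from scipy.special import expit as sigmoid

samples_hard = concrete_s(p=0.3, n=100000, t=0.05)   # values cluster near 0 and 1
samples_soft = concrete_s(p=0.3, n=100000, t=2.0)    # values spread smoothly over (0, 1)
print(np.mean(samples_hard > 0.5))                   # about 0.3: thresholding recovers Bernoulli(p)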
Code example #9
File: data_preprocessing.py  Project: RobeeF/M1DGMM
def bin_to_bern(Nj, yj_binom, zM_binom):
    ''' Split the Binomial variable into Bernoulli draws, then just recopy the corresponding zM.
    This is needed to fit binary logistic regressions.
    Example: yj has support in [0,10]; if y_ij = 3, generate a vector with 3 ones and 7 zeros
    (3 successes among 10).

    Nj (int): The upper bound of the support of yj_binom
    yj_binom (numobs 1darray): The Binomial variable considered
    zM_binom (numobs x r nd-array): The continuous representation of the data
    -----------------------------------------------------------------------------------
    returns (tuple of 2 arrays with numobs*Nj rows): The "Bernoullied" Binomial variable and the matching zM rows
    '''

    n_yk = len(yj_binom)  # number of observations

    # Generate Nj Bernoullis from each observation and get a (numobs x Nj) table
    u = uniform(size=(n_yk, Nj))
    p = (yj_binom / Nj)[..., n_axis]
    yk_bern = (u < p).astype(int).flatten('A')  # each entry is 1 with probability y_ij / Nj

    return yk_bern, np.repeat(zM_binom, Nj, 0)
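
A small usage sketch for the function above; `n_axis` is assumed to be `np.newaxis` in the source project, and the numbers are made up:

import numpy as np
from numpy.random import uniform
n_axis = np.newaxis

Nj = 10
yj = np.array([3, 7])             # two Binomial(10, .) observations
zM = np.array([[0.1], [0.2]])     # their continuous representations (r = 1)
yk, zk = bin_to_bern(Nj, yj, zM)
print(yk.shape, zk.shape)         # (20,) and (20, 1): Nj Bernoulli draws per observation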
Code example #10
 def exp_series(center, spread):
     return np.exp(npr.uniform(center - spread, center + spread, count))
Code example #11
 def lin_series(center, spread):
     return npr.uniform(center - spread, center + spread, count)
Code example #12
File: test_niw.py  Project: zilongzhong/svae
def rand_niw(n):
    S = rand_psd(n) + n * np.eye(n)
    m = npr.randn(n)
    kappa = n + npr.uniform(1, 3)
    nu = n + npr.uniform(1, 3)
    return standard_to_natural(S, m, kappa, nu)
Code example #13
File: pendulum_world.py  Project: mzhuang1/opfmbrl
 def new_day(self):
     angles = npr.uniform(0, 2 * np.pi, self.N)
     omegas = npr.uniform(-1.0, 1.0, self.N)
     self.state = angles, omegas
Code example #14
def sample_kumaraswamy(a, b):  # a, b: [BS, nz-1]
    # inverse-CDF sampling: with u ~ Uniform(0, 1), (1 - u**(1/b))**(1/a) ~ Kumaraswamy(a, b)
    u = npr.uniform(size=b.shape)
    #a,b = np.exp(a), np.exp(b)
    #print(a,b)
    return (1 - u**(1 / b))**(1 / a)
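
A hedged usage sketch; the shapes are illustrative only:

import numpy as np
import numpy.random as npr

a = np.full((4, 7), 2.0)    # Kumaraswamy a parameters, shape [BS, nz-1]
b = np.full((4, 7), 3.0)    # Kumaraswamy b parameters
v = sample_kumaraswamy(a, b)
assert v.shape == (4, 7) and np.all((v > 0) & (v <= 1))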
Code example #15
def initializer(args):
    # Glorot/Xavier uniform initialization: U(-r, r) with r = sqrt(6 / (fan_in + fan_out))
    r = np.sqrt(6 / sum(args))
    return rnd.uniform(low=-r, high=r, size=args)
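
A short usage sketch, assuming `rnd` is `numpy.random` (the layer sizes are made up):

import numpy as np
import numpy.random as rnd

W = initializer((784, 256))             # hidden-layer weight matrix
r = np.sqrt(6 / (784 + 256))
print(W.shape, np.abs(W).max() <= r)    # (784, 256) True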
Code example #16
 def __init__(self, K, D, M):
     super(InputVonMisesObservations, self).__init__(K, D, M)
     # kernel acting on the input to produce the von Mises mean
     self.mus = npr.randn(K, D, M)
     # concentrations drawn uniformly from (0, 1], stored on the log scale
     self.log_kappas = np.log(-1 * npr.uniform(low=-1, high=0, size=(K, D)))
Code example #17
File: test_niw.py  Project: mattjj/svae
def rand_niw(n):
    S = rand_psd(n) + n * np.eye(n)
    m = npr.randn(n)
    kappa = n + npr.uniform(1, 3)
    nu = n + npr.uniform(1, 3)
    return standard_to_natural(S, m, kappa, nu)
Code example #18
 def __init__(self, K, D, M=0):
     super(VonMisesObservations, self).__init__(K, D, M)
     self.mus = npr.randn(K, D)
     max_k = 9
     # concentrations drawn uniformly from (0, max_k], stored on the log scale
     self.log_kappas = np.log(
         -1 * npr.uniform(low=-1 * max_k, high=0, size=(K, D)))
Code example #19
# Importing data
#===========================================#
os.chdir('C:/Users/rfuchs/Documents/These/Stats/mixed_dgmm/datasets')

heart = pd.read_csv('heart_statlog/heart.csv', sep=' ', header=None)
y = heart.iloc[:, :-1]
labels = heart.iloc[:, -1]
labels = np.where(labels == 1, 0, labels)
labels = np.where(labels == 2, 1, labels)

y = y.infer_objects()
numobs = len(y)

# This "continuous" variable has too many zeros. Add a little noise so that
# the per-group correlation matrices do not blow up
uniform_draws = uniform(0, 1E-12, numobs)
y.iloc[:, 9] = np.where(y[9] == 0, uniform_draws, y[9])

n_clusters = len(np.unique(labels))
p = y.shape[1]

#===========================================#
# Formatting the data
#===========================================#
var_distrib = np.array(['continuous', 'bernoulli', 'categorical', 'continuous',\
                        'continuous', 'bernoulli', 'categorical', 'continuous',\
                        'bernoulli', 'continuous', 'ordinal', 'ordinal',\
                        'categorical'])

# Ordinal data already encoded
Code example #20
    def run(self,
            epochs,
            batch_size,
            samples,
            learning_rate,
            algorithm='SGD',
            optimizer='adam'):
        # number of minibatches per epoch (integer division so it can be used with range())
        batches = self.model.N // batch_size

        means, unflatten = flatten(self.params['means'])
        log_sigmas, unflatten = flatten(self.params['log_sigmas'])
        D = len(means)

        self.F = np.zeros(epochs * batches)
        self.time = np.zeros(epochs * batches)
        adam = Adam(2 * D)
        f = 0

        grad_p_log_prob = grad(model.p_log_prob, argnum=1)
        grad_q_log_prob = grad(model.q_log_prob, argnum=1)

        if algorithm == 'SGD':
            for e in range(epochs):
                for b in range(batches):
                    start = time.perf_counter()
                    losses = 0.
                    d_elbo = 0.
                    idx = np.random.choice(np.arange(self.model.N),
                                           batch_size,
                                           replace=False)

                    for s in range(samples):
                        eps = npr.randn(D)
                        z = np.exp(log_sigmas) * eps + means
                        p_log_prob = model.p_log_prob(idx, unflatten(z))
                        dp_log_prob, _ = flatten(
                            grad_p_log_prob(idx, unflatten(z)))
                        g = model.grad_params(dp_log_prob, eps, log_sigmas)
                        d_elbo += g
                        q_log_prob = model.q_log_prob(means, log_sigmas, z)
                        losses += (p_log_prob - q_log_prob)
                    loss = losses / samples
                    d_elbo /= samples
                    means_old, log_sigmas_old = means, log_sigmas
                    means, log_sigmas = adam.update(
                        d_elbo, np.concatenate([means, log_sigmas]),
                        learning_rate)
                    if np.sum(np.isnan(means)) > 0 or np.sum(
                            np.isnan(log_sigmas)) > 0:
                        means, log_sigmas = means_old, log_sigmas_old
                        learning_rate = learning_rate * .1
                    self.F[f] = -loss

                    stop = time.perf_counter()
                    self.time[f] = stop - start
                    f += 1
                if e % 1 == 0:
                    pstate = 'Epoch = ' + "{0:0=3d}".format(
                        e) + ': Loss = {0:.3f}'.format(self.F[f - 1])
                    print(pstate, end='\r')
                    sys.stdout.flush()

        if algorithm == 'iSGD':
            n = 1.
            z_old = [0.] * samples
            dp_log_prob_old = [0.] * samples
            phi_log_prob_old = [0.] * samples

            for e in range(epochs):
                for b in range(batches):
                    start = time.perf_counter()
                    losses = 0.
                    d_elbo = 0.
                    idx = np.random.choice(np.arange(self.model.N),
                                           batch_size,
                                           replace=False)
                    """ Choice of when to use the importance sampled estimate of the gradient dependent on n = npr.uniform()
						Here, the inference uses the importance sampled estimates 90% of the time. """
                    if n > .9:
                        for s in range(samples):
                            eps = npr.randn(D)
                            z = np.exp(log_sigmas) * eps + means
                            p_log_prob = model.p_log_prob(idx, unflatten(z))
                            q_log_prob = model.q_log_prob_sep(
                                means, log_sigmas, z)
                            dp_log_prob, _ = flatten(
                                grad_p_log_prob(idx, unflatten(z)))
                            g = model.grad_params(dp_log_prob, eps, log_sigmas)
                            d_elbo += g
                            losses += (p_log_prob - np.sum(q_log_prob))

                            z_old[s] = z
                            dp_log_prob_old[s] = dp_log_prob
                            phi_log_prob_old[s] = model.phi_log_prob_sep(eps)
                        loss = losses / samples
                        d_elbo /= samples
                    else:
                        for s in range(samples):
                            eps = (z_old[s] - means) / np.exp(log_sigmas)
                            phi_log_prob = model.phi_log_prob_sep(eps)
                            w = np.exp(phi_log_prob - phi_log_prob_old[s])
                            g = model.grad_params(w * dp_log_prob_old[s], eps,
                                                  log_sigmas)
                            d_elbo += g
                        d_elbo /= samples
                    n = npr.uniform()
                    means_old, log_sigmas_old = means, log_sigmas
                    means, log_sigmas = adam.update(
                        d_elbo, np.concatenate([means, log_sigmas]),
                        learning_rate)
                    if np.sum(np.isnan(means)) > 0 or np.sum(
                            np.isnan(log_sigmas)) > 0:
                        means, log_sigmas = means_old, log_sigmas_old
                        learning_rate = learning_rate * .9
                        n = 1.
                    self.F[f] = -loss
                    stop = time.perf_counter()
                    self.time[f] = stop - start
                    f += 1
                if e % 1 == 0:
                    pstate = 'Epoch = ' + "{0:0=3d}".format(
                        e) + ': Loss = {0:.3f}'.format(self.F[f - 1])
                    print(pstate, end='\r')
                    sys.stdout.flush()
        self.params = {
            'means': unflatten(means),
            'log_sigmas': unflatten(log_sigmas)
        }