import numpy as np
import numpy.random as npr


def classification_data(seed=0):
    """
    Load 2D data. 2 Classes. Class labels generated from a 2-2-1 network.
    :param seed: random number seed
    :return:
    """
    npr.seed(seed)
    data = np.load("./data/2D_toy_data_linear.npz")
    x = data['x']
    y = data['y']
    ids = np.arange(x.shape[0])
    npr.shuffle(ids)
    # 75/25 train/test split
    num_train = int(np.round(0.75 * x.shape[0]))
    x_train = x[ids[:num_train]]
    y_train = y[ids[:num_train]]
    x_test = x[ids[num_train:]]
    y_test = y[ids[num_train:]]
    mu = np.mean(x_train, axis=0)
    std = np.std(x_train, axis=0)
    x_train = (x_train - mu) / std
    x_test = (x_test - mu) / std
    train_stats = dict()
    train_stats['mu'] = mu
    train_stats['sigma'] = std
    return x_train, y_train, x_test, y_test, train_stats
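
A minimal usage sketch for this loader is shown below; it assumes the .npz file referenced above exists and that numpy / numpy.random are imported as np / npr.

x_train, y_train, x_test, y_test, stats = classification_data(seed=0)
print(x_train.shape, x_test.shape)
# any new data should be normalized with the same training statistics:
# x_new = (x_new - stats['mu']) / stats['sigma']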
Example #2
    def __init__(self, K, D, M=0, L=1.0, J=2):
        """
        K: number of discrete states (integer)
        D: data dimension (unused)
        M: input dimension (unused)
        L: length scale (positive real)
        J: latent embedding dimension (integer)
        """
        super(DistanceDependentMazeTransitions, self).__init__(K, D, M=M)
        self.L = L

        # random initialization
        self.ell_labels = np.arange(K)
        npr.shuffle(self.ell_labels)

        self.log_p = np.ones(K)

        ### Initialize with a known maze environment
        ell = np.zeros((K, J))
        ell = make_maze(ell)
        ell_adj = make_adjacency(ell)
        G, ell_dist = make_graph(ell, ell_adj)

        ell_dist_sum = np.sum(ell_dist, axis=1)
        # normalize empirical distances so they are comparable with the diagonal entries of log_p (~1)
        dist_norm = ell_dist / ell_dist_sum[None, :]
        self.dist_norm = dist_norm
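
The snippet ends after computing dist_norm, so how the normalized distances enter the transition model is not shown here. Purely as a hypothetical illustration (not this class's actual method), a distance-dependent transition matrix with length scale L and diagonal bias log_p could be formed like this:

import numpy as np
from scipy.special import logsumexp

def toy_transition_matrix(dist_norm, log_p, L):
    # farther states get lower log-probability, nearer states higher
    K = dist_norm.shape[0]
    log_P = -dist_norm / L
    # add a self-transition bias on the diagonal
    log_P[np.arange(K), np.arange(K)] += log_p
    # row-normalize to obtain a valid stochastic matrix
    return np.exp(log_P - logsumexp(log_P, axis=1, keepdims=True))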
Example #3
        #print('elbo ', lower_bound)
        return -(lower_bound + entropy)

    gradient = grad(variational_objective)

    return variational_objective, gradient, unpack_params
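
# A sketch of typical downstream use (assumes autograd's adam optimizer and an
# init_params array defined elsewhere; the names here are illustrative):
#   from autograd.misc.optimizers import adam
#   opt_params = adam(gradient, init_params, step_size=1e-3, num_iters=2000)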


if __name__ == '__main__':
    # -------------- LOADING DATASET ------------------------
    # load the images
    npr.seed(0)
    _, train_images, train_labels, test_images, test_labels = load_mnist()

    rand_idx = np.arange(train_images.shape[0])
    npr.shuffle(rand_idx)

    train_images = train_images[rand_idx]
    train_labels = train_labels[rand_idx]

    # UNIFORM CLASS SAMPLE CODE
    # uniformly sample each class
    cls_labels = train_labels.argmax(axis=1)
    cls_images = [train_images[cls_labels == i] for i in range(10)]
    # map 30 uniform draws in [0, 1) onto integer class ids 0-9
    rand_cls = np.int32(npr.random(30) / 0.1)
    # pick a random example index within each sampled class
    rand_idx = [npr.randint(cls_images[cls].shape[0]) for cls in rand_cls]

    train_images = np.vstack(
        [cls_images[rand_cls[i]][rand_idx[i]] for i in range(30)])
    train_labels = np.vstack([
        train_labels[cls_labels == rand_cls[i]][rand_idx[i]] for i in range(30)
    ])
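
The block above draws a 30-image subset with roughly uniform class coverage. The same idea, written as a small self-contained helper (hypothetical, with illustrative names, not part of the original script):

import numpy as np
import numpy.random as npr

def sample_uniform_classes(images, labels_onehot, num_samples=30, num_classes=10):
    cls = labels_onehot.argmax(axis=1)
    picked_x, picked_y = [], []
    for _ in range(num_samples):
        c = npr.randint(num_classes)             # pick a class uniformly at random
        idx = npr.choice(np.where(cls == c)[0])  # pick a random example of that class
        picked_x.append(images[idx])
        picked_y.append(labels_onehot[idx])
    return np.vstack(picked_x), np.vstack(picked_y)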
Example #4
pcdata = np.reshape(pcdata, (numtrials, numtimepoints, len(areas)))
print(pcdata.shape)

# Set the parameters of the HMM
T = pcdata.shape[1]   # number of time bins
K = K                 # number of discrete states (set above)
N = pcdata.shape[2]   # number of observed dimensions

print('T:', T, ', K:', K, ', N:', N)

# Put data in list format to feed into the HMM
trialdata = [pcdata[i, :, :] for i in np.arange(pcdata.shape[0])]

# Shuffle the trials
sequence = np.arange(len(trialdata))
npr.shuffle(sequence)

# Divide into training and testing (the test trials were not really used,
# but their log likelihoods can be checked to decide on K)
num_train = int(np.ceil(0.8 * len(trialdata)))
traintrials = [trialdata[j] for j in sequence[:num_train]]
testtrials = [trialdata[j] for j in sequence[num_train:]]
print(len(traintrials))
print(len(testtrials))

# Run the ARHMM
arhmm = HMM(K, N, observations="ar",
            transitions="sticky",
            transition_kwargs=dict(kappa=kappa))

arhmm_em_lls = arhmm.fit(traintrials, method="em", num_em_iters=numiters)
 
 # Get the inferred states for train and test trials
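
The listing is cut off at this point. A plausible continuation, assuming the ssm library's HMM.most_likely_states method, could look like:

train_states = [arhmm.most_likely_states(trial) for trial in traintrials]
test_states = [arhmm.most_likely_states(trial) for trial in testtrials]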