def __init__(self, D, H, Hprime=0, gamma=0, sigma_sq_type='scalar',
             to_learn=['W', 'pi', 'mu', 'sigma_sq', 'psi_sq'],
             comm=MPI.COMM_WORLD):
    CAModel.__init__(self, D, H, Hprime, gamma, to_learn, comm)

    # Noise Policy
    tol = 1e-5
    self.noise_policy = {
        'W': (-np.inf, +np.inf, False),
        'pi': (tol, 1. - tol, False),
        'sigma_sq': (0., +np.inf, False),
        'mu': (-np.inf, +np.inf, False),
        'psi_sq': (0., +np.inf, False)
    }

    # Fall back to the full setting (gamma = H, Hprime = H) when the defaults (0)
    # or out-of-range values are given; if Hprime is valid but smaller than the
    # requested gamma, cap gamma at Hprime.
    if gamma <= 0 or gamma > H:
        self.gamma = self.H
    if Hprime <= 0 or Hprime > H:
        self.Hprime = self.H
    elif Hprime < gamma:
        self.gamma = self.Hprime

    self.sigma_sq_type = sigma_sq_type
    self.dtype_precision = np.float64
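# --- Illustrative sketch (not part of the model code) -----------------------
# How a noise_policy entry like the ones above could be applied. The tuples are
# consumed by the EM/annealing driver, which is not shown in this file; this
# sketch assumes the layout is (lower bound, upper bound, absify flag), where
# the flag (True for 'W' in the MCA-ET policy further below) means "take the
# absolute value before clamping". apply_noise_policy is a hypothetical helper
# used only for illustration.
import numpy as np

def apply_noise_policy(value, policy, noise_scale=0.01, rng=np.random):
    low, high, absify = policy
    noisy = value + rng.normal(scale=noise_scale, size=np.shape(value))
    if absify:
        noisy = np.abs(noisy)          # e.g. keep W nonnegative
    return np.clip(noisy, low, high)   # stay inside the policy bounds

tol = 1e-5
pi = np.array([0.0, 0.5, 1.0])
print(apply_noise_policy(pi, (tol, 1. - tol, False)))  # values end up in [tol, 1 - tol]
# -----------------------------------------------------------------------------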
def __init__(self, D, H, Hprime, gamma, to_learn=['W', 'pi', 'sigma'], comm=MPI.COMM_WORLD):
    CAModel.__init__(self, D, H, Hprime, gamma, to_learn, comm)
def __init__(self, D, H, Hprime, gamma, states=np.array([-1., 0., 1.]),
             to_learn=['W', 'pi', 'sigma'], comm=MPI.COMM_WORLD):
    CAModel.__init__(self, D, H, Hprime, gamma, to_learn, comm)
    self.comm = comm

    if not isinstance(states, np.ndarray):
        raise TypeError("DSC: states must be of type numpy.ndarray")
    if Hprime > H:
        raise Exception("Hprime must be less than or equal to H")
    if gamma > Hprime:
        raise Exception("gamma must be less than or equal to Hprime")

    self.to_learn = to_learn

    # Model meta-parameters
    self.D = D
    self.H = H
    self.Hprime = Hprime
    self.gamma = gamma
    self.states = states
    self.K = self.states.shape[0]
    self.K_0 = int(np.argwhere(states == 0.))   # index of the zero (inactive) state

    # some sanity checks
    assert Hprime <= H
    assert gamma <= Hprime

    # Noise Policy
    tol = 1e-5
    self.noise_policy = {
        'W': (-np.inf, +np.inf, False),
        'pi': (tol, 1. - tol, False),
        'sigma': (0., +np.inf, False)
    }

    # Generate state-space list
    ss = np.empty((0, self.H), dtype=np.int8)
    for i in range(self.K):
        if i == self.K_0:
            continue
        temp = np.eye(self.H, dtype=np.int8) * states[i]
        ss = np.concatenate((ss, temp))
    # all hidden vectors with a single active cause - a (2*H) x H matrix for ternary states
    self.single_state_matrix = ss[np.sum(np.abs(np.sign(ss)), 1) == 1]
    # all hidden vectors with more than one active cause
    self.state_matrix = get_states(self.states, self.Hprime, self.gamma)
    # number of states with more than one active cause
    self.no_states = self.state_matrix.shape[0]
    # state_abs[i, s]: how many latent units take value states[i] in state s
    self.state_abs = np.empty((self.K, self.no_states))
    for i in range(self.K):
        self.state_abs[i, :] = (self.state_matrix == self.states[i]).sum(axis=1)
    # units not covered by the rows of state_matrix are implicitly zero
    self.state_abs[self.K_0, :] = self.H - self.state_abs.sum(0) \
        + self.state_abs[self.K_0, :]
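# --- Illustrative sketch (not part of the model code) -----------------------
# Self-contained reproduction of the single-cause state construction above,
# assuming the default ternary states [-1, 0, 1] and a small, made-up H so the
# resulting matrix is easy to inspect; get_states() and the multi-cause
# state_matrix are left out because their implementation is not shown here.
import numpy as np

H = 3
states = np.array([-1., 0., 1.])
K_0 = int(np.argwhere(states == 0.)[0, 0])     # index of the zero state

ss = np.empty((0, H), dtype=np.int8)
for i in range(len(states)):
    if i == K_0:
        continue                               # skip the inactive (zero) value
    ss = np.concatenate((ss, np.eye(H, dtype=np.int8) * states[i]))

single_state_matrix = ss[np.sum(np.abs(np.sign(ss)), 1) == 1]
print(single_state_matrix.shape)               # (2*H, H) for ternary states
print(single_state_matrix)
# -----------------------------------------------------------------------------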
def __init__(self, D, H, Hprime, gamma, to_learn=['W', 'pi', 'sigma'], comm=MPI.COMM_WORLD):
    """ MCA-ET init method.

    Takes data dimension *D*, number of hidden causes *H*, and ET
    approximation parameters *Hprime* and *gamma*. Optional list of
    parameters *to_learn* and MPI *comm* object.
    """
    CAModel.__init__(self, D, H, Hprime, gamma, to_learn, comm)

    self.rho_temp_bound = 1.05    # for rho: never use a T smaller than this
    self.W_tol = 1e-4             # for W: ensure W[W < W_tol] = W_tol

    # Noise Policy
    W_tol = self.W_tol
    self.noise_policy = {
        'W':     (W_tol, +np.inf, True),
        'pi':    (W_tol, 1. - W_tol, False),
        'sigma': (W_tol, +np.inf, False)
    }
def check_params(self, model_params):
    """ Sanity-check the given model parameters.

    Raises an exception if something is severely wrong.
    """
    # XXX
    model_params = CAModel.check_params(self, model_params)

    # Obey W_tol
    model_params['W'] = np.maximum(model_params['W'], self.W_tol)

    return model_params
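# --- Illustrative sketch (not part of the model code) -----------------------
# What the W_tol floor in check_params above does, shown on a toy parameter
# dict (the CAModel.check_params base call is omitted since its body is not
# part of this file): every entry of W below W_tol, including negative values,
# is raised to W_tol.
import numpy as np

W_tol = 1e-4
model_params = {'W': np.array([[0.5, -0.2],
                               [1e-6, 0.3]])}
model_params['W'] = np.maximum(model_params['W'], W_tol)
print(model_params['W'])   # entries below W_tol (including negatives) become W_tol
# -----------------------------------------------------------------------------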
def standard_init(self, my_data):
    """ Standard initialization of the model parameters. """
    comm = self.comm

    temp_params = CAModel.standard_init(self, my_data)
    model_params = {}
    model_params['W'] = temp_params['W'].copy()

    # Initial pi
    pi = comm.bcast(np.random.rand(self.H)) * 0.95
    pi[pi < 0.05] = 0.05
    model_params['pi'] = pi

    my_y = my_data['y']

    # Calculate the data mean
    W_mean = parallel.allmean(my_y, axis=0, comm=comm)                      # shape: (D,)

    # Calculate the per-dimension data variance
    sigma_sq_sq = parallel.allmean((my_y - W_mean)**2, axis=0, comm=comm)   # shape: (D,)

    # Calculate sigma_sq
    if self.sigma_sq_type == 'full':
        # D x D matrix with the data variances on the diagonal plus a small regularizer
        model_params['sigma_sq'] = np.diag(sigma_sq_sq) + 0.001 * np.eye(self.D)
    elif self.sigma_sq_type == 'diagonal':
        model_params['sigma_sq'] = sigma_sq_sq + 0.001
    else:
        model_params['sigma_sq'] = np.mean(sigma_sq_sq) + 0.001

    if 'mu' in self.to_learn:
        mu = comm.bcast(np.random.normal(0, 1, [self.H]))
    else:
        mu = np.zeros(self.H)
    model_params['mu'] = mu

    if 'psi_sq' in self.to_learn:
        psi_sq_diag = comm.bcast(np.random.rand(self.H)) * 2
        psi_sq_diag[psi_sq_diag < 0.05] = 0.05
        psi_sq = np.diag(psi_sq_diag)
    else:
        psi_sq = np.eye(self.H)
    model_params['psi_sq'] = psi_sq

    model_params = comm.bcast(model_params)

    return model_params
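# --- Illustrative sketch (not part of the model code) -----------------------
# Serial version of the sigma_sq initialization above, assuming that
# parallel.allmean(..., axis=0, comm=comm) is equivalent to a plain mean over
# the full data set gathered from all MPI ranks; D, N and the data below are
# made up for illustration.
import numpy as np

D, N = 4, 100
y = np.random.randn(N, D)

W_mean = y.mean(axis=0)                     # shape: (D,)
y_var = ((y - W_mean) ** 2).mean(axis=0)    # per-dimension data variance, shape: (D,)

sigma_sq_full = np.diag(y_var) + 0.001 * np.eye(D)   # 'full': D x D matrix
sigma_sq_diag = y_var + 0.001                        # 'diagonal': shape (D,)
sigma_sq_scalar = float(np.mean(y_var) + 0.001)      # 'scalar': single value
print(sigma_sq_full.shape, sigma_sq_diag.shape, sigma_sq_scalar)
# -----------------------------------------------------------------------------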