def noise(self, noise):
	"""
	Enables or disables additive Gaussian noise. Disabling the noise will
	delete the stored noise covariance matrix.

	When a covariance matrix is given, a Gaussian subspace is prepended (if
	not already present) and the first C{num_visibles} columns of C{A} are
	set to the matrix square root of the covariance.

	@type  noise: ndarray/bool
	@param noise: the covariance matrix of the assumed noise or True/False
	"""
	if isinstance(noise, ndarray):
		if not self._noise:
			self._noise = True

			# add Gaussian subspace representing noise
			self.subspaces.insert(0, GSM(self.num_visibles, 1))
			self.A = hstack([eye(self.num_visibles) / 20., self.A])
			self.num_hiddens += self.num_visibles

		# store square root of the noise covariance in the noise features
		self.A[:, :self.num_visibles] = sqrtm(noise)

	else:
		if self._noise != noise:
			self._noise = noise

			if self._noise:
				# add Gaussian subspace representing noise
				self.subspaces.insert(0, GSM(self.num_visibles, 1))
				self.A = hstack([eye(self.num_visibles) / 20., self.A])
				self.num_hiddens += self.num_visibles
			else:
				# remove subspace representing noise
				# FIX: list.remove(0) searches for an element *equal to* 0
				# and raises ValueError for a list of GSMs; delete by index
				del self.subspaces[0]
				self.A = self.A[:, self.num_visibles:]
				self.num_hiddens -= self.num_visibles
def build_gsm(info, channels=None, conv_bias=False, num_segments=3):
	"""
	Build a GSM module from a layer-info dict.

	@param info: dict with keys 'id', 'expr', and (presumably always) 'attrs'
		holding at least 'fPlane' — TODO confirm against callers
	@param channels: unused, kept for interface compatibility
	@param conv_bias: unused, kept for interface compatibility
	@param num_segments: number of segments passed to the GSM module
	@return: (id, output variable name, GSM module, out_channels, input variable name)
	"""
	# avoid shadowing the builtin `id`
	layer_id = info['id']
	# FIX: the fallback was list(), but `attr` is indexed with a string key
	# below — an empty dict is the only coherent default here (a missing
	# 'attrs' would still fail on 'fPlane'; presumably callers always set it)
	attr = info['attrs'] if 'attrs' in info else dict()

	out, op, in_vars = parse_expr(info['expr'])

	out_channels = attr['fPlane']
	gsm = GSM(out_channels, num_segments=num_segments)

	return layer_id, out[0], gsm, out_channels, in_vars[0]
def init_gsm(self):
	"""Set up and start the GSM modem, but only if its serial port exists."""
	if os.path.exists(options.gsm_port):
		modem = GSM(options.gsm_port, options.gsm_baudrate, options.gsm_pin)
		self.gsm = modem
		init_handles(modem)
		modem.run()
		# deliver any SMS messages that arrived while we were offline
		modem.process_stored_sms()
class Application(tornado.web.Application):
	"""Tornado web application wired to the DB session and an optional GSM modem."""

	def __init__(self):
		settings = {
			'debug': True,
			'static_path': make_path("static"),
			'template_path': make_path("template"),
		}
		tornado.web.Application.__init__(self, controllers.routes, **settings)

		self.db = Session
		self.init_gsm()

	def init_gsm(self):
		"""Attach and start the GSM modem when its serial port is present."""
		if not os.path.exists(options.gsm_port):
			# no modem connected; leave self.gsm unset
			return
		modem = GSM(options.gsm_port, options.gsm_baudrate, options.gsm_pin)
		self.gsm = modem
		init_handles(modem)
		modem.run()
		# flush SMS messages stored on the SIM while we were offline
		modem.process_stored_sms()
def create_dadgm_gradients(self, loss, deterministic=False):
	"""Gradients from the base GSM model, norm-constrained and then clipped."""
	raw_grads = GSM.create_gradients(self, loss, deterministic)

	# rescale so the joint gradient norm stays below 5,
	# then clip each gradient element-wise to [-1, 1]
	bounded = total_norm_constraint(raw_grads, max_norm=5)
	return [T.clip(g, -1, 1) for g in bounded]
def main():
	"""Poll the RFID reader and send an SMS notification when the known card is seen."""
	# NOTE(review): this imports the *module* gsm.GSM as GSM and then calls
	# it — presumably GSM is re-exported as a callable; confirm
	import gsm.GSM as GSM
	gsm = GSM("/dev/ttyUSB0", timeout=0.5)
	gsm.begin()
	# Main loop to detect cards and read a block.
	print('Waiting for MiFare card...')
	while True:
		# Check if a card is available to read.
		uid = rfid.read_passive_target()
		# Try again if no card is available.
		if uid is None:
			continue
		# NOTE(review): on Python 3, hexlify returns bytes so this str
		# comparison would never match — looks like Python 2 code; confirm
		if binascii.hexlify(uid) == "0d345b95":
			GPIO.output(LED, True)  # Turn LED on
			sleep(1)
			GPIO.output(LED, False)  # Turn LED off
			gsm.send_sms(18433033157, "You have mail.")
			sys.exit(0)  # Exit the program
def __init__(self, num_visibles, num_hiddens=None, ssize=1, num_scales=10, noise=False):
	"""
	@type  num_visibles: integer
	@param num_visibles: data dimensionality

	@type  num_hiddens: integer
	@param num_hiddens: number of hidden units (defaults to num_visibles)

	@type  ssize: integer
	@param ssize: subspace dimensionality

	@type  num_scales: integer
	@param num_scales: number of scales of each subspace GSM

	@type  noise: bool/ndarray
	@param noise: add additional hidden units for noise
	"""
	if num_hiddens is None:
		num_hiddens = num_visibles

	self.dim = num_visibles
	self.num_visibles = num_visibles
	self.num_hiddens = num_hiddens

	# random linear feature
	self.A = randn(self.num_visibles, self.num_hiddens) / 10.

	# subspace densities: one GSM per full subspace of dimension `ssize`
	# FIX: use floor division so the count stays an integer under true
	# division (Python 3 / `from __future__ import division`)
	self.subspaces = [
		GSM(ssize, num_scales) for _ in range(int(num_hiddens) // int(ssize))]

	# a smaller GSM covers any remaining hidden units
	if mod(num_hiddens, ssize) > 0:
		self.subspaces.append(GSM(mod(num_hiddens, ssize)))

	self._noise = False
	# presumably triggers the `noise` property setter — confirm class defines it
	self.noise = noise
def initialize(self, X=None, method='data'):
	"""
	Initializes parameter values with more sensible values.

	@type  X: array_like
	@param X: data points stored in columns

	@type  method: string
	@param method: type of initialization ('data', 'gabor' or 'random')
	"""
	if self.noise:
		# remember noise features so they can be restored at the end
		L = self.A[:, :self.num_visibles]

	if method.lower() == 'data':
		# initialize features with data points
		if X is not None:
			if X.shape[1] < self.num_hiddens:
				# FIX: typo in error message ("to" -> "too")
				raise ValueError('Number of data points too small.')

			else:
				# whitening matrix
				val, vec = eig(cov(X))

				# whiten data
				X_ = dot(dot(diag(1. / sqrt(val)), vec.T), X)

				# sort by norm in whitened space
				indices = argsort(sqrt(sum(square(X_), 0)))[::-1]

				# pick 25% largest data points and normalize
				# FIX: floor division so the slice index stays an integer
				X_ = X_[:, indices[:max([X.shape[1] // 4, self.num_hiddens])]]
				X_ = X_ / sqrt(sum(square(X_), 0))

				# pick first basis vector at random
				A = X_[:, [randint(X_.shape[1])]]

				for _ in range(self.num_hiddens - 1):
					# pick vector with large angle to all other vectors
					# NOTE(review): `max(..., 0)` presumably resolves to
					# numpy's amax over axis 0 via a star import — confirm
					A = hstack([
						A, X_[:, [argmin(max(abs(dot(A.T, X_)), 0))]]])

				# orthogonalize and unwhiten
				A = dot(sqrtmi(dot(A, A.T)), A)
				A = dot(dot(vec, diag(sqrt(val))), A)

				self.A = A

	elif method.lower() == 'gabor':
		# initialize features with Gabor filters
		if self.subspaces[0].dim > 1 and not mod(self.num_hiddens, 2):
			# pair real and imaginary parts of complex Gabor filters
			for i in range(self.num_hiddens // 2):
				G = gaborf(self.num_visibles)
				self.A[:, 2 * i] = real(G)
				self.A[:, 2 * i + 1] = imag(G)
		else:
			for i in range(len(self.subspaces)):
				self.A[:, i] = gaborf(self.num_visibles, complex=False)

	elif method.lower() == 'random':
		# initialize with Gaussian white noise
		# FIX: `num_visibles`/`num_hiddens` were unbound names here
		# (NameError); use the instance attributes
		self.A = randn(self.num_visibles, self.num_hiddens)

	elif method.lower() in ['laplace', 'student', 'cauchy', 'exponpow']:
		# draw radially symmetric samples from the target distribution
		if method.lower() == 'laplace':
			# approximate multivariate Laplace with GSM
			samples = randn(self.subspaces[0].dim, 10000)
			samples = samples / sqrt(sum(square(samples), 0))
			samples = laplace.rvs(size=[1, 10000]) * samples

		elif method.lower() == 'student':
			samples = randn(self.subspaces[0].dim, 50000)
			samples = samples / sqrt(sum(square(samples), 0))
			samples = t.rvs(2., size=[1, 50000]) * samples

		elif method.lower() == 'exponpow':
			exponent = 0.8
			samples = randn(self.subspaces[0].dim, 200000)
			samples = samples / sqrt(sum(square(samples), 0))
			samples = gamma(1. / exponent, 1., (1, 200000))**(1. / exponent) * samples

		else:
			samples = randn(self.subspaces[0].dim, 100000)
			samples = samples / sqrt(sum(square(samples), 0))
			samples = cauchy.rvs(size=[1, 100000]) * samples

		if self.noise:
			# ignore first subspace (it represents the noise)
			# NOTE(review): samples are drawn with subspaces[0].dim above but
			# fit against subspaces[1].dim here — assumes equal dims; confirm
			gsm = GSM(self.subspaces[1].dim, self.subspaces[1].num_scales)
			gsm.train(samples, max_iter=200, tol=1e-8)

			for m in self.subspaces[1:]:
				m.scales = gsm.scales.copy()
		else:
			# approximate distribution with GSM
			gsm = GSM(self.subspaces[0].dim, self.subspaces[0].num_scales)
			gsm.train(samples, max_iter=200, tol=1e-8)

			for m in self.subspaces:
				m.scales = gsm.scales.copy()

	else:
		raise ValueError('Unknown initialization method \'{0}\'.'.format(method))

	if self.noise:
		# don't initialize noise covariance
		self.A[:, :self.num_visibles] = L
def train_subspaces(self, Y, **kwargs):
	"""
	Improves likelihood through spliting and merging of subspaces.

	This function may rearrange the order of subspaces and corresponding
	linear features.

	@type  max_merge: integer
	@param max_merge: maximum number of subspaces merged

	@type  max_iter: integer
	@param max_iter: maximum number of iterations for training joint L{GSM}

	@type  Y: array_like
	@param Y: hidden states

	@rtype: ndarray
	@return: data rearranged so that it aligns with subspaces
	"""
	max_merge = kwargs.get('max_merge', self.num_hiddens)
	max_iter = kwargs.get('max_iter', 10)

	if len(self.subspaces) > 1:
		# compute indices for each subspace (rows of Y belonging to it)
		indices = []
		index = 0
		for gsm in self.subspaces:
			indices.append(arange(gsm.dim) + index)
			index += gsm.dim

		# compute subspace energies (Euclidean norm within each subspace)
		energies = []
		for i, gsm in enumerate(self.subspaces):
			energies.append(sqrt(sum(square(Y[indices[i]]), 0)))
		energies = vstack(energies)

		# determine correlation of subspace energies; only the strict lower
		# triangle is kept so each pair is considered once
		corr = corrcoef(energies)
		corr = corr - triu(corr)

		if self._noise:
			# noise subspace shouldn't get merged
			corr[:, 0] = -1.

		for _ in range(max_merge):
			# pick subspaces with maximal correlation
			# NOTE(review): `max(corr, 0)` presumably resolves to numpy's
			# amax over axis 0 via a star import — confirm
			col = argmax(max(corr, 0))
			row = argmax(corr[:, col])

			# stop once no positively correlated pair remains
			if corr[row, col] <= 0.:
				break
			# mark this pair as visited so it isn't picked again
			corr[row, col] = 0.

			# extract data from subspaces
			Y_row = Y[indices[row]]
			Y_col = Y[indices[col]]
			Y_jnt = vstack([Y_row, Y_col])

			# train joint model, warm-started from subspace `col`'s scales
			gsm = GSM(Y_jnt.shape[0], self.subspaces[col].num_scales)
			gsm.scales = self.subspaces[col].scales.copy()
			gsm.train(Y_jnt, max_iter=max_iter)

			# log-likelihood improvement of the joint model over the two
			# separate subspace models
			mi = mean(gsm.loglikelihood(Y_jnt) \
				- self.subspaces[col].loglikelihood(Y_col) \
				- self.subspaces[row].loglikelihood(Y_row))

			if mi > 0:
				# merging helps: adopt the joint model (appended at the end)
				self.subspaces.append(gsm)

				# rearrange linear filters: move merged columns to the end
				subspace_indices = concatenate([indices[row], indices[col]])
				self.A = hstack([self.A, self.A[:, subspace_indices]])
				self.A = delete(self.A, subspace_indices, 1)

				# rearrange data the same way so it stays aligned with A
				Y = vstack([Y, Y[subspace_indices, :]])
				Y = delete(Y, subspace_indices, 0)

				# remove merged subspaces from correlation matrix
				# NOTE(review): corr rows/cols are assumed to track the
				# (shrinking) subspace list — verify alignment after deletion
				corr = delete(corr, [row, col], 0)
				corr = delete(corr, [row, col], 1)

				# update indices: shift row offsets of subspaces that came
				# after the removed ones (reads dims before deleting below)
				for k in range(row + 1, len(indices)):
					indices[k] -= self.subspaces[row].dim
				for k in range(col + 1, len(indices)):
					indices[k] -= self.subspaces[col].dim

				# delete the higher index first so the lower stays valid
				if row < col:
					del self.subspaces[col]
					del self.subspaces[row]
					del indices[col]
					del indices[row]
				else:
					del self.subspaces[row]
					del self.subspaces[col]
					del indices[row]
					del indices[col]

				if Distribution.VERBOSITY > 0:
					print 'Merged subspaces.'

				# nothing left to compare
				if corr.size == 0:
					break

	return Y
def __init__(self, dim, num_components, num_scales):
	"""
	Mixture of Gaussian scale mixtures: registers `num_components` GSM
	components of dimensionality `dim`, each with `num_scales` scales.
	"""
	Mixture.__init__(self)

	# register one freshly constructed GSM per mixture component
	for _ in range(num_components):
		self.add_component(GSM(dim, num_scales))
def create_dadgm_updates(self, grads, params, alpha, opt_alg, opt_params):
	"""Delegate update creation entirely to the base GSM implementation."""
	updates = GSM.create_updates(self, grads, params, alpha, opt_alg, opt_params)
	return updates