def _sample_laddered_layer(lateral, s):
    # Sample one layer whose units have within-layer ("laddered") lateral
    # connections, in place. On entry `s` holds each unit's pre-activation
    # input from the layer above; units are then sampled left to right, with
    # each unit also receiving input from already-sampled units in this layer.
    #
    # NOTE(review): `lateral[:,0]` is added to every unit up front — looks
    # like a per-unit bias column in the lateral weight matrix; confirm
    # against the code that builds `lateral`.
    s[...,:] += lateral[:,0]
    # Unit 0 has no within-layer predecessors: sample it directly.
    s[...,0] = sample_indicator(sigmoid(s[...,0]))
    for i in range(1, s.shape[-1]):
        # Each unit sees at most lateral.shape[1]-1 preceding units.
        j = min(i, lateral.shape[1]-1)
        # The reversed slice lateral[i,j:0:-1] pairs lateral[i,j] with the
        # farthest predecessor s[...,i-j] and lateral[i,1] with the nearest
        # s[...,i-1], i.e. lateral[i,k] weights the unit k steps back.
        s[...,i] += s[...,i-j:i].dot(lateral[i,j:0:-1])
        # Stochastic binary sample of unit i given everything sampled so far.
        s[...,i] = sample_indicator(sigmoid(s[...,i]))
    return s
def sample_generative_dist(self, size = None, all_layers = False, top_units = None):
    """ Sample the generative distribution.

    Parameters:
    -----------
    size : int, optional [default None]
        The number of samples to draw. If None, returns a single sample.

    all_layers : bool, optional [default False]
        By default, an array of input unit samples is returned. If
        'all_layers' is True, a list of sample arrays for *all* the layers,
        in top-to-bottom order, is returned.

    top_units : bit vector, optional
        By default, the top-level units are sampled from the generative
        biases. This parameter clamps the top-level units to specific values.

    Returns:
    --------
    A (list of) 2D sample array(s), where the first dimension indexes the
    individual samples. See 'all_layers' parameter.
    """
    clamped = top_units is not None
    # Start from the clamped values, or from the top-level generative biases.
    top = top_units if clamped else self.G_top
    if size is not None:
        # Replicate the top-level state once per requested sample.
        top = np.tile(top, (size, 1))
    if not clamped:
        # Biases are logits: squash and draw stochastic binary units.
        top = sample_indicator(sigmoid(top))
    # Propagate the sampled (or clamped) top layer down the generative net.
    samples = _sample_factorial_network(self.G, top)
    if all_layers:
        return samples
    return samples[-1]
def _sample_boltzmann_layer(group_size, s):
    # Sample a layer of softmax ("Boltzmann") groups in place: the flat last
    # axis of `s` is treated as consecutive groups of `group_size` units, and
    # exactly one unit per group is turned on.
    # Shortcut for degenerate case.
    if group_size == 1:
        # Groups of one reduce to independent binary units; `out=s` keeps
        # the whole computation in place.
        return sample_indicator(sigmoid(s, out=s), out=s)
    # View the flat last axis as (..., n_groups, group_size).
    # NOTE(review): in-place `.shape` assignment requires `s` to be
    # C-contiguous — confirm callers never pass a non-contiguous view.
    s.shape = s.shape[:-1] + (-1, group_size)
    # Normalize each group to a Boltzmann distribution, then draw a single
    # winner per group (one-hot sample), both in place.
    boltzmann_dist(s, axis=-1, out=s)
    sample_exclusive_indicators(s, axis=-1, out=s)
    # Restore the original flat layout before returning.
    s.shape = s.shape[:-2] + (-1,)
    return s
def _sleep(G, G_top, R, rate):
    """ Perform the sleep phase of wake-sleep learning.

    A "dream" is sampled from the generative network, then the recognition
    network is trained, layer by layer, to reproduce that dream with a
    simple delta rule.

    Parameters:
    -----------
    G : list of 2D generative weight arrays (last row holds the biases,
        matching their use in `_sample_factorial_network`).
    G_top : logit vector for the top-level generative biases.
    R : list of 2D recognition weight arrays, bottom-to-top order.
    rate : sequence of per-layer learning rates; it is reversed here,
        presumably to convert G's top-to-bottom order to R's order —
        confirm against the caller.
    """
    # Generate a dream.
    d = sample_indicator(sigmoid(G_top))
    dreams = _sample_factorial_network(G, d)
    dreams.reverse()

    # Pass back up through the recognition network and adjust weights.
    R_probs = _probs_for_factorial_network(R, dreams)
    # Fix: `izip` is Python 2-only (itertools.izip); the builtin `zip` is
    # behaviorally identical here and works on both Python 2 and 3.
    for R_weights, inputs, target, recognized, step \
            in zip(R, dreams, dreams[1:], R_probs, rate[::-1]):
        # Delta rule: nudge recognition probabilities toward the dreamed
        # target. Weight rows get the outer-product update; the last row
        # (bias) gets the plain error term.
        R_weights[:-1] += step * np.outer(inputs, target - recognized)
        R_weights[-1] += step * (target - recognized)
def _sample_factorial_network(layers, s):
    # Sample a factorial network layer by layer, starting from the given
    # top-level sample `s`. Each entry of `layers` is a 2D array whose last
    # row holds the biases; the sampled state of every layer is collected
    # and returned top-to-bottom (input layer last).
    samples = [s]
    for weights in layers:
        # Affine pre-activation, then stochastic binary sampling.
        activation = s.dot(weights[:-1]) + weights[-1]
        s = sample_indicator(sigmoid(activation))
        samples.append(s)
    return samples