import numpy as np  # assumed by every snippet below

def build_weights_dict(self, input_shape):
    # Input shape is anything (all flattened)
    input_size = np.prod(input_shape, dtype=int)
    self.parser = WeightsParser()
    self.parser.add_weights('params', (input_size, self.size))
    self.parser.add_weights('biases', (self.size,))
    return self.parser.N, (self.size,)
def load_mnist():
    print("Loading training data...")
    # NOTE: urllib.urlretrieve and imp are Python 2 era APIs; under Python 3 use
    # urllib.request.urlretrieve and importlib instead (a sketch follows below).
    import imp, urllib
    partial_flatten = lambda x: np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
    one_hot = lambda x, K: np.array(x[:, None] == np.arange(K)[None, :], dtype=int)
    source, _ = urllib.urlretrieve(
        'https://raw.githubusercontent.com/HIPS/Kayak/master/examples/data.py')
    data = imp.load_source('data', source).mnist()
    train_images, train_labels, test_images, test_labels = data
    train_images = partial_flatten(train_images) / 255.0
    test_images = partial_flatten(test_images) / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]
    return N_data, train_images, train_labels, test_images, test_labels
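# A minimal Python 3 sketch of the same download-and-import step (the helper name
# _fetch_mnist_module is an assumption, not in the original). urllib.request.urlretrieve
# and importlib.util replace the Python 2 urllib.urlretrieve / imp.load_source calls.
def _fetch_mnist_module():
    import importlib.util
    from urllib.request import urlretrieve
    source, _ = urlretrieve(
        'https://raw.githubusercontent.com/HIPS/Kayak/master/examples/data.py')
    spec = importlib.util.spec_from_file_location('data', source)
    data_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(data_module)
    return data_module  # data_module.mnist() yields the four raw arrays, as above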
def add_weights(self, name, shape):
    # Record where this named block of weights lives in the flat parameter vector.
    start = self.N
    self.N += np.prod(shape)
    self.idxs_and_shapes[name] = (slice(start, self.N), shape)
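# A minimal sketch of the rest of WeightsParser that the surrounding snippets assume
# (the __init__ and get shown here are assumptions; only add_weights appears in the
# original). __init__ starts the running offset N at zero, and get slices a named
# block out of a flat parameter vector and reshapes it to its recorded shape.
class WeightsParser(object):
    def __init__(self):
        self.idxs_and_shapes = {}
        self.N = 0

    add_weights = add_weights  # the method defined above

    def get(self, vect, name):
        idxs, shape = self.idxs_and_shapes[name]
        return np.reshape(vect[idxs], shape)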
def forward_pass(self, inputs, param_vector):
    params = self.parser.get(param_vector, 'params')
    biases = self.parser.get(param_vector, 'biases')
    # Flatten everything beyond the batch dimension before the affine transform.
    if inputs.ndim > 2:
        inputs = inputs.reshape((inputs.shape[0], np.prod(inputs.shape[1:])))
    return self.nonlinearity(np.dot(inputs[:, :], params) + biases)
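# Hypothetical end-to-end use of the two layer methods above (FullLayer is an assumed
# wrapper, and this relies on the WeightsParser sketch earlier; only build_weights_dict
# and forward_pass appear in the original): lay out the weights once, draw a flat
# random parameter vector, then evaluate the layer on a batch of flattened images.
class FullLayer(object):
    def __init__(self, size, nonlinearity=np.tanh):
        self.size = size
        self.nonlinearity = nonlinearity

    build_weights_dict = build_weights_dict  # the methods defined above
    forward_pass = forward_pass

layer = FullLayer(size=128)
num_weights, output_shape = layer.build_weights_dict(input_shape=(784,))
param_vector = 0.1 * np.random.randn(num_weights)
activations = layer.forward_pass(np.random.randn(64, 784), param_vector)  # (64, 128)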
def add_shape(self, name, shape):
    start = self.num_weights
    self.num_weights += np.prod(shape)
    self.idxs_and_shapes[name] = (slice(start, self.num_weights), shape)
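# Hypothetical usage of this second parser variant, which tracks num_weights rather
# than N (the ShapeParser class assembly and the block names below are assumptions):
# register each named block, then draw one flat vector that covers all of them.
import numpy.random as npr

class ShapeParser(object):
    def __init__(self):
        self.idxs_and_shapes = {}
        self.num_weights = 0

    add_shape = add_shape  # the method defined above

parser = ShapeParser()
parser.add_shape('hidden weights', (784, 200))
parser.add_shape('hidden biases', (200,))
parser.add_shape('output weights', (200, 10))
parser.add_shape('output biases', (10,))
init_params = 0.1 * npr.randn(parser.num_weights)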