def forward(self, x):
    """Fully-connected layer with symmetric weight quantization.

    Weights are scaled by ``self._factor``, rounded to integers, clipped to
    ``[-(2**num_bits - 1), 2**num_bits - 1]`` and scaled back before the
    matmul, emulating fixed-point weight storage at inference time.
    """
    # Flatten everything after the batch dimension.
    flat = functional.reshape(x, shape=(x.shape[0], -1))

    limit = 2 ** self._num_bits - 1

    # Quantize: scale -> round -> clamp to the representable integer range.
    quantized = (self._w * self._factor).round()
    quantized = functional.where(quantized < limit, quantized, limit)
    quantized = functional.where(quantized > -limit, quantized, -limit)

    # Dequantize back to the original weight scale.
    weights = quantized / self._factor

    return functional.dot(flat, weights) + self._b
def forward(self, x):
    """2-D convolution with quantized, noise-perturbed kernels.

    The kernel is normalized by ``self._k_max``, quantized to
    ``2**weight_bits - 1`` symmetric integer levels, dequantized, perturbed
    with Gaussian noise of std ``self._noise`` (noise-injection training for
    analog/quantized hardware — presumably; confirm against the model docs),
    then rescaled by ``self._k_max`` before the convolution.
    """
    levels = 2 ** self._weight_bits - 1

    # Normalize to [-1, 1] (assuming |k| <= k_max — TODO confirm), then
    # quantize onto the integer grid.
    kernel = (self._k / self._k_max) * levels
    kernel = kernel.round()
    kernel = functional.where(kernel > levels, levels, kernel)
    kernel = functional.where(kernel < -levels, -levels, kernel)

    # Back to the normalized scale, add per-weight Gaussian noise, restore
    # the original magnitude.
    kernel = kernel / levels
    kernel = kernel + functional.random_normal(size=kernel.shape, mean=0.0, std=self._noise)
    kernel = kernel * self._k_max

    # Bias is broadcast over batch and spatial dims (1, C, 1, 1).
    bias = functional.reshape(self._b, (1, -1, 1, 1))
    return functional.conv2d(x, kernel, self._stride, self._padding) + bias
def forward(self, x):
    """Fully-connected layer with quantized, noise-perturbed weights.

    Linear counterpart of the noisy quantized conv layer: weights are
    normalized by ``self._w_max``, quantized to ``2**weight_bits - 1``
    symmetric levels, dequantized, perturbed with Gaussian noise of std
    ``self._noise``, and rescaled before the matmul.
    """
    # Flatten everything after the batch dimension.
    flat = functional.reshape(x, shape=(x.shape[0], -1))

    levels = 2 ** self._weight_bits - 1

    # Normalize, quantize onto the integer grid, clamp to the valid range.
    weights = (self._w / self._w_max) * levels
    weights = weights.round()
    weights = functional.where(weights > levels, levels, weights)
    weights = functional.where(weights < -levels, -levels, weights)

    # Dequantize, inject noise, restore the original magnitude.
    weights = weights / levels
    weights = weights + functional.random_normal(size=weights.shape, mean=0.0, std=self._noise)
    weights = weights * self._w_max

    return functional.dot(flat, weights) + self._b
def forward(self, x):
    """Fully-connected layer: quantized weights with cell-level noise.

    Unlike the sibling quantized layers, this variant clamps BEFORE rounding
    and adds Gaussian noise on the *integer* grid, scaled by
    ``self._one_cell_factor`` (presumably the noise magnitude of one storage
    cell — confirm against the hardware model).
    """
    # Flatten everything after the batch dimension.
    flat = functional.reshape(x, shape=(x.shape[0], -1))

    limit = 2 ** self._num_bits - 1

    # Scale into the integer range, clamp, then round.
    grid = self._w * self._factor
    grid = functional.where(grid < limit, grid, limit)
    grid = functional.where(grid > -limit, grid, -limit)
    grid = grid.round()

    # Per-weight noise on the integer grid (positional args: shape, mean,
    # std, dtype — kept exactly as the original call).
    grid = grid + functional.random_normal(grid.shape, 0, self._noise * self._one_cell_factor, grid.dtype)

    # Dequantize back to the weight scale.
    weights = grid / self._factor

    return functional.dot(flat, weights) + self._b
# NOTE(review): fragment — the opening of this layer list and the network
# constructor (presumably `net = Sequential([Conv2d(...), ...` or similar)
# lie outside this chunk; likewise `batch` at the end is defined elsewhere.
    MaxPool2d([2, 2], 2),
    Conv2d([50, 20, 5, 5]),       # 20 -> 50 channels, 5x5 kernels
    ReLU(),
    MaxPool2d([2, 2], 2),
    Linear([800, 1250]),          # 50 * 4 * 4 = 800 flattened features
    ReLU(),
    Linear([1250, 120]),
    ReLU(),
    Linear([120, 10]),            # 10 MNIST classes
    Softmax()
], name='mnist-lenet')

# Symbolic graph inputs: flat images and integer class labels.
x = functional.placeholder('x', dims=2)
y = functional.placeholder('y', dims=1, dtype='int32')

# Forward pass on images reshaped to NCHW (batch, 1, 28, 28).
y_ = net.forward(functional.reshape(x, (-1, 1, 28, 28)))

# Cross-entropy against one-hot labels; accuracy against the raw labels.
loss = CrossEntropy().minimize(y_, OneHot(10).turn(y))
accuracy = Accuracy().measure(y_, y)

# SGD with momentum over the gradients of the loss.
updates = SGD(learning_rate=0.05, momentum=0.9).updates(net.parameters(), net.differentiate(loss))

print('Begin compile')
# NOTE(review): train_op reports only accuracy; loss still drives `updates`
# via net.differentiate(loss) above.
train_op = compile(inputs=[x, y], outputs=[accuracy], updates=updates)
print('Compiled train_op')
test_op = compile(inputs=[x, y], outputs=[accuracy])
print('Compiled test_op')

batch_size = int(batch)
def forward(self, x):
    """2-D convolution with a pruning mask applied to the kernel.

    Kernel entries where ``self._mask`` is false are zeroed before the
    convolution; the bias is broadcast over batch and spatial dimensions.
    """
    pruned_kernel = functional.where(self._mask, self._k, 0)
    conv_out = functional.conv2d(x, pruned_kernel, self._stride, self._padding)
    # Reshape bias to (1, C, 1, 1) so it broadcasts across the feature map.
    return conv_out + functional.reshape(self._b, (1, -1, 1, 1))
def forward(self, x):
    """Fully-connected layer with a pruning mask applied to the weights.

    Weight entries where ``self._mask`` is false are zeroed before the
    matmul; the input is flattened past the batch dimension first.
    """
    flat = functional.reshape(x, shape=(x.shape[0], -1))
    pruned_weights = functional.where(self._mask, self._w, 0)
    return functional.dot(flat, pruned_weights) + self._b
def forward(self, x):
    """Plain 2-D convolution with a per-channel bias.

    The bias is reshaped to (1, C, 1, 1) so it broadcasts over the batch
    and spatial dimensions of the convolution output.
    """
    conv_out = functional.conv2d(x, self._k, self._stride, self._padding)
    bias = functional.reshape(self._b, (1, -1, 1, 1))
    return conv_out + bias
def forward(self, x):
    """Plain fully-connected layer: flatten, matmul, add bias."""
    flat = functional.reshape(x, shape=(x.shape[0], -1))
    return functional.dot(flat, self._w) + self._b