def numpy_err_input_update(self):
    """Backpropagate error (will compute err_input) on the CPU path.

    Does nothing when need_err_input is False.
    """
    if not self.need_err_input:
        return
    self.err_input.map_invalidate()
    self.err_output.map_read()
    self.weights.map_read()
    err_out_2d = reshape(
        self.err_output.mem,
        [self.err_output.shape[0], self.err_output.sample_size])
    err_in_2d = reshape(
        self.err_input.mem,
        [self.err_input.shape[0], self.err_input.sample_size])
    # Pick the weights orientation matching the storage layout.
    weights = (self.weights.mem.transpose() if self.weights_transposed
               else self.weights.mem)
    numpy.dot(err_out_2d, weights, err_in_2d)
def numpy_weights_update(self):
    """Accumulate the weights gradient on CPU and apply the update step.

    gradient_weights <- err_output^T . input (or input^T . err_output
    when the weights are stored transposed), then numpy_update("weights").
    """
    self.input.map_read()
    self.output.map_read()
    self.err_output.map_write()
    grad_out = reshape(
        self.err_output.mem,
        [self.err_output.shape[0], self.err_output.sample_size])
    acts = reshape(
        self.input.mem, [self.input.shape[0], self.input.sample_size])
    self.gradient_weights.map_write()
    if self.weights_transposed:
        numpy.dot(acts.transpose(), grad_out, self.gradient_weights.mem)
    else:
        numpy.dot(grad_out.transpose(), acts, self.gradient_weights.mem)
    self.numpy_update("weights")
def numpy_err_input_update(self):
    """Backpropagate error (will compute err_input) on the CPU path."""
    if not self.need_err_input:
        return
    self.err_input.map_invalidate()
    self.err_output.map_read()
    self.weights.map_read()

    def flat(vec):
        # 2-D (batch, sample) view over the underlying buffer.
        return reshape(vec.mem, [vec.shape[0], vec.sample_size])

    if self.weights_transposed:
        numpy.dot(flat(self.err_output), self.weights.mem.transpose(),
                  flat(self.err_input))
    else:
        numpy.dot(flat(self.err_output), self.weights.mem,
                  flat(self.err_input))
def numpy_weights_update(self):
    """CPU computation of the weights gradient followed by the update."""
    self.input.map_read()
    self.output.map_read()
    self.err_output.map_write()

    def matrix(vec):
        # View each sample as a flat row: (batch, sample_size).
        return reshape(vec.mem, [vec.shape[0], vec.sample_size])

    err_output = matrix(self.err_output)
    inp = matrix(self.input)
    self.gradient_weights.map_write()
    # Operand order depends on the weights storage layout.
    left, right = ((inp, err_output) if self.weights_transposed
                   else (err_output, inp))
    numpy.dot(left.transpose(), right, self.gradient_weights.mem)
    self.numpy_update('weights')
def numpy_run(self):
    """Scatter err_output into the padded interior of err_input on CPU.

    err_input is zeroed, then err_output (viewed as output_shape) is
    written at the offset given by self.padding, leaving a zero border.
    NOTE(review): looks like the backward pass of a crop/pad unit — the
    original docstring said "Forward propagation", which contradicts the
    err_* buffers this method touches; confirm against the enclosing unit.
    """
    self.err_input.map_invalidate()
    self.err_output.map_read()
    out = reshape(self.err_output.mem, self.output_shape)
    inp = self.err_input.mem
    inp[:] = 0
    # padding[1] offsets axis 1 (rows), padding[0] offsets axis 2 (cols).
    inp[:, self.padding[1]:self.padding[1] + self.output_shape[1],
        self.padding[0]:self.padding[0] + self.output_shape[2], :] = out
def numpy_apply_exp(self):
    """In-place, numerically stable softmax over each sample.

    Records the argmax of every sample into max_idx before normalizing.
    """
    self.output.map_write()
    self.max_idx.map_invalidate()
    mem = self.output.mem
    flat = reshape(mem, (mem.shape[0], mem.size // mem.shape[0]))
    for idx in range(flat.shape[0]):
        row = flat[idx]
        peak = row.argmax()
        self.max_idx[idx] = peak
        # Subtract the max before exponentiating to avoid overflow.
        row -= row[peak]
        numpy.exp(row, row)
        row /= row.sum()
def numpy_run(self):
    """Forward propagation from batch on CPU only.

    output = input . weights (+ bias when include_bias is set).
    """
    self.output.map_invalidate()
    self.input.map_read()
    self.weights.map_read()
    self.bias.map_read()
    weights = self.weights.mem
    if not self.weights_transposed:
        # Stored as (neurons, inputs): transpose for the matmul.
        weights = weights.transpose()
    result = numpy.dot(self.input.matrix, weights)
    if self.include_bias:
        result += self.bias.mem
    reshape(self.output.mem, result.shape)[:] = result[:]
def numpy_run(self):
    """Forward propagation from batch on CPU only."""
    self.output.map_invalidate()
    self.input.map_read()
    self.weights.map_read()
    self.bias.map_read()
    w = self.weights.mem
    # Weights layout decides whether a transpose is needed for the matmul.
    product = numpy.dot(
        self.input.matrix, w if self.weights_transposed else w.transpose())
    if self.include_bias:
        product += self.bias.mem
    target = reshape(self.output.mem, product.shape)
    target[:] = product[:]