# These snippets assume the following imports; img_transform is a
# project-specific helper and is not part of numpy/cudarray/deeppy.
import random

import numpy as np
import cudarray as ca
import deeppy as dp


def batches(self):
    x = ca.empty(self.x_shape, dtype=self.x.dtype)
    for start, stop in self._batch_slices():
        if stop > start:
            x_np = self.x[start:stop]
        else:
            # Batch wraps around the end of the dataset.
            x_np = np.concatenate((self.x[start:], self.x[:stop]))
        ca.copyto(x, x_np)
        yield x,
def batches(self):
    x = ca.empty(self.x_shape, dtype=dp.float_)
    for start, stop in self._batch_slices():
        if stop > start:
            x_np = self.x[start:stop]
        else:
            x_np = np.concatenate((self.x[start:], self.x[:stop]))
        # Data augmentation: horizontal flip with 50% probability.
        if random.randint(0, 1) == 0:
            x_np = x_np[:, :, :, ::-1]
        x_np = img_transform(x_np, to_bc01=False)
        x_np = np.ascontiguousarray(x_np)
        ca.copyto(x, x_np)
        yield {'x': x}
def batches(self):
    x = ca.empty(self.x_shape, dtype=dp.float_)
    for start, stop in self._batch_slices():
        if stop > start:
            x_np = self.x[start:stop]
        else:
            x_np = np.concatenate((self.x[start:], self.x[:stop]))
        # Data augmentation: horizontal flip with 50% probability.
        if random.randint(0, 1) == 0:
            x_np = x_np[:, :, :, ::-1]
        x_np = img_transform(x_np, to_bc01=False)
        x_np = np.ascontiguousarray(x_np)
        ca.copyto(x, x_np)
        yield x,
def batches(self):
    x1 = ca.empty(self.x_shape, dtype=self.x.dtype)
    x2 = ca.empty_like(x1)
    for start, stop in self._batch_slices():
        if stop > start:
            x1_np = self.x[start:stop]
            x2_np = self.x2[start:stop]
        else:
            # Note: the wrap-around branch originally read from self.x for
            # both arrays; it must read from self.x2 for the second one.
            x1_np = np.concatenate((self.x[start:], self.x[:stop]))
            x2_np = np.concatenate((self.x2[start:], self.x2[:stop]))
        ca.copyto(x1, x1_np)
        ca.copyto(x2, x2_np)
        yield x1, x2
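# The batch generators above all call a _batch_slices() helper that is not
# part of this listing. A minimal sketch of what it presumably looks like,
# assuming self.n_batches, self.batch_size and self.n_samples exist: slices
# are drawn cyclically, so the last one may wrap past the end of the dataset,
# which is why the generators concatenate self.x[start:] and self.x[:stop]
# whenever stop <= start.
def _batch_slices(self):
    for b in range(self.n_batches):
        start = (b * self.batch_size) % self.n_samples
        stop = start + self.batch_size
        if stop > self.n_samples:
            # Wrap around: the caller stitches the two ends together.
            stop -= self.n_samples
        yield start, stop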
def _update(self):
    # Forward propagation
    next_x = self.x.array
    x_feats = [None] * len(self.layers)
    x_grams = [None] * len(self.layers)
    for l, layer in enumerate(self.layers):
        next_x = layer.fprop(next_x)
        if self.subject_weights[l] > 0:
            x_feats[l] = next_x
        if self.style_weights[l] > 0:
            x_feats[l] = next_x
            x_grams[l] = gram_matrix(next_x)

    # Backward propagation
    grad = ca.zeros_like(next_x)
    loss = ca.zeros(1)
    for l, layer in reversed(list(enumerate(self.layers))):
        if self.subject_weights[l] > 0:
            # Subject (content) loss gradient.
            diff = x_feats[l] - self.subject_feats[l]
            norm = ca.sum(ca.fabs(diff)) + 1e-8
            weight = float(self.subject_weights[l]) / norm
            grad += diff * weight
            loss += 0.5 * weight * ca.sum(diff**2)
        if self.style_weights[l] > 0:
            # Style loss gradient via Gram matrices.
            diff = x_grams[l] - self.style_grams[l]
            n_channels = diff.shape[0]
            x_feat = ca.reshape(x_feats[l], (n_channels, -1))
            style_grad = ca.reshape(ca.dot(diff, x_feat), x_feats[l].shape)
            norm = ca.sum(ca.fabs(style_grad))
            weight = float(self.style_weights[l]) / norm
            style_grad *= weight
            grad += style_grad
            loss += 0.25 * weight * ca.sum(diff**2)
        grad = layer.bprop(grad)

    if self.tv_weight > 0:
        # Total variation smoothing, applied as a convolution.
        x = ca.reshape(self.x.array, (3, 1) + grad.shape[2:])
        tv = self.tv_conv.fprop(x, self.tv_kernel)
        tv *= self.tv_weight
        grad -= ca.reshape(tv, grad.shape)

    ca.copyto(self.x.grad_array, grad)
    return loss
def _update(self):
    # Forward propagation
    next_x = self.x.array
    x_feats = [None] * len(self.layers)
    for l, layer in enumerate(self.layers):
        next_x = layer.fprop(next_x)
        if self.subject_weights[l] > 0 or self.style_weights[l] > 0:
            x_feats[l] = next_x

    # Backward propagation
    grad = ca.zeros_like(next_x)
    loss = ca.zeros(1)
    for l, layer in reversed(list(enumerate(self.layers))):
        if self.subject_weights[l] > 0:
            # Subject (content) loss gradient.
            diff = x_feats[l] - self.subject_feats[l]
            norm = ca.sum(ca.fabs(diff)) + 1e-8
            weight = float(self.subject_weights[l]) / norm
            grad += diff * weight
            loss += 0.5 * weight * ca.sum(diff**2)
        if self.style_weights[l] > 0:
            # Style loss gradient; the Gram matrix is computed on the fly
            # here rather than cached during the forward pass.
            diff = gram_matrix(x_feats[l]) - self.style_grams[l]
            n_channels = diff.shape[0]
            x_feat = ca.reshape(x_feats[l], (n_channels, -1))
            style_grad = ca.reshape(ca.dot(diff, x_feat), x_feats[l].shape)
            norm = ca.sum(ca.fabs(style_grad))
            weight = float(self.style_weights[l]) / norm
            style_grad *= weight
            grad += style_grad
            loss += 0.25 * weight * ca.sum(diff**2)
        grad = layer.bprop(grad)

    if self.tv_weight > 0:
        # Total variation smoothing, applied as a convolution.
        x = ca.reshape(self.x.array, (3, 1) + grad.shape[2:])
        tv = self.tv_conv.fprop(x, self.tv_kernel)
        tv *= self.tv_weight
        grad -= ca.reshape(tv, grad.shape)

    ca.copyto(self.x.grad_array, grad)
    return loss
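# Both _update() variants above build their style loss from Gram matrices of
# layer feature maps, but gram_matrix() itself is not in this listing. A
# minimal sketch of it, assuming bc01-ordered feature maps (batch, channels,
# height, width) flattened to one row per channel, so the Gram matrix holds
# the inner products between channel activations:
def gram_matrix(feats_bc01):
    n_channels = feats_bc01.shape[1]
    feats = ca.reshape(feats_bc01, (n_channels, -1))
    # (n_channels, n_channels) matrix of channel-wise inner products.
    return ca.dot(feats, ca.transpose(feats))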
def test_copyto():
    a_np = np.random.random(size=(7, 11))
    a_ca = ca.array(a_np)
    b_np = np.zeros_like(a_np)
    # b_ca must be a device array for the host->device and device->device
    # cases below to be meaningful (the original used np.zeros_like here).
    b_ca = ca.zeros_like(a_ca)
    # Device -> host
    ca.copyto(b_np, a_ca)
    print(np.allclose(a_np, b_np))
    # Host -> device
    ca.copyto(b_ca, a_np)
    print(np.allclose(np.array(a_ca), np.array(b_ca)))
    # Device -> device
    ca.copyto(b_ca, a_ca)
    print(np.allclose(np.array(a_ca), np.array(b_ca)))
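# As the test above exercises, ca.copyto mirrors np.copyto and accepts host
# or device arrays on either side. A minimal standalone example of the
# staging pattern the batch generators in this listing rely on: allocate a
# device buffer once, then copy successive host batches into it without
# reallocating device memory.
buf = ca.empty((4, 3), dtype=np.float32)                 # reusable device buffer
for _ in range(2):
    batch = np.random.random((4, 3)).astype(np.float32)  # host batch
    ca.copyto(buf, batch)                                # host -> device copy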
def batches(self):
    x1 = ca.empty(self.x_shape, dtype=self.x.dtype)
    x2 = ca.empty_like(x1)
    y = ca.empty(self.y_shape, dtype=self.y.dtype)
    for start, stop in self._batch_slices():
        if stop > start:
            x1_np = self.x[start:stop]
            x2_np = self.x2[start:stop]
            y_np = self.y[start:stop]
        else:
            # As in the paired-input generator above, the wrap-around branch
            # must read from self.x2 for the second array (the original read
            # from self.x twice).
            x1_np = np.concatenate((self.x[start:], self.x[:stop]))
            x2_np = np.concatenate((self.x2[start:], self.x2[:stop]))
            y_np = np.concatenate((self.y[start:], self.y[:stop]))
        ca.copyto(x1, x1_np)
        ca.copyto(x2, x2_np)
        ca.copyto(y, y_np)
        yield {'x1': x1, 'x2': x2, 'y': y}
def bprop(self):
    for i, (start, end) in enumerate(self.slices):
        ca.copyto(self.x.out_grad[start:end, :], self.outputs[i].out_grad)
def bprop(self):
    for i in range(self.n_sources):
        ca.copyto(self.inputs[i].out_grad, self.out_grad[i])
def fprop(self):
    for i in range(self.n_sources):
        ca.copyto(self.out[i], self.inputs[i].out)
def bprop(self):
    for i in range(self.n_splits):
        ca.copyto(self.x.out_grad[i], self.outputs[i].out_grad)
def bprop(self):
    ca.copyto(self.parameter.grad_array, self.out_grad)
def fprop(self):
    for i in range(self.n_sources):
        ca.copyto(self.array[i], self.inputs[i].array)
def bprop(self):
    # Fan-out: copy the first output's gradient, then accumulate the rest.
    ca.copyto(self.x.out_grad, self.outputs[0].out_grad)
    for i in range(1, self.n_splits):
        self.x.out_grad += self.outputs[i].out_grad
def bprop(self):
    # Fan-out: copy the first output's gradient, then accumulate the rest.
    ca.copyto(self.x.grad_array, self.outputs[0].grad_array)
    for i in range(1, self.n_splits):
        self.x.grad_array += self.outputs[i].grad_array
def bprop(self):
    for i, (start, end) in enumerate(self.slices):
        ca.copyto(self.x.grad_array[start:end], self.outputs[i].grad_array)
def bprop(self):
    for i in range(self.n_sources):
        ca.copyto(self.inputs[i].grad_array, self.grad_array[i])
def bprop(self):
    for i in range(self.n_splits):
        ca.copyto(self.x.grad_array[i], self.outputs[i].grad_array)