def __call__(self, w, x=None, add_noise=False):
    """One progressive-synthesis block driven by the style latent ``w``.

    Either upscales (optionally blurring) the incoming feature map ``x``
    and convolves it, or — for the first block — broadcasts the learned
    constant ``self.W``; then applies two noise/bias/leaky-ReLU/style
    stages and returns the resulting feature map.
    """
    batch, _ = w.shape
    if self.upsample:
        assert x is not None
        # Lazily build the normalized separable [1, 2, 1] blur kernel once.
        if self.blur_k is None:
            base = np.asarray([1, 2, 1]).astype('f')
            kern = base[:, None] * base[None, :]
            kern = kern / np.sum(kern)
            self.blur_k = self.xp.asarray(kern)[None, None, :]
        up = upscale2x(x)
        feat = blur(up, self.blur_k) if self.enable_blur else up
        feat = self.c0(feat)
    else:
        # First block: tile the learned 4x4 constant across the batch.
        feat = F.broadcast_to(self.W, (batch, self.ch_in, 4, 4))
    # feat should be (batch, ch, size, size)
    if add_noise:
        feat = self.n0(feat)
    feat = self.s0(w, F.leaky_relu(self.b0(feat)))
    feat = self.c1(feat)
    if add_noise:
        feat = self.n1(feat)
    feat = self.s1(w, F.leaky_relu(self.b1(feat)))
    return feat
def forward(self, x, add_noise=False):
    """Upscale ``x`` 2x (blurred if enabled), then run two conv stages,
    each with optional noise, bias + leaky-ReLU, and normalization.
    """
    # Lazily build the normalized separable [1, 2, 1] blur kernel once.
    if self.blur_k is None:
        base = np.asarray([1, 2, 1]).astype('f')
        kern = base[:, None] * base[None, :]
        kern = kern / np.sum(kern)
        self.blur_k = self.xp.asarray(kern)[None, None, :]
    up = upscale2x(x)
    feat = blur(up, self.blur_k) if self.enable_blur else up
    feat = self.c0(feat)
    # feat should be (batch, ch, size, size)
    if add_noise:
        feat = self.n0(feat)
    feat = F.normalize(F.leaky_relu(self.b0(feat)))
    feat = self.c1(feat)
    if add_noise:
        feat = self.n1(feat)
    feat = F.normalize(F.leaky_relu(self.b1(feat)))
    return feat
def __call__(self, h, w, stage):
    """U-Net-style pass: three styled conv stages going down, two
    upscaled stages with skip concatenations (to the first styled stage
    and to the raw input), then a final projection conv.

    NOTE(review): ``stage`` is accepted but never read here — presumably
    kept for interface parity with sibling modules; confirm with callers.
    """
    d0 = self.s0(w, F.leaky_relu(self.c0(h)))
    d1 = self.s1(w, F.leaky_relu(self.c1(d0)))
    d2 = self.s4(w, F.leaky_relu(self.c4(d1)))
    u = F.leaky_relu(self.c5(upscale2x(d2)))
    u = F.concat([self.s5(w, u), d0])
    u = F.leaky_relu(self.c6(upscale2x(u)))
    u = F.concat([self.s6(w, u), h])
    return self.c7(u)
def __call__(self, w, stage, add_noise=True, w2=None):
    '''Synthesize an image from style latent ``w`` at a fractional stage.

    for alpha in [0, 1), and 2*k+2 + alpha < self.max_stage (-1 <= k <= ...):
    stage 0 + alpha : z -> block[0] -> out[0] * 1
    stage 2*k+1 + alpha : z -> ... -> block[k] -> (up -> out[k]) * (1 - alpha)
    .................... -> (block[k+1] -> out[k+1]) * (alpha)
    stage 2*k+2 + alpha : z -> ............... -> (block[k+1] -> out[k+1]) * 1
    over flow stages continues.

    :param w: style latent fed to every synthesis block.
    :param stage: float growth stage; the fractional part is the fade-in
        weight ``alpha`` of the newest block.
    :param add_noise: forwarded to each synthesis block.
    :param w2: optional second latent for style mixing; when given, all
        blocks from a randomly drawn index onward use ``w2`` instead of ``w``.
    '''
    # Clamp so floor(stage) stays strictly below max_stage.
    stage = min(stage, self.max_stage - 1e-8)
    alpha = stage - math.floor(stage)  # fade-in weight of the newest branch
    stage = math.floor(stage)
    h = None
    if stage % 2 == 0:
        # Stable stage 2*k+2: blocks 0 .. k+1 are all fully active.
        k = (stage - 2) // 2
        # Enable Style Mixing:
        if w2 is not None and k >= 0:
            lim = np.random.randint(1, k+2)  # crossover index in [1, k+1]
        else:
            lim = k+2  # past the last block -> never switch to w2
        for i in range(0, (k + 1) + 1):  # 0 .. k+1
            if i == lim:
                w = w2  # style mixing: remaining blocks use the second latent
            h = self.blocks[i](w, x=h, add_noise=add_noise)
        h = self.outs[k + 1](h)
    else:
        # Transition stage 2*k+1: block k+1 fades in with weight alpha.
        k = (stage - 1) // 2
        if w2 is not None and k >= 1:
            lim = np.random.randint(1, k+1)  # crossover index in [1, k]
        else:
            lim = k+1
        for i in range(0, k + 1):  # 0 .. k
            if i == lim:
                w = w2
            h = self.blocks[i](w, x=h, add_noise=add_noise)
        # Blend the upscaled old-resolution output with the new branch.
        h_0 = self.outs[k](upscale2x(h))
        h_1 = self.outs[k + 1](self.blocks[k + 1](w, x=h, add_noise=add_noise))
        assert 0. <= alpha < 1.
        h = (1.0 - alpha) * h_0 + alpha * h_1
    if chainer.configuration.config.train:
        return h
    else:
        # At evaluation time, upsample small intermediate-stage outputs so
        # samples are at least 64x64.
        min_sample_image_size = 64
        if h.data.shape[2] < min_sample_image_size:  # too small
            scale = int(min_sample_image_size // h.data.shape[2])
            return F.unpooling_2d(h, scale, scale, 0, outsize=(min_sample_image_size, min_sample_image_size))
        else:
            return h
def forward(self, z, stage, theta=None, style_mixing_rate=None, add_noise=True, return_feature=False): ''' for alpha in [0, 1), and 2*k+2 + alpha < self.max_stage (-1 <= k <= ...): stage 0 + alpha : z -> block[0] -> out[0] * 1 stage 2*k+1 + alpha : z -> ... -> block[k] -> (up -> out[k]) * (1 - alpha) .................... -> (block[k+1] -> out[k+1]) * (alpha) stage 2*k+2 + alpha : z -> ............... -> (block[k+1] -> out[k+1]) * 1 over flow stages continues. ''' # theta: (batchsize, ) # if self.rgbd: add_noise = False stage = min(stage, self.max_stage - 1e-8) alpha = stage - math.floor(stage) stage = math.floor(stage) if self.rgbd and theta is None: assert False, "theta is None" if self.rgbd: h = F.concat([z, theta * 10]) else: h = z h = self.linear(h).reshape(z.shape[0], self.ch, 4, 4) if stage % 2 == 0: k = (stage - 2) // 2 for i in range(0, (k + 1)): # 0 .. k+1 h = self.blocks[i](x=h, add_noise=add_noise) if return_feature and i == 2: feat = h h = self.outs[k](h) else: k = (stage - 1) // 2 for i in range(0, k): # 0 .. k h = self.blocks[i](x=h, add_noise=add_noise) if return_feature and i == 2: feat = h h_0 = upscale2x(self.outs[k - 1](h)) h_1 = self.outs[k](self.blocks[k](x=h, add_noise=add_noise)) assert 0. <= alpha < 1. h = (1.0 - alpha) * h_0 + alpha * h_1 if self.rgbd: # inverse depth depth = 1 / (F.softplus(h[:, -1:]) + 1e-4) # print(depth.array.mean(), depth.array.std()) h = h[:, :3] h = F.concat([h, depth]) if chainer.configuration.config.train: if return_feature: return h, feat else: return h else: min_sample_image_size = 64 if h.data.shape[2] < min_sample_image_size: # too small scale = int(min_sample_image_size // h.data.shape[2]) return F.unpooling_2d(h, scale, scale, 0, outsize=(min_sample_image_size, min_sample_image_size)) else: return h
def forward(self, w, w2, stage, theta=None, add_noise=True, return_feature=False):
    '''Generate an image (plus inverse depth when ``self.rgbd``) from
    style latents ``w``/``w2``, switching to ``w2`` from block 3 onward.

    for alpha in [0, 1), and 2*k+2 + alpha < self.max_stage (-1 <= k <= ...):
    stage 0 + alpha : z -> block[0] -> out[0] * 1
    stage 2*k+1 + alpha : z -> ... -> block[k] -> (up -> out[k]) * (1 - alpha)
    .................... -> (block[k+1] -> out[k+1]) * (alpha)
    stage 2*k+2 + alpha : z -> ............... -> (block[k+1] -> out[k+1]) * 1
    over flow stages continues.

    NOTE(review): ``add_noise`` is unconditionally overwritten to False
    below, so the parameter is effectively dead here; confirm callers.
    '''
    # theta: (batchsize, )
    # if self.rgbd:
    add_noise = False
    # Clamp so floor(stage) stays strictly below max_stage.
    stage = min(stage, self.max_stage - 1e-8)
    alpha = stage - math.floor(stage)  # fade-in weight of the newest branch
    stage = math.floor(stage)
    if self.rgbd and theta is None:
        assert False, "theta is None"
    h = None
    if stage % 2 == 0:
        # Stable stage 2*k+2: blocks 0 .. k+1 are all fully active.
        k = (stage - 2) // 2
        for i in range(0, (k + 1) + 1):  # 0 .. k+1
            if i == 3:  # resolution 32~
                w = w2  # fixed crossover: later blocks use the second latent
            if self.rgbd and i < 2:
                # Camera-conditioned latent for the two lowest resolutions.
                if self.rotate_conv_input:
                    _w = self.w_from_theta(theta)
                else:
                    _w = self.rotate_w(w, theta)
            else:
                _w = w
            h = self.blocks[i](_w, x=h, add_noise=add_noise)
            if return_feature and i == 3:
                feat = h  # intermediate feature tapped after block 3
        h = self.outs[k + 1](h)
    else:
        # Transition stage 2*k+1: block k+1 fades in with weight alpha.
        k = (stage - 1) // 2
        for i in range(0, k + 1):  # 0 .. k
            if i == 3:
                w = w2
            if self.rgbd and i < 2:
                if self.rotate_conv_input:
                    _w = self.w_from_theta(theta)
                else:
                    _w = self.rotate_w(w, theta)
            else:
                _w = w
            h = self.blocks[i](_w, x=h, add_noise=add_noise)
            if return_feature and i == 3:
                feat = h
        # Blend the upscaled old-resolution output with the new branch.
        # NOTE(review): the fading-in block receives ``w`` directly rather
        # than the ``_w`` computed in the loop — looks intentional only
        # because k+1 >= 2 implies _w == w unless i == 3 remapped w;
        # verify the w/w2 handoff for the k+1 == 3 case.
        h_0 = upscale2x(self.outs[k](h))
        h_1 = self.outs[k + 1](self.blocks[k + 1](w, x=h, add_noise=add_noise))
        assert 0. <= alpha < 1.
        h = (1.0 - alpha) * h_0 + alpha * h_1
    if self.rgbd:
        # inverse depth: softplus keeps it positive, epsilon bounds it above
        depth = 1 / (F.softplus(h[:, -1:]) + 1e-4)
        h = h[:, :3]
        h = F.concat([h, depth])
    if chainer.configuration.config.train:
        if return_feature:
            return h, feat
        else:
            return h
    else:
        # At evaluation time, upsample small intermediate-stage outputs so
        # samples are at least 64x64.
        min_sample_image_size = 64
        if h.data.shape[2] < min_sample_image_size:  # too small
            scale = int(min_sample_image_size // h.data.shape[2])
            return F.unpooling_2d(h, scale, scale, 0, outsize=(min_sample_image_size, min_sample_image_size))
        else:
            return h