def get_output(self, input):
    """Detail-preserving pooling forward pass.

    Each pixel is weighted by a learned base gain plus a penalty on its
    deviation from a smoothed, downsampled-then-upsampled reference, and
    the pooling output is the per-window weighted average.
    """
    # Broadcast the learned log-domain parameters across batch and
    # spatial axes (they are stored per-channel).
    gain = T.exp(self.alpha_).dimshuffle('x', 0, 'x', 'x')
    scale = T.exp(self.lambda_).dimshuffle('x', 0, 'x', 'x')
    # Smoothed reference: downsample, then expand back to input size.
    coarse = self.__downsampling(input)
    reference = utils.unpool(coarse, self.ws)
    # Per-pixel weights from deviation to the reference.
    # NOTE(review): presumably __penalty is non-negative so weights
    # stay positive -- confirm against its definition.
    weights = gain + self.__penalty(input - reference, scale)
    numerator = pool(input * weights, self.ws, mode='average_exc_pad')
    denominator = pool(weights, self.ws, mode='average_exc_pad')
    # Weighted average: sum(w*x) / sum(w) over each pooling window.
    return numerator * (1. / denominator)
def mean_interp_pad(x, padding):
    """Pad a 4D tensor spatially by mean-interpolated extrapolation.

    The border content is synthesized by average-pooling `x` with a
    (2p+1) window at stride 1, bilinearly upsampling the result to the
    padded size, and then pasting the original `x` back into the
    center.  Gradients do not flow through the synthesized border.

    :param x: 4D tensor; presumably (batch, channels, height, width)
        -- TODO confirm against callers
    :param padding: int or pair of ints, padding per spatial axis
    :return: tensor padded by `padding` on each side of both spatial axes
    """
    if isinstance(padding, int):
        padding = (padding, padding)
    else:
        padding = tuple(padding)
    window = tuple(np.array(padding) * 2 + 1)
    # Stride-1 pooling with an odd (2p+1) window shrinks each spatial
    # axis by 2p, so the upsampling ratio per axis is (h+2p)/(h-2p).
    ratios = ((x.shape[2] + 2 * padding[0], x.shape[2] - 2 * padding[0]),
              (x.shape[3] + 2 * padding[1], x.shape[3] - 2 * padding[1]))
    pooled = pool(x, window, (1, 1), mode='average_exc_pad')
    # Treat the interpolated canvas as a constant in the graph.
    canvas = G.disconnected_grad(
        nn.utils.frac_bilinear_upsampling(pooled, ratios))
    _, _, height, width = canvas.shape
    center = canvas[:, :, padding[0]:height - padding[0],
                    padding[1]:width - padding[1]]
    # Restore the exact original values in the interior region.
    return T.set_subtensor(center, x)
def __downsampling(self, input):
    """Downsample the input by average pooling over the layer's window,
    then smooth the result with the layer's kernel (size-preserving
    'half' convolution)."""
    pooled = pool(input, self.ws, stride=self.ws, mode='average_exc_pad')
    return conv(pooled, self.kern, border_mode='half')
def get_output(self, input, *args, **kwargs):
    """Apply 2D pooling with this layer's configuration.

    When borders are ignored, the project's `pool` wrapper is used;
    otherwise fall back to Theano's `pool_2d`, which takes the
    `ignore_border` flag explicitly.
    """
    if self.ignore_border:
        return pool(input, self.ws, self.stride, self.mode, self.pad)
    return T.signal.pool.pool_2d(input, self.ws, self.ignore_border,
                                 self.stride, self.pad, self.mode)