Example #1
0
    def __init__(self, config_ms, scale):
        """Build the encoder for one scale of the multi-scale model.

        Modules stored here form the pipeline: stride-2 downsampling conv ->
        residual body -> 1x1 conv down to the quantizer's channel count ->
        Quantizer with ``L`` fixed, evenly spaced levels.

        :param config_ms: per-scale config; this reads Cf, kernel_size,
            enc.num_blocks, q.C, q.L, q.levels_range and q.sigma.
        :param scale: index of this scale (0 is RGB, hence the ``scale + 1``
            in the histogram tag below).
        """
        super(EDSRLikeEnc, self).__init__()

        self.scale = scale
        self.config_ms = config_ms
        Cf = config_ms.Cf
        kernel_size = config_ms.kernel_size
        # C: channels fed to the quantizer; L: number of quantization levels.
        C, self.L = config_ms.q.C, config_ms.q.L

        n_resblock = config_ms.enc.num_blocks

        # Downsampling
        self.down = conv(Cf, Cf, kernel_size=5, stride=2)

        # Body
        m_body = [
            edsr.ResBlock(conv, Cf, kernel_size, act=nn.ReLU(True))
            for _ in range(n_resblock)
        ]
        m_body.append(conv(Cf, Cf, kernel_size))
        self.body = nn.Sequential(*m_body)

        # to Quantizer
        to_q = [conv(Cf, C, 1)]
        # NOTE(review): nn.Module.__init__() sets self.training = True, so this
        # condition is always True at construction time and the histogram plot
        # is attached to every instance — confirm that is intended (it cannot
        # distinguish train/eval here; presumably HistogramPlot no-ops in eval).
        if self.training:
            to_q.append(
                # start scale from 1, as 0 is RGB
                vis.histogram_plot.HistogramPlot(
                    'train',
                    'histo/enc_{}_after_1x1'.format(scale + 1),
                    buffer_size=10,
                    num_inputs_to_buffer=1,
                    per_channel=False))
        self.to_q = nn.Sequential(*to_q)

        # We assume q.L levels, evenly distributed between q.levels_range[0] and q.levels_range[1]
        # In theory, the levels could be learned. But in this code, they are assumed to be fixed.
        levels_first, levels_last = config_ms.q.levels_range
        # Wrapping this in a nn.Parameter ensures it is copied to gpu when .to('cuda') is called
        self.levels = nn.Parameter(torch.linspace(levels_first, levels_last,
                                                  self.L),
                                   requires_grad=False)
        self._extra_repr = 'Levels={}'.format(','.join(
            map('{:.1f}'.format, list(self.levels))))
        self.q = Quantizer(self.levels, config_ms.q.sigma)
Example #2
0
    def __init__(self, config_ms, scale):
        """Build the decoder for one scale: a 1x1 head lifting the quantized
        channels back to the feature dimension, a residual body, and a 2x
        upsampling tail.

        :param config_ms: per-scale config; this reads Cf, kernel_size,
            dec.num_blocks and q.C.
        :param scale: index of this scale (stored, not otherwise used here).
        """
        super(EDSRDec, self).__init__()

        self.scale = scale

        num_features = config_ms.Cf
        kernel_size = config_ms.kernel_size
        quantizer_channels = config_ms.q.C
        num_blocks = config_ms.dec.num_blocks

        # 1x1 conv mapping the quantizer's channel count to the feature width.
        self.head = conv(quantizer_channels, num_features, 1)

        # Residual trunk, closed off by one plain conv.
        body_layers = [edsr.ResBlock(conv, num_features, kernel_size,
                                     act=nn.ReLU(True))
                       for _ in range(num_blocks)]
        body_layers.append(conv(num_features, num_features, kernel_size))
        self.body = nn.Sequential(*body_layers)

        # Upsample by a factor of 2 towards the next-finer scale.
        self.tail = edsr.Upsampler(conv, 2, num_features, act=False)
Example #3
0
    def __init__(self, config_ms, scale, C=3):
        """Build the probability classifier: a small residual trunk followed
        by a tail emitting mixture-model parameters.

        :param config_ms: per-scale config; this reads Cf, prob.K and
            prob.rgb_outputs.
        :param scale: index of this scale; 0 is the RGB scale.
        :param C: number of predicted channels (default 3, i.e. RGB).
        """
        super(DeepProbabilityClassifier, self).__init__()

        num_features = config_ms.Cf
        kernel_size = 3

        # Three residual blocks, then a closing conv. Note act.make and the
        # res_scale lookup are evaluated once per block, as in the original.
        trunk = []
        for _ in range(3):
            trunk.append(
                edsr.ResBlock(conv, num_features, kernel_size,
                              act=act.make(num_features, inverse=True),
                              res_scale=global_config.get('res_scale', 1)))
        trunk.append(conv(num_features, num_features, kernel_size))
        self.body = nn.Sequential(*trunk)

        num_mixtures = config_ms.prob.K

        # For RGB (scale 0), generate the outputs specified by
        # config_ms.prob.rgb_outputs; otherwise generate means, sigmas, pis
        # (no lambdas).
        if scale == 0:
            tail_outputs = _parse_outputs_flag(config_ms.prob.rgb_outputs)
        else:
            tail_outputs = RequiredOutputs(True, True, True, lambdas=False)

        self.tail = ProbClfTail(num_features, C, num_mixtures,
                                outputs=tail_outputs)
Example #4
0
 def make_res_block(_act, _use_norm=True):
     # Build one EDSR residual block with the given activation module.
     # Cf, kernel_size and norm_cls are closure variables from the enclosing
     # function (not visible in this chunk) — TODO confirm their meaning there.
     # _use_norm=False drops the normalization layer from the block.
     # NOTE(review): res_scale defaults to 0.1 here, while similar blocks
     # elsewhere in this file default to 1 — confirm the asymmetry is intended.
     return edsr.ResBlock(
         pe.default_conv, Cf, kernel_size, act=_act,
         norm_cls=norm_cls if _use_norm else None,
         res_scale=global_config.get('res_scale', 0.1))