Example 1
    def maybe_init(self, in_channels):
        if self.init:
            return
        elif in_channels is None:
            return

        if DEBUG1:
            print('maybe_init', in_channels)

        pad = self.ksize // 2 if self.padding else 0

        with self.init_scope():
            self.enc = L.Convolution2D(in_channels,
                                       self.out_channels,
                                       ksize=self.ksize,
                                       stride=1,
                                       pad=pad)
            self.dec = L.Deconvolution2D(self.out_channels,
                                         in_channels,
                                         ksize=self.ksize,
                                         stride=1,
                                         pad=pad)

            if self.batch_norm:
                self.bne = L.BatchRenormalization(self.out_channels)
                self.bnd = L.BatchRenormalization(in_channels)
            else:
                # self.bne = L.BatchNormalization(self.out_size)
                # self.bnd = L.BatchNormalization(in_size)
                self.bne = None
                self.bnd = None

        self.in_channels = in_channels
        self.init = True
        self.adjust()
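
The snippet above only constructs the links. As a rough sketch of how such a lazily initialized encoder/decoder block might be driven (assuming `chainer.functions` is imported as `F`; this `__call__` is an illustration, not part of the original source):

    def __call__(self, x):
        # Hypothetical forward pass: initialize lazily from the input's
        # channel count, then encode, renormalize (if enabled) and decode.
        self.maybe_init(x.shape[1])
        h = self.enc(x)
        if self.bne is not None:
            h = self.bne(h)
        h = F.relu(h)
        y = self.dec(h)
        if self.bnd is not None:
            y = self.bnd(y)
        return y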
Example 2
    def maybe_init(self, in_size_):
        if self.init:
            return
        elif in_size_ is None:
            return

        if type(in_size_) is tuple:
            in_size = np.prod(in_size_)
            if DEBUG1:
                print('maybe_init', in_size_, '->', in_size)
        else:
            in_size = in_size_
            if DEBUG1:
                print('maybe_init', in_size)

        with self.init_scope():
            self.enc = L.Linear(in_size, self.out_size)
            self.dec = L.Linear(self.out_size, in_size)

            if self.batch_norm:
                self.bne = L.BatchRenormalization(self.out_size)
                self.bnd = L.BatchRenormalization(in_size)
            else:
                # self.bne = L.BatchNormalization(self.out_size)
                # self.bnd = L.BatchNormalization(in_size)
                self.bne = None
                self.bnd = None

        self.in_size = in_size
        self.init = True
        self.adjust(device=self.device)
Example 3
 def __init__(self, channels, ksize, stride, pad):
     super(ResBlock, self).__init__()
     with self.init_scope():
         self.conv1 = L.Convolution2D(channels, channels, ksize, stride,
                                      pad)
         self.norm1 = L.BatchRenormalization(channels)
         self.conv2 = L.Convolution2D(channels, channels, ksize, stride,
                                      pad)
         self.norm2 = L.BatchRenormalization(channels)
         self.conv3 = L.Convolution2D(channels, channels, ksize, stride,
                                      pad)
         self.norm3 = L.BatchRenormalization(channels)
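
The example defines three convolution/renormalization pairs but omits the forward pass. A plausible sketch of it (assuming `chainer.functions` is imported as `F`; not taken from the original source):

 def __call__(self, x):
     # Sketch of a typical residual forward pass; assumes stride/pad
     # preserve the spatial shape so the skip connection lines up.
     h = F.relu(self.norm1(self.conv1(x)))
     h = F.relu(self.norm2(self.conv2(h)))
     h = self.norm3(self.conv3(h))
     return F.relu(h + x)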
Example 4
 def setUp(self):
     self.decay = 0.9
     self.size = 3
     self.link = links.BatchRenormalization(self.size, self.decay)
     self.x = numpy.random.uniform(-1, 1, (self.nx, self.size)).astype(
         numpy.float32)
     self.y = numpy.random.uniform(-1, 1, (self.ny, self.size)).astype(
         numpy.float32)
Example 5
 def __init__(self, link_name, finetune, forget=False):
     super(Model, self).__init__()
     with self.init_scope():
         if link_name == 'bn':
             self.link = links.BatchNormalization(3)
         elif link_name == 'brn':
             self.link = links.BatchRenormalization(3)
     self.forget = forget
     self.finetune = finetune
Example 6
    def maybe_init(self, in_channels):
        if self.init:
            return
        elif in_channels is None:
            return

        if DEBUG1:
            print('maybe_init', in_channels)

        pad = self.ksize // 2 if self.padding else 0

        def mk_enc(i):
            ch_in = self.out_channels if i else in_channels
            ch_out = self.out_channels
            return L.Convolution2D(ch_in, ch_out, ksize=self.ksize, pad=pad)

        def mk_dec(i):
            ch_in = self.out_channels
            ch_out = self.out_channels if i else in_channels
            return L.Deconvolution2D(ch_in, ch_out, ksize=self.ksize, pad=pad)

        enc_s = list(map(mk_enc, range(self.n_conv)))
        dec_s = list(map(mk_dec, reversed(range(self.n_conv))))

        self.enc = lambda x: reduce(lambda h, f: f(h), enc_s, x)
        self.dec = lambda x: reduce(lambda h, f: f(h), dec_s, x)

        for i, f in enumerate(enc_s):
            self.add_link(f'enc{i}', f)  # =~ setattr(self, 'enc*', f)
        for i, f in enumerate(dec_s):
            self.add_link(f'dec{i}', f)  # =~ setattr(self, 'dec*', f)

        with self.init_scope():
            if self.batch_norm:
                self.bne = L.BatchRenormalization(self.out_channels)
                self.bnd = L.BatchRenormalization(in_channels)
            else:
                self.bne = None
                self.bnd = None

        self.in_channels = in_channels
        self.init = True
        self.adjust()
Example 7
    def __init__(self):
        super(VGG_ReNorm, self).__init__()
        with self.init_scope():
            # Convolution
            self.conv1_1 = L.Convolution2D(None, 64, (3, 3), stride=1,
                                           pad=1)  #64->64
            self.conv1_2 = L.Convolution2D(None, 64, (3, 3), stride=1,
                                           pad=1)  #64->64
            self.conv2_1 = L.Convolution2D(None, 128, (3, 3), stride=1,
                                           pad=1)  #32->32
            self.conv2_2 = L.Convolution2D(None, 128, (3, 3), stride=1,
                                           pad=1)  #32->32
            self.conv3_1 = L.Convolution2D(None, 256, (3, 3), stride=1,
                                           pad=1)  #16->16
            self.conv3_2 = L.Convolution2D(None, 256, (3, 3), stride=1,
                                           pad=1)  #16->16
            self.conv3_3 = L.Convolution2D(None, 256, (3, 3), stride=1,
                                           pad=1)  #16->16
            self.conv4_1 = L.Convolution2D(None, 512, (3, 3), stride=1,
                                           pad=1)  #8->8
            self.conv4_2 = L.Convolution2D(None, 512, (3, 3), stride=1,
                                           pad=1)  #8->8
            self.conv4_3 = L.Convolution2D(None, 512, (3, 3), stride=1,
                                           pad=1)  #8->8
            self.conv5_1 = L.Convolution2D(None, 512, (3, 3), stride=1,
                                           pad=1)  #4->4
            self.conv5_2 = L.Convolution2D(None, 512, (3, 3), stride=1,
                                           pad=1)  #4->4
            self.conv5_3 = L.Convolution2D(None, 512, (3, 3), stride=1,
                                           pad=1)  #4->4

            # Batch renormalization (one shared link per stage)
            self.bn1 = L.BatchRenormalization(64)
            self.bn2 = L.BatchRenormalization(128)
            self.bn3 = L.BatchRenormalization(256)
            self.bn4 = L.BatchRenormalization(512)
            self.bn5 = L.BatchRenormalization(512)

            self.fc6 = L.Linear(None, 4096)
            self.fc7 = L.Linear(None, 4096)
            self.fc8 = L.Linear(None, 2)
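
The constructor defines one renormalization link per stage (`bn1` through `bn5`), which suggests each stage's convolutions share a link. A speculative sketch of the first stage of a matching forward pass (assuming `chainer.functions` is imported as `F`; the real `__call__` is not shown in the source):

    def __call__(self, x):
        # Speculative forward pass for the first VGG stage: both 64-channel
        # convolutions share bn1, followed by ReLU and 2x2 max pooling.
        h = F.relu(self.bn1(self.conv1_1(x)))
        h = F.max_pooling_2d(F.relu(self.bn1(self.conv1_2(h))), 2)
        # ... stages 2-5 would follow the same pattern with bn2-bn5 ...
        h = F.relu(self.fc6(h))
        h = F.relu(self.fc7(h))
        return self.fc8(h)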
Example 8
    def __init__(self,
                 channel,
                 ksize,
                 dilate=1,
                 dropout_rate=0.3,
                 causal=True):
        self.dropout_rate = dropout_rate
        self.half = channel

        ls = {}
        ls["c"] = DilateConvCausal1D(channel, channel * 2, ksize,
                                     dilate=dilate, causal=causal)
        ls["bn"] = L.BatchRenormalization(channel * 2, decay=0.9, eps=2e-5)
        super(GatedConv1D, self).__init__(**ls)
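
The convolution doubles the channel count and `self.half` stores the original width, which points at a gated linear unit. A sketch of the likely forward pass (assuming `chainer.functions` is imported as `F`; an assumption, not the original code):

    def __call__(self, x):
        # Assumed gated activation: split the 2*channel output into a
        # signal half and a gate half, then apply dropout.
        h = self.bn(self.c(x))
        a, b = F.split_axis(h, 2, axis=1)  # each half has self.half channels
        return F.dropout(a * F.sigmoid(b), ratio=self.dropout_rate)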
Example 9
    def setUp(self):
        self.expander = (None, Ellipsis) + (None, ) * self.ndim
        self.aggr_axes = (0, ) + tuple(six.moves.range(2, self.ndim + 2))

        self.rmax = self.dtype(3)
        self.dmax = self.dtype(5)

        self.link = links.BatchRenormalization(3,
                                               rmax=self.rmax,
                                               dmax=self.dmax,
                                               dtype=self.dtype)
        gamma = self.link.gamma.data
        gamma[...] = numpy.random.uniform(.5, 1, gamma.shape)
        beta = self.link.beta.data
        beta[...] = numpy.random.uniform(-1, 1, beta.shape)
        self.link.cleargrads()

        self.gamma = gamma.copy()[self.expander]  # fixed on CPU
        self.beta = beta.copy()[self.expander]  # fixed on CPU

        shape = (5, 3) + (2, ) * self.ndim
        self.x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, shape).astype(self.dtype)

        if self.test:
            self.mean = numpy.random.uniform(-1, 1, (3, )).astype(self.dtype)
            self.var = numpy.random.uniform(0.5, 1, (3, )).astype(self.dtype)
            self.link.avg_mean[...] = self.mean
            self.link.avg_var[...] = self.var
            self.running_mean = self.mean
            self.running_var = self.var
        else:
            self.mean = self.x.mean(axis=self.aggr_axes)
            self.var = self.x.var(axis=self.aggr_axes)
            # Need to add some noise to running_mean and running_var,
            # otherwise we will always get r=1, d=0
            self.running_mean = self.mean + numpy.random.uniform(
                -1, 1, self.mean.shape).astype(self.dtype)
            self.running_var = numpy.abs(
                self.var +
                numpy.random.uniform(-1, 1, self.var.shape).astype(self.dtype))
            self.link.avg_mean[...] = self.running_mean
            self.link.avg_var[...] = self.running_var
        self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
        self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
            self.check_backward_options = {'atol': 5e-1, 'rtol': 1e-1}
Example 10
 def __init__(self, link_name, finetune, forget=False):
     super(Model, self).__init__()
     with self.init_scope():
         if link_name == 'bn':
             self.link = links.BatchNormalization(3)
         elif link_name == 'brn':
              # with the defaults (rmax=1, dmax=0) BRN degenerates into
              # plain BN, so use nontrivial values here
             self.link = links.BatchRenormalization(3,
                                                    rmax=2.0,
                                                    dmax=1.0)
         elif link_name == 'dbn':
             self.link = links.DecorrelatedBatchNormalization(
                 3, groups=3)
     self.forget = forget
     self.finetune = finetune
Example 11
    def __init__(self,
                 n_unit,
                 ksize,
                 stride,
                 pad,
                 actfun=None,
                 dropout=0,
                 wd=0.02):

        super(DownSampleBlock, self).__init__()
        with self.init_scope():
            self.cnv = L.Convolution2D(None,
                                       n_unit,
                                       ksize=ksize,
                                       stride=stride,
                                       pad=pad,
                                       initialW=I.Normal(wd))
            self.brn = L.BatchRenormalization(n_unit)

        self.actfun = actfun
        self.dropout_ratio = dropout
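
For completeness, a minimal sketch of how this block would usually be applied (assuming `chainer.functions` is imported as `F`; the original `__call__` is not shown):

    def __call__(self, x):
        # Convolution, renormalization, optional activation, then dropout
        # when a nonzero ratio was configured.
        h = self.brn(self.cnv(x))
        if self.actfun is not None:
            h = self.actfun(h)
        if self.dropout_ratio > 0:
            h = F.dropout(h, ratio=self.dropout_ratio)
        return h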