Example #1
    def __init__(self,
                 dim_in,
                 dim_hidden,
                 dim_latent,
                 num_layers,
                 num_trans,
                 temperature,
                 num_zsamples=1):
        super(VAE, self).__init__()

        # initialise first encoder and decoder hidden layer separately because
        # the input and output dims differ from the other hidden layers
        self.qlin0 = L.Linear(dim_in, dim_hidden)
        self.plin0 = L.Linear(dim_latent, dim_hidden)
        self._children.append('qlin0')
        self._children.append('plin0')

        for i in range(num_layers - 1):
            # encoder
            layer_name = 'qlin' + str(i + 1)
            setattr(self, layer_name, L.Linear(2 * dim_hidden, dim_hidden))
            self._children.append(layer_name)

            # decoder
            layer_name = 'plin' + str(i + 1)
            setattr(self, layer_name, L.Linear(2 * dim_hidden, dim_hidden))
            self._children.append(layer_name)

        # initialise the encoder and decoder output layer separately because
        # the input and output dims differ from the other hidden layers
        self.qlin_mu = L.Linear(2 * dim_hidden, dim_latent)
        self.qlin_ln_var = L.Linear(2 * dim_hidden, dim_latent)
        self.plin_ln_var = L.Linear(2 * dim_hidden, dim_in)
        self.plin_mu = L.Linear(2 * dim_hidden, dim_in)
        self._children.append('qlin_mu')
        self._children.append('qlin_ln_var')
        self._children.append('plin_mu')
        self._children.append('plin_ln_var')

        # flow
        for i in range(num_trans):
            layer_name = 'flow_w_' + str(i)  # weights
            setattr(self, layer_name,
                    L.Scale(axis=1, W_shape=(dim_latent,), bias_term=False))
            self._children.append(layer_name)

            layer_name = 'flow_b_' + str(i)  # bias
            setattr(self, layer_name, L.Bias(axis=0, shape=(1,)))
            self._children.append(layer_name)

            layer_name = 'flow_u_' + str(i)  # scaling factor u
            setattr(self, layer_name,
                    L.Scale(axis=1, W_shape=(dim_latent,), bias_term=False))
            self._children.append(layer_name)

        self.num_layers = num_layers
        self.num_trans = num_trans
        self.temperature = temperature
        self.num_zsamples = num_zsamples
        self.epochs_seen = 0
Example #2
    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
        self.W = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
        self.b = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
        self.y_expected = numpy.copy(self.x)
        for i, j, k in numpy.ndindex(self.y_expected.shape):
            self.y_expected[i, j, k] *= self.W[j]
            if self.bias_term:
                self.y_expected[i, j, k] += self.b[j]
        self.gy = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)

        bias_term = self.bias_term
        bias_shape = self.bias_shape
        axis = 1
        if self.learn_W:
            self.link = links.Scale(
                axis, self.W.shape, bias_term, bias_shape)
            self.link.W.data = self.W
            if bias_term:
                self.link.bias.b.data = self.b
        else:
            self.link = links.Scale(
                axis, None, bias_term, bias_shape)
            if bias_term:
                self.link.bias.b.data = self.b
        self.link.cleargrads()
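
For reference, the behaviour this test checks can be reproduced with the link directly. A minimal self-contained sketch (it mirrors the test's shapes but is not part of the test suite):

import numpy as np
import chainer.links as L

x = np.random.uniform(-1, 1, (3, 2, 3)).astype(np.float32)
# Scale broadcasts W along `axis`; with bias_term=True it also adds a bias of
# the same shape, so y[i, j, k] = x[i, j, k] * W[j] + b[j].
link = L.Scale(axis=1, W_shape=(2,), bias_term=True)
y = link(x)  # W is initialised to ones and b to zeros, so y equals x at first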
Example #3
 def __init__(self, n_input_ch=6, at_once=True, mat_scale=1):
     self.mat_scale = mat_scale
     super().__init__(n_input_ch, 3, at_once)  # 3 outputs
     with self.init_scope():
         self.prob_scale = L.Scale(W_shape=(1,))
         self.prob_scale.W.copydata(np.array([100.0]))  # Initial parameter
         self.matting_link = MattingLink()
Example #4
 def _init_non_bottom(self, d_inp, d_out):
     super(DecNet, self).__init__(
         linear=L.Linear(d_inp, d_out),
         bn=BatchNormalization(d_out, decay=0.9,
                               use_gamma=False, use_beta=False, 
                               device=self.device),
         sb=L.Scale(W_shape=d_out, bias_term=True)
     )
Example #5
 def __init__(self, dim, act=F.relu, device=None, ):
     d_inp, d_out = dim
     super(EncNet, self).__init__(
         linear=L.Linear(d_inp, d_out),
         bn=L.BatchNormalization(d_out, decay=0.9,
                                 use_gamma=False, use_beta=False),
         sb=L.Scale(W_shape=d_out, bias_term=True)
     )
     self.sigma = 0.3
     self.act = act
Example #6
 def _init_non_top(self, d_inp, d_out, ksize, stride, pad):
     super(EncNet,
           self).__init__(conv=L.Convolution2D(d_inp, d_out, ksize, stride,
                                               pad),
                          bn=BatchNormalization(d_out,
                                                decay=0.9,
                                                use_gamma=False,
                                                use_beta=False,
                                                device=self.device),
                          sb=L.Scale(W_shape=d_out, bias_term=True))
Example #7
File: models.py Project: kzky/works
 def __init__(self, device, act=F.relu):
     super(MLPDecoder, self).__init__(
         linear0=L.Linear(210, 500),
         linear1=L.Linear(500, 1000),
         linear2=L.Linear(1000, 784),
         sc=L.Scale(W_shape=(210, ), bias_term=True),
         bn0=L.BatchNormalization(500, decay=0.9),
         bn1=L.BatchNormalization(1000, decay=0.9),
     )
     self.device = device
     self.act = act
Example #8
File: models.py Project: kzky/works
 def __init__(self, dim):
     super(Denoise, self).__init__(
         a0=L.Scale(W_shape=(dim, )),
         a1=L.Scale(W_shape=(dim, )),
         a2=L.Scale(W_shape=(dim, )),
         a3=L.Bias(shape=(dim, )),
         a4=L.Scale(W_shape=(dim, )),
         b0=L.Scale(W_shape=(dim, )),
         b1=L.Scale(W_shape=(dim, )),
         b2=L.Scale(W_shape=(dim, )),
         b3=L.Bias(shape=(dim, )),
         )
Example #9
File: models.py Project: kzky/works
 def __init__(self, device, act=F.relu):
     super(MLPEncoder,
           self).__init__(linear0=L.Linear(784, 1000),
                          linear1=L.Linear(1000, 500),
                          linear2=L.Linear(500, 200),
                          classifier=L.Linear(200, 10),
                          bn0=L.BatchNormalization(1000, decay=0.9),
                          bn1=L.BatchNormalization(500, decay=0.9),
                          bn2=BatchNormalization(200,
                                                 decay=0.9,
                                                 use_gamma=False,
                                                 use_beta=False),
                          sc2=L.Scale(W_shape=(200, ), bias_term=True))
     self.device = device
     self.act = act
Example #10
File: models.py Project: kzky/works
    def __init__(self,
                 dims,
                 act=F.relu,
                 noise=False,
                 rc=False,
                 device=None):

        # Setup layers
        layers = {}
        linears = OrderedDict()
        batch_norms = OrderedDict()
        scale_biases = OrderedDict()
        for l, d in enumerate(zip(dims[0:-1], dims[1:])):
            d_in, d_out = d[0], d[1]

            # Linear
            linear = L.Linear(d_in, d_out, )
            l_name = "linear-enc-{:03}".format(l)
            linears[l_name] = linear

            # Normalization and BatchCorrection
            batch_norm = BatchNormalization(d_out, decay=0.9, 
                                            use_gamma=False, use_beta=False)
            bn_name = "bn-enc-{:03d}".format(l)
            batch_norms[bn_name] = batch_norm

            scale_bias = L.Scale(W_shape=d_out, bias_term=True)
            sb_name = "sb-enc-{:03d}".format(l)
            scale_biases[sb_name] = scale_bias

        layers.update(linears)
        layers.update(batch_norms)
        layers.update(scale_biases)
        
        super(MLPEnc, self).__init__(**layers)
        self.dims = dims
        self.layers = layers
        self.linears = linears
        self.batch_norms = batch_norms
        self.scale_biases = scale_biases
        self.act = act
        self.noise = noise
        self.rc = rc
        self.device = device
        self.hiddens = []
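
Several of the examples above (#4, #5, #6, #9, #10) pair a BatchNormalization configured with use_gamma=False and use_beta=False with an L.Scale(..., bias_term=True): the normalization is kept affine-free, and the learnable per-unit scale and shift live in the separate Scale link. A hypothetical minimal sketch of that pattern (a stand-in, not code from kzky/works):

import numpy as np
import chainer.functions as F
import chainer.links as L

d_out = 100
linear = L.Linear(50, d_out)
bn = L.BatchNormalization(d_out, decay=0.9, use_gamma=False, use_beta=False)
sb = L.Scale(W_shape=d_out, bias_term=True)  # learnable per-unit scale and shift

x = np.random.rand(8, 50).astype(np.float32)
h = F.relu(sb(bn(linear(x))))  # normalize, then rescale, shift and activate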
Example #11
    def __init__(self,
                 config,
                 batchsize,
                 n_classes=0,
                 softmax=True,
                 comm=None,
                 test=False):
        self.softmax = softmax
        if not test:
            self.l_emd = config.l_emd
            self.l_re = config.l_re
            self.l_patch_dis = config.l_patch_dis
            self.l_gp = config.l_gp
            self.l_per = config.l_per
            self.config = config
            self.comm = comm

        self.gen = BIGGAN()
        self.dim_z = 140
        params = {}
        if config.initial_z == "zero":
            params["z"] = L.Parameter(
                np.zeros((batchsize, self.dim_z)).astype("float32"))
        elif config.initial_z == "random":
            params["z"] = L.Parameter(
                np.random.normal(size=(batchsize,
                                       self.dim_z)).astype("float32"))

        initialW = initializers.HeNormal(0.001**0.5)
        params["linear"] = L.Linear(1, 128, initialW=initialW, nobias=True)
        params["BSA_linear"] = L.Scale(W_shape=(16 * self.gen.ch),
                                       bias_term=True)
        for i, (k, l) in enumerate(self.gen.namedlinks()):
            if "Hyper" in k.split("/")[-1]:
                params[f"hyper_bn{i}"] = l
        if config.lr_g_linear > 0:
            params["g_linear"] = self.gen.G_linear
        super(AdaBIGGAN, self).__init__(**params)
        if not test:
            self.setup_optimizer()
            self.z.W.update_rule.hyperparam.alpha = 0.05
            self.linear.W.update_rule.hyperparam.alpha = 0.001
            if config.lr_g_linear > 0:
                self.g_linear.W.update_rule.hyperparam.alpha = config.lr_g_linear
Example #12
    def __init__(self, in_channels, out_channels, LoopTimes=5):
        super(Block, self).__init__()
        with self.init_scope():
            """
            ffconv : feedforword Convolution.
            fbconv : feedback Convolution (deconvolution).
            bpconv : bypass Convolution (1x1 convolution).
            update_late : A learnable and non-negative parameter.
            """
            self.bn = L.BatchNormalization(in_channels)
            self.ffconv = L.Convolution2D(in_channels,
                                          out_channels,
                                          ksize=3,
                                          pad=1)
            self.fbconv = L.Deconvolution2D(out_channels,
                                            in_channels,
                                            ksize=3,
                                            pad=1)
            self.bpconv = L.Convolution2D(in_channels, out_channels, ksize=1)
            self.update_rate = L.Scale(axis=0, W_shape=(1, ))

            self.LoopTimes = LoopTimes
            self.out_channels = out_channels
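
The update_rate link above is a Scale with a single weight: applied to a tensor, it multiplies every element by that one learnable coefficient. A hypothetical sketch in isolation (shapes and names are illustrative, not the project's forward pass):

import numpy as np
import chainer.links as L

update_rate = L.Scale(axis=0, W_shape=(1,))  # one learnable scalar, initialised to 1.0
h = np.ones((1, 8, 16, 16), dtype=np.float32)  # e.g. a feedback feature map
scaled = update_rate(h)  # every element is multiplied by W[0]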
Example #13
 def test_scale_invalid_argc1(self):
     func = links.Scale(self.axis, self.W.data.shape)
     with chainer.DebugMode(True):
         with self.assertRaises(AssertionError):
             func(self.x, self.W)
Example #14
    def __init__(self):
        super(SSD, self).__init__(
            conv1_1=L.Convolution2D(3, 64, 3, pad=1),
            conv1_2=L.Convolution2D(64, 64, 3, pad=1),
            conv2_1=L.Convolution2D(64, 128, 3, pad=1),
            conv2_2=L.Convolution2D(128, 128, 3, pad=1),
            conv3_1=L.Convolution2D(128, 256, 3, pad=1),
            conv3_2=L.Convolution2D(256, 256, 3, pad=1),
            conv3_3=L.Convolution2D(256, 256, 3, pad=1),
            conv4_1=L.Convolution2D(256, 512, 3, pad=1),
            conv4_2=L.Convolution2D(512, 512, 3, pad=1),
            conv4_3=L.Convolution2D(512, 512, 3, pad=1),
            conv5_1=L.Convolution2D(512, 512, 3, pad=1),
            conv5_2=L.Convolution2D(512, 512, 3, pad=1),
            conv5_3=L.Convolution2D(512, 512, 3, pad=1),

            #fc6 = L.Convolution2D(512, 1024,  3, pad=6),
            fc6=L.DilatedConvolution2D(512, 1024, 3, pad=6, dilate=6),
            fc7=L.Convolution2D(1024, 1024, 1),
            conv6_1=L.Convolution2D(1024, 256, 1),
            conv6_2=L.Convolution2D(256, 512, 3, stride=2, pad=1),
            conv7_1=L.Convolution2D(512, 128, 1),
            conv7_2=L.Convolution2D(128, 256, 3, stride=2, pad=1),
            conv8_1=L.Convolution2D(256, 128, 1),
            conv8_2=L.Convolution2D(128, 256, 3, stride=2, pad=1),
            normalize=L.Scale(W_shape=512),
            conv4_3_norm_mbox_loc=L.Convolution2D(512, 12, 3,
                                                  pad=1),  #3 prior boxes
            conv4_3_norm_mbox_conf=L.Convolution2D(512, 63, 3, pad=1),
            fc7_mbox_loc=L.Convolution2D(1024, 24, 3, pad=1),  #6 prior boxes
            fc7_mbox_conf=L.Convolution2D(1024, 126, 3, pad=1),
            conv6_2_mbox_loc=L.Convolution2D(512, 24, 3,
                                             pad=1),  #6 prior boxes
            conv6_2_mbox_conf=L.Convolution2D(512, 126, 3, pad=1),
            conv7_2_mbox_loc=L.Convolution2D(256, 24, 3,
                                             pad=1),  #6 prior boxes
            conv7_2_mbox_conf=L.Convolution2D(256, 126, 3, pad=1),
            conv8_2_mbox_loc=L.Convolution2D(256, 24, 3,
                                             pad=1),  #6 prior boxes
            conv8_2_mbox_conf=L.Convolution2D(256, 126, 3, pad=1),
            pool6_mbox_loc=L.Convolution2D(256, 24, 3, pad=1),
            pool6_mbox_conf=L.Convolution2D(256, 126, 3,
                                            pad=1),  #6 prior boxes
        )
        self.train = False
        self.conv4_3_norm_mbox_priorbox = self.prior((38, 38), 30., 0, [2], 1,
                                                     1, (0.1, 0.1, 0.2, 0.2))
        self.fc7_mbox_priorbox = self.prior((19, 19), 60., 114., [2, 3], 1, 1,
                                            (0.1, 0.1, 0.2, 0.2))
        self.conv6_2_mbox_priorbox = self.prior((10, 10), 114., 168., [2, 3],
                                                1, 1, (0.1, 0.1, 0.2, 0.2))
        self.conv7_2_mbox_priorbox = self.prior((5, 5), 168., 222., [2, 3], 1,
                                                1, (0.1, 0.1, 0.2, 0.2))
        self.conv8_2_mbox_priorbox = self.prior((3, 3), 222., 276., [2, 3], 1,
                                                1, (0.1, 0.1, 0.2, 0.2))
        self.pool6_mbox_priorbox = self.prior((1, 1), 276., 330., [2, 3], 1, 1,
                                              (0.1, 0.1, 0.2, 0.2))
        self.mbox_prior = np.hstack([
            self.conv4_3_norm_mbox_priorbox, self.fc7_mbox_priorbox,
            self.conv6_2_mbox_priorbox, self.conv7_2_mbox_priorbox,
            self.conv8_2_mbox_priorbox, self.pool6_mbox_priorbox
        ])
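
In SSD, the normalize link above implements the conv4_3 rescaling step: the feature map is L2-normalised and then multiplied by a learned per-channel weight. A hedged sketch of that step in isolation (not this project's actual forward code):

import numpy as np
import chainer.functions as F
import chainer.links as L

normalize = L.Scale(W_shape=512)  # one learned weight per channel
h = np.random.rand(1, 512, 38, 38).astype(np.float32)
h = F.normalize(h, axis=1)  # unit L2 norm across the 512 channels at each location
h = normalize(h)  # rescale channel c by W[c]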
Example #15
 def test_scale_invalid_argc2(self):
     func = links.Scale(self.axis, None)
     with chainer.using_config('debug', True):
         with self.assertRaises(AssertionError):
             func(self.x)
Example #16
 def test_scale_no_bias_shape(self):
     axis = 1
     with self.assertRaises(ValueError):
         links.Scale(axis, None, True, None)
Example #17
 def test_scale_invalid_argc2(self):
     func = links.Scale(self.axis, None)
     with chainer.DebugMode(True):
         with self.assertRaises(AssertionError):
             func(self.x)