def __init__(self, mlp, mlp2, in_channels=None, use_bn=True,
              activation=functions.relu, residual=False):
     # k is the number of sampled points (num_region)
     super(SetAbstractionGroupAllModule, self).__init__()
     # Feature Extractor channel list
     assert isinstance(mlp, list)
     fe_ch_list = [in_channels] + mlp
     # Head channel list
     if mlp2 is None:
         mlp2 = []
     assert isinstance(mlp2, list)
     head_ch_list = [mlp[-1]] + mlp2
     with self.init_scope():
         self.sampling_grouping = SamplingGroupingAllModule()
         self.feature_extractor_list = chainer.ChainList(
             *[ConvBlock(fe_ch_list[i], fe_ch_list[i+1], ksize=1,
                         use_bn=use_bn, activation=activation,
                         residual=residual
                         ) for i in range(len(mlp))])
         self.head_list = chainer.ChainList(
             *[ConvBlock(head_ch_list[i], head_ch_list[i + 1], ksize=1,
                         use_bn=use_bn, activation=activation,
                         residual=residual
                         ) for i in range(len(mlp2))])
     self.use_bn = use_bn
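
A ChainList built this way is consumed by simple iteration in the forward
pass. A minimal sketch of that pattern, assuming the ConvBlock and
sampling/grouping signatures above (variable names here are illustrative,
not from the source):

    def forward(self, points, features):
        # group everything into one region, then apply the shared MLPs
        h = self.sampling_grouping(points, features)  # assumed signature
        for conv_block in self.feature_extractor_list:
            h = conv_block(h)
        for head in self.head_list:
            h = head(h)
        return h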
Example 2
    def test_addgrads(self):
        l1 = chainer.Link()
        with l1.init_scope():
            l1.x = chainer.Parameter(shape=(2, 3))
            l1.y = chainer.Parameter(shape=(2, 3))
        l2 = chainer.Link()
        with l2.init_scope():
            l2.x = chainer.Parameter(shape=2)
        l3 = chainer.Link()
        with l3.init_scope():
            l3.x = chainer.Parameter(shape=3)
        c1 = chainer.ChainList(l1, l2)
        c2 = chainer.ChainList(c1, l3)
        l1.x.grad.fill(1)
        l2.x.grad.fill(2)
        l3.x.grad.fill(3)
        l1.y.grad.fill(4)

        self.l1.x.grad.fill(-1)
        self.l1.y.cleargrad()
        self.l2.x.grad.fill(-2)
        self.l3.x.grad.fill(-3)

        self.c2.addgrads(c2)
        numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
        numpy.testing.assert_array_equal(self.l1.y.grad, l1.y.grad)
        numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
        numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
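
addgrads accumulates gradients parameter-wise between two links of the same
structure, which is exactly what the zero-filled expectations above check
(e.g. -1 + 1 == 0). A self-contained sketch of the same semantics:

    import numpy as np
    import chainer

    def make_link():
        link = chainer.Link()
        with link.init_scope():
            link.x = chainer.Parameter(np.zeros(3, dtype=np.float32))
        return link

    src, dst = make_link(), make_link()
    src.x.grad = np.full(3, 2.0, dtype=np.float32)
    dst.x.grad = np.full(3, -2.0, dtype=np.float32)
    dst.addgrads(src)  # parameter-wise accumulation: -2 + 2 == 0
    assert (dst.x.grad == 0).all()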
Example 3
    def __init__(self, ch=512, enable_blur=False):
        super(Discriminator, self).__init__()
        self.max_stage = 17

        with self.init_scope():
            # NOTE: called in reversed order.
            self.blocks = chainer.ChainList(
                DiscriminatorBlockBase(ch),
                DiscriminatorBlock(ch, ch, enable_blur=enable_blur),
                DiscriminatorBlock(ch, ch, enable_blur=enable_blur),
                DiscriminatorBlock(ch, ch, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 2, ch, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 4, ch // 2, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 8, ch // 4, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 16, ch // 8, enable_blur=enable_blur),
                DiscriminatorBlock(ch // 32, ch // 16, enable_blur=enable_blur),)
            self.ins = chainer.ChainList(
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch, 1, 1, 0),
                EqualizedConv2d(3, ch // 2, 1, 1, 0),
                EqualizedConv2d(3, ch // 4, 1, 1, 0),
                EqualizedConv2d(3, ch // 8, 1, 1, 0),
                EqualizedConv2d(3, ch // 16, 1, 1, 0),
                EqualizedConv2d(3, ch // 32, 1, 1, 0),)
            self.enable_blur = enable_blur
Example 4
    def __init__(self,
                 n_in_node,
                 edge_types,
                 n_hid,
                 do_prob=0.,
                 skip_first=False):
        super(RNNDecoder, self).__init__()
        self.dropout_prob = do_prob
        self.msg_out_shape = n_hid
        self.skip_first_edge_type = skip_first

        with self.init_scope():
            self.msg_fc1 = chainer.ChainList(
                *[L.Linear(2 * n_hid, n_hid) for _ in range(edge_types)])
            self.msg_fc2 = chainer.ChainList(
                *[L.Linear(n_hid, n_hid) for _ in range(edge_types)])

            self.hidden_r = L.Linear(n_hid, n_hid, nobias=True)
            self.hidden_i = L.Linear(n_hid, n_hid, nobias=True)
            self.hidden_h = L.Linear(n_hid, n_hid, nobias=True)

            self.input_r = L.Linear(n_in_node, n_hid, nobias=True)
            self.input_i = L.Linear(n_in_node, n_hid, nobias=True)
            self.input_n = L.Linear(n_in_node, n_hid, nobias=True)

            self.out_fc1 = L.Linear(n_hid, n_hid)
            self.out_fc2 = L.Linear(n_hid, n_hid)
            self.out_fc3 = L.Linear(n_hid, n_in_node)

        logger = logging.getLogger(__name__)
        logger.info('Using learned recurrent interaction net decoder.')
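
In NRI-style decoders, one Linear pair per edge type is picked out of these
ChainLists when messages are computed. A hedged sketch of that loop
(pre_msg, rel_type and the masking scheme are assumptions, and the
edge-wise 2-D reshaping around the Linear calls is omitted):

    start_idx = 1 if self.skip_first_edge_type else 0
    all_msgs = 0
    for i in range(start_idx, len(self.msg_fc1)):
        msg = F.tanh(self.msg_fc1[i](pre_msg))
        msg = F.dropout(msg, ratio=self.dropout_prob)
        msg = F.tanh(self.msg_fc2[i](msg))
        # keep only the messages sent over edges of type i
        all_msgs += msg * rel_type[:, :, i:i + 1]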
Example 5
    def test_shared_link_copy(self):
        head = L.Linear(2, 2)
        model_a = chainer.ChainList(head.copy(), L.Linear(2, 3))
        model_b = chainer.ChainList(head.copy(), L.Linear(2, 4))
        a_params = dict(model_a.namedparams())
        b_params = dict(model_b.namedparams())
        self.assertEqual(a_params['/0/W'].array.ctypes.data,
                         b_params['/0/W'].array.ctypes.data)
        self.assertEqual(a_params['/0/b'].array.ctypes.data,
                         b_params['/0/b'].array.ctypes.data)
        import copy
        model_a_copy = copy.deepcopy(model_a)
        model_b_copy = copy.deepcopy(model_b)
        a_copy_params = dict(model_a_copy.namedparams())
        b_copy_params = dict(model_b_copy.namedparams())
        # When A and B are separately deepcopied, head is no longer shared
        self.assertNotEqual(a_copy_params['/0/W'].array.ctypes.data,
                            b_copy_params['/0/W'].array.ctypes.data)
        self.assertNotEqual(a_copy_params['/0/b'].array.ctypes.data,
                            b_copy_params['/0/b'].array.ctypes.data)

        model_total_copy = copy.deepcopy(chainer.ChainList(model_a, model_b))
        model_a_copy = model_total_copy[0]
        model_b_copy = model_total_copy[1]
        a_copy_params = dict(model_a_copy.namedparams())
        b_copy_params = dict(model_b_copy.namedparams())
        # When ChainList(A, B) is deepcopied, head is still shared!
        self.assertEqual(a_copy_params['/0/W'].array.ctypes.data,
                         b_copy_params['/0/W'].array.ctypes.data)
        self.assertEqual(a_copy_params['/0/b'].array.ctypes.data,
                         b_copy_params['/0/b'].array.ctypes.data)
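
The behaviour verified above follows directly from copy.deepcopy's memo
dictionary: objects reachable more than once within a single deepcopy call
are copied exactly once, while separate calls know nothing about each
other. The same effect without any Chainer objects:

    import copy

    shared = [1, 2]
    together = copy.deepcopy((shared, shared))
    assert together[0] is together[1]  # one copy, still shared (memo hit)

    a, b = copy.deepcopy(shared), copy.deepcopy(shared)
    assert a is not b  # two separate calls, two separate copies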
Example 6
 def __init__(self,
              out_dim,
              hidden_dim=32,
              n_layers=4,
              n_atom_types=MAX_ATOMIC_NUM,
              use_batch_norm=False,
              readout=None):
     super(RSGCN, self).__init__()
     in_dims = [hidden_dim for _ in range(n_layers)]
     out_dims = [hidden_dim for _ in range(n_layers)]
     out_dims[n_layers - 1] = out_dim
     with self.init_scope():
         self.embed = chainer_chemistry.links.EmbedAtomID(
             in_size=n_atom_types, out_size=hidden_dim)
         self.gconvs = chainer.ChainList(*[
             RSGCNUpdate(in_dims[i], out_dims[i]) for i in range(n_layers)
         ])
         if use_batch_norm:
             self.bnorms = chainer.ChainList(*[
                 chainer_chemistry.links.GraphBatchNormalization(
                     out_dims[i]) for i in range(n_layers)
             ])
         else:
             self.bnorms = [None for _ in range(n_layers)]
         if isinstance(readout, chainer.Link):
             self.readout = readout
     if not isinstance(readout, chainer.Link):
         self.readout = readout or rsgcn_readout_sum
     self.out_dim = out_dim
     self.hidden_dim = hidden_dim
     self.n_layers = n_layers
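
The None placeholders in self.bnorms (a plain list, so nothing extra is
registered as a link) keep the update loop uniform. A minimal sketch of
that loop, assuming RSGCNUpdate takes the hidden state and the adjacency
matrix:

    for gconv, bnorm in zip(self.gconvs, self.bnorms):
        h = gconv(h, adj)  # assumed signature
        if bnorm is not None:
            h = bnorm(h)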
Example 7
 def __init__(self, out_channel, n_condition, n_layers, n_channel):
     super(WaveNet, self).__init__()
     dilated_convs = chainer.ChainList()
     residual_convs = chainer.ChainList()
     skip_convs = chainer.ChainList()
     condition_convs = chainer.ChainList()
     for i in range(n_layers):
         dilated_convs.add_link(
             weight_norm(
                 L.Convolution1D(n_channel,
                                 2 * n_channel,
                                 3,
                                 pad=2**i,
                                 dilate=2**i)))
         residual_convs.add_link(
             weight_norm(L.Convolution1D(n_channel, n_channel, 1)))
         skip_convs.add_link(
             weight_norm(L.Convolution1D(n_channel, n_channel, 1)))
         condition_convs.add_link(
             weight_norm(L.Convolution1D(n_condition, 2 * n_channel, 1)))
     with self.init_scope():
         self.input_conv = weight_norm(
             L.Convolution1D(out_channel // 2, n_channel, 1))
         self.dilated_convs = dilated_convs
         self.residual_convs = residual_convs
         self.skip_convs = skip_convs
         self.condition_convs = condition_convs
         self.output_conv = L.Convolution1D(
             n_channel,
             out_channel,
             1,
             initialW=chainer.initializers.Zero())
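
Building the ChainLists before init_scope and assigning them inside it is
valid: registration happens at attribute assignment. A hedged sketch of how
such per-layer lists are typically consumed in a WaveNet-style residual
stack (names and the gated-activation split are assumptions, not the
original forward pass):

    def _residual_stack(self, x, condition):
        skip_total = 0
        for dilated, residual, skip, cond in zip(
                self.dilated_convs, self.residual_convs,
                self.skip_convs, self.condition_convs):
            z = dilated(x) + cond(condition)
            z_tanh, z_sigmoid = F.split_axis(z, 2, axis=1)
            z = F.tanh(z_tanh) * F.sigmoid(z_sigmoid)  # gated activation
            x = x + residual(z)
            skip_total = skip_total + skip(z)
        return skip_total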
Example 8
    def __init__(self,
                 n_class,
                 aspect_ratios,
                 initialW=None,
                 initial_bias=None):
        self.n_class = n_class
        self.aspect_ratios = aspect_ratios

        super(ExtendedMultibox, self).__init__()
        with self.init_scope():
            self.extconv = chainer.ChainList()
            self.loc = chainer.ChainList()
            self.conf = chainer.ChainList()
            self.ext = chainer.ChainList()

        if initialW is None:
            initialW = initializers.LeCunUniform()
        if initial_bias is None:
            initial_bias = initializers.Zero()
        init = {'initialW': initialW, 'initial_bias': initial_bias}

        for i in range(3):
            self.ext.add_link(ExtensionModule(i == 1, **init))

        for ar in aspect_ratios:
            n = (len(ar) + 1) * 2
            self.extconv.add_link(ExtendedConv(**init))
            self.loc.add_link(L.Convolution2D(n * 4, 3, pad=1, **init))
            self.conf.add_link(
                L.Convolution2D(n * self.n_class, 3, pad=1, **init))
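
This multibox head (like the RefineDet and Residual variants in the next
examples) pairs one loc and one conf convolution with each source feature
map. A hedged sketch of the shared forward pattern, following the usual SSD
reshape convention (ys is the assumed list of feature maps):

    def forward(self, ys):
        locs, confs = [], []
        for y, loc, conf in zip(ys, self.loc, self.conf):
            loc_out = F.transpose(loc(y), (0, 2, 3, 1))
            locs.append(F.reshape(loc_out, (loc_out.shape[0], -1, 4)))
            conf_out = F.transpose(conf(y), (0, 2, 3, 1))
            confs.append(F.reshape(
                conf_out, (conf_out.shape[0], -1, self.n_class)))
        return F.concat(locs, axis=1), F.concat(confs, axis=1)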
Example 9
    def __init__(self,
                 n_class,
                 aspect_ratios,
                 initialW=None,
                 initial_bias=None):
        self.n_class = n_class
        self.aspect_ratios = aspect_ratios

        super(MultiboxWithTCB, self).__init__()
        with self.init_scope():
            self.arm_loc = chainer.ChainList()
            self.arm_conf = chainer.ChainList()
            self.tcb = chainer.ChainList()
            self.odm_loc = chainer.ChainList()
            self.odm_conf = chainer.ChainList()

        if initialW is None:
            initialW = initializers.LeCunUniform()
        if initial_bias is None:
            initial_bias = initializers.Zero()
        init = {'initialW': initialW, 'initial_bias': initial_bias}

        for i in range(3):
            self.tcb.add_link(TransferConnection(**init))
        self.tcb.add_link(TransferConnectionEnd(**init))

        for ar in aspect_ratios:
            n = (len(ar) + 1) * 2 - 1
            self.arm_loc.add_link(L.Convolution2D(n * 4, 3, pad=1, **init))
            self.arm_conf.add_link(L.Convolution2D(n, 3, pad=1, **init))
            self.odm_loc.add_link(L.Convolution2D(n * 4, 3, pad=1, **init))
            self.odm_conf.add_link(
                L.Convolution2D(n * self.n_class, 3, pad=1, **init))
Example 10
 def __init__(self,
              out_dim,
              hidden_dim=16,
              n_layers=4,
              max_degree=6,
              n_atom_types=MAX_ATOMIC_NUM,
              concat_hidden=False,
              dropout_ratio=0):
     super(NFPDrop, self).__init__()
     num_degree_type = max_degree + 1
     with self.init_scope():
         self.embed = chainer_chemistry.links.EmbedAtomID(
             in_size=n_atom_types, out_size=hidden_dim)
         self.layers = chainer.ChainList(*[
             NFPUpdate(hidden_dim, hidden_dim, max_degree=max_degree)
             for _ in range(n_layers)
         ])
         self.read_out_layers = chainer.ChainList(
             *[NFPReadout(hidden_dim, out_dim) for _ in range(n_layers)])
     self.out_dim = out_dim
     self.hidden_dim = hidden_dim
     self.max_degree = max_degree
     self.num_degree_type = num_degree_type
     self.n_layers = n_layers
     self.concat_hidden = concat_hidden
     self.dropout_ratio = dropout_ratio
Example 11
    def __init__(self,
                 n_class,
                 aspect_ratios,
                 initialW=None,
                 initial_bias=None):
        self.n_class = n_class
        self.aspect_ratios = aspect_ratios

        super(ResidualMultibox, self).__init__()
        with self.init_scope():
            self.res = chainer.ChainList()
            self.loc = chainer.ChainList()
            self.conf = chainer.ChainList()

        if initialW is None:
            initialW = initializers.LeCunUniform()
        if initial_bias is None:
            initial_bias = initializers.Zero()
        init = {'initialW': initialW, 'initial_bias': initial_bias}

        for ar in aspect_ratios:
            n = (len(ar) + 1) * 2
            self.res.add_link(Residual(**init))
            self.loc.add_link(L.Convolution2D(n * 4, 3, pad=1, **init))
            self.conf.add_link(
                L.Convolution2D(n * self.n_class, 3, pad=1, **init))
Example 12
    def __init__(self,
                 n_hidden=128,
                 bottom_width=4,
                 ch=1024,
                 wscale=0.02,
                 z_distribution="normal"):
        super().__init__()
        self.n_hidden = n_hidden
        self.ch = ch
        self.bottom_width = bottom_width
        self.z_distribution = z_distribution

        with self.init_scope():
            w = chainer.initializers.Normal(wscale)
            self.dense = ResNetInputDense(n_hidden, bottom_width, ch)
            self.resblockups = chainer.ChainList(
                ResNetResBlockUp(ch, ch),
                ResNetResBlockUp(ch, ch // 2),
                ResNetResBlockUp(ch // 2, ch // 4),
                ResNetResBlockUp(ch // 4, ch // 8),
                ResNetResBlockUp(ch // 8, ch // 16),
                ResNetResBlockUp(ch // 16, ch // 32),
            )
            self.finals = chainer.ChainList(
                L.BatchNormalization(ch // 32),
                LinkRelu(),
                L.Convolution2D(ch // 32, 3, 3, 1, 1, initialW=w),
                LinkTanh(),
            )
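
Wrapping the activations in links (LinkRelu, LinkTanh) lets the whole
output head run as one uniform loop. A minimal sketch of the implied
forward pass (assumed, not from the source):

    def forward(self, z):
        h = self.dense(z)
        for block in self.resblockups:
            h = block(h)
        for layer in self.finals:
            h = layer(h)
        return h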
Example 13
    def __init__(self, base=32, res_layers=9):
        super(Generator, self).__init__()

        res_img = chainer.ChainList()
        res_mask = chainer.ChainList()

        for _ in range(res_layers):
            res_img.add_link(ResBlock(base * 4, base * 4))

        for _ in range(res_layers):
            res_mask.add_link(ResBlock(base * 4, base * 4))

        w = initializers.Normal(0.02)
        with self.init_scope():
            self.c0_img = L.Convolution2D(3, base, 7, 1, 3, initialW=w)
            self.cbr0_img = CBR(base, base * 2, down=True)
            self.cbr1_img = CBR(base * 2, base * 4, down=True)
            self.res_img = res_img
            self.cbr2_img = CBR(base * 8, base * 2, up=True)
            self.cbr3_img = CBR(base * 2, base, up=True)
            self.c1_img = L.Convolution2D(base, 3, 7, 1, 3, initialW=w)

            self.bn0_img = L.BatchNormalization(base)
            self.in0_img = InstanceNormalization(base)

            self.c0_mask = L.Convolution2D(1, base, 7, 1, 3, initialW=w)
            self.cbr0_mask = CBR(base, base * 2, down=True)
            self.cbr1_mask = CBR(base * 2, base * 4, down=True)
            self.res_mask = res_mask
            self.cbr2_mask = CBR(base * 12, base * 2, up=True)
            self.cbr3_mask = CBR(base * 2, base, up=True)
            self.c1_mask = L.Convolution2D(base, 1, 7, 1, 3, initialW=w)

            self.bn0_mask = L.BatchNormalization(base)
            self.in0_mask = InstanceNormalization(base)
Example 14
    def __init__(
            self,
            out_dim,  # type: int
            hidden_channels=16,  # type: int
            n_update_layers=4,  # type: int
            n_atom_types=MAX_ATOMIC_NUM,  # type: int
            concat_hidden=False,  # type: bool
            weight_tying=True,  # type: bool
            n_edge_types=4,  # type: int
            nn=None,  # type: Optional[chainer.Link]
            message_func='edgenet',  # type: str
            readout_func='set2set',  # type: str
    ):
        # type: (...) -> None
        super(MPNN, self).__init__()
        if message_func not in ('edgenet', 'ggnn'):
            raise ValueError(
                'Invalid message function: {}'.format(message_func))
        if readout_func not in ('set2set', 'ggnn'):
            raise ValueError(
                'Invalid readout function: {}'.format(readout_func))
        n_readout_layer = n_update_layers if concat_hidden else 1
        n_message_layer = 1 if weight_tying else n_update_layers
        with self.init_scope():
            # Update
            self.embed = EmbedAtomID(out_size=hidden_channels,
                                     in_size=n_atom_types)
            if message_func == 'ggnn':
                self.update_layers = chainer.ChainList(*[
                    GGNNUpdate(hidden_channels=hidden_channels,
                               n_edge_types=n_edge_types)
                    for _ in range(n_message_layer)
                ])
            else:
                self.update_layers = chainer.ChainList(*[
                    MPNNUpdate(hidden_channels=hidden_channels, nn=nn)
                    for _ in range(n_message_layer)
                ])

            # Readout
            if readout_func == 'ggnn':
                self.readout_layers = chainer.ChainList(*[
                    GGNNReadout(out_dim=out_dim,
                                in_channels=hidden_channels * 2)
                    for _ in range(n_readout_layer)
                ])
            else:
                self.readout_layers = chainer.ChainList(*[
                    MPNNReadout(out_dim=out_dim,
                                in_channels=hidden_channels,
                                n_layers=1) for _ in range(n_readout_layer)
                ])
        self.out_dim = out_dim
        self.hidden_channels = hidden_channels
        self.n_update_layers = n_update_layers
        self.n_edge_types = n_edge_types
        self.concat_hidden = concat_hidden
        self.weight_tying = weight_tying
        self.message_func = message_func
        self.readout_func = readout_func
Example 15
    def __init__(self, n_units, n_vocab, encoder, max_memory, hops):
        super(MemNN, self).__init__()

        with self.init_scope():
            self.embeds = chainer.ChainList()
            self.temporals = chainer.ChainList()

        normal = initializers.Normal()
        # Share both embedding matrices between adjacent layers
        for _ in six.moves.range(hops + 1):
            self.embeds.append(L.EmbedID(n_vocab, n_units, initialW=normal))
            self.temporals.append(
                L.EmbedID(max_memory, n_units, initialW=normal))

        self.memories = [
            Memory(self.embeds[i], self.embeds[i + 1], self.temporals[i],
                   self.temporals[i + 1], encoder)
            for i in six.moves.range(hops)
        ]
        # The question embedding is the same as the input embedding of the
        # first layer
        self.B = self.embeds[0]
        # The answer prediction matrix W is the same as the final output layer
        self.W = lambda u: F.linear(u, self.embeds[-1].W)

        self.encoder = encoder

        self.n_units = n_units
        self.max_memory = max_memory
        self.hops = hops
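
hops + 1 embeddings are allocated so that consecutive Memory layers can
share them: hop i's output embedding doubles as hop i+1's input embedding
(the "adjacent" weight tying of end-to-end memory networks), and B and W
reuse the two ends of the chain. A hedged sketch of the implied query loop
(the Memory interface and encoder call are assumptions):

    u = self.encoder(self.B, question)  # assumed encoder call
    for memory in self.memories:
        u = memory.query(u, sentences)  # assumed Memory interface
    answer_scores = self.W(u)  # reuses self.embeds[-1].W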
Example 16
    def __init__(self,
                 n_class,
                 aspect_ratios,
                 initialW=None,
                 initial_bias=None):
        self.n_class = n_class
        self.aspect_ratios = aspect_ratios

        self._input_multiplier = 128

        super().__init__()
        with self.init_scope():
            self.loc = chainer.ChainList()
            self.features = chainer.ChainList()

        if initialW is None:
            initialW = initializers.LeCunUniform()
        if initial_bias is None:
            initial_bias = initializers.Zero()
        init = {'initialW': initialW, 'initial_bias': initial_bias}

        for ar in aspect_ratios:
            n = (len(ar) + 1) * 2
            self.loc.add_link(L.Convolution2D(n * 4, 3, pad=1, **init))
            self.features.add_link(
                L.Convolution2D(n * self._input_multiplier, 3, pad=1, **init))
Example 17
    def __init__(self,
                 n_class,
                 aspect_ratios,
                 feature_channel,
                 initialW=None,
                 initial_bias=None):
        self.n_class = n_class
        self.aspect_ratios = aspect_ratios
        self.feature_map_channel = feature_channel

        super(Multibox, self).__init__()
        with self.init_scope():
            self.loc = chainer.ChainList()
            self.conf = chainer.ChainList()

        if initialW is None:
            initialW = initializers.LeCunUniform()
        if initial_bias is None:
            initial_bias = initializers.Zero()
        init = {'initialW': initialW, 'initial_bias': initial_bias}

        for ar, f_channel in zip(aspect_ratios, feature_channel):
            # n = (len(ar) + 1) * 2
            n = len(ar) + 1
            self.loc.add_link(MB_module(f_channel, n * 4, 3))
            self.conf.add_link(MB_module(f_channel, n * self.n_class, 3))
Example 18
    def __init__(self, out_dim, hidden_dim=16,
                 n_layers=4, n_atom_types=MAX_ATOMIC_NUM,
                 dropout_ratio=0.5,
                 concat_hidden=False,
                 weight_tying=True,
                 activation=functions.identity):
        super(GIN, self).__init__()

        n_message_layer = 1 if weight_tying else n_layers
        n_readout_layer = n_layers if concat_hidden else 1
        with self.init_scope():
            # embedding
            self.embed = EmbedAtomID(out_size=hidden_dim, in_size=n_atom_types)

            # two non-linear MLP part
            self.update_layers = chainer.ChainList(*[GINUpdate(
                hidden_dim=hidden_dim, dropout_ratio=dropout_ratio)
                for _ in range(n_message_layer)])

            # Readout
            self.readout_layers = chainer.ChainList(*[GGNNReadout(
                out_dim=out_dim, hidden_dim=hidden_dim,
                activation=activation, activation_agg=activation)
                for _ in range(n_readout_layer)])
        # end with

        self.out_dim = out_dim
        self.hidden_dim = hidden_dim
        self.n_message_layers = n_message_layer
        self.n_readout_layer = n_readout_layer
        self.dropout_ratio = dropout_ratio
        self.concat_hidden = concat_hidden
        self.weight_tying = weight_tying
Example 19
 def __init__(self, out_dim, hidden_dim=16, n_layers=4,
              n_atom_types=MAX_ATOMIC_NUM, concat_hidden=False,
              weight_tying=True, activation=functions.identity,
              num_edge_type=4):
     super(GGNN, self).__init__()
     n_readout_layer = n_layers if concat_hidden else 1
     n_message_layer = 1 if weight_tying else n_layers
     with self.init_scope():
         # Update
         self.embed = EmbedAtomID(out_size=hidden_dim, in_size=n_atom_types)
         self.update_layers = chainer.ChainList(*[GGNNUpdate(
             hidden_dim=hidden_dim, num_edge_type=num_edge_type)
             for _ in range(n_message_layer)])
         # Readout
         self.readout_layers = chainer.ChainList(*[GGNNReadout(
             out_dim=out_dim, hidden_dim=hidden_dim,
             activation=activation, activation_agg=activation)
             for _ in range(n_readout_layer)])
     self.out_dim = out_dim
     self.hidden_dim = hidden_dim
     self.n_layers = n_layers
     self.num_edge_type = num_edge_type
     self.activation = activation
     self.concat_hidden = concat_hidden
     self.weight_tying = weight_tying
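
With weight_tying=True only one GGNNUpdate is built, so the time-step loop
has to index the ChainList accordingly. A minimal sketch (names and the
update signature are assumptions):

    for step in range(self.n_layers):
        update = self.update_layers[0 if self.weight_tying else step]
        h = update(h, adj)  # assumed signature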
Example 20
 def __init__(self, n_in, n_middle, n_turn):
     super(NaiveFCColorPainter, self).__init__(
         l1=L.Linear(n_middle, n_middle),
         l2=L.Linear(n_middle, n_middle),
         l3=L.Linear(n_middle, n_middle),
         l4=L.Linear(n_middle, n_in),
         act1=L.PReLU(n_middle),
         act2=L.PReLU(n_middle),
         act3=L.PReLU(n_middle),
         bn_list2=chainer.ChainList(*[
             L.BatchNormalization(n_middle, use_cudnn=False)
             for i in range(n_turn)
         ]),
         bn_list3=chainer.ChainList(*[
             L.BatchNormalization(n_middle, use_cudnn=False)
             for i in range(n_turn)
         ]),
         l1_attention=L.Linear(n_middle, n_middle),
         act1_attention=L.PReLU(n_middle),
         l2_attention=L.Linear(n_middle, n_in),
     )
     field = n_in // 3
     rang = int(field**0.5)
     self.image_shape = (3, rang, rang)
     self.image_size = n_in
Example 21
 def __init__(self,
              hidden_dim,
              out_dim,
              n_layers,
              n_atom_types=MAX_ATOMIC_NUM,
              concat_hidden=False,
              weight_tying=True):
     super(GGNN, self).__init__()
     n_readout_layer = 1 if concat_hidden else n_layers
     n_message_layer = 1 if weight_tying else n_layers
     with self.init_scope():
         # Update
         self.embed = EmbedAtomID(out_size=hidden_dim, in_size=n_atom_types)
         self.message_layers = chainer.ChainList(*[
             GraphLinear(hidden_dim, self.NUM_EDGE_TYPE * hidden_dim)
             for _ in range(n_message_layer)
         ])
         self.update_layer = links.GRU(2 * hidden_dim, hidden_dim)
         # Readout
         self.i_layers = chainer.ChainList(*[
             GraphLinear(2 * hidden_dim, out_dim)
             for _ in range(n_readout_layer)
         ])
         self.j_layers = chainer.ChainList(*[
             GraphLinear(hidden_dim, out_dim)
             for _ in range(n_readout_layer)
         ])
     self.out_dim = out_dim
     self.hidden_dim = hidden_dim
     self.n_layers = n_layers
     self.concat_hidden = concat_hidden
     self.weight_tying = weight_tying
Example 22
    def __init__(self,
                 n_in_node,
                 edge_types,
                 msg_hid,
                 msg_out,
                 n_hid,
                 do_prob=0.,
                 skip_first=False):
        super(MLPDecoder, self).__init__()

        w = chainer.initializers.LeCunUniform(scale=(1. / np.sqrt(3)))
        b = self._bias_initializer

        with self.init_scope():
            self.msg_fc1 = chainer.ChainList(
                *[L.Linear(2 * n_in_node, msg_hid) for _ in range(edge_types)])
            self.msg_fc2 = chainer.ChainList(
                *[L.Linear(msg_hid, msg_out) for _ in range(edge_types)])
            self.out_fc1 = L.Linear(n_in_node + msg_out,
                                    n_hid,
                                    initialW=w,
                                    initial_bias=b)
            self.out_fc2 = L.Linear(n_hid, n_hid, initialW=w, initial_bias=b)
            self.out_fc3 = L.Linear(n_hid,
                                    n_in_node,
                                    initialW=w,
                                    initial_bias=b)

        self.msg_out_shape = msg_out
        self.skip_first_edge_type = skip_first

        logger = logging.getLogger(__name__)
        logger.info('Using learned interaction net decoder.')

        self.dropout_prob = do_prob
Example 23
    def __init__(self, n_in, n_middle, n_units, n_turn,
                 sensor, language, painter, reconstructor=None):
        super(NaiveListener, self).__init__(
            sensor=sensor,
            painter=painter,
            language=language,
            l1_language=L.Linear(n_units, n_middle),
            l1_canvas=L.Linear(n_middle, n_middle),
            l2=L.Linear(n_middle, n_middle),
            l3=L.Linear(n_middle, n_middle),
            bn_list2=chainer.ChainList(*[
                L.BatchNormalization(n_middle, use_cudnn=False)
                for i in range(n_turn)
            ]),
            bn_list3=chainer.ChainList(*[
                L.BatchNormalization(n_middle, use_cudnn=False)
                for i in range(n_turn)
            ]),
            act1=L.PReLU(n_middle),
            act2=L.PReLU(n_middle),
            act3=L.PReLU(n_middle),
        )

        if reconstructor:
            self.add_link('reconstructor', reconstructor)
        else:
            self.reconstructor = None

        self.act = F.relu
Example 24
    def __init__(self,
                 action_space,
                 n_input_channels=4,
                 activation=F.relu,
                 bias=0.1,
                 hiddens=None):
        n_actions = action_space.high + 1
        self.n_input_channels = n_input_channels
        self.activation = activation
        self.hiddens = [512] if hiddens is None else hiddens

        super(NetForMultiDimensionalSoftmax, self).__init__()
        with self.init_scope():
            self.conv_layers = chainer.ChainList(
                L.Convolution2D(n_input_channels,
                                32,
                                8,
                                stride=4,
                                initial_bias=bias),
                L.Convolution2D(32, 64, 4, stride=2, initial_bias=bias),
                L.Convolution2D(64, 64, 3, stride=1, initial_bias=bias))
            self.hidden_layers = chainer.ChainList(
                *[L.Linear(None, hidden) for hidden in self.hiddens])
            self.action_layers = chainer.ChainList(
                *[L.Linear(None, n) for n in n_actions])
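
The three ChainLists above map onto a convolutional trunk, a
fully-connected trunk, and one output head per action dimension. A hedged
sketch of the forward pass (assumed, not from the source):

    def forward(self, x):
        h = x
        for conv in self.conv_layers:
            h = self.activation(conv(h))
        for fc in self.hidden_layers:
            h = self.activation(fc(h))
        # one logit head per dimension of the multi-dimensional action
        return [head(h) for head in self.action_layers]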
Example 25
 def setUp(self):
     self.l1 = chainer.Link(x=(2, 3))
     self.l2 = chainer.Link(x=2)
     self.l3 = chainer.Link(x=3)
     self.c1 = chainer.ChainList(self.l1)
     self.c1.add_link(self.l2)
     self.c2 = chainer.ChainList(self.c1, self.l3)
Example 26
 def __init__(self,
              ch=512,
              pooling_comp=1.0,
              channel_evolution=(512, 512, 512, 512, 256, 128, 64, 32, 16),
              first_channel=3,
              use_both_conditional_and_latent=False):
     super().__init__()
     self.use_both_conditional_and_latent = use_both_conditional_and_latent
     self.max_stage = (len(channel_evolution) - 1) * 2
      # compensation: average pooling is 0.5-Lipschitz
      self.pooling_comp = pooling_comp
     with self.init_scope():
         ins = [
             EqualizedConv2d(first_channel, channel_evolution[0], 1, 1, 0)
         ]
         bs = [
             chainer.Link()  # dummy
         ]
         for i in range(1, len(channel_evolution)):
             ins.append(
                 EqualizedConv2d(first_channel, channel_evolution[i], 1, 1,
                                 0))
             bs.append(
                 DiscriminatorBlock(channel_evolution[i],
                                    channel_evolution[i - 1], pooling_comp))
         self.ins = chainer.ChainList(*ins)
         self.bs = chainer.ChainList(*bs)
         self.out0 = EqualizedConv2d(ch + 1, ch, 3, 1, 1)
         self.out1 = EqualizedConv2d(ch, ch, 4, 1, 0)
         self.out2 = EqualizedLinear(ch, 1)
Example 27
    def __init__(self, ch=512, enable_blur=False):
        super(StyleGenerator, self).__init__()
        self.max_stage = 17
        with self.init_scope():
            self.blocks = chainer.ChainList(
                SynthesisBlock(ch, ch, upsample=False),  # 4
                SynthesisBlock(ch, ch, upsample=True, enable_blur=enable_blur),  # 8
                SynthesisBlock(ch, ch, upsample=True, enable_blur=enable_blur),  # 16
                SynthesisBlock(ch, ch, upsample=True, enable_blur=enable_blur),  # 32
                SynthesisBlock(ch // 2, ch, upsample=True, enable_blur=enable_blur),  # 64
                SynthesisBlock(ch // 4, ch // 2, upsample=True, enable_blur=enable_blur),  # 128
                SynthesisBlock(ch // 8, ch // 4, upsample=True, enable_blur=enable_blur),  # 256
                SynthesisBlock(ch // 16, ch // 8, upsample=True, enable_blur=enable_blur),  # 512
                SynthesisBlock(ch // 32, ch // 16, upsample=True, enable_blur=enable_blur)  # 1024
            )
            self.outs = chainer.ChainList(
                EqualizedConv2d(ch, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 2, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 4, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 8, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 16, 3, 1, 1, 0, gain=1),
                EqualizedConv2d(ch // 32, 3, 1, 1, 0, gain=1)
            )

        self.n_blocks = len(self.blocks)
        self.image_size = 1024
        self.enable_blur = enable_blur
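
blocks[i] and outs[i] are paired per resolution in the usual
progressive-growing layout: run the synthesis blocks up to the current
stage, then convert the last feature map to RGB with the matching 1x1
convolution. A rough sketch (the block signature and stage bookkeeping are
assumptions; the real forward pass also alpha-blends adjacent stages):

    h = None
    for i, (block, to_rgb) in enumerate(zip(self.blocks, self.outs)):
        h = block(w, h)  # assumed signature; the first block ignores h
        if i == current_block:  # resolution selected by the growth stage
            return to_rgb(h)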
Example 28
    def __init__(self, n_hidden=512, ch=512,
                 channel_evolution=(512, 512, 512, 512, 256, 128, 64, 32, 16), conditional=False):
        super(ProgressiveGenerator, self).__init__()
        self.n_hidden = n_hidden
        self.max_stage = (len(channel_evolution) - 1) * 2
        with self.init_scope():
            self.c0 = EqualizedConv2d(n_hidden, ch, 4, 1, 3)
            self.c1 = EqualizedConv2d(ch, ch, 3, 1, 1)
            bs = [
                chainer.Link()  # dummy
            ]
            outs = [
                EqualizedConv2d(channel_evolution[0], 3, 1, 1, 0)
            ]

            for i in range(1, len(channel_evolution)):
                # conditional blocks take twice as many input channels
                in_channels = (channel_evolution[i - 1] * 2 if conditional
                               else channel_evolution[i - 1])
                bs.append(GeneratorBlock(in_channels, channel_evolution[i]))
                outs.append(
                    EqualizedConv2d(channel_evolution[i], 3, 1, 1, 0))
            self.bs = chainer.ChainList(*bs)
            self.outs = chainer.ChainList(*outs)
Example 29
 def __init__(self, out_dim, hidden_channels=32, n_update_layers=4,
              n_atom_types=MAX_ATOMIC_NUM,
              use_batch_norm=False, readout=None, dropout_ratio=0.5):
     super(RSGCN, self).__init__()
     in_dims = [hidden_channels for _ in range(n_update_layers)]
     out_dims = [hidden_channels for _ in range(n_update_layers)]
     out_dims[n_update_layers - 1] = out_dim
     if readout is None:
         readout = GeneralReadout()
     with self.init_scope():
          self.embed = chainer_chemistry.links.EmbedAtomID(
              out_size=hidden_channels, in_size=n_atom_types)
         self.gconvs = chainer.ChainList(
             *[RSGCNUpdate(in_dims[i], out_dims[i])
               for i in range(n_update_layers)])
         if use_batch_norm:
             self.bnorms = chainer.ChainList(
                 *[chainer_chemistry.links.GraphBatchNormalization(
                     out_dims[i]) for i in range(n_update_layers)])
         else:
             self.bnorms = [None for _ in range(n_update_layers)]
         if isinstance(readout, chainer.Link):
             self.readout = readout
     if not isinstance(readout, chainer.Link):
         self.readout = readout
     self.out_dim = out_dim
     self.hidden_channels = hidden_channels
     self.n_update_layers = n_update_layers
     self.dropout_ratio = dropout_ratio
Example 30
 def __init__(self,
              out_dim,
              hidden_channels=16,
              n_update_layers=4,
              max_degree=6,
              n_atom_types=MAX_ATOMIC_NUM,
              concat_hidden=False):
     super(NFP, self).__init__()
     n_degree_types = max_degree + 1
     with self.init_scope():
         self.embed = EmbedAtomID(in_size=n_atom_types,
                                  out_size=hidden_channels)
         self.layers = chainer.ChainList(*[
             NFPUpdate(
                 hidden_channels, hidden_channels, max_degree=max_degree)
             for _ in range(n_update_layers)
         ])
         self.readout_layers = chainer.ChainList(*[
             NFPReadout(out_dim=out_dim, in_channels=hidden_channels)
             for _ in range(n_update_layers)
         ])
     self.out_dim = out_dim
     self.hidden_channels = hidden_channels
     self.max_degree = max_degree
     self.n_degree_types = n_degree_types
     self.n_update_layers = n_update_layers
     self.concat_hidden = concat_hidden
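
One NFPReadout per update layer matches the NFP design: the graph
fingerprint is accumulated across layers rather than read out once at the
end. A hedged sketch of the implied loop (signatures assumed):

    g = 0
    for update, readout in zip(self.layers, self.readout_layers):
        h = update(h, adj)  # assumed signature
        g = g + readout(h)  # fingerprint accumulates layer by layer
    return g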