Code Example #1
    def __init__(self,
                 v_in_size,
                 out_size=None,
                 nobias=False,
                 initialW=None,
                 initial_bias=None,
                 residual=False):
        super(NodeAverageLink, self).__init__()

        if out_size is None:
            v_in_size, out_size = None, v_in_size
        self.out_size = out_size
        self.residual = residual
        with self.init_scope():
            W_initializer = initializers._get_initializer(initialW)
            self.Wc = Parameter(W_initializer)
            self.Wn = Parameter(W_initializer)
            if v_in_size is not None:
                self._initialize_params_v(v_in_size)
            if nobias:
                self.b = None
            else:
                if initial_bias is None:
                    initial_bias = 0
                bias_initializer = initializers._get_initializer(initial_bias)
                self.b = Parameter(bias_initializer, out_size)
Code Example #2
 def __init__(self, counts, vecDims):
     super(RVec, self).__init__()
     with self.init_scope():
         initializer = Normal(0.1)
         self.vecDims = vecDims
         self.edge2vec = Parameter(initializer)
         self.edge2vec.initialize((counts, vecDims))
Code Example #3
 def __init__(self, mu, sigma, x0):
     super(MVN, self).__init__()
     self.mu = mu
     self.sigma = sigma
     self.n_particle = x0.shape[0]
     with self.init_scope():
         self.theta = Parameter(initializer=x0)
Code Example #4
    def __init__(self, in_size, out_size=None, nobias=False,
                 initialV=None, initialU=None, initial_bias=None,
                 k=16):
        super(SVDLinear, self).__init__()

        if out_size is None:
            in_size, out_size = None, in_size
        self.out_size = out_size
        self.k = k
        with self.init_scope():
            U_initializer = initializers._get_initializer(initialU)
            V_initializer = initializers._get_initializer(initialV)

            # U is the (k, in_size) factor; V is the (out_size, k) factor.
            self.U = Parameter(U_initializer)
            self.U.to_gpu()
            self.V = Parameter(V_initializer)
            self.V.to_gpu()

            self.register_persistent('U')

            if in_size is not None:
                self._initialize_params(in_size)

            if nobias:
                self.b = None
            else:
                if initial_bias is None:
                    initial_bias = 0
                bias_initializer = initializers._get_initializer(initial_bias)
                self.b = Parameter(bias_initializer, out_size)
Code Example #5
File: networks.py Project: curegit/precure-stylegan
 def __init__(self, z_size=512, depth=8, channels=(512, 16), max_stage=9):
     super().__init__()
     self.z_size = z_size
     with self.init_scope():
         self.zero = Parameter(Zero(), 1)
         self.one = Parameter(One(), 1)
         self.mapper = FeatureMapper(z_size, depth)
         self.generator = ImageGenerator(z_size, *channels, max_stage)
Code Example #6
 def __init__(self, counts, vecDims):
     super(RVec, self).__init__()
     with self.init_scope():
         initializer = Uniform(6 / np.sqrt(vecDims))
         self.vecDims = vecDims
         self.edge2vec = Parameter(initializer)
         self.edge2vec.initialize((counts, vecDims))
         self.edge2vec2 = F.normalize(self.edge2vec)
Code Example #7
 def __init__(self, counts, vecDims):
     super(NVec, self).__init__()
     with self.init_scope():
         initializer = Uniform(6 / np.sqrt(vecDims))
         self.counts = counts
         self.vecDims = vecDims
         self.node2vec = Parameter(initializer)
         self._initialize_params()
Code Example #8
 def __init__(self, counts, vecDims):
     super(NVec, self).__init__()
     with self.init_scope():
         initializer = Normal(0.1)
         self.counts = counts
         self.vecDims = vecDims
         self.node2vec = Parameter(initializer)
         self._initialize_params()
Code Example #9
File: modules.py Project: iszhaoxin/KGI
    def __init__(self, hidden_units):
        super(npUnconcat, self).__init__()
        with self.init_scope():
            initializer = Normal()

            self.encoderL = L.Linear(None, hidden_units[0])
            self.encoderR = L.Linear(None, hidden_units[0])
            self.z = Parameter(initializer)
            self.z.initialize(hidden_units[1])
            self.decoderL = L.Linear(None, hidden_units[2])
            self.decoderR = L.Linear(None, hidden_units[2])
Code Example #10
 def __init__(self, size, in_channels, out_channels):
     super().__init__()
     with self.init_scope():
         self.c1 = Parameter(shape=(in_channels, 4, 4),
                             initializer=Normal(1.0))
         self.s1 = StyleAffineTransformation(size, in_channels)
         self.w1 = WeightModulatedConvolution(in_channels, out_channels)
         self.n1 = NoiseAdder()
         self.a1 = LeakyRelu()
         self.s2 = StyleAffineTransformation(size, out_channels)
         self.trgb = ToRGB(out_channels)
Code Example #11
class RVec(link.Link):
    def __init__(self, counts, vecDims):
        super(RVec, self).__init__()
        with self.init_scope():
            initializer = Normal(0.1)
            self.vecDims = vecDims
            self.edge2vec = Parameter(initializer)
            self.edge2vec.initialize((counts, vecDims))

    def forward(self, indexs):
        vecs = F.embed_id(indexs, self.edge2vec).reshape(-1, self.vecDims)
        return vecs
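
A minimal usage sketch for the RVec link above (the counts and vecDims values here are hypothetical):

# Hypothetical usage: look up embeddings for a batch of edge indices.
import numpy as np

rvec = RVec(counts=100, vecDims=32)
indexs = np.array([[0, 1], [2, 3]], dtype=np.int32)
vecs = rvec(indexs)  # F.embed_id gathers rows, then reshape -> (4, 32)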
Code Example #12
 def __init__(self,
              vocabulary_size: int,
              word_embeddings_size: int,
              hidden_layer_size: int,
              attention_hidden_layer_size: int,
              encoder_output_size: int,
              maxout_layer_size: int,
              maxout_pool_size: int = 2,
              ignore_label: int = -1,
              dynamic_attention: bool = False):
     super(Decoder, self).__init__()
     with self.init_scope():
         self.embed_id = L.EmbedID(vocabulary_size,
                                   word_embeddings_size,
                                   ignore_label=ignore_label)
         self.rnn = L.StatelessLSTM(
             word_embeddings_size + encoder_output_size,
             hidden_layer_size
         )
         self.maxout = L.Maxout(word_embeddings_size +
                                encoder_output_size +
                                hidden_layer_size,
                                maxout_layer_size,
                                maxout_pool_size)
         self.linear = L.Linear(maxout_layer_size, vocabulary_size)
         if dynamic_attention:
             self.attention = DynamicAttentionModule(
                 encoder_output_size,
                 attention_hidden_layer_size,
                 hidden_layer_size,
                 word_embeddings_size
             )
         else:
             self.attention = AttentionModule(
                 encoder_output_size,
                 attention_hidden_layer_size,
                 hidden_layer_size,
                 word_embeddings_size
             )
         self.bos_state = Parameter(
             initializer=self.xp.random.randn(
                 1,
                 hidden_layer_size
             ).astype('f')
         )
     self.vocabulary_size = vocabulary_size
     self.word_embeddings_size = word_embeddings_size
     self.hidden_layer_size = hidden_layer_size
     self.encoder_output_size = encoder_output_size
Code Example #13
File: make_parameter.py Project: ototoi/drt
def make_parameter(x):
    if isinstance(x, Parameter):
        return x  # already a learnable parameter
    elif isinstance(x, Variable):
        return x  # plain Variables pass through without becoming Parameters
    elif isinstance(x, (np.ndarray, np.generic)):
        return Parameter(x)  # arrays and numpy scalars wrap directly
    elif isinstance(x, list):
        x = np.array(x, dtype=np.float32)
        return Parameter(x)
    elif isinstance(x, float):
        x = np.array([x], dtype=np.float32)
        return Parameter(x)
    else:
        return Parameter(x)  # fall back to Parameter's own argument handling
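
A quick sketch exercising each branch of make_parameter (all values are illustrative):

# Hypothetical usage of make_parameter with the accepted input types.
import numpy as np
from chainer import Parameter, Variable

p1 = make_parameter(2.5)                      # float -> Parameter of shape (1,)
p2 = make_parameter([1.0, 2.0, 3.0])          # list -> float32 Parameter of shape (3,)
p3 = make_parameter(np.zeros(4, np.float32))  # ndarray -> Parameter
v1 = make_parameter(Variable(np.zeros(4)))    # Variable passes through unchanged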
Code Example #14
def test_mixed():
    try:
        instance_1 = NG_Mixed(Variable(np.array([0])), F.ReLU(), Parameter(np.array([0])), L.BatchNormalization(1))
        chainer.serializers.save_npz("tmp.npz", instance_1)
        instance_2 = NG_Mixed(Variable(np.array([1])), F.ReLU(), Parameter(np.array([1])), L.BatchNormalization(1))
        chainer.serializers.load_npz("tmp.npz", instance_2)
        assert (instance_1.p.data == instance_2.p.data).all()
        assert (instance_1.v.data == instance_2.v.data).all()
        print("Succeeded")
    except Exception as e:
        print("Failed")
        raise e
    finally:
        if exists("tmp.npz"):
            remove("tmp.npz")
Code Example #15
    def __init__(self, ch):
        super().__init__()

        with self.init_scope():
            self.gamma = Parameter(initializer=0,
                                   shape=(1, ch, 1, 1),
                                   name='noise_gamma')
Code Example #16
class MVN(Chain):
    def __init__(self, mu, sigma, x0):
        super(MVN, self).__init__()
        self.mu = mu
        self.sigma = sigma
        self.n_particle = x0.shape[0]
        with self.init_scope():
            self.theta = Parameter(initializer=x0)

    def logp(self):
        d = self.mu.shape[0]
        mean = np.broadcast_to(self.mu, (self.n_particle, d))
        ln_var = np.broadcast_to(2 * np.log(self.sigma), (self.n_particle, d))

        logp = -F.gaussian_nll(self.theta, mean, ln_var, reduce='no')
        logp = F.sum(logp, axis=1).reshape(-1)
        logp = F.broadcast_to(logp, (self.n_particle, self.n_particle))
        return logp

    def __call__(self):
        ker = rbf(self.theta.reshape(self.n_particle, -1))
        nlogp = -self.logp()
        loss = F.mean(F.sum(ker.data * nlogp + ker, axis=1))

        chainer.report(
            {
                'loss': loss,
                'nlogp': F.mean(nlogp[0]),
            },
            observer=self,
        )
        return loss
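
A minimal usage sketch for MVN (the particle count and dimensionality are hypothetical; note that __call__ additionally needs the external rbf() kernel, which the snippet does not show):

# Hypothetical usage: 10 particles of a 2-D Gaussian.
import numpy as np

mu = np.zeros(2, dtype=np.float32)
sigma = np.ones(2, dtype=np.float32)
x0 = np.random.randn(10, 2).astype(np.float32)
model = MVN(mu, sigma, x0)
logp = model.logp()  # shape (10, 10): per-particle log-density, broadcast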
Code Example #17
 def __init__(self,
              in_channels,
              out_channels,
              pointwise=False,
              demod=True,
              gain=root(2)):
     super().__init__()
     self.demod = demod
     self.ksize = 1 if pointwise else 3
     self.pad = 0 if pointwise else 1
     self.c = gain * root(1 / (in_channels * self.ksize**2))
     with self.init_scope():
         self.w = Parameter(shape=(out_channels, in_channels, self.ksize,
                                   self.ksize),
                            initializer=Normal(1.0))
         self.b = Parameter(shape=out_channels, initializer=Zero())
Code Example #18
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride=1,
                 pad=0,
                 initialV=None,
                 nobias=False,
                 cover_all=False):
        super(Convolution1D, self).__init__()
        ksize = conv_nd.as_tuple(ksize, 1)
        self.ksize = ksize
        self.nobias = nobias
        self.stride = stride
        self.pad = pad
        self.out_channels = out_channels
        self.in_channels = in_channels
        self.cover_all = cover_all

        self.initialV = initialV

        with self.init_scope():
            V_shape = (out_channels, in_channels) + ksize
            initialV = initializers._get_initializer(initialV)
            self.V = Parameter(initialV, V_shape)

        if nobias:
            self.b = None
Code Example #19
class SVDLinear(link.Link):
    """
        U x V
    """
    def __init__(self, in_size, out_size=None, nobias=False,
                 initialV=None, initialU=None, initial_bias=None,
                 k=16):
        super(SVDLinear, self).__init__()

        if out_size is None:
            in_size, out_size = None, in_size
        self.out_size = out_size
        self.k = k
        with self.init_scope():
            U_initializer = initializers._get_initializer(initialU)
            V_initializer = initializers._get_initializer(initialV)

            # U is the (k, in_size) factor; V is the (out_size, k) factor.
            self.U = Parameter(U_initializer)
            self.U.to_gpu()
            self.V = Parameter(V_initializer)
            self.V.to_gpu()

            self.register_persistent('U')

            if in_size is not None:
                self._initialize_params(in_size)

            if nobias:
                self.b = None
            else:
                if initial_bias is None:
                    initial_bias = 0
                bias_initializer = initializers._get_initializer(initial_bias)
                self.b = Parameter(bias_initializer, out_size)

    def _initialize_params(self, in_size):
        self.U.initialize((self.k, in_size))
        self.V.initialize((self.out_size, self.k))

    def __call__(self, x):
        """Applies the factorized linear layer.

        Note: the original author reported that this layer did not work
        on simple test data.

        Args:
            x (~chainer.Variable): Batch of input vectors.
        Returns:
            ~chainer.Variable: Output of the linear layer.
        """
        if self.U.data is None or self.V.data is None:
            in_size = x.shape[1]
            self._initialize_params(in_size)

        # x: (batch_size, in_size)
        # U: (k, in_size), V: (out_size, k)
        # (V (U x)) + b == W x + b, where the effective weight is W = V @ U
        W1 = linear.linear(x, self.U)
        return linear.linear(W1, self.V, self.b)
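
The point of the factorization is the parameter count: a full (out_size, in_size) weight has out_size * in_size entries, while the rank-k pair has only k * (in_size + out_size). For in_size=1024, out_size=256, k=16 that is 262,144 versus 20,480 weights. A minimal usage sketch (note the constructor calls to_gpu() unconditionally, so as written this needs a CUDA-enabled environment):

# Hypothetical usage: a rank-16 factorized linear layer.
import numpy as np

layer = SVDLinear(in_size=1024, out_size=256, k=16)
x = np.random.randn(8, 1024).astype(np.float32)
y = layer(x)  # shape (8, 256); U: (16, 1024), V: (256, 16)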
Code Example #20
class NVec(link.Link):
    def __init__(self, counts, vecDims):
        super(NVec, self).__init__()
        with self.init_scope():
            initializer = Uniform(6 / np.sqrt(vecDims))
            self.counts = counts
            self.vecDims = vecDims
            self.node2vec = Parameter(initializer)
            self._initialize_params()

    def _initialize_params(self):
        self.node2vec.initialize((self.counts, self.vecDims))

    def forward(self, indexs):
        self.nodeVecs = F.normalize(self.node2vec)
        vecs = F.embed_id(indexs, self.node2vec).reshape(-1, self.vecDims)
        return vecs
Code Example #21
class NodeAverageLink(link.Link):
    def __init__(self,
                 v_in_size,
                 out_size=None,
                 nobias=False,
                 initialW=None,
                 initial_bias=None,
                 residual=False):
        super(NodeAverageLink, self).__init__()

        if out_size is None:
            v_in_size, out_size = None, v_in_size
        self.out_size = out_size
        self.residual = residual
        with self.init_scope():
            W_initializer = initializers._get_initializer(initialW)
            self.Wc = Parameter(W_initializer)
            self.Wn = Parameter(W_initializer)
            if v_in_size is not None:
                self._initialize_params_v(v_in_size)
            if nobias:
                self.b = None
            else:
                if initial_bias is None:
                    initial_bias = 0
                bias_initializer = initializers._get_initializer(initial_bias)
                self.b = Parameter(bias_initializer, out_size)

    def _initialize_params_v(self, v_in_size):
        self.Wc.initialize((v_in_size, self.out_size))
        self.Wn.initialize((v_in_size, self.out_size))

    def __call__(self, vertex, edge, adj, num_array):
        if self.Wc.array is None:
            v_in_size = vertex.shape[1]
            self._initialize_params_v(v_in_size)

        neighbor = F.matmul(vertex, self.Wn)
        neighbor = F.sparse_matmul(adj, neighbor) / num_array
        center = F.matmul(vertex, self.Wc)
        output = center + neighbor
        if self.residual:
            output = vertex + output
        if self.b is not None:
            output += self.b
        return output, edge, adj, num_array
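
A minimal usage sketch for NodeAverageLink on a toy 3-node graph; F.sparse_matmul expects a chainer.utils.CooMatrix, so the dense adjacency is converted with chainer.utils.to_coo (all sizes here are hypothetical):

# Hypothetical usage: mean aggregation over neighbors plus a center transform.
import numpy as np
import chainer

layer = NodeAverageLink(v_in_size=8, out_size=8, residual=True)
vertex = np.random.randn(3, 8).astype(np.float32)
dense_adj = np.array([[0, 1, 1],
                      [1, 0, 0],
                      [1, 0, 0]], dtype=np.float32)
adj = chainer.utils.to_coo(dense_adj)
num_array = dense_adj.sum(axis=1, keepdims=True)  # neighbor counts, shape (3, 1)
out, _, _, _ = layer(vertex, None, adj, num_array)  # out: (3, 8)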
Code Example #22
    def __init__(self, D, K):
        super(VLADpooling, self).__init__()
        self.D = D
        self.K = K
        initializer = I._get_initializer(None)

        with self.init_scope():
            self.wb = L.Convolution2D(D, K, ksize=1, stride=1, pad=0)
            self.c  = Parameter(initializer, shape=(D, K))
Code Example #23
    def _initialize_params(self, t):
        xp = cuda.get_array_module(t)

        # Per-channel statistics over the batch and length axes.
        self.mean_t = xp.mean(t, axis=(0, 2))
        self.std_t = xp.sqrt(xp.var(t, axis=(0, 2)))
        g = 1 / self.std_t
        b = -self.mean_t / self.std_t

        # print("g <- {}, b <- {}".format(g.reshape((-1,)), b.reshape((-1,))))

        with self.init_scope():
            if not self.nobias:
                self.b = Parameter(b, b.shape)

            g_shape = (self.out_channels, 1) + (1, ) * len(self.ksize)
            self.g = Parameter(g.reshape(g_shape), g_shape)
Code Example #24
    def __init__(self, attn_size):
        super(Attn_module, self).__init__()

        with self.init_scope():
            self.attn_size = attn_size
            # "self.ux = Variable(self.xp.random.normal(0, 0.5, (self.attn_size, 1), dtype=float))
            self.ux = Parameter(
                initializer=self.xp.random.normal(0, 0.5, (self.attn_size,
                                                           1)).astype('f'))
            self.l = L.Linear(None, self.attn_size)
Code Example #25
 def __init__(self, in_channels, hidden_channel, initialW=None):
     super().__init__()
     ksize = (1, 1, 1)
     self.W_shape = (hidden_channel, in_channels) + ksize
     self.stride = 1
     self.pad = 0
     self.nobias = True
     self.b = None
     self.initialW = initialW
     with self.init_scope():
         self.W = Parameter(_get_initializer(self.initialW), self.W_shape)
Code Example #26
    def __init__(self, opt, ch):
        super().__init__()

        he_w = HeNormal()
        mid_ch = ch // opt.division_ch

        with self.init_scope():
            self.f_conv = define_conv(opt)(ch, mid_ch, ksize=1, initialW=he_w)
            self.g_conv = define_conv(opt)(ch, mid_ch, ksize=1, initialW=he_w)
            self.h_conv = define_conv(opt)(ch, mid_ch, ksize=1, initialW=he_w)
            self.v_conv = define_conv(opt)(mid_ch, ch, ksize=1, initialW=he_w)
            self.gamma = Parameter(initializer=0, shape=1, name='SA-gamma')
Code Example #27
    def __init__(self, in_shape, out_shape, nobias=False, initial_bias=None):
        """
        in_shape: input sample dimension
        out_shape: output sample dimension
        """
        super(NAC, self).__init__()
        with self.init_scope():
            self.in_shape = in_shape
            self.out_shape = out_shape

            W_initializer = initializers.GlorotUniform()
            self.W_ = Parameter(W_initializer, (out_shape, in_shape))
            M_initializer = initializers.GlorotUniform()
            self.M_ = Parameter(M_initializer, (out_shape, in_shape))
            if nobias:
                self.b = None
            else:
                if initial_bias is None:
                    initial_bias = 0
                bias_initializer = initializers._get_initializer(initial_bias)
                self.b = Parameter(bias_initializer, out_shape)
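
The snippet shows only the constructor. In the NAC formulation the effective weight is tanh(W_) * sigmoid(M_), which biases entries toward {-1, 0, 1}; a forward pass consistent with that (an assumption, since the original forward is not shown) would be:

    # Assumed NAC forward pass: y = x @ W.T + b with W = tanh(W_) * sigmoid(M_).
    def forward(self, x):
        W = F.tanh(self.W_) * F.sigmoid(self.M_)
        return F.linear(x, W, self.b)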
Code Example #28
 def __init__(self, in_shape, out_shape, nobias=False, initial_bias=None):
     """
     in_shape: input sample dimension
     out_shape: output sample dimension
     """
     super(NALU, self).__init__()
     with self.init_scope():
         self.in_shape = in_shape
         self.out_shape = out_shape
         
         G_initializer = initializers.GlorotUniform()
         self.G = Parameter(G_initializer, (out_shape, in_shape))
         self.nac = NAC(in_shape, out_shape)
         self.eps = 1e-5
         
         if nobias:
             self.b = None
         else:
             if initial_bias is None:
                 initial_bias = 0
             bias_initializer = initializers._get_initializer(initial_bias)
             self.b = Parameter(bias_initializer, out_shape)
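
As with NAC, only the constructor is shown. Following the NALU formulation, a gate blends the additive NAC path with a multiplicative path computed in log-space; a sketch of the missing forward under that assumption:

    # Assumed NALU forward pass; reuses self.nac for both paths.
    def forward(self, x):
        g = F.sigmoid(F.linear(x, self.G))                    # gate
        a = self.nac(x)                                       # additive path
        m = F.exp(self.nac(F.log(F.absolute(x) + self.eps)))  # multiplicative path
        y = g * a + (1 - g) * m
        if self.b is not None:
            y = y + self.b
        return y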
Code Example #29
    def __init__(self,
                 in_size,
                 out_size=None,
                 nobias=False,
                 initialW=None,
                 initial_bias=None):
        super(GraphConvolution, self).__init__()

        if out_size is None:
            in_size, out_size = None, in_size
        self.out_size = out_size

        with self.init_scope():
            W_initializer = initializers.HeUniform()
            self.W = Parameter(W_initializer, (in_size, out_size))
            if nobias:
                self.b = None
            else:
                if initial_bias is None:
                    initial_bias = 0
                bias_initializer = initializers._get_initializer(initial_bias)
                self.b = Parameter(bias_initializer, out_size)
Code Example #30
    def sample(self, model):
        xp = model.xp
        eps = self.eps  # step size
        tau = self.tau  # noise scale

        batch = self.initializer.initialize()
        batch = Parameter(xp.asarray(batch))
        for i in range(self.step):
            loss = model(batch)
            grad, = chainer.grad([loss], [batch])
            z = xp.random.randn(*batch.shape)
            # Langevin update: x <- x - (eps^2 / 2) * grad(loss) + eps * tau * z
            batch = batch - eps * eps * 0.5 * grad + eps * tau * z
        return batch.data
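
Each loop iteration above is an (unadjusted) Langevin-dynamics step: for small eps and tau = 1, the returned batch approximately samples from the density proportional to exp(-loss), using only the gradient of the model's scalar output.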